Index: stable/12/ObsoleteFiles.inc
===================================================================
--- stable/12/ObsoleteFiles.inc (revision 356464)
+++ stable/12/ObsoleteFiles.inc (revision 356465)
@@ -1,11245 +1,11481 @@
#
# $FreeBSD$
#
# This file lists old files (OLD_FILES), libraries (OLD_LIBS) and
# directories (OLD_DIRS) which should be removed at an update, with the
# most recently removed entries first (dated in a comment). Dynamic
# libraries are special-cased (OLD_LIBS). Static libraries and the generic
# links to the dynamic libraries (lib*.so) must be treated as normal files
# (OLD_FILES), unless you have a specific reason to make an exception.
#
# For a complete directory hierarchy, entries are sorted in depth-first
# order.
#
# The file is partitioned: OLD_FILES first, then OLD_LIBS and OLD_DIRS last.
#
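# For illustration, a removal entry set for a hypothetical retired
# utility "foo" (not a real entry) might look like:
# OLD_FILES+=usr/bin/foo
# OLD_FILES+=usr/lib/libfoo.a
# OLD_LIBS+=usr/lib/libfoo.so.1
# OLD_DIRS+=usr/share/foo
# Here the versioned dynamic library goes in OLD_LIBS, while the static
# library (and any lib*.so link) is listed as a normal OLD_FILES entry.
#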
# Before you commit changes to this file, please check whether any entries
# in tools/build/mk/OptionalObsoleteFiles.inc can be removed. The following
# command reports files that are listed more than once, but it ignores any
# architecture-specific conditionals, so you cannot blindly trust its
# output:
# ( grep '+=' /usr/src/ObsoleteFiles.inc | sort -u ; \
# grep '+=' /usr/src/tools/build/mk/OptionalObsoleteFiles.inc | sort -u) | \
# sort | uniq -d
#
# To find regular duplicates that do not depend on optional components, you
# can also use the following, which will not give you false positives:
# for t in `make -V TARGETS universe`; do
# __MAKE_CONF=/dev/null make -f Makefile.inc1 TARGET=$t \
# -V OLD_FILES -V OLD_LIBS -V OLD_DIRS check-old | \
# xargs -n1 | sort | uniq -d;
# done
#
# For optional components, you can use the following to see whether any
# entries in OptionalObsoleteFiles.inc have been obsoleted by
# ObsoleteFiles.inc:
# for o in tools/build/options/WITH*; do
# __MAKE_CONF=/dev/null make -f Makefile.inc1 -D${o##*/} \
# -V OLD_FILES -V OLD_LIBS -V OLD_DIRS check-old | \
# xargs -n1 | sort | uniq -d;
# done
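#
# To act on these lists on an installed system, the usual workflow (see
# build(7)) is roughly:
# make check-old        # report stale files, libraries and directories
# make delete-old       # remove stale files and directories
# make delete-old-libs  # remove stale libraries, once nothing uses them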
+# 20200107: new clang import which bumps version from 9.0.0 to 9.0.1.
+OLD_FILES+=usr/lib/clang/9.0.0/include/cuda_wrappers/algorithm
+OLD_FILES+=usr/lib/clang/9.0.0/include/cuda_wrappers/complex
+OLD_FILES+=usr/lib/clang/9.0.0/include/cuda_wrappers/new
+OLD_DIRS+=usr/lib/clang/9.0.0/include/cuda_wrappers
+OLD_FILES+=usr/lib/clang/9.0.0/include/openmp_wrappers/__clang_openmp_math.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/openmp_wrappers/__clang_openmp_math_declares.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/openmp_wrappers/cmath
+OLD_FILES+=usr/lib/clang/9.0.0/include/openmp_wrappers/math.h
+OLD_DIRS+=usr/lib/clang/9.0.0/include/openmp_wrappers
+OLD_FILES+=usr/lib/clang/9.0.0/include/ppc_wrappers/emmintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/ppc_wrappers/mm_malloc.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/ppc_wrappers/mmintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/ppc_wrappers/xmmintrin.h
+OLD_DIRS+=usr/lib/clang/9.0.0/include/ppc_wrappers
+OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/allocator_interface.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/asan_interface.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/common_interface_defs.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/coverage_interface.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/dfsan_interface.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/hwasan_interface.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/linux_syscall_hooks.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/lsan_interface.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/msan_interface.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/netbsd_syscall_hooks.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/scudo_interface.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/tsan_interface.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/tsan_interface_atomic.h
+OLD_DIRS+=usr/lib/clang/9.0.0/include/sanitizer
+OLD_FILES+=usr/lib/clang/9.0.0/include/__clang_cuda_builtin_vars.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/__clang_cuda_cmath.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/__clang_cuda_complex_builtins.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/__clang_cuda_device_functions.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/__clang_cuda_intrinsics.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/__clang_cuda_libdevice_declares.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/__clang_cuda_math_forward_declares.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/__clang_cuda_runtime_wrapper.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/__stddef_max_align_t.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/__wmmintrin_aes.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/__wmmintrin_pclmul.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/adxintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/altivec.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/ammintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/arm64intr.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/arm_acle.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/arm_fp16.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/arm_neon.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/armintr.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/avx2intrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/avx512bf16intrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/avx512bitalgintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/avx512bwintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/avx512cdintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/avx512dqintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/avx512erintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/avx512fintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/avx512ifmaintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/avx512ifmavlintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/avx512pfintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vbmi2intrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vbmiintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vbmivlintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vlbf16intrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vlbitalgintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vlbwintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vlcdintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vldqintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vlintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vlvbmi2intrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vlvnniintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vlvp2intersectintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vnniintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vp2intersectintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vpopcntdqintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vpopcntdqvlintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/avxintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/bmi2intrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/bmiintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/cetintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/cldemoteintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/clflushoptintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/clwbintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/clzerointrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/cpuid.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/emmintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/enqcmdintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/f16cintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/fma4intrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/fmaintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/fxsrintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/gfniintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/htmintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/htmxlintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/ia32intrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/immintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/invpcidintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/lwpintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/lzcntintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/mm3dnow.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/mm_malloc.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/mmintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/module.modulemap
+OLD_FILES+=usr/lib/clang/9.0.0/include/movdirintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/msa.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/mwaitxintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/nmmintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/opencl-c-base.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/opencl-c.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/pconfigintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/pkuintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/pmmintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/popcntintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/prfchwintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/ptwriteintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/rdseedintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/rtmintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/s390intrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/sgxintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/shaintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/smmintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/tbmintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/tmmintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/vadefs.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/vaesintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/vecintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/vpclmulqdqintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/waitpkgintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/wbnoinvdintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/wmmintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/x86intrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/xmmintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/xopintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/xsavecintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/xsaveintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/xsaveoptintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/xsavesintrin.h
+OLD_FILES+=usr/lib/clang/9.0.0/include/xtestintrin.h
+OLD_DIRS+=usr/lib/clang/9.0.0/include
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-aarch64.so
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-arm.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-arm.so
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-armhf.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-armhf.so
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-i386.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-i386.so
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-preinit-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-preinit-arm.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-preinit-armhf.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-preinit-i386.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-x86_64.so
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan_cxx-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan_cxx-arm.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan_cxx-armhf.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.cfi-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.cfi-arm.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.cfi-armhf.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.cfi-i386.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.cfi-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.cfi_diag-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.cfi_diag-arm.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.cfi_diag-armhf.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.cfi_diag-i386.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.cfi_diag-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.dd-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.dd-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.fuzzer-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.fuzzer-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.fuzzer_no_main-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.fuzzer_no_main-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.msan-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.msan-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.msan_cxx-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.msan_cxx-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.profile-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.profile-arm.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.profile-armhf.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.profile-i386.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.profile-powerpc.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.profile-powerpc64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.profile-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.safestack-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.safestack-i386.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.safestack-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.stats-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.stats-arm.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.stats-armhf.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.stats-i386.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.stats-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.stats_client-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.stats_client-arm.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.stats_client-armhf.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.stats_client-i386.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.stats_client-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.tsan-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.tsan-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.tsan_cxx-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.tsan_cxx-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_minimal-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_minimal-arm.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_minimal-armhf.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_minimal-i386.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_minimal-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_standalone-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_standalone-arm.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_standalone-armhf.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-arm.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-armhf.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-arm.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-armhf.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-basic-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-basic-arm.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-basic-armhf.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-basic-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-fdr-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-fdr-arm.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-fdr-armhf.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-fdr-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-profiling-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-profiling-arm.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-profiling-armhf.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-profiling-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-x86_64.a
+OLD_DIRS+=usr/lib/clang/9.0.0/lib/freebsd
+OLD_DIRS+=usr/lib/clang/9.0.0/lib
+OLD_DIRS+=usr/lib/clang/9.0.0
# 20200107: new clang import which bumps version from 8.0.1 to 9.0.0.
OLD_FILES+=usr/lib/clang/8.0.1/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/8.0.1/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/8.0.1/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/8.0.1/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/8.0.1/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/8.0.1/include/sanitizer/esan_interface.h
OLD_FILES+=usr/lib/clang/8.0.1/include/sanitizer/hwasan_interface.h
OLD_FILES+=usr/lib/clang/8.0.1/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/8.0.1/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/8.0.1/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/8.0.1/include/sanitizer/netbsd_syscall_hooks.h
OLD_FILES+=usr/lib/clang/8.0.1/include/sanitizer/scudo_interface.h
OLD_FILES+=usr/lib/clang/8.0.1/include/sanitizer/tsan_interface.h
OLD_FILES+=usr/lib/clang/8.0.1/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/8.0.1/include/sanitizer
OLD_FILES+=usr/lib/clang/8.0.1/include/__clang_cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/8.0.1/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/8.0.1/include/__clang_cuda_complex_builtins.h
OLD_FILES+=usr/lib/clang/8.0.1/include/__clang_cuda_device_functions.h
OLD_FILES+=usr/lib/clang/8.0.1/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/8.0.1/include/__clang_cuda_libdevice_declares.h
OLD_FILES+=usr/lib/clang/8.0.1/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/8.0.1/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/8.0.1/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/8.0.1/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/8.0.1/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/8.0.1/include/adxintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/altivec.h
OLD_FILES+=usr/lib/clang/8.0.1/include/ammintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/arm64intr.h
OLD_FILES+=usr/lib/clang/8.0.1/include/arm_acle.h
OLD_FILES+=usr/lib/clang/8.0.1/include/arm_fp16.h
OLD_FILES+=usr/lib/clang/8.0.1/include/arm_neon.h
OLD_FILES+=usr/lib/clang/8.0.1/include/armintr.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512bitalgintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512vbmi2intrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512vlbitalgintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512vlvbmi2intrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512vlvnniintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512vnniintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512vpopcntdqintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avx512vpopcntdqvlintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/avxintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/cetintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/cldemoteintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/clwbintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/clzerointrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/cpuid.h
OLD_FILES+=usr/lib/clang/8.0.1/include/emmintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/gfniintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/htmintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/immintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/invpcidintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/lwpintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/8.0.1/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/8.0.1/include/mmintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/module.modulemap
OLD_FILES+=usr/lib/clang/8.0.1/include/movdirintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/msa.h
OLD_FILES+=usr/lib/clang/8.0.1/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/opencl-c.h
OLD_FILES+=usr/lib/clang/8.0.1/include/pconfigintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/ptwriteintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/s390intrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/sgxintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/shaintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/smmintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/vadefs.h
OLD_FILES+=usr/lib/clang/8.0.1/include/vaesintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/vecintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/vpclmulqdqintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/waitpkgintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/wbnoinvdintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/x86intrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/xopintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/8.0.1/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/8.0.1/include
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.msan-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.msan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.profile-armhf.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.tsan-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.tsan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.ubsan_minimal-i386.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.ubsan_minimal-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/8.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/8.0.1/lib/freebsd
OLD_DIRS+=usr/lib/clang/8.0.1/lib
OLD_DIRS+=usr/lib/clang/8.0.1
# 20200107: libc++ 9.0.0 removed some experimental files
OLD_FILES+=usr/include/c++/v1/experimental/any
OLD_FILES+=usr/include/c++/v1/experimental/chrono
OLD_FILES+=usr/include/c++/v1/experimental/numeric
OLD_FILES+=usr/include/c++/v1/experimental/optional
OLD_FILES+=usr/include/c++/v1/experimental/ratio
OLD_FILES+=usr/include/c++/v1/experimental/string_view
OLD_FILES+=usr/include/c++/v1/experimental/system_error
OLD_FILES+=usr/include/c++/v1/experimental/tuple
OLD_FILES+=usr/lib/libc++fs.a
OLD_FILES+=usr/lib32/libc++fs.a
# 20191003: Remove useless ZFS tests
OLD_FILES+=usr/tests/sys/cddl/zfs/tests/cli_root/zpool_create/zpool_create_013_neg.ksh
OLD_FILES+=usr/tests/sys/cddl/zfs/tests/cli_root/zpool_create/zpool_create_014_neg.ksh
OLD_FILES+=usr/tests/sys/cddl/zfs/tests/cli_root/zpool_create/zpool_create_016_pos.ksh
# 20190811: sys/pwm.h renamed to dev/pwmc.h and pwm(9) removed
OLD_FILES+=usr/include/sys/pwm.h usr/share/man/man9/pwm.9
# 20190723: new clang import which bumps version from 8.0.0 to 8.0.1.
OLD_FILES+=usr/lib/clang/8.0.0/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/8.0.0/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/8.0.0/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/8.0.0/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/8.0.0/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/8.0.0/include/sanitizer/esan_interface.h
OLD_FILES+=usr/lib/clang/8.0.0/include/sanitizer/hwasan_interface.h
OLD_FILES+=usr/lib/clang/8.0.0/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/8.0.0/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/8.0.0/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/8.0.0/include/sanitizer/netbsd_syscall_hooks.h
OLD_FILES+=usr/lib/clang/8.0.0/include/sanitizer/scudo_interface.h
OLD_FILES+=usr/lib/clang/8.0.0/include/sanitizer/tsan_interface.h
OLD_FILES+=usr/lib/clang/8.0.0/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/8.0.0/include/sanitizer
OLD_FILES+=usr/lib/clang/8.0.0/include/__clang_cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/8.0.0/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/8.0.0/include/__clang_cuda_complex_builtins.h
OLD_FILES+=usr/lib/clang/8.0.0/include/__clang_cuda_device_functions.h
OLD_FILES+=usr/lib/clang/8.0.0/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/8.0.0/include/__clang_cuda_libdevice_declares.h
OLD_FILES+=usr/lib/clang/8.0.0/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/8.0.0/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/8.0.0/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/8.0.0/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/8.0.0/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/8.0.0/include/adxintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/altivec.h
OLD_FILES+=usr/lib/clang/8.0.0/include/ammintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/arm64intr.h
OLD_FILES+=usr/lib/clang/8.0.0/include/arm_acle.h
OLD_FILES+=usr/lib/clang/8.0.0/include/arm_fp16.h
OLD_FILES+=usr/lib/clang/8.0.0/include/arm_neon.h
OLD_FILES+=usr/lib/clang/8.0.0/include/armintr.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512bitalgintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512vbmi2intrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512vlbitalgintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512vlvbmi2intrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512vlvnniintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512vnniintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512vpopcntdqintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avx512vpopcntdqvlintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/avxintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/cetintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/cldemoteintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/clwbintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/clzerointrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/cpuid.h
OLD_FILES+=usr/lib/clang/8.0.0/include/emmintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/gfniintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/htmintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/immintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/invpcidintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/lwpintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/8.0.0/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/8.0.0/include/mmintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/module.modulemap
OLD_FILES+=usr/lib/clang/8.0.0/include/movdirintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/msa.h
OLD_FILES+=usr/lib/clang/8.0.0/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/opencl-c.h
OLD_FILES+=usr/lib/clang/8.0.0/include/pconfigintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/ptwriteintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/s390intrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/sgxintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/shaintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/smmintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/vadefs.h
OLD_FILES+=usr/lib/clang/8.0.0/include/vaesintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/vecintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/vpclmulqdqintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/waitpkgintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/wbnoinvdintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/x86intrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/xopintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/8.0.0/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/8.0.0/include
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.msan-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.msan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.profile-armhf.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.tsan-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.tsan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.ubsan_minimal-i386.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.ubsan_minimal-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/8.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/8.0.0/lib/freebsd
OLD_DIRS+=usr/lib/clang/8.0.0/lib
OLD_DIRS+=usr/lib/clang/8.0.0
# 20190509: tests/sys/opencrypto requires the net/py-dpkt package.
OLD_FILES+=usr/tests/sys/opencrypto/dpkt.py
OLD_FILES+=usr/tests/sys/opencrypto/dpkt.pyc
# 20190412: new libc++ import which bumps version from 7.0.1 to 8.0.0.
OLD_FILES+=usr/include/c++/v1/experimental/dynarray
# 20190412: new clang import which bumps version from 7.0.1 to 8.0.0.
OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/esan_interface.h
OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/hwasan_interface.h
OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/netbsd_syscall_hooks.h
OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/scudo_interface.h
OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/tsan_interface.h
OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/7.0.1/include/sanitizer
OLD_FILES+=usr/lib/clang/7.0.1/include/__clang_cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/7.0.1/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/7.0.1/include/__clang_cuda_complex_builtins.h
OLD_FILES+=usr/lib/clang/7.0.1/include/__clang_cuda_device_functions.h
OLD_FILES+=usr/lib/clang/7.0.1/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/7.0.1/include/__clang_cuda_libdevice_declares.h
OLD_FILES+=usr/lib/clang/7.0.1/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/7.0.1/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/7.0.1/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/7.0.1/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/7.0.1/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/7.0.1/include/adxintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/altivec.h
OLD_FILES+=usr/lib/clang/7.0.1/include/ammintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/arm64intr.h
OLD_FILES+=usr/lib/clang/7.0.1/include/arm_acle.h
OLD_FILES+=usr/lib/clang/7.0.1/include/arm_fp16.h
OLD_FILES+=usr/lib/clang/7.0.1/include/arm_neon.h
OLD_FILES+=usr/lib/clang/7.0.1/include/armintr.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512bitalgintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vbmi2intrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vlbitalgintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vlvbmi2intrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vlvnniintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vnniintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vpopcntdqintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vpopcntdqvlintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/avxintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/cetintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/cldemoteintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/clwbintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/clzerointrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/cpuid.h
OLD_FILES+=usr/lib/clang/7.0.1/include/emmintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/gfniintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/htmintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/immintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/invpcidintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/lwpintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/7.0.1/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/7.0.1/include/mmintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/module.modulemap
OLD_FILES+=usr/lib/clang/7.0.1/include/movdirintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/msa.h
OLD_FILES+=usr/lib/clang/7.0.1/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/opencl-c.h
OLD_FILES+=usr/lib/clang/7.0.1/include/pconfigintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/ptwriteintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/s390intrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/sgxintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/shaintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/smmintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/vadefs.h
OLD_FILES+=usr/lib/clang/7.0.1/include/vaesintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/vecintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/vpclmulqdqintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/waitpkgintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/wbnoinvdintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/x86intrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/xopintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/7.0.1/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/7.0.1/include
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.msan-x86_64.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.msan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.profile-armhf.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.tsan-x86_64.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.tsan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.ubsan_minimal-i386.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.ubsan_minimal-x86_64.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/7.0.1/lib/freebsd
OLD_DIRS+=usr/lib/clang/7.0.1/lib
OLD_DIRS+=usr/lib/clang/7.0.1
# 20190216: new clang import which bumps version from 6.0.1 to 7.0.1.
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/esan_interface.h
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/hwasan_interface.h
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/scudo_interface.h
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/tsan_interface.h
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/6.0.1/include/sanitizer
OLD_FILES+=usr/lib/clang/6.0.1/include/__clang_cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/6.0.1/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/6.0.1/include/__clang_cuda_complex_builtins.h
OLD_FILES+=usr/lib/clang/6.0.1/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/6.0.1/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/6.0.1/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/6.0.1/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/6.0.1/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/6.0.1/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/6.0.1/include/adxintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/altivec.h
OLD_FILES+=usr/lib/clang/6.0.1/include/ammintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/arm64intr.h
OLD_FILES+=usr/lib/clang/6.0.1/include/arm_acle.h
OLD_FILES+=usr/lib/clang/6.0.1/include/arm_neon.h
OLD_FILES+=usr/lib/clang/6.0.1/include/armintr.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512bitalgintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vbmi2intrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vlbitalgintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vlvbmi2intrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vlvnniintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vnniintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vpopcntdqintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vpopcntdqvlintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avxintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/cetintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/clwbintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/clzerointrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/cpuid.h
OLD_FILES+=usr/lib/clang/6.0.1/include/emmintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/gfniintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/htmintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/immintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/lwpintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/6.0.1/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/6.0.1/include/mmintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/module.modulemap
OLD_FILES+=usr/lib/clang/6.0.1/include/msa.h
OLD_FILES+=usr/lib/clang/6.0.1/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/opencl-c.h
OLD_FILES+=usr/lib/clang/6.0.1/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/s390intrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/shaintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/smmintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/vadefs.h
OLD_FILES+=usr/lib/clang/6.0.1/include/vaesintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/vecintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/vpclmulqdqintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/x86intrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/xopintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/6.0.1/include
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.profile-armhf.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.tsan-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.tsan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.ubsan_minimal-i386.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.ubsan_minimal-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/6.0.1/lib/freebsd
OLD_DIRS+=usr/lib/clang/6.0.1/lib
OLD_DIRS+=usr/lib/clang/6.0.1
# 20181116: Rename test file.
OLD_FILES+=usr/tests/sys/netinet/reuseport_lb
# 20181112: Cleanup old libcap_dns.
OLD_LIBS+=lib/casper/libcap_dns.so.1
OLD_LIBS+=usr/lib32/libcap_dns.so.1
# 20181030: malloc_domain(9) KPI change
OLD_FILES+=usr/share/man/man9/malloc_domain.9.gz
# 20181025: OpenSSL libraries version bump to avoid conflict with ports
OLD_LIBS+=lib/libcrypto.so.9
OLD_LIBS+=usr/lib/libssl.so.9
OLD_LIBS+=usr/lib32/libcrypto.so.9
OLD_LIBS+=usr/lib32/libssl.so.9
# 20181015: Stale libcasper(3) files following r329452
OLD_LIBS+=lib/casper/libcap_sysctl.so.0
OLD_LIBS+=lib/casper/libcap_grp.so.0
OLD_LIBS+=lib/casper/libcap_pwd.so.0
OLD_LIBS+=lib/casper/libcap_random.so.0
OLD_LIBS+=lib/casper/libcap_dns.so.0
OLD_LIBS+=lib/casper/libcap_syslog.so.0
OLD_LIBS+=usr/lib32/libcap_sysctl.so.0
OLD_LIBS+=usr/lib32/libcap_grp.so.0
OLD_LIBS+=usr/lib32/libcap_pwd.so.0
OLD_LIBS+=usr/lib32/libcap_random.so.0
OLD_LIBS+=usr/lib32/libcap_dns.so.0
OLD_LIBS+=usr/lib32/libcap_syslog.so.0
# 20181012: rename of ixlv(4) to iavf(4)
OLD_FILES+=usr/share/man/man4/ixlv.4.gz
# 20181009: OpenSSL 1.1.1
OLD_FILES+=usr/include/openssl/des_old.h
OLD_FILES+=usr/include/openssl/dso.h
OLD_FILES+=usr/include/openssl/krb5_asn.h
OLD_FILES+=usr/include/openssl/kssl.h
OLD_FILES+=usr/include/openssl/pqueue.h
OLD_FILES+=usr/include/openssl/ssl23.h
OLD_FILES+=usr/include/openssl/ui_compat.h
OLD_FILES+=usr/lib/debug/usr/lib/engines/lib4758cca.so.debug
OLD_FILES+=usr/lib/debug/usr/lib/engines/libaep.so.debug
OLD_FILES+=usr/lib/debug/usr/lib/engines/libatalla.so.debug
OLD_FILES+=usr/lib/debug/usr/lib/engines/libcapi.so.debug
OLD_FILES+=usr/lib/debug/usr/lib/engines/libchil.so.debug
OLD_FILES+=usr/lib/debug/usr/lib/engines/libcswift.so.debug
OLD_FILES+=usr/lib/debug/usr/lib/engines/libgost.so.debug
OLD_FILES+=usr/lib/debug/usr/lib/engines/libnuron.so.debug
OLD_FILES+=usr/lib/debug/usr/lib/engines/libsureware.so.debug
OLD_FILES+=usr/lib/debug/usr/lib/engines/libubsec.so.debug
OLD_FILES+=usr/share/openssl/man/man1/dss1.1.gz
OLD_FILES+=usr/share/openssl/man/man1/md2.1.gz
OLD_FILES+=usr/share/openssl/man/man1/md4.1.gz
OLD_FILES+=usr/share/openssl/man/man1/md5.1.gz
OLD_FILES+=usr/share/openssl/man/man1/mdc2.1.gz
OLD_FILES+=usr/share/openssl/man/man1/ripemd160.1.gz
OLD_FILES+=usr/share/openssl/man/man1/sha.1.gz
OLD_FILES+=usr/share/openssl/man/man1/sha1.1.gz
OLD_FILES+=usr/share/openssl/man/man1/sha224.1.gz
OLD_FILES+=usr/share/openssl/man/man1/sha256.1.gz
OLD_FILES+=usr/share/openssl/man/man1/sha384.1.gz
OLD_FILES+=usr/share/openssl/man/man1/sha512.1.gz
OLD_FILES+=usr/share/openssl/man/man1/x509v3_config.1.gz
OLD_FILES+=usr/share/openssl/man/man3/ASN1_STRING_length_set.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BIO_get_conn_int_port.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BIO_get_conn_ip.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BIO_set.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BIO_set_conn_int_port.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BIO_set_conn_ip.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BN_BLINDING_get_thread_id.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BN_BLINDING_set_thread_id.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BN_BLINDING_thread_id.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BN_CTX_init.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BN_MONT_CTX_init.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BN_RECP_CTX_init.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BN_init.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BUF_memdup.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BUF_strdup.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BUF_strlcat.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BUF_strlcpy.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BUF_strndup.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CMS_set1_signer_cert.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_THREADID_cmp.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_THREADID_cpy.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_THREADID_current.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_THREADID_get_callback.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_THREADID_hash.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_THREADID_set_callback.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_destroy_dynlockid.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_get_new_dynlockid.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_lock.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_num_locks.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_set_dynlock_create_callback.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_set_dynlock_destroy_callback.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_set_dynlock_lock_callback.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_set_locking_callback.3.gz
OLD_FILES+=usr/share/openssl/man/man3/DES_ede3_cbcm_encrypt.3.gz
OLD_FILES+=usr/share/openssl/man/man3/DES_enc_read.3.gz
OLD_FILES+=usr/share/openssl/man/man3/DES_enc_write.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EC_KEY_get_key_method_data.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EC_KEY_insert_key_method_data.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EC_POINT_set_Jprojective_coordinates.3.gz
OLD_FILES+=usr/share/openssl/man/man3/ERR_load_UI_strings.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_CIPHER_CTX_cleanup.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_CIPHER_CTX_init.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_MAX_MD_SIZE.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_MD_CTX_cleanup.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_MD_CTX_create.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_MD_CTX_destroy.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_MD_CTX_init.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_PKEVP_PKEY_CTX_set_app_data.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_PKEY_CTX_set_rsa_rsa_keygen_bits.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_PKEY_get_default_digest.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_dss.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_dss1.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_sha.3.gz
OLD_FILES+=usr/share/openssl/man/man3/HMAC_CTX_cleanup.3.gz
OLD_FILES+=usr/share/openssl/man/man3/HMAC_CTX_init.3.gz
OLD_FILES+=usr/share/openssl/man/man3/HMAC_cleanup.3.gz
OLD_FILES+=usr/share/openssl/man/man3/OPENSSL_ia32cap_loc.3.gz
OLD_FILES+=usr/share/openssl/man/man3/PEM.3.gz
OLD_FILES+=usr/share/openssl/man/man3/RAND_SSLeay.3.gz
OLD_FILES+=usr/share/openssl/man/man3/RSA_PKCS1_SSLeay.3.gz
OLD_FILES+=usr/share/openssl/man/man3/RSA_null_method.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_CTX_get_ex_new_index.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_CTX_need_tmp_rsa.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_CTX_set_custom_cli_ext.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_CTX_set_default_read_ahead.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_CTX_set_ecdh_auto.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_CTX_set_tmp_rsa.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_CTX_set_tmp_rsa_callback.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_SESSION_get_ex_new_index.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_add_session.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_flush_sessions.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_get_accept_state.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_get_ex_new_index.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_get_msg_callback_arg.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_need_tmp_rsa.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_remove_session.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_set_ecdh_auto.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_set_tmp_rsa.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_set_tmp_rsa_callback.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSLeay.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSLeay_add_ssl_algorithms.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSLeay_version.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSLv2_client_method.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSLv2_method.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSLv2_server_method.3.gz
OLD_FILES+=usr/share/openssl/man/man3/X509_STORE_CTX_set_chain.3.gz
OLD_FILES+=usr/share/openssl/man/man3/X509_STORE_CTX_trusted_stack.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bio.3.gz
OLD_FILES+=usr/share/openssl/man/man3/blowfish.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_add_words.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_check_top.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_cmp_words.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_div_words.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_dump.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_expand.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_expand2.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_fix_top.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_internal.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_mul_add_words.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_mul_comba4.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_mul_comba8.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_mul_high.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_mul_low_normal.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_mul_low_recursive.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_mul_normal.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_mul_part_recursive.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_mul_recursive.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_mul_words.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_print.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_set_high.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_set_low.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_set_max.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_sqr_comba4.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_sqr_comba8.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_sqr_normal.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_sqr_recursive.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_sqr_words.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_sub_words.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_wexpand.3.gz
OLD_FILES+=usr/share/openssl/man/man3/buffer.3.gz
OLD_FILES+=usr/share/openssl/man/man3/crypto.3.gz
OLD_FILES+=usr/share/openssl/man/man3/d2i_ECPKParameters_bio.3.gz
OLD_FILES+=usr/share/openssl/man/man3/d2i_ECPKParameters_fp.3.gz
OLD_FILES+=usr/share/openssl/man/man3/d2i_ECPrivate_key.3.gz
OLD_FILES+=usr/share/openssl/man/man3/d2i_Netscape_RSA.3.gz
OLD_FILES+=usr/share/openssl/man/man3/d2i_PKCS8PrivateKey.3.gz
OLD_FILES+=usr/share/openssl/man/man3/d2i_Private_key.3.gz
OLD_FILES+=usr/share/openssl/man/man3/d2i_X509_bio.3.gz
OLD_FILES+=usr/share/openssl/man/man3/d2i_X509_fp.3.gz
OLD_FILES+=usr/share/openssl/man/man3/des.3.gz
OLD_FILES+=usr/share/openssl/man/man3/des_read_2passwords.3.gz
OLD_FILES+=usr/share/openssl/man/man3/des_read_password.3.gz
OLD_FILES+=usr/share/openssl/man/man3/des_read_pw.3.gz
OLD_FILES+=usr/share/openssl/man/man3/des_read_pw_string.3.gz
OLD_FILES+=usr/share/openssl/man/man3/dh.3.gz
OLD_FILES+=usr/share/openssl/man/man3/dsa.3.gz
OLD_FILES+=usr/share/openssl/man/man3/ec.3.gz
OLD_FILES+=usr/share/openssl/man/man3/ecdsa.3.gz
OLD_FILES+=usr/share/openssl/man/man3/engine.3.gz
OLD_FILES+=usr/share/openssl/man/man3/err.3.gz
OLD_FILES+=usr/share/openssl/man/man3/evp.3.gz
OLD_FILES+=usr/share/openssl/man/man3/hmac.3.gz
OLD_FILES+=usr/share/openssl/man/man3/i2d_ECPKParameters_bio.3.gz
OLD_FILES+=usr/share/openssl/man/man3/i2d_ECPKParameters_fp.3.gz
OLD_FILES+=usr/share/openssl/man/man3/i2d_Netscape_RSA.3.gz
OLD_FILES+=usr/share/openssl/man/man3/i2d_X509_bio.3.gz
OLD_FILES+=usr/share/openssl/man/man3/i2d_X509_fp.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_delete.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_doall.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_doall_arg.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_error.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_free.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_insert.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_new.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_node_stats.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_node_stats_bio.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_node_usage_stats.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_node_usage_stats_bio.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_retrieve.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_stats.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_stats_bio.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lhash.3.gz
OLD_FILES+=usr/share/openssl/man/man3/md5.3.gz
OLD_FILES+=usr/share/openssl/man/man3/mdc2.3.gz
OLD_FILES+=usr/share/openssl/man/man3/pem.3.gz
OLD_FILES+=usr/share/openssl/man/man3/rand.3.gz
OLD_FILES+=usr/share/openssl/man/man3/rc4.3.gz
OLD_FILES+=usr/share/openssl/man/man3/ripemd.3.gz
OLD_FILES+=usr/share/openssl/man/man3/rsa.3.gz
OLD_FILES+=usr/share/openssl/man/man3/sha.3.gz
OLD_FILES+=usr/share/openssl/man/man3/ssl.3.gz
OLD_FILES+=usr/share/openssl/man/man3/threads.3.gz
OLD_FILES+=usr/share/openssl/man/man3/ui.3.gz
OLD_FILES+=usr/share/openssl/man/man3/ui_compat.3.gz
OLD_FILES+=usr/share/openssl/man/man3/x509.3.gz
OLD_LIBS+=lib/libcrypto.so.8
OLD_LIBS+=usr/lib/engines/lib4758cca.so
OLD_LIBS+=usr/lib/engines/libaep.so
OLD_LIBS+=usr/lib/engines/libatalla.so
OLD_LIBS+=usr/lib/engines/libcapi.so
OLD_LIBS+=usr/lib/engines/libchil.so
OLD_LIBS+=usr/lib/engines/libcswift.so
OLD_LIBS+=usr/lib/engines/libgost.so
OLD_LIBS+=usr/lib/engines/libnuron.so
OLD_LIBS+=usr/lib/engines/libsureware.so
OLD_LIBS+=usr/lib/engines/libubsec.so
OLD_LIBS+=usr/lib/libssl.so.8
OLD_LIBS+=usr/lib32/libcrypto.so.8
OLD_LIBS+=usr/lib32/lib4758cca.so
OLD_LIBS+=usr/lib32/libaep.so
OLD_LIBS+=usr/lib32/libatalla.so
OLD_LIBS+=usr/lib32/libcapi.so
OLD_LIBS+=usr/lib32/libchil.so
OLD_LIBS+=usr/lib32/libcswift.so
OLD_LIBS+=usr/lib32/libgost.so
OLD_LIBS+=usr/lib32/libnuron.so
OLD_LIBS+=usr/lib32/libsureware.so
OLD_LIBS+=usr/lib32/libubsec.so
OLD_LIBS+=usr/lib32/libssl.so.8
# 20180824: libbe(3) SHLIBDIR fixed to reflect correct location
OLD_LIBS+=usr/lib/libbe.so.1
# 20180819: Remove deprecated arc4random(3) stir/addrandom interfaces
OLD_FILES+=usr/share/man/man3/arc4random_addrandom.3.gz
OLD_FILES+=usr/share/man/man3/arc4random_stir.3.gz
# 20180819: send-pr(1) placeholder removal
OLD_FILES+=usr/bin/send-pr
# 20180725: Cleanup old libcasper.so.0
OLD_LIBS+=lib/libcasper.so.0
OLD_LIBS+=usr/lib32/libcasper.so.0
# 20180722: indent(1) option renamed, test files follow
OLD_FILES+=usr/bin/indent/tests/nsac.0
OLD_FILES+=usr/bin/indent/tests/nsac.0.pro
OLD_FILES+=usr/bin/indent/tests/nsac.0.stdout
OLD_FILES+=usr/bin/indent/tests/sac.0
OLD_FILES+=usr/bin/indent/tests/sac.0.pro
OLD_FILES+=usr/bin/indent/tests/sac.0.stdout
# 20180721: move of libmlx5.so.1 and libibverbs.so.1
OLD_LIBS+=usr/lib/libmlx5.so.1
OLD_LIBS+=usr/lib/libibverbs.so.1
# 20180710: old numa cleanup
OLD_FILES+=usr/include/sys/numa.h
OLD_FILES+=usr/share/man/man2/numa_getaffinity.2.gz
OLD_FILES+=usr/share/man/man2/numa_setaffinity.2.gz
OLD_FILES+=usr/share/man/man1/numactl.1.gz
OLD_FILES+=usr/bin/numactl
# 20180630: new clang import which bumps version from 6.0.0 to 6.0.1.
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/esan_interface.h
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/hwasan_interface.h
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/scudo_interface.h
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/tsan_interface.h
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/6.0.0/include/sanitizer
OLD_FILES+=usr/lib/clang/6.0.0/include/__clang_cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/6.0.0/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/6.0.0/include/__clang_cuda_complex_builtins.h
OLD_FILES+=usr/lib/clang/6.0.0/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/6.0.0/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/6.0.0/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/6.0.0/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/6.0.0/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/6.0.0/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/6.0.0/include/adxintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/altivec.h
OLD_FILES+=usr/lib/clang/6.0.0/include/ammintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/arm64intr.h
OLD_FILES+=usr/lib/clang/6.0.0/include/arm_acle.h
OLD_FILES+=usr/lib/clang/6.0.0/include/arm_neon.h
OLD_FILES+=usr/lib/clang/6.0.0/include/armintr.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512bitalgintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vbmi2intrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vlbitalgintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vlvbmi2intrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vlvnniintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vnniintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vpopcntdqintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vpopcntdqvlintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avxintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/cetintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/clwbintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/clzerointrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/cpuid.h
OLD_FILES+=usr/lib/clang/6.0.0/include/emmintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/gfniintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/htmintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/immintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/lwpintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/6.0.0/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/6.0.0/include/mmintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/module.modulemap
OLD_FILES+=usr/lib/clang/6.0.0/include/msa.h
OLD_FILES+=usr/lib/clang/6.0.0/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/opencl-c.h
OLD_FILES+=usr/lib/clang/6.0.0/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/s390intrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/shaintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/smmintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/vadefs.h
OLD_FILES+=usr/lib/clang/6.0.0/include/vaesintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/vecintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/vpclmulqdqintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/x86intrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/xopintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/6.0.0/include
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.profile-armhf.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.tsan-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.tsan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.ubsan_minimal-i386.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.ubsan_minimal-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/6.0.0/lib/freebsd
OLD_DIRS+=usr/lib/clang/6.0.0/lib
OLD_DIRS+=usr/lib/clang/6.0.0
# 20180615: asf(8) removed
OLD_FILES+=usr/sbin/asf
OLD_FILES+=usr/share/man/man8/asf.8.gz
# 20180609: obsolete libc++ files missed from the 5.0.0 import
OLD_FILES+=usr/include/c++/v1/__refstring
OLD_FILES+=usr/include/c++/v1/__undef_min_max
OLD_FILES+=usr/include/c++/v1/tr1/__refstring
OLD_FILES+=usr/include/c++/v1/tr1/__undef_min_max
# 20180607: remove nls support from grep
OLD_FILES+=usr/share/nls/pt_BR.ISO8859-1/grep.cat
OLD_FILES+=usr/share/nls/hu_HU.ISO8859-2/grep.cat
OLD_FILES+=usr/share/nls/ja_JP.SJIS/grep.cat
OLD_FILES+=usr/share/nls/ja_JP.eucJP/grep.cat
OLD_FILES+=usr/share/nls/gl_ES.ISO8859-1/grep.cat
OLD_FILES+=usr/share/nls/zh_CN.UTF-8/grep.cat
OLD_FILES+=usr/share/nls/es_ES.ISO8859-1/grep.cat
OLD_FILES+=usr/share/nls/ru_RU.KOI8-R/grep.cat
OLD_FILES+=usr/share/nls/uk_UA.UTF-8/grep.cat
OLD_FILES+=usr/share/nls/ja_JP.UTF-8/grep.cat
# 20180528: libpcap update removed header file
OLD_FILES+=usr/include/pcap/export-defs.h
# 20180517: retire vxge
OLD_FILES+=usr/share/man/man4/if_vxge.4.gz
OLD_FILES+=usr/share/man/man4/vxge.4.gz
# 20180512: Rename Unbound tools
OLD_FILES+=usr/sbin/unbound
OLD_FILES+=usr/sbin/unbound-anchor
OLD_FILES+=usr/sbin/unbound-checkconf
OLD_FILES+=usr/sbin/unbound-control
OLD_FILES+=usr/share/man/man5/unbound.conf.5.gz
OLD_FILES+=usr/share/man/man8/unbound-anchor.8.gz
OLD_FILES+=usr/share/man/man8/unbound-checkconf.8.gz
OLD_FILES+=usr/share/man/man8/unbound-control.8.gz
OLD_FILES+=usr/share/man/man8/unbound.8.gz
# 20180508: retire nxge
OLD_FILES+=usr/share/man/man4/if_nxge.4.gz
OLD_FILES+=usr/share/man/man4/nxge.4.gz
# 20180505: rhosts
OLD_FILES+=usr/share/skel/dot.rhosts
# 20180502: retire ixgb
OLD_FILES+=usr/share/man/man4/if_ixgb.4.gz
OLD_FILES+=usr/share/man/man4/ixgb.4.gz
# 20180501: retire lmc
OLD_FILES+=usr/include/dev/lmc/if_lmc.h
OLD_DIRS+=usr/include/dev/lmc
OLD_FILES+=usr/sbin/lmcconfig
OLD_FILES+=usr/share/man/man4/lmc.4.gz
OLD_FILES+=usr/share/man/man4/if_lmc.4.gz
OLD_FILES+=usr/share/man/man8/lmcconfig.8.gz
# 20180417: remove fuswintr and suswintr
OLD_FILES+=usr/share/man/man9/fuswintr.9.gz
OLD_FILES+=usr/share/man/man9/suswintr.9.gz
# 20180413: remove Arcnet support
OLD_FILES+=usr/include/net/if_arc.h
OLD_FILES+=usr/share/man/man4/cm.4.gz
# 20180409: remove FDDI support
OLD_FILES+=usr/include/net/fddi.h
OLD_FILES+=usr/share/man/man4/fpa.4.gz
# 20180319: remove /boot/overlays, replaced by /boot/dtb/overlays
OLD_DIRS+=boot/overlays
# 20180311: remove sys/i386/include/pcaudioio.h
.if ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/include/machine/pcaudioio.h
.endif
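# Machine-dependent headers like the one above are wrapped in make(1)
# conditionals so they are only scheduled for removal when this file is
# evaluated for the matching architecture. A minimal sketch of checking a
# single target's removal list (assuming a source tree in /usr/src):
# cd /usr/src && make -f Makefile.inc1 TARGET=i386 TARGET_ARCH=i386 check-old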
# 20180310: remove sys/sys/dataacq.h
OLD_FILES+=usr/include/sys/dataacq.h
# 20180306: remove DTrace scripts made obsolete by dwatch(1)
OLD_FILES+=usr/share/dtrace/watch_execve
OLD_FILES+=usr/share/dtrace/watch_kill
OLD_FILES+=usr/share/dtrace/watch_vop_remove
# 20180212: move devmatch
OLD_FILES+=usr/sbin/devmatch
# 20180211: remove usb.conf
OLD_FILES+=etc/devd/usb.conf
# 20180208: remove c_rehash(1)
OLD_FILES+=usr/share/openssl/man/man1/c_rehash.1.gz
# 20180206: remove gdbtui
OLD_FILES+=usr/bin/gdbtui
# 20180201: Obsolete forth files
OLD_FILES+=boot/pcibios.4th
# 20180114: new clang import which bumps version from 5.0.1 to 6.0.0.
OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/esan_interface.h
OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/tsan_interface.h
OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/5.0.1/include/sanitizer
OLD_FILES+=usr/lib/clang/5.0.1/include/__clang_cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/5.0.1/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/5.0.1/include/__clang_cuda_complex_builtins.h
OLD_FILES+=usr/lib/clang/5.0.1/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/5.0.1/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/5.0.1/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/5.0.1/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/5.0.1/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/5.0.1/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/5.0.1/include/adxintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/altivec.h
OLD_FILES+=usr/lib/clang/5.0.1/include/ammintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/arm_acle.h
OLD_FILES+=usr/lib/clang/5.0.1/include/arm_neon.h
OLD_FILES+=usr/lib/clang/5.0.1/include/armintr.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512vpopcntdqintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avxintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/clzerointrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/cpuid.h
OLD_FILES+=usr/lib/clang/5.0.1/include/emmintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/htmintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/immintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/lwpintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/5.0.1/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/5.0.1/include/mmintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/module.modulemap
OLD_FILES+=usr/lib/clang/5.0.1/include/msa.h
OLD_FILES+=usr/lib/clang/5.0.1/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/opencl-c.h
OLD_FILES+=usr/lib/clang/5.0.1/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/s390intrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/shaintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/smmintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/vadefs.h
OLD_FILES+=usr/lib/clang/5.0.1/include/vecintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/x86intrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/xopintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/5.0.1/include
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.profile-armhf.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/5.0.1/lib/freebsd
OLD_DIRS+=usr/lib/clang/5.0.1/lib
OLD_DIRS+=usr/lib/clang/5.0.1
# 20180109: Remove vestiges of digi(4) driver
OLD_FILES+=usr/include/sys/digiio.h
OLD_FILES+=usr/sbin/digictl
OLD_FILES+=usr/share/man/man8/digictl.8.gz
# 20180107: Convert remaining geli(8) tests to ATF
OLD_FILES+=tests/sys/geom/class/eli/nokey_test.sh
OLD_FILES+=tests/sys/geom/class/eli/readonly_test.sh
# 20180106: Convert most geli(8) tests to ATF
OLD_FILES+=tests/sys/geom/class/eli/attach_d_test.sh
OLD_FILES+=tests/sys/geom/class/eli/configure_b_B_test.sh
OLD_FILES+=tests/sys/geom/class/eli/detach_l_test.sh
OLD_FILES+=tests/sys/geom/class/eli/init_B_test.sh
OLD_FILES+=tests/sys/geom/class/eli/init_J_test.sh
OLD_FILES+=tests/sys/geom/class/eli/init_a_test.sh
OLD_FILES+=tests/sys/geom/class/eli/init_alias_test.sh
OLD_FILES+=tests/sys/geom/class/eli/init_i_P_test.sh
OLD_FILES+=tests/sys/geom/class/eli/integrity_copy_test.sh
OLD_FILES+=tests/sys/geom/class/eli/integrity_data_test.sh
OLD_FILES+=tests/sys/geom/class/eli/integrity_hmac_test.sh
OLD_FILES+=tests/sys/geom/class/eli/onetime_a_test.sh
OLD_FILES+=tests/sys/geom/class/eli/onetime_d_test.sh
# 20171230: Remove /etc/skel from mtree
OLD_DIRS+=etc/skel
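# Note: entries in this file are relative paths; the check-old and
# delete-old targets prepend ${DESTDIR} to each entry, so a leading slash
# would point the removal outside the destination tree.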
# 20171208: Remove basename_r(3)
OLD_FILES+=usr/share/man/man3/basename_r.3.gz
# 20171204: Move fdformat man page from volume 1 to volume 8.
OLD_FILES+=usr/share/man/man1/fdformat.1.gz
# 20171203: libproc version bump
OLD_LIBS+=usr/lib/libproc.so.4
OLD_LIBS+=usr/lib32/libproc.so.4
# 20171203: new clang import which bumps version from 5.0.0 to 5.0.1.
OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/esan_interface.h
OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/tsan_interface.h
OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/5.0.0/include/sanitizer
OLD_FILES+=usr/lib/clang/5.0.0/include/__clang_cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/5.0.0/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/5.0.0/include/__clang_cuda_complex_builtins.h
OLD_FILES+=usr/lib/clang/5.0.0/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/5.0.0/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/5.0.0/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/5.0.0/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/5.0.0/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/5.0.0/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/5.0.0/include/adxintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/altivec.h
OLD_FILES+=usr/lib/clang/5.0.0/include/ammintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/arm_acle.h
OLD_FILES+=usr/lib/clang/5.0.0/include/arm_neon.h
OLD_FILES+=usr/lib/clang/5.0.0/include/armintr.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512vpopcntdqintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avxintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/clzerointrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/cpuid.h
OLD_FILES+=usr/lib/clang/5.0.0/include/emmintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/htmintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/immintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/lwpintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/5.0.0/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/5.0.0/include/mmintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/module.modulemap
OLD_FILES+=usr/lib/clang/5.0.0/include/msa.h
OLD_FILES+=usr/lib/clang/5.0.0/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/opencl-c.h
OLD_FILES+=usr/lib/clang/5.0.0/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/s390intrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/shaintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/smmintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/vadefs.h
OLD_FILES+=usr/lib/clang/5.0.0/include/vecintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/x86intrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/xopintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/5.0.0/include
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.profile-armhf.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/5.0.0/lib/freebsd
OLD_DIRS+=usr/lib/clang/5.0.0/lib
OLD_DIRS+=usr/lib/clang/5.0.0
# 20171118: Remove old etc casper files
OLD_FILES+=etc/casper/system.dns
OLD_FILES+=etc/casper/system.grp
OLD_FILES+=etc/casper/system.pwd
OLD_FILES+=etc/casper/system.random
OLD_FILES+=etc/casper/system.sysctl
OLD_DIRS+=etc/casper
# 20171116: lint(1) removal
OLD_FILES+=usr/bin/lint
OLD_FILES+=usr/libexec/lint1
OLD_FILES+=usr/libexec/lint2
OLD_FILES+=usr/libdata/lint/llib-lposix.ln
OLD_FILES+=usr/libdata/lint/llib-lstdc.ln
OLD_FILES+=usr/share/man/man1/lint.1.gz
OLD_FILES+=usr/share/man/man7/lint.7.gz
OLD_DIRS+=usr/libdata/lint
# 20171114: Removal of all fortune datfiles other than freebsd-tips
OLD_FILES+=usr/share/games/fortune/fortunes
OLD_FILES+=usr/share/games/fortune/fortunes.dat
OLD_FILES+=usr/share/games/fortune/gerrold.limerick
OLD_FILES+=usr/share/games/fortune/gerrold.limerick.dat
OLD_FILES+=usr/share/games/fortune/limerick
OLD_FILES+=usr/share/games/fortune/limerick.dat
OLD_FILES+=usr/share/games/fortune/murphy
OLD_FILES+=usr/share/games/fortune/murphy-o
OLD_FILES+=usr/share/games/fortune/murphy-o.dat
OLD_FILES+=usr/share/games/fortune/murphy.dat
OLD_FILES+=usr/share/games/fortune/startrek
OLD_FILES+=usr/share/games/fortune/startrek.dat
OLD_FILES+=usr/share/games/fortune/zippy
OLD_FILES+=usr/share/games/fortune/zippy.dat
# 20171112: Removal of eqnchar definition
OLD_FILES+=usr/share/misc/eqnchar
# 20171110: Removal of mailaddr man page
OLD_FILES+=usr/share/man/man7/mailaddr.7.gz
# 20171108: badsect(8) removal
OLD_FILES+=sbin/badsect
OLD_FILES+=rescue/badsect
OLD_FILES+=usr/share/man/man8/badsect.8.gz
# 20171105: fixing lib/libclang_rt CRTARCH for arm:armv[67].
.if ${MACHINE_ARCH:Marmv[67]*} != "" && \
(!defined(CPUTYPE) || ${CPUTYPE:M*soft*} == "")
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-preinit-arm.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-arm.a
OLD_LIBS+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-arm.so
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan_cxx-arm.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.safestack-arm.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.stats-arm.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.stats_client-arm.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.ubsan_standalone-arm.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-arm.a
.endif
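# The guard above relies on make(1) pattern matching:
# ${MACHINE_ARCH:Marmv[67]*} is non-empty only when MACHINE_ARCH matches
# armv6* or armv7*, and the CPUTYPE test skips soft-float builds, which
# presumably never installed these runtime libraries. The idiom in
# miniature (hypothetical entry, for illustration only):
# .if ${MACHINE_ARCH:Marmv[67]*} != ""
# OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/some-armv67-only-file.a
# .endif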
# 20171104: libcap_random should be in /lib not in /usr/lib
OLD_LIBS+=usr/lib/libcap_random.so.0
# 20171104: Casper can work only as shared library
OLD_FILES+=usr/lib/libcap_dns.a
OLD_FILES+=usr/lib/libcap_dns_p.a
OLD_FILES+=usr/lib/libcap_grp.a
OLD_FILES+=usr/lib/libcap_grp_p.a
OLD_FILES+=usr/lib/libcap_pwd.a
OLD_FILES+=usr/lib/libcap_pwd_p.a
OLD_FILES+=usr/lib/libcap_random.a
OLD_FILES+=usr/lib/libcap_random_p.a
OLD_FILES+=usr/lib/libcap_sysctl.a
OLD_FILES+=usr/lib/libcap_sysctl_p.a
OLD_FILES+=usr/lib/libcasper.a
OLD_FILES+=usr/lib/libcasper_p.a
OLD_FILES+=usr/lib32/libcap_dns.a
OLD_FILES+=usr/lib32/libcap_dns_p.a
OLD_FILES+=usr/lib32/libcap_grp.a
OLD_FILES+=usr/lib32/libcap_grp_p.a
OLD_FILES+=usr/lib32/libcap_pwd.a
OLD_FILES+=usr/lib32/libcap_pwd_p.a
OLD_FILES+=usr/lib32/libcap_random.a
OLD_FILES+=usr/lib32/libcap_random_p.a
OLD_FILES+=usr/lib32/libcap_sysctl.a
OLD_FILES+=usr/lib32/libcap_sysctl_p.a
OLD_FILES+=usr/lib32/libcasper.a
OLD_FILES+=usr/lib32/libcasper_p.a
# 20171031: Removal of adding_user man page
OLD_FILES+=usr/share/man/man7/adding_user.7.gz
# 20171031: Disconnected libpathconv tests
OLD_DIRS+=usr/tests/lib/libpathconv
# 20171017: Removal of mbpool(9)
OLD_FILES+=usr/include/sys/mbpool.h
OLD_FILES+=usr/share/man/man9/mbpool.9.gz
OLD_FILES+=usr/share/man/man9/mbp_destroy.9.gz
OLD_FILES+=usr/share/man/man9/mbp_alloc.9.gz
OLD_FILES+=usr/share/man/man9/mbp_ext_free.9.gz
OLD_FILES+=usr/share/man/man9/mbp_count.9.gz
OLD_FILES+=usr/share/man/man9/mbp_card_free.9.gz
OLD_FILES+=usr/share/man/man9/mbp_get_keep.9.gz
OLD_FILES+=usr/share/man/man9/mbp_free.9.gz
OLD_FILES+=usr/share/man/man9/mbp_get.9.gz
OLD_FILES+=usr/share/man/man9/mbp_create.9.gz
OLD_FILES+=usr/share/man/man9/mbp_sync.9.gz
# 20171010: Remove libstand
OLD_FILES+=usr/lib/libstand.a
OLD_FILES+=usr/lib/libstand_p.a
OLD_FILES+=usr/lib32/libstand.a
OLD_FILES+=usr/lib32/libstand_p.a
OLD_FILES+=usr/include/stand.h
OLD_FILES+=usr/share/man/man3/libstand.3.gz
# 20171003: remove RCMDS
OLD_FILES+=bin/rcp
OLD_FILES+=rescue/rcp
OLD_FILES+=usr/bin/rlogin
OLD_FILES+=usr/bin/rsh
OLD_FILES+=usr/libexec/rlogind
OLD_FILES+=usr/libexec/rshd
OLD_FILES+=usr/share/man/man1/rcp.1.gz
OLD_FILES+=usr/share/man/man1/rlogin.1.gz
OLD_FILES+=usr/share/man/man1/rsh.1.gz
OLD_FILES+=usr/share/man/man8/rlogind.8.gz
OLD_FILES+=usr/share/man/man8/rshd.8.gz
# 20170927: crshared
OLD_FILES+=usr/share/man/man9/crshared.9.gz
# 20170927: procctl
OLD_FILES+=usr/share/man/man8/procctl.8.gz
OLD_FILES+=usr/sbin/procctl
# 20170926: remove unneeded man aliases and locales directory
OLD_FILES+=usr/share/man/en.ISO8859-1/man1
OLD_FILES+=usr/share/man/en.ISO8859-1/man2
OLD_FILES+=usr/share/man/en.ISO8859-1/man3
OLD_FILES+=usr/share/man/en.ISO8859-1/man4
OLD_FILES+=usr/share/man/en.ISO8859-1/man5
OLD_FILES+=usr/share/man/en.ISO8859-1/man6
OLD_FILES+=usr/share/man/en.ISO8859-1/man7
OLD_FILES+=usr/share/man/en.ISO8859-1/man8
OLD_FILES+=usr/share/man/en.ISO8859-1/man9
OLD_FILES+=usr/share/man/en.ISO8859-1/mandoc.db
OLD_DIRS+=usr/share/man/en.ISO8859-1
OLD_FILES+=usr/share/man/en.UTF-8/man1
OLD_FILES+=usr/share/man/en.UTF-8/man2
OLD_FILES+=usr/share/man/en.UTF-8/man3
OLD_FILES+=usr/share/man/en.UTF-8/man4
OLD_FILES+=usr/share/man/en.UTF-8/man5
OLD_FILES+=usr/share/man/en.UTF-8/man6
OLD_FILES+=usr/share/man/en.UTF-8/man7
OLD_FILES+=usr/share/man/en.UTF-8/man8
OLD_FILES+=usr/share/man/en.UTF-8/man9
OLD_FILES+=usr/share/man/en.UTF-8/mandoc.db
OLD_DIRS+=usr/share/man/en.UTF-8
OLD_FILES+=usr/share/man/en.ISO8859-15
OLD_FILES+=usr/share/openssl/man/en.ISO8859-1/man1
OLD_FILES+=usr/share/openssl/man/en.ISO8859-1/man3
OLD_FILES+=usr/share/openssl/man/en.ISO8859-1/mandoc.db
OLD_DIRS+=usr/share/openssl/man/en.ISO8859-1
OLD_FILES+=usr/share/openssl/man/en.ISO8859-15
OLD_DIRS+=usr/share/man/ja/man1
OLD_DIRS+=usr/share/man/ja/man2
OLD_DIRS+=usr/share/man/ja/man3
OLD_DIRS+=usr/share/man/ja/man4
OLD_DIRS+=usr/share/man/ja/man5
OLD_DIRS+=usr/share/man/ja/man6
OLD_DIRS+=usr/share/man/ja/man7
OLD_DIRS+=usr/share/man/ja/man8
OLD_DIRS+=usr/share/man/ja/man9
OLD_DIRS+=usr/share/man/ja
# 20170913: remove unneeded catman utility
OLD_FILES+=etc/periodic/weekly/330.catman
OLD_FILES+=usr/bin/catman
OLD_FILES+=usr/libexec/catman.local
OLD_FILES+=usr/share/man/man1/catman.1.gz
OLD_FILES+=usr/share/man/man8/catman.local.8.gz
OLD_DIRS+=usr/share/man/cat1
OLD_DIRS+=usr/share/man/cat2
OLD_DIRS+=usr/share/man/cat3
OLD_DIRS+=usr/share/man/cat4/amd64
OLD_DIRS+=usr/share/man/cat4/arm
OLD_DIRS+=usr/share/man/cat4/i386
OLD_DIRS+=usr/share/man/cat4/powerpc
OLD_DIRS+=usr/share/man/cat4/sparc64
OLD_DIRS+=usr/share/man/cat4
OLD_DIRS+=usr/share/man/cat5
OLD_DIRS+=usr/share/man/cat6
OLD_DIRS+=usr/share/man/cat7
OLD_DIRS+=usr/share/man/cat8/amd64
OLD_DIRS+=usr/share/man/cat8/arm
OLD_DIRS+=usr/share/man/cat8/i386
OLD_DIRS+=usr/share/man/cat8/powerpc
OLD_DIRS+=usr/share/man/cat8/sparc64
OLD_DIRS+=usr/share/man/cat8
OLD_DIRS+=usr/share/man/cat9
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat1
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat2
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat3
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat4/amd64
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat4/arm
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat4/i386
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat4/powerpc
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat4/sparc64
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat4
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat5
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat6
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat7
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat8/amd64
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat8/arm
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat8/i386
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat8/powerpc
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat8/sparc64
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat8
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat9
OLD_DIRS+=usr/share/man/en.UTF-8/cat1
OLD_DIRS+=usr/share/man/en.UTF-8/cat2
OLD_DIRS+=usr/share/man/en.UTF-8/cat3
OLD_DIRS+=usr/share/man/en.UTF-8/cat4/amd64
OLD_DIRS+=usr/share/man/en.UTF-8/cat4/arm
OLD_DIRS+=usr/share/man/en.UTF-8/cat4/i386
OLD_DIRS+=usr/share/man/en.UTF-8/cat4/powerpc
OLD_DIRS+=usr/share/man/en.UTF-8/cat4/sparc64
OLD_DIRS+=usr/share/man/en.UTF-8/cat4
OLD_DIRS+=usr/share/man/en.UTF-8/cat5
OLD_DIRS+=usr/share/man/en.UTF-8/cat6
OLD_DIRS+=usr/share/man/en.UTF-8/cat7
OLD_DIRS+=usr/share/man/en.UTF-8/cat8/amd64
OLD_DIRS+=usr/share/man/en.UTF-8/cat8/arm
OLD_DIRS+=usr/share/man/en.UTF-8/cat8/i386
OLD_DIRS+=usr/share/man/en.UTF-8/cat8/powerpc
OLD_DIRS+=usr/share/man/en.UTF-8/cat8/sparc64
OLD_DIRS+=usr/share/man/en.UTF-8/cat8
OLD_DIRS+=usr/share/man/en.UTF-8/cat9
OLD_DIRS+=usr/share/man/ja/cat1
OLD_DIRS+=usr/share/man/ja/cat2
OLD_DIRS+=usr/share/man/ja/cat3
OLD_DIRS+=usr/share/man/ja/cat4/amd64
OLD_DIRS+=usr/share/man/ja/cat4/arm
OLD_DIRS+=usr/share/man/ja/cat4/i386
OLD_DIRS+=usr/share/man/ja/cat4/powerpc
OLD_DIRS+=usr/share/man/ja/cat4/sparc64
OLD_DIRS+=usr/share/man/ja/cat4
OLD_DIRS+=usr/share/man/ja/cat5
OLD_DIRS+=usr/share/man/ja/cat6
OLD_DIRS+=usr/share/man/ja/cat7
OLD_DIRS+=usr/share/man/ja/cat8/amd64
OLD_DIRS+=usr/share/man/ja/cat8/arm
OLD_DIRS+=usr/share/man/ja/cat8/i386
OLD_DIRS+=usr/share/man/ja/cat8/powerpc
OLD_DIRS+=usr/share/man/ja/cat8/sparc64
OLD_DIRS+=usr/share/man/ja/cat8
OLD_DIRS+=usr/share/man/ja/cat9
OLD_DIRS+=usr/share/openssl/man/cat1
OLD_DIRS+=usr/share/openssl/man/cat3
OLD_DIRS+=usr/share/openssl/man/en.ISO8859-1/cat1
OLD_DIRS+=usr/share/openssl/man/en.ISO8859-1/cat3
# 20170802: ksyms(4) ioctl interface was removed
OLD_FILES+=usr/include/sys/ksyms.h
# 20170729: the iicbus/pcf8563 driver is replaced with iicbus/nxprtc
OLD_FILES+=usr/include/dev/iicbus/pcf8563reg.h
# 20170722: new clang import which bumps version from 4.0.0 to 5.0.0.
OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/esan_interface.h
OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/4.0.0/include/sanitizer
OLD_FILES+=usr/lib/clang/4.0.0/include/__clang_cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/4.0.0/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/4.0.0/include/__clang_cuda_complex_builtins.h
OLD_FILES+=usr/lib/clang/4.0.0/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/4.0.0/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/4.0.0/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/4.0.0/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/4.0.0/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/4.0.0/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/4.0.0/include/adxintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/altivec.h
OLD_FILES+=usr/lib/clang/4.0.0/include/ammintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/arm_acle.h
OLD_FILES+=usr/lib/clang/4.0.0/include/arm_neon.h
OLD_FILES+=usr/lib/clang/4.0.0/include/armintr.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avxintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/cpuid.h
OLD_FILES+=usr/lib/clang/4.0.0/include/emmintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/htmintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/immintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/4.0.0/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/4.0.0/include/mmintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/module.modulemap
OLD_FILES+=usr/lib/clang/4.0.0/include/msa.h
OLD_FILES+=usr/lib/clang/4.0.0/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/opencl-c.h
OLD_FILES+=usr/lib/clang/4.0.0/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/s390intrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/shaintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/smmintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/vadefs.h
OLD_FILES+=usr/lib/clang/4.0.0/include/vecintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/x86intrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/xopintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/4.0.0/include
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/4.0.0/lib/freebsd
OLD_DIRS+=usr/lib/clang/4.0.0/lib
OLD_DIRS+=usr/lib/clang/4.0.0
OLD_FILES+=usr/bin/llvm-pdbdump
# 20170610: chown-f_test replaced by chown_test
OLD_FILES+=usr/tests/usr.sbin/chown/chown-f_test
# 20170609: drop obsolete manpage link (if_rtwn.ko -> rtwn.ko)
OLD_FILES+=usr/share/man/man4/if_rtwn.4.gz
# 20170531: removal of groff
OLD_FILES+=usr/bin/addftinfo
OLD_FILES+=usr/bin/afmtodit
OLD_FILES+=usr/bin/checknr
OLD_FILES+=usr/bin/colcrt
OLD_FILES+=usr/bin/eqn
OLD_FILES+=usr/bin/grn
OLD_FILES+=usr/bin/grodvi
OLD_FILES+=usr/bin/groff
OLD_FILES+=usr/bin/grog
OLD_FILES+=usr/bin/grolbp
OLD_FILES+=usr/bin/grolj4
OLD_FILES+=usr/bin/grops
OLD_FILES+=usr/bin/grotty
OLD_FILES+=usr/bin/hpftodit
OLD_FILES+=usr/bin/indxbib
OLD_FILES+=usr/bin/lkbib
OLD_FILES+=usr/bin/lookbib
OLD_FILES+=usr/bin/mmroff
OLD_FILES+=usr/bin/neqn
OLD_FILES+=usr/bin/nroff
OLD_FILES+=usr/bin/pfbtops
OLD_FILES+=usr/bin/pic
OLD_FILES+=usr/bin/post-grohtml
OLD_FILES+=usr/bin/pre-grohtml
OLD_FILES+=usr/bin/psroff
OLD_FILES+=usr/bin/refer
OLD_FILES+=usr/bin/tbl
OLD_FILES+=usr/bin/tfmtodit
OLD_FILES+=usr/bin/troff
OLD_FILES+=usr/bin/vgrind
OLD_FILES+=usr/libexec/vfontedpr
OLD_FILES+=usr/share/dict/eign
OLD_FILES+=usr/share/groff_font/devX100-12/CB
OLD_FILES+=usr/share/groff_font/devX100-12/CBI
OLD_FILES+=usr/share/groff_font/devX100-12/CI
OLD_FILES+=usr/share/groff_font/devX100-12/CR
OLD_FILES+=usr/share/groff_font/devX100-12/DESC
OLD_FILES+=usr/share/groff_font/devX100-12/HB
OLD_FILES+=usr/share/groff_font/devX100-12/HBI
OLD_FILES+=usr/share/groff_font/devX100-12/HI
OLD_FILES+=usr/share/groff_font/devX100-12/HR
OLD_FILES+=usr/share/groff_font/devX100-12/NB
OLD_FILES+=usr/share/groff_font/devX100-12/NBI
OLD_FILES+=usr/share/groff_font/devX100-12/NI
OLD_FILES+=usr/share/groff_font/devX100-12/NR
OLD_FILES+=usr/share/groff_font/devX100-12/S
OLD_FILES+=usr/share/groff_font/devX100-12/TB
OLD_FILES+=usr/share/groff_font/devX100-12/TBI
OLD_FILES+=usr/share/groff_font/devX100-12/TI
OLD_FILES+=usr/share/groff_font/devX100-12/TR
OLD_DIRS+=usr/share/groff_font/devX100-12
OLD_FILES+=usr/share/groff_font/devX100/CB
OLD_FILES+=usr/share/groff_font/devX100/CBI
OLD_FILES+=usr/share/groff_font/devX100/CI
OLD_FILES+=usr/share/groff_font/devX100/CR
OLD_FILES+=usr/share/groff_font/devX100/DESC
OLD_FILES+=usr/share/groff_font/devX100/HB
OLD_FILES+=usr/share/groff_font/devX100/HBI
OLD_FILES+=usr/share/groff_font/devX100/HI
OLD_FILES+=usr/share/groff_font/devX100/HR
OLD_FILES+=usr/share/groff_font/devX100/NB
OLD_FILES+=usr/share/groff_font/devX100/NBI
OLD_FILES+=usr/share/groff_font/devX100/NI
OLD_FILES+=usr/share/groff_font/devX100/NR
OLD_FILES+=usr/share/groff_font/devX100/S
OLD_FILES+=usr/share/groff_font/devX100/TB
OLD_FILES+=usr/share/groff_font/devX100/TBI
OLD_FILES+=usr/share/groff_font/devX100/TI
OLD_FILES+=usr/share/groff_font/devX100/TR
OLD_DIRS+=usr/share/groff_font/devX100
OLD_FILES+=usr/share/groff_font/devX75-12/CB
OLD_FILES+=usr/share/groff_font/devX75-12/CBI
OLD_FILES+=usr/share/groff_font/devX75-12/CI
OLD_FILES+=usr/share/groff_font/devX75-12/CR
OLD_FILES+=usr/share/groff_font/devX75-12/DESC
OLD_FILES+=usr/share/groff_font/devX75-12/HB
OLD_FILES+=usr/share/groff_font/devX75-12/HBI
OLD_FILES+=usr/share/groff_font/devX75-12/HI
OLD_FILES+=usr/share/groff_font/devX75-12/HR
OLD_FILES+=usr/share/groff_font/devX75-12/NB
OLD_FILES+=usr/share/groff_font/devX75-12/NBI
OLD_FILES+=usr/share/groff_font/devX75-12/NI
OLD_FILES+=usr/share/groff_font/devX75-12/NR
OLD_FILES+=usr/share/groff_font/devX75-12/S
OLD_FILES+=usr/share/groff_font/devX75-12/TB
OLD_FILES+=usr/share/groff_font/devX75-12/TBI
OLD_FILES+=usr/share/groff_font/devX75-12/TI
OLD_FILES+=usr/share/groff_font/devX75-12/TR
OLD_DIRS+=usr/share/groff_font/devX75-12
OLD_FILES+=usr/share/groff_font/devX75/CB
OLD_FILES+=usr/share/groff_font/devX75/CBI
OLD_FILES+=usr/share/groff_font/devX75/CI
OLD_FILES+=usr/share/groff_font/devX75/CR
OLD_FILES+=usr/share/groff_font/devX75/DESC
OLD_FILES+=usr/share/groff_font/devX75/HB
OLD_FILES+=usr/share/groff_font/devX75/HBI
OLD_FILES+=usr/share/groff_font/devX75/HI
OLD_FILES+=usr/share/groff_font/devX75/HR
OLD_FILES+=usr/share/groff_font/devX75/NB
OLD_FILES+=usr/share/groff_font/devX75/NBI
OLD_FILES+=usr/share/groff_font/devX75/NI
OLD_FILES+=usr/share/groff_font/devX75/NR
OLD_FILES+=usr/share/groff_font/devX75/S
OLD_FILES+=usr/share/groff_font/devX75/TB
OLD_FILES+=usr/share/groff_font/devX75/TBI
OLD_FILES+=usr/share/groff_font/devX75/TI
OLD_FILES+=usr/share/groff_font/devX75/TR
OLD_DIRS+=usr/share/groff_font/devX75
OLD_FILES+=usr/share/groff_font/devascii/B
OLD_FILES+=usr/share/groff_font/devascii/BI
OLD_FILES+=usr/share/groff_font/devascii/CW
OLD_FILES+=usr/share/groff_font/devascii/DESC
OLD_FILES+=usr/share/groff_font/devascii/I
OLD_FILES+=usr/share/groff_font/devascii/L
OLD_FILES+=usr/share/groff_font/devascii/R
OLD_FILES+=usr/share/groff_font/devascii/S
OLD_DIRS+=usr/share/groff_font/devascii
OLD_FILES+=usr/share/groff_font/devcp1047/B
OLD_FILES+=usr/share/groff_font/devcp1047/BI
OLD_FILES+=usr/share/groff_font/devcp1047/CW
OLD_FILES+=usr/share/groff_font/devcp1047/DESC
OLD_FILES+=usr/share/groff_font/devcp1047/I
OLD_FILES+=usr/share/groff_font/devcp1047/L
OLD_FILES+=usr/share/groff_font/devcp1047/R
OLD_FILES+=usr/share/groff_font/devcp1047/S
OLD_DIRS+=usr/share/groff_font/devcp1047
OLD_FILES+=usr/share/groff_font/devdvi/CW
OLD_FILES+=usr/share/groff_font/devdvi/CWEC
OLD_FILES+=usr/share/groff_font/devdvi/CWI
OLD_FILES+=usr/share/groff_font/devdvi/CWIEC
OLD_FILES+=usr/share/groff_font/devdvi/CWITC
OLD_FILES+=usr/share/groff_font/devdvi/CWTC
OLD_FILES+=usr/share/groff_font/devdvi/CompileFonts
OLD_FILES+=usr/share/groff_font/devdvi/DESC
OLD_FILES+=usr/share/groff_font/devdvi/EX
OLD_FILES+=usr/share/groff_font/devdvi/HB
OLD_FILES+=usr/share/groff_font/devdvi/HBEC
OLD_FILES+=usr/share/groff_font/devdvi/HBI
OLD_FILES+=usr/share/groff_font/devdvi/HBIEC
OLD_FILES+=usr/share/groff_font/devdvi/HBITC
OLD_FILES+=usr/share/groff_font/devdvi/HBTC
OLD_FILES+=usr/share/groff_font/devdvi/HI
OLD_FILES+=usr/share/groff_font/devdvi/HIEC
OLD_FILES+=usr/share/groff_font/devdvi/HITC
OLD_FILES+=usr/share/groff_font/devdvi/HR
OLD_FILES+=usr/share/groff_font/devdvi/HREC
OLD_FILES+=usr/share/groff_font/devdvi/HRTC
OLD_FILES+=usr/share/groff_font/devdvi/MI
OLD_FILES+=usr/share/groff_font/devdvi/Makefile
OLD_FILES+=usr/share/groff_font/devdvi/S
OLD_FILES+=usr/share/groff_font/devdvi/SA
OLD_FILES+=usr/share/groff_font/devdvi/SB
OLD_FILES+=usr/share/groff_font/devdvi/SC
OLD_FILES+=usr/share/groff_font/devdvi/TB
OLD_FILES+=usr/share/groff_font/devdvi/TBEC
OLD_FILES+=usr/share/groff_font/devdvi/TBI
OLD_FILES+=usr/share/groff_font/devdvi/TBIEC
OLD_FILES+=usr/share/groff_font/devdvi/TBITC
OLD_FILES+=usr/share/groff_font/devdvi/TBTC
OLD_FILES+=usr/share/groff_font/devdvi/TI
OLD_FILES+=usr/share/groff_font/devdvi/TIEC
OLD_FILES+=usr/share/groff_font/devdvi/TITC
OLD_FILES+=usr/share/groff_font/devdvi/TR
OLD_FILES+=usr/share/groff_font/devdvi/TREC
OLD_FILES+=usr/share/groff_font/devdvi/TRTC
OLD_FILES+=usr/share/groff_font/devdvi/ec.map
OLD_FILES+=usr/share/groff_font/devdvi/msam.map
OLD_FILES+=usr/share/groff_font/devdvi/msbm.map
OLD_FILES+=usr/share/groff_font/devdvi/tc.map
OLD_FILES+=usr/share/groff_font/devdvi/texb.map
OLD_FILES+=usr/share/groff_font/devdvi/texex.map
OLD_FILES+=usr/share/groff_font/devdvi/texi.map
OLD_FILES+=usr/share/groff_font/devdvi/texmi.map
OLD_FILES+=usr/share/groff_font/devdvi/texr.map
OLD_FILES+=usr/share/groff_font/devdvi/texsy.map
OLD_FILES+=usr/share/groff_font/devdvi/textex.map
OLD_FILES+=usr/share/groff_font/devdvi/textt.map
OLD_DIRS+=usr/share/groff_font/devdvi
OLD_FILES+=usr/share/groff_font/devhtml/B
OLD_FILES+=usr/share/groff_font/devhtml/BI
OLD_FILES+=usr/share/groff_font/devhtml/CB
OLD_FILES+=usr/share/groff_font/devhtml/CBI
OLD_FILES+=usr/share/groff_font/devhtml/CI
OLD_FILES+=usr/share/groff_font/devhtml/CR
OLD_FILES+=usr/share/groff_font/devhtml/DESC
OLD_FILES+=usr/share/groff_font/devhtml/I
OLD_FILES+=usr/share/groff_font/devhtml/R
OLD_FILES+=usr/share/groff_font/devhtml/S
OLD_DIRS+=usr/share/groff_font/devhtml
OLD_FILES+=usr/share/groff_font/devkoi8-r/B
OLD_FILES+=usr/share/groff_font/devkoi8-r/BI
OLD_FILES+=usr/share/groff_font/devkoi8-r/CW
OLD_FILES+=usr/share/groff_font/devkoi8-r/DESC
OLD_FILES+=usr/share/groff_font/devkoi8-r/I
OLD_FILES+=usr/share/groff_font/devkoi8-r/L
OLD_FILES+=usr/share/groff_font/devkoi8-r/R
OLD_FILES+=usr/share/groff_font/devkoi8-r/S
OLD_DIRS+=usr/share/groff_font/devkoi8-r
OLD_FILES+=usr/share/groff_font/devlatin1/B
OLD_FILES+=usr/share/groff_font/devlatin1/BI
OLD_FILES+=usr/share/groff_font/devlatin1/CW
OLD_FILES+=usr/share/groff_font/devlatin1/DESC
OLD_FILES+=usr/share/groff_font/devlatin1/I
OLD_FILES+=usr/share/groff_font/devlatin1/L
OLD_FILES+=usr/share/groff_font/devlatin1/R
OLD_FILES+=usr/share/groff_font/devlatin1/S
OLD_DIRS+=usr/share/groff_font/devlatin1
OLD_FILES+=usr/share/groff_font/devlbp/CB
OLD_FILES+=usr/share/groff_font/devlbp/CI
OLD_FILES+=usr/share/groff_font/devlbp/CR
OLD_FILES+=usr/share/groff_font/devlbp/DESC
OLD_FILES+=usr/share/groff_font/devlbp/EB
OLD_FILES+=usr/share/groff_font/devlbp/EI
OLD_FILES+=usr/share/groff_font/devlbp/ER
OLD_FILES+=usr/share/groff_font/devlbp/HB
OLD_FILES+=usr/share/groff_font/devlbp/HBI
OLD_FILES+=usr/share/groff_font/devlbp/HI
OLD_FILES+=usr/share/groff_font/devlbp/HNB
OLD_FILES+=usr/share/groff_font/devlbp/HNBI
OLD_FILES+=usr/share/groff_font/devlbp/HNI
OLD_FILES+=usr/share/groff_font/devlbp/HNR
OLD_FILES+=usr/share/groff_font/devlbp/HR
OLD_FILES+=usr/share/groff_font/devlbp/TB
OLD_FILES+=usr/share/groff_font/devlbp/TBI
OLD_FILES+=usr/share/groff_font/devlbp/TI
OLD_FILES+=usr/share/groff_font/devlbp/TR
OLD_DIRS+=usr/share/groff_font/devlbp
OLD_FILES+=usr/share/groff_font/devlj4/AB
OLD_FILES+=usr/share/groff_font/devlj4/ABI
OLD_FILES+=usr/share/groff_font/devlj4/AI
OLD_FILES+=usr/share/groff_font/devlj4/ALBB
OLD_FILES+=usr/share/groff_font/devlj4/ALBR
OLD_FILES+=usr/share/groff_font/devlj4/AOB
OLD_FILES+=usr/share/groff_font/devlj4/AOI
OLD_FILES+=usr/share/groff_font/devlj4/AOR
OLD_FILES+=usr/share/groff_font/devlj4/AR
OLD_FILES+=usr/share/groff_font/devlj4/CB
OLD_FILES+=usr/share/groff_font/devlj4/CBI
OLD_FILES+=usr/share/groff_font/devlj4/CI
OLD_FILES+=usr/share/groff_font/devlj4/CLARENDON
OLD_FILES+=usr/share/groff_font/devlj4/CORONET
OLD_FILES+=usr/share/groff_font/devlj4/CR
OLD_FILES+=usr/share/groff_font/devlj4/DESC
OLD_FILES+=usr/share/groff_font/devlj4/GB
OLD_FILES+=usr/share/groff_font/devlj4/GBI
OLD_FILES+=usr/share/groff_font/devlj4/GI
OLD_FILES+=usr/share/groff_font/devlj4/GR
OLD_FILES+=usr/share/groff_font/devlj4/LGB
OLD_FILES+=usr/share/groff_font/devlj4/LGI
OLD_FILES+=usr/share/groff_font/devlj4/LGR
OLD_FILES+=usr/share/groff_font/devlj4/MARIGOLD
OLD_FILES+=usr/share/groff_font/devlj4/OB
OLD_FILES+=usr/share/groff_font/devlj4/OBI
OLD_FILES+=usr/share/groff_font/devlj4/OI
OLD_FILES+=usr/share/groff_font/devlj4/OR
OLD_FILES+=usr/share/groff_font/devlj4/S
OLD_FILES+=usr/share/groff_font/devlj4/SYMBOL
OLD_FILES+=usr/share/groff_font/devlj4/TB
OLD_FILES+=usr/share/groff_font/devlj4/TBI
OLD_FILES+=usr/share/groff_font/devlj4/TI
OLD_FILES+=usr/share/groff_font/devlj4/TNRB
OLD_FILES+=usr/share/groff_font/devlj4/TNRBI
OLD_FILES+=usr/share/groff_font/devlj4/TNRI
OLD_FILES+=usr/share/groff_font/devlj4/TNRR
OLD_FILES+=usr/share/groff_font/devlj4/TR
OLD_FILES+=usr/share/groff_font/devlj4/UB
OLD_FILES+=usr/share/groff_font/devlj4/UBI
OLD_FILES+=usr/share/groff_font/devlj4/UCB
OLD_FILES+=usr/share/groff_font/devlj4/UCBI
OLD_FILES+=usr/share/groff_font/devlj4/UCI
OLD_FILES+=usr/share/groff_font/devlj4/UCR
OLD_FILES+=usr/share/groff_font/devlj4/UI
OLD_FILES+=usr/share/groff_font/devlj4/UR
OLD_FILES+=usr/share/groff_font/devlj4/WINGDINGS
OLD_DIRS+=usr/share/groff_font/devlj4
OLD_FILES+=usr/share/groff_font/devps/AB
OLD_FILES+=usr/share/groff_font/devps/ABI
OLD_FILES+=usr/share/groff_font/devps/AI
OLD_FILES+=usr/share/groff_font/devps/AR
OLD_FILES+=usr/share/groff_font/devps/BMB
OLD_FILES+=usr/share/groff_font/devps/BMBI
OLD_FILES+=usr/share/groff_font/devps/BMI
OLD_FILES+=usr/share/groff_font/devps/BMR
OLD_FILES+=usr/share/groff_font/devps/CB
OLD_FILES+=usr/share/groff_font/devps/CBI
OLD_FILES+=usr/share/groff_font/devps/CI
OLD_FILES+=usr/share/groff_font/devps/CR
OLD_FILES+=usr/share/groff_font/devps/DESC
OLD_FILES+=usr/share/groff_font/devps/EURO
OLD_FILES+=usr/share/groff_font/devps/HB
OLD_FILES+=usr/share/groff_font/devps/HBI
OLD_FILES+=usr/share/groff_font/devps/HI
OLD_FILES+=usr/share/groff_font/devps/HNB
OLD_FILES+=usr/share/groff_font/devps/HNBI
OLD_FILES+=usr/share/groff_font/devps/HNI
OLD_FILES+=usr/share/groff_font/devps/HNR
OLD_FILES+=usr/share/groff_font/devps/HR
OLD_FILES+=usr/share/groff_font/devps/Makefile
OLD_FILES+=usr/share/groff_font/devps/NB
OLD_FILES+=usr/share/groff_font/devps/NBI
OLD_FILES+=usr/share/groff_font/devps/NI
OLD_FILES+=usr/share/groff_font/devps/NR
OLD_FILES+=usr/share/groff_font/devps/PB
OLD_FILES+=usr/share/groff_font/devps/PBI
OLD_FILES+=usr/share/groff_font/devps/PI
OLD_FILES+=usr/share/groff_font/devps/PR
OLD_FILES+=usr/share/groff_font/devps/S
OLD_FILES+=usr/share/groff_font/devps/SS
OLD_FILES+=usr/share/groff_font/devps/TB
OLD_FILES+=usr/share/groff_font/devps/TBI
OLD_FILES+=usr/share/groff_font/devps/TI
OLD_FILES+=usr/share/groff_font/devps/TR
OLD_FILES+=usr/share/groff_font/devps/ZCMI
OLD_FILES+=usr/share/groff_font/devps/ZD
OLD_FILES+=usr/share/groff_font/devps/ZDR
OLD_FILES+=usr/share/groff_font/devps/afmname
OLD_FILES+=usr/share/groff_font/devps/dingbats.map
OLD_FILES+=usr/share/groff_font/devps/dingbats.rmap
OLD_FILES+=usr/share/groff_font/devps/download
OLD_FILES+=usr/share/groff_font/devps/freeeuro.pfa
OLD_FILES+=usr/share/groff_font/devps/lgreekmap
OLD_FILES+=usr/share/groff_font/devps/prologue
OLD_FILES+=usr/share/groff_font/devps/symbol.sed
OLD_FILES+=usr/share/groff_font/devps/symbolchars
OLD_FILES+=usr/share/groff_font/devps/symbolsl.afm
OLD_FILES+=usr/share/groff_font/devps/symbolsl.pfa
OLD_FILES+=usr/share/groff_font/devps/text.enc
OLD_FILES+=usr/share/groff_font/devps/textmap
OLD_FILES+=usr/share/groff_font/devps/zapfdr.pfa
OLD_DIRS+=usr/share/groff_font/devps
OLD_FILES+=usr/share/groff_font/devutf8/B
OLD_FILES+=usr/share/groff_font/devutf8/BI
OLD_FILES+=usr/share/groff_font/devutf8/CW
OLD_FILES+=usr/share/groff_font/devutf8/DESC
OLD_FILES+=usr/share/groff_font/devutf8/I
OLD_FILES+=usr/share/groff_font/devutf8/L
OLD_FILES+=usr/share/groff_font/devutf8/R
OLD_FILES+=usr/share/groff_font/devutf8/S
OLD_DIRS+=usr/share/groff_font/devutf8
OLD_DIRS+=usr/share/groff_font
OLD_FILES+=usr/share/man/man1/addftinfo.1.gz
OLD_FILES+=usr/share/man/man1/afmtodit.1.gz
OLD_FILES+=usr/share/man/man1/checknr.1.gz
OLD_FILES+=usr/share/man/man1/colcrt.1.gz
OLD_FILES+=usr/share/man/man1/eqn.1.gz
OLD_FILES+=usr/share/man/man1/grn.1.gz
OLD_FILES+=usr/share/man/man1/grodvi.1.gz
OLD_FILES+=usr/share/man/man1/groff.1.gz
OLD_FILES+=usr/share/man/man1/grog.1.gz
OLD_FILES+=usr/share/man/man1/grolbp.1.gz
OLD_FILES+=usr/share/man/man1/grolj4.1.gz
OLD_FILES+=usr/share/man/man1/grops.1.gz
OLD_FILES+=usr/share/man/man1/grotty.1.gz
OLD_FILES+=usr/share/man/man1/hpftodit.1.gz
OLD_FILES+=usr/share/man/man1/indxbib.1.gz
OLD_FILES+=usr/share/man/man1/lkbib.1.gz
OLD_FILES+=usr/share/man/man1/lookbib.1.gz
OLD_FILES+=usr/share/man/man1/mmroff.1.gz
OLD_FILES+=usr/share/man/man1/neqn.1.gz
OLD_FILES+=usr/share/man/man1/nroff.1.gz
OLD_FILES+=usr/share/man/man1/pfbtops.1.gz
OLD_FILES+=usr/share/man/man1/pic.1.gz
OLD_FILES+=usr/share/man/man1/psroff.1.gz
OLD_FILES+=usr/share/man/man1/refer.1.gz
OLD_FILES+=usr/share/man/man1/tbl.1.gz
OLD_FILES+=usr/share/man/man1/tfmtodit.1.gz
OLD_FILES+=usr/share/man/man1/troff.1.gz
OLD_FILES+=usr/share/man/man1/vgrind.1.gz
OLD_FILES+=usr/share/man/man5/groff_font.5.gz
OLD_FILES+=usr/share/man/man5/groff_out.5.gz
OLD_FILES+=usr/share/man/man5/groff_tmac.5.gz
OLD_FILES+=usr/share/man/man5/lj4_font.5.gz
OLD_FILES+=usr/share/man/man5/tmac.5.gz
OLD_FILES+=usr/share/man/man5/vgrindefs.5.gz
OLD_FILES+=usr/share/man/man7/ditroff.7.gz
OLD_FILES+=usr/share/man/man7/groff.7.gz
OLD_FILES+=usr/share/man/man7/groff_char.7.gz
OLD_FILES+=usr/share/man/man7/groff_diff.7.gz
OLD_FILES+=usr/share/man/man7/groff_man.7.gz
OLD_FILES+=usr/share/man/man7/groff_mdoc.7.gz
OLD_FILES+=usr/share/man/man7/groff_me.7.gz
OLD_FILES+=usr/share/man/man7/groff_mm.7.gz
OLD_FILES+=usr/share/man/man7/groff_mmse.7.gz
OLD_FILES+=usr/share/man/man7/groff_ms.7.gz
OLD_FILES+=usr/share/man/man7/groff_trace.7.gz
OLD_FILES+=usr/share/man/man7/groff_www.7.gz
OLD_FILES+=usr/share/man/man7/mdoc.samples.7.gz
OLD_FILES+=usr/share/man/man7/me.7.gz
OLD_FILES+=usr/share/man/man7/mm.7.gz
OLD_FILES+=usr/share/man/man7/mmse.7.gz
OLD_FILES+=usr/share/man/man7/ms.7.gz
OLD_FILES+=usr/share/man/man7/orig_me.7.gz
OLD_FILES+=usr/share/me/acm.me
OLD_FILES+=usr/share/me/chars.me
OLD_FILES+=usr/share/me/deltext.me
OLD_FILES+=usr/share/me/eqn.me
OLD_FILES+=usr/share/me/float.me
OLD_FILES+=usr/share/me/footnote.me
OLD_FILES+=usr/share/me/index.me
OLD_FILES+=usr/share/me/letterhead.me
OLD_FILES+=usr/share/me/local.me
OLD_FILES+=usr/share/me/null.me
OLD_FILES+=usr/share/me/refer.me
OLD_FILES+=usr/share/me/revisions
OLD_FILES+=usr/share/me/sh.me
OLD_FILES+=usr/share/me/tbl.me
OLD_FILES+=usr/share/me/thesis.me
OLD_DIRS+=usr/share/me
OLD_FILES+=usr/share/misc/vgrindefs
OLD_FILES+=usr/share/misc/vgrindefs.db
OLD_FILES+=usr/share/tmac/X.tmac
OLD_FILES+=usr/share/tmac/Xps.tmac
OLD_FILES+=usr/share/tmac/a4.tmac
OLD_FILES+=usr/share/tmac/an-old.tmac
OLD_FILES+=usr/share/tmac/an.tmac
OLD_FILES+=usr/share/tmac/andoc.tmac
OLD_FILES+=usr/share/tmac/composite.tmac
OLD_FILES+=usr/share/tmac/cp1047.tmac
OLD_FILES+=usr/share/tmac/devtag.tmac
OLD_FILES+=usr/share/tmac/doc.tmac
OLD_FILES+=usr/share/tmac/dvi.tmac
OLD_FILES+=usr/share/tmac/e.tmac
OLD_FILES+=usr/share/tmac/ec.tmac
OLD_FILES+=usr/share/tmac/eqnrc
OLD_FILES+=usr/share/tmac/europs.tmac
OLD_FILES+=usr/share/tmac/html-end.tmac
OLD_FILES+=usr/share/tmac/html.tmac
OLD_FILES+=usr/share/tmac/hyphen.ru
OLD_FILES+=usr/share/tmac/hyphen.us
OLD_FILES+=usr/share/tmac/hyphenex.us
OLD_FILES+=usr/share/tmac/koi8-r.tmac
OLD_FILES+=usr/share/tmac/latin1.tmac
OLD_FILES+=usr/share/tmac/latin2.tmac
OLD_FILES+=usr/share/tmac/latin9.tmac
OLD_FILES+=usr/share/tmac/lbp.tmac
OLD_FILES+=usr/share/tmac/lj4.tmac
OLD_FILES+=usr/share/tmac/m.tmac
OLD_FILES+=usr/share/tmac/man.local
OLD_FILES+=usr/share/tmac/man.tmac
OLD_FILES+=usr/share/tmac/mandoc.tmac
OLD_FILES+=usr/share/tmac/mdoc.local
OLD_FILES+=usr/share/tmac/mdoc.tmac
OLD_FILES+=usr/share/tmac/mdoc/doc-common
OLD_FILES+=usr/share/tmac/mdoc/doc-ditroff
OLD_FILES+=usr/share/tmac/mdoc/doc-nroff
OLD_FILES+=usr/share/tmac/mdoc/doc-syms
OLD_FILES+=usr/share/tmac/mdoc/fr.ISO8859-1
OLD_FILES+=usr/share/tmac/mdoc/ru.KOI8-R
OLD_DIRS+=usr/share/tmac/mdoc
OLD_FILES+=usr/share/tmac/me.tmac
OLD_FILES+=usr/share/tmac/mm/0.MT
OLD_FILES+=usr/share/tmac/mm/4.MT
OLD_FILES+=usr/share/tmac/mm/5.MT
OLD_FILES+=usr/share/tmac/mm/locale
OLD_FILES+=usr/share/tmac/mm/mm.tmac
OLD_FILES+=usr/share/tmac/mm/mmse.tmac
OLD_FILES+=usr/share/tmac/mm/ms.cov
OLD_FILES+=usr/share/tmac/mm/se_locale
OLD_FILES+=usr/share/tmac/mm/se_ms.cov
OLD_DIRS+=usr/share/tmac/mm
OLD_FILES+=usr/share/tmac/ms.tmac
OLD_FILES+=usr/share/tmac/mse.tmac
OLD_FILES+=usr/share/tmac/papersize.tmac
OLD_FILES+=usr/share/tmac/pic.tmac
OLD_FILES+=usr/share/tmac/ps.tmac
OLD_FILES+=usr/share/tmac/psatk.tmac
OLD_FILES+=usr/share/tmac/psold.tmac
OLD_FILES+=usr/share/tmac/pspic.tmac
OLD_FILES+=usr/share/tmac/s.tmac
OLD_FILES+=usr/share/tmac/safer.tmac
OLD_FILES+=usr/share/tmac/tmac.orig_me
OLD_FILES+=usr/share/tmac/tmac.vgrind
OLD_FILES+=usr/share/tmac/trace.tmac
OLD_FILES+=usr/share/tmac/troffrc
OLD_FILES+=usr/share/tmac/troffrc-end
OLD_FILES+=usr/share/tmac/tty-char.tmac
OLD_FILES+=usr/share/tmac/tty.tmac
OLD_FILES+=usr/share/tmac/unicode.tmac
OLD_FILES+=usr/share/tmac/www.tmac
OLD_DIRS+=usr/share/tmac
# 20170607: remove incorrect atf_check(1) manpage link
OLD_FILES+=usr/share/man/man1/atf_check.1.gz
# 20170601: remove stale manpage
OLD_FILES+=usr/share/man/man2/cap_rights_get.2.gz
# 20170601: old libifconfig and libifc
OLD_FILES+=usr/lib/libifc.a
OLD_FILES+=usr/lib/libifc_p.a
OLD_FILES+=usr/lib/libifconfig.a
OLD_FILES+=usr/lib/libifconfig_p.a
OLD_FILES+=usr/lib32/libifc.a
OLD_FILES+=usr/lib32/libifc_p.a
OLD_FILES+=usr/lib32/libifconfig.a
OLD_FILES+=usr/lib32/libifconfig_p.a
# 20170529: mount.conf(8) -> mount.conf(5)
OLD_FILES+=usr/share/man/man8/mount.conf.8.gz
# 20170525: remove misleading template
OLD_FILES+=usr/share/misc/man.template
# 20170525: disconnect the roff docs from the build
OLD_FILES+=usr/share/doc/papers/beyond43.ascii.gz
OLD_FILES+=usr/share/doc/papers/bio.ascii.gz
OLD_FILES+=usr/share/doc/papers/contents.ascii.gz
OLD_FILES+=usr/share/doc/papers/devfs.ascii.gz
OLD_FILES+=usr/share/doc/papers/diskperf.ascii.gz
OLD_FILES+=usr/share/doc/papers/fsinterface.ascii.gz
OLD_FILES+=usr/share/doc/papers/hwpmc.ascii.gz
OLD_FILES+=usr/share/doc/papers/jail.ascii.gz
OLD_FILES+=usr/share/doc/papers/kernmalloc.ascii.gz
OLD_FILES+=usr/share/doc/papers/kerntune.ascii.gz
OLD_FILES+=usr/share/doc/papers/malloc.ascii.gz
OLD_FILES+=usr/share/doc/papers/newvm.ascii.gz
OLD_FILES+=usr/share/doc/papers/releng.ascii.gz
OLD_FILES+=usr/share/doc/papers/sysperf.ascii.gz
OLD_FILES+=usr/share/doc/papers/timecounter.ascii.gz
OLD_DIRS+=usr/share/doc/papers
OLD_FILES+=usr/share/doc/psd/01.cacm/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/01.cacm
OLD_FILES+=usr/share/doc/psd/02.implement/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/02.implement
OLD_FILES+=usr/share/doc/psd/03.iosys/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/03.iosys
OLD_FILES+=usr/share/doc/psd/04.uprog/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/04.uprog
OLD_FILES+=usr/share/doc/psd/05.sysman/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/05.sysman
OLD_FILES+=usr/share/doc/psd/06.Clang/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/06.Clang
OLD_FILES+=usr/share/doc/psd/12.make/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/12.make
OLD_FILES+=usr/share/doc/psd/13.rcs/paper.ascii.gz
OLD_FILES+=usr/share/doc/psd/13.rcs/rcs_func.ascii.gz
OLD_DIRS+=usr/share/doc/psd/13.rcs
OLD_FILES+=usr/share/doc/psd/15.yacc/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/15.yacc
OLD_FILES+=usr/share/doc/psd/16.lex/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/16.lex
OLD_FILES+=usr/share/doc/psd/17.m4/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/17.m4
OLD_FILES+=usr/share/doc/psd/18.gprof/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/18.gprof
OLD_FILES+=usr/share/doc/psd/20.ipctut/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/20.ipctut
OLD_FILES+=usr/share/doc/psd/21.ipc/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/21.ipc
OLD_FILES+=usr/share/doc/psd/22.rpcgen/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/22.rpcgen
OLD_FILES+=usr/share/doc/psd/23.rpc/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/23.rpc
OLD_FILES+=usr/share/doc/psd/24.xdr/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/24.xdr
OLD_FILES+=usr/share/doc/psd/25.xdrrfc/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/25.xdrrfc
OLD_FILES+=usr/share/doc/psd/26.rpcrfc/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/26.rpcrfc
OLD_FILES+=usr/share/doc/psd/27.nfsrfc/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/27.nfsrfc
OLD_FILES+=usr/share/doc/psd/Title.ascii.gz
OLD_FILES+=usr/share/doc/psd/contents.ascii.gz
OLD_DIRS+=usr/share/doc/psd
OLD_FILES+=usr/share/doc/smm/01.setup/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/01.setup
OLD_FILES+=usr/share/doc/smm/02.config/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/02.config
OLD_FILES+=usr/share/doc/smm/03.fsck/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/03.fsck
OLD_FILES+=usr/share/doc/smm/04.quotas/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/04.quotas
OLD_FILES+=usr/share/doc/smm/05.fastfs/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/05.fastfs
OLD_FILES+=usr/share/doc/smm/06.nfs/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/06.nfs
OLD_FILES+=usr/share/doc/smm/07.lpd/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/07.lpd
OLD_FILES+=usr/share/doc/smm/08.sendmailop/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/08.sendmailop
OLD_FILES+=usr/share/doc/smm/11.timedop/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/11.timedop
OLD_FILES+=usr/share/doc/smm/12.timed/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/12.timed
OLD_FILES+=usr/share/doc/smm/18.net/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/18.net
OLD_FILES+=usr/share/doc/smm/Title.ascii.gz
OLD_FILES+=usr/share/doc/smm/contents.ascii.gz
OLD_DIRS+=usr/share/doc/smm
OLD_FILES+=usr/share/doc/usd/04.csh/paper.ascii.gz
OLD_DIRS+=usr/share/doc/usd/04.csh
OLD_FILES+=usr/share/doc/usd/05.dc/paper.ascii.gz
OLD_DIRS+=usr/share/doc/usd/05.dc
OLD_FILES+=usr/share/doc/usd/06.bc/paper.ascii.gz
OLD_DIRS+=usr/share/doc/usd/06.bc
OLD_FILES+=usr/share/doc/usd/07.mail/paper.ascii.gz
OLD_DIRS+=usr/share/doc/usd/07.mail
OLD_FILES+=usr/share/doc/usd/10.exref/paper.ascii.gz
OLD_FILES+=usr/share/doc/usd/10.exref/summary.ascii.gz
OLD_DIRS+=usr/share/doc/usd/10.exref
OLD_FILES+=usr/share/doc/usd/11.edit/paper.ascii.gz
OLD_DIRS+=usr/share/doc/usd/11.edit
OLD_FILES+=usr/share/doc/usd/12.vi/paper.ascii.gz
OLD_FILES+=usr/share/doc/usd/12.vi/summary.ascii.gz
OLD_FILES+=usr/share/doc/usd/12.vi/viapwh.ascii.gz
OLD_DIRS+=usr/share/doc/usd/12.vi
OLD_FILES+=usr/share/doc/usd/13.viref/paper.ascii.gz
OLD_DIRS+=usr/share/doc/usd/13.viref
OLD_FILES+=usr/share/doc/usd/18.msdiffs/paper.ascii.gz
OLD_DIRS+=usr/share/doc/usd/18.msdiffs
OLD_FILES+=usr/share/doc/usd/19.memacros/paper.ascii.gz
OLD_DIRS+=usr/share/doc/usd/19.memacros
OLD_FILES+=usr/share/doc/usd/20.meref/paper.ascii.gz
OLD_DIRS+=usr/share/doc/usd/20.meref
OLD_FILES+=usr/share/doc/usd/21.troff/paper.ascii.gz
OLD_DIRS+=usr/share/doc/usd/21.troff
OLD_FILES+=usr/share/doc/usd/22.trofftut/paper.ascii.gz
OLD_DIRS+=usr/share/doc/usd/22.trofftut
OLD_FILES+=usr/share/doc/usd/Title.ascii.gz
OLD_FILES+=usr/share/doc/usd/contents.ascii.gz
OLD_DIRS+=usr/share/doc/usd
# 20170523: 64-bit inode support, library version bumps
OLD_LIBS+=lib/libzfs.so.2
OLD_LIBS+=usr/lib/libarchive.so.6
OLD_LIBS+=usr/lib/libmilter.so.5
OLD_LIBS+=usr/lib32/libzfs.so.2
OLD_LIBS+=usr/lib32/libarchive.so.6
OLD_LIBS+=usr/lib32/libmilter.so.5
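# OLD_LIBS entries like the ones above are removed by "make delete-old-libs"
# rather than "make delete-old". A hedged sketch for spot-checking whether
# locally built software still links against a bumped library first:
# ldd /usr/local/bin/* 2>/dev/null | grep libarchive.so.6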
# 20170427: NATM configuration support removed
OLD_FILES+=etc/rc.d/atm1
OLD_FILES+=etc/rc.d/atm2
OLD_FILES+=etc/rc.d/atm3
# 20170424: NATM support removed
OLD_FILES+=rescue/atmconfig
OLD_FILES+=sbin/atmconfig
OLD_FILES+=usr/include/bsnmp/snmp_atm.h
OLD_FILES+=usr/include/dev/utopia/idtphy.h
OLD_FILES+=usr/include/dev/utopia/suni.h
OLD_FILES+=usr/include/dev/utopia/utopia.h
OLD_FILES+=usr/include/dev/utopia/utopia_priv.h
OLD_DIRS+=usr/include/dev/utopia
OLD_FILES+=usr/include/net/if_atm.h
OLD_FILES+=usr/include/netgraph/atm/ng_atm.h
OLD_FILES+=usr/include/netinet/if_atm.h
OLD_FILES+=usr/include/netnatm/natm.h
OLD_FILES+=usr/lib/debug/sbin/atmconfig.debug
OLD_FILES+=usr/lib/debug/usr/lib/snmp_atm.so.6.debug
OLD_FILES+=usr/lib/snmp_atm.so
OLD_FILES+=usr/lib/snmp_atm.so.6
OLD_FILES+=usr/share/doc/atm/atmconfig.help
OLD_FILES+=usr/share/doc/atm/atmconfig_device.help
OLD_DIRS+=usr/share/doc/atm
OLD_FILES+=usr/share/man/man3/snmp_atm.3.gz
OLD_FILES+=usr/share/man/man4/en.4.gz
OLD_FILES+=usr/share/man/man4/fatm.4.gz
OLD_FILES+=usr/share/man/man4/hatm.4.gz
OLD_FILES+=usr/share/man/man4/if_en.4.gz
OLD_FILES+=usr/share/man/man4/if_fatm.4.gz
OLD_FILES+=usr/share/man/man4/if_hatm.4.gz
OLD_FILES+=usr/share/man/man4/if_patm.4.gz
OLD_FILES+=usr/share/man/man4/natm.4.gz
OLD_FILES+=usr/share/man/man4/natmip.4.gz
OLD_FILES+=usr/share/man/man4/ng_atm.4.gz
OLD_FILES+=usr/share/man/man4/patm.4.gz
OLD_FILES+=usr/share/man/man4/utopia.4.gz
OLD_FILES+=usr/share/man/man8/atmconfig.8.gz
OLD_FILES+=usr/share/man/man9/utopia.9.gz
OLD_FILES+=usr/share/snmp/defs/atm_freebsd.def
OLD_FILES+=usr/share/snmp/defs/atm_tree.def
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-ATM-FREEBSD-MIB.txt
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-ATM.txt
# 20170420: remove GNU diff
OLD_FILES+=usr/share/man/man7/diff.7.gz
# 20170322: rename <x> to <x>_test to match the FreeBSD test suite naming scheme
OLD_FILES+=usr/tests/usr.bin/col/col
OLD_FILES+=usr/tests/usr.bin/diff/diff
OLD_FILES+=usr/tests/usr.bin/ident/ident
OLD_FILES+=usr/tests/usr.bin/mkimg/mkimg
OLD_FILES+=usr/tests/usr.bin/sdiff/sdiff
OLD_FILES+=usr/tests/usr.bin/soelim/soelim
OLD_FILES+=usr/tests/usr.sbin/pw/pw_config
OLD_FILES+=usr/tests/usr.sbin/pw/pw_etcdir
OLD_FILES+=usr/tests/usr.sbin/pw/pw_groupadd
OLD_FILES+=usr/tests/usr.sbin/pw/pw_groupdel
OLD_FILES+=usr/tests/usr.sbin/pw/pw_groupmod
OLD_FILES+=usr/tests/usr.sbin/pw/pw_lock
OLD_FILES+=usr/tests/usr.sbin/pw/pw_useradd
OLD_FILES+=usr/tests/usr.sbin/pw/pw_userdel
OLD_FILES+=usr/tests/usr.sbin/pw/pw_usermod
OLD_FILES+=usr/tests/usr.sbin/pw/pw_usernext
# 20170322: garbage collect old references to igb(4)
OLD_FILES+=usr/share/man/man4/if_igb.4.gz
OLD_FILES+=usr/share/man/man4/igb.4.gz
# 20170319: io_test requires zh_TW.Big5 locale.
OLD_FILES+=usr/tests/lib/libc/locale/io_test
# 20170319: remove nls for unsupported Big5* locales
OLD_DIRS+=usr/share/nls/zh_HK.Big5HKSCS
OLD_DIRS+=usr/share/nls/zh_TW.Big5
# 20170313: move .../sys/geom/eli/... to .../sys/geom/class/eli/...
OLD_FILES+=usr/tests/sys/geom/eli/pbkdf2/pbkdf2
OLD_FILES+=usr/tests/sys/geom/eli/pbkdf2/Kyuafile
OLD_FILES+=usr/tests/sys/geom/eli/Kyuafile
OLD_DIRS+=usr/tests/sys/geom/eli/pbkdf2
OLD_DIRS+=usr/tests/sys/geom/eli
# 20170313: sbin/ipftest and ipresend temporarily disconnected.
OLD_FILES+=sbin/ipftest
OLD_FILES+=sbin/ipresend
# 20170311: Remove WITHOUT_MANDOCDB option
OLD_FILES+=usr/share/man/man1/makewhatis.1.gz
# 20170308: rename some tests
OLD_FILES+=usr/tests/bin/pwait/pwait
OLD_FILES+=usr/tests/usr.bin/timeout/timeout
# 20170307: remove pcap-int.h
OLD_FILES+=usr/include/pcap-int.h
# 20170302: new libc++ import which bumps version from 3.9.1 to 4.0.0.
OLD_FILES+=usr/include/c++/v1/__undef___deallocate
OLD_FILES+=usr/include/c++/v1/tr1/__undef___deallocate
# 20170302: new clang import which bumps version from 3.9.1 to 4.0.0.
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/esan_interface.h
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/3.9.1/include/sanitizer
OLD_FILES+=usr/lib/clang/3.9.1/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/3.9.1/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/3.9.1/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/3.9.1/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/3.9.1/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/3.9.1/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/3.9.1/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/3.9.1/include/adxintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/altivec.h
OLD_FILES+=usr/lib/clang/3.9.1/include/ammintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/arm_acle.h
OLD_FILES+=usr/lib/clang/3.9.1/include/arm_neon.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avxintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/cpuid.h
OLD_FILES+=usr/lib/clang/3.9.1/include/cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/3.9.1/include/emmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/htmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/immintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/3.9.1/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/3.9.1/include/mmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/module.modulemap
OLD_FILES+=usr/lib/clang/3.9.1/include/msa.h
OLD_FILES+=usr/lib/clang/3.9.1/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/opencl-c.h
OLD_FILES+=usr/lib/clang/3.9.1/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/s390intrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/shaintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/smmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/vadefs.h
OLD_FILES+=usr/lib/clang/3.9.1/include/vecintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/x86intrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/xopintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/3.9.1/include
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/3.9.1/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.9.1/lib
OLD_DIRS+=usr/lib/clang/3.9.1
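# A sketch of how a versioned block like the one above can be regenerated,
# assuming the old tree is still installed under /usr; sort -r keeps child
# directories ahead of their parents so they can be removed in order:
# cd /usr
# find lib/clang/3.9.1 -type f | sort | sed 's|^|OLD_FILES+=usr/|'
# find lib/clang/3.9.1 -type d | sort -r | sed 's|^|OLD_DIRS+=usr/|'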
# 20170226: SVR4 compatibility removed
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/share/man/man4/streams.4.gz
OLD_FILES+=usr/share/man/man4/svr4.4.gz
.endif
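# The .if/.endif above (and similar conditionals below) register their
# entries only for the matching TARGET_ARCH, which normally defaults to the
# build host's architecture; a quick way to see that default is e.g.:
# uname -p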
# 20170219: OpenPAM RADULA upgrade removed the libpam tests
OLD_FILES+=usr/tests/lib/libpam/Kyuafile
OLD_FILES+=usr/tests/lib/libpam/t_openpam_ctype
OLD_FILES+=usr/tests/lib/libpam/t_openpam_readlinev
OLD_FILES+=usr/tests/lib/libpam/t_openpam_readword
OLD_DIRS+=usr/tests/lib/libpam
# 20170206: remove bdes(1)
OLD_FILES+=usr/bin/bdes
OLD_FILES+=usr/lib/debug/usr/bin/bdes.debug
OLD_FILES+=usr/share/man/man1/bdes.1.gz
# 20170206: merged projects/ipsec
OLD_FILES+=usr/include/netinet/ip_ipsec.h
OLD_FILES+=usr/include/netinet6/ip6_ipsec.h
# 20170128: remove pc98 support
OLD_FILES+=usr/include/dev/ic/i8251.h
OLD_FILES+=usr/include/dev/ic/i8255.h
OLD_FILES+=usr/include/dev/ic/rsa.h
OLD_FILES+=usr/include/dev/ic/wd33c93reg.h
OLD_FILES+=usr/include/sys/disk/pc98.h
OLD_FILES+=usr/include/sys/diskpc98.h
OLD_FILES+=usr/share/man/man4/i386/ct.4.gz
OLD_FILES+=usr/share/man/man4/i386/snc.4.gz
OLD_FILES+=usr/share/syscons/keymaps/jp.pc98.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/jp.pc98.kbd
OLD_FILES+=usr/share/vt/keymaps/jp.pc98.iso.kbd
OLD_FILES+=usr/share/vt/keymaps/jp.pc98.kbd
# 20170110: Four files from ggate tests consolidated into one
OLD_FILES+=usr/tests/sys/geom/class/gate/1_test
OLD_FILES+=usr/tests/sys/geom/class/gate/2_test
OLD_FILES+=usr/tests/sys/geom/class/gate/3_test
OLD_FILES+=usr/tests/sys/geom/class/gate/conf.sh
# 20170103: libbsnmptools.so made into an INTERNALLIB
OLD_FILES+=usr/lib/libbsnmptools.a
OLD_FILES+=usr/lib/libbsnmptools_p.a
OLD_LIBS+=usr/lib/libbsnmptools.so.0
OLD_FILES+=usr/lib/libbsnmptools.so
# 20170102: sysdecode_getfsstat_flags() renamed to sysdecode_getfsstat_mode()
OLD_FILES+=usr/share/man/man3/sysdecode_getfsstat_flags.3.gz
# 20161230: libarchive ACL pax test renamed to test_acl_pax_posix1e.tar.uu
OLD_FILES+=usr/tests/lib/libarchive/test_acl_pax.tar.uu
# 20161229: Three files from gnop tests consolidated into one
OLD_FILES+=usr/tests/sys/geom/class/nop/1_test
OLD_FILES+=usr/tests/sys/geom/class/nop/2_test
OLD_FILES+=usr/tests/sys/geom/class/nop/conf.sh
# 20161217: new clang import which bumps version from 3.9.0 to 3.9.1.
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/esan_interface.h
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/3.9.0/include/sanitizer
OLD_FILES+=usr/lib/clang/3.9.0/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/3.9.0/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/3.9.0/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/3.9.0/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/3.9.0/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/3.9.0/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/3.9.0/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/3.9.0/include/adxintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/altivec.h
OLD_FILES+=usr/lib/clang/3.9.0/include/ammintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/arm_acle.h
OLD_FILES+=usr/lib/clang/3.9.0/include/arm_neon.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avxintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/cpuid.h
OLD_FILES+=usr/lib/clang/3.9.0/include/cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/3.9.0/include/emmintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/htmintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/immintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/3.9.0/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/3.9.0/include/mmintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/module.modulemap
OLD_FILES+=usr/lib/clang/3.9.0/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/opencl-c.h
OLD_FILES+=usr/lib/clang/3.9.0/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/s390intrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/shaintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/smmintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/vadefs.h
OLD_FILES+=usr/lib/clang/3.9.0/include/vecintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/x86intrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/xopintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/3.9.0/include
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/3.9.0/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.9.0/lib
OLD_DIRS+=usr/lib/clang/3.9.0
# 20161205: libproc version bump
OLD_LIBS+=usr/lib/libproc.so.3
OLD_LIBS+=usr/lib32/libproc.so.3
# 20161127: Remove vm_page_cache(9)
OLD_FILES+=usr/share/man/man9/vm_page_cache.9.gz
# 20161124: new clang import which bumps version from 3.8.0 to 3.9.0.
OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/3.8.0/include/sanitizer
OLD_FILES+=usr/lib/clang/3.8.0/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/3.8.0/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/3.8.0/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/3.8.0/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/3.8.0/include/adxintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/altivec.h
OLD_FILES+=usr/lib/clang/3.8.0/include/ammintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/arm_acle.h
OLD_FILES+=usr/lib/clang/3.8.0/include/arm_neon.h
OLD_FILES+=usr/lib/clang/3.8.0/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/avxintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/cpuid.h
OLD_FILES+=usr/lib/clang/3.8.0/include/cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/3.8.0/include/emmintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/htmintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/immintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/3.8.0/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/3.8.0/include/mmintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/module.modulemap
OLD_FILES+=usr/lib/clang/3.8.0/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/s390intrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/shaintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/smmintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/vadefs.h
OLD_FILES+=usr/lib/clang/3.8.0/include/vecintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/x86intrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/xopintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/3.8.0/include
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/3.8.0/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.8.0/lib
OLD_DIRS+=usr/lib/clang/3.8.0
# 20161121: Hyper-V manuals only apply to amd64 and i386.
.if ${TARGET_ARCH} != "amd64" && ${TARGET_ARCH} != "i386"
OLD_FILES+=usr/share/man/man4/hv_kvp.4.gz
OLD_FILES+=usr/share/man/man4/hv_netvsc.4.gz
OLD_FILES+=usr/share/man/man4/hv_storvsc.4.gz
OLD_FILES+=usr/share/man/man4/hv_utils.4.gz
OLD_FILES+=usr/share/man/man4/hv_vmbus.4.gz
OLD_FILES+=usr/share/man/man4/hv_vss.4.gz
.endif
# 20161118: Remove hv_ata_pci_disengage(4)
OLD_FILES+=usr/share/man/man4/hv_ata_pci_disengage.4.gz
# 20161017: urtwn(4) was merged into rtwn(4)
OLD_FILES+=usr/share/man/man4/urtwn.4.gz
OLD_FILES+=usr/share/man/man4/urtwnfw.4.gz
# 20161015: Remove GNU rcs
OLD_FILES+=usr/bin/ci
OLD_FILES+=usr/bin/co
OLD_FILES+=usr/bin/merge
OLD_FILES+=usr/bin/rcs
OLD_FILES+=usr/bin/rcsclean
OLD_FILES+=usr/bin/rcsdiff
OLD_FILES+=usr/bin/rcsfreeze
OLD_FILES+=usr/bin/rcsmerge
OLD_FILES+=usr/bin/rlog
OLD_FILES+=usr/share/doc/psd/13.rcs/paper.ascii.gz
OLD_FILES+=usr/share/doc/psd/13.rcs/rcs_func.ascii.gz
OLD_DIRS+=usr/share/doc/psd/13.rcs
OLD_FILES+=usr/share/man/man1/ci.1.gz
OLD_FILES+=usr/share/man/man1/co.1.gz
OLD_FILES+=usr/share/man/man1/merge.1.gz
OLD_FILES+=usr/share/man/man1/rcs.1.gz
OLD_FILES+=usr/share/man/man1/rcsclean.1.gz
OLD_FILES+=usr/share/man/man1/rcsdiff.1.gz
OLD_FILES+=usr/share/man/man1/rcsfreeze.1.gz
OLD_FILES+=usr/share/man/man1/rcsintro.1.gz
OLD_FILES+=usr/share/man/man1/rcsmerge.1.gz
OLD_FILES+=usr/share/man/man1/rlog.1.gz
OLD_FILES+=usr/share/man/man5/rcsfile.5.gz
# 20161010: remove link to removed m_getclr(9) macro
OLD_FILES+=usr/share/man/man9/m_getclr.9.gz
# 20161003: MK_ELFCOPY_AS_OBJCOPY option retired
OLD_FILES+=usr/bin/elfcopy
OLD_FILES+=usr/share/man/man1/elfcopy.1.gz
# 20160906: libkqueue tests moved to /usr/tests/sys/kqueue/libkqueue
OLD_FILES+=usr/tests/sys/kqueue/kqtest
OLD_FILES+=usr/tests/sys/kqueue/kqueue_test
# 20160903: idle page zeroing support removed
OLD_FILES+=usr/share/man/man9/pmap_zero_idle.9.gz
# 20160901: Remove digi(4)
OLD_FILES+=usr/share/man/man4/digi.4.gz
# 20160819: Remove ie(4)
OLD_FILES+=usr/share/man/man4/i386/ie.4.gz
# 20160819: Remove spic(4)
OLD_FILES+=usr/share/man/man4/spic.4.gz
# 20160819: Remove wl(4) and wlconfig(8)
OLD_FILES+=usr/share/man/man4/i386/wl.4.gz
OLD_FILES+=usr/sbin/wlconfig
OLD_FILES+=usr/share/man/man8/i386/wlconfig.8.gz
# 20160819: Remove si(4) and sicontrol(8)
OLD_FILES+=usr/share/man/man4/si.4.gz
OLD_FILES+=usr/sbin/sicontrol
OLD_FILES+=usr/share/man/man8/sicontrol.8.gz
# 20160819: Remove scd(4)
OLD_FILES+=usr/share/man/man4/scd.4.gz
# 20160815: Remove mcd(4)
OLD_FILES+=usr/share/man/man4/mcd.4.gz
# 20160805: lockmgr_waiters(9) removed
OLD_FILES+=usr/share/man/man9/lockmgr_waiters.9.gz
# 20160703: POSIXify locales with variants
OLD_FILES+=usr/share/locale/zh_Hant_TW.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_Hant_TW.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_Hant_TW.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_Hant_TW.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_Hant_TW.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_Hant_TW.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/zh_Hant_TW.UTF-8
OLD_FILES+=usr/share/locale/zh_Hant_TW.Big5/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_Hant_TW.Big5/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_Hant_TW.Big5/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_Hant_TW.Big5/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_Hant_TW.Big5/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_Hant_TW.Big5/LC_TIME
OLD_DIRS+=usr/share/locale/zh_Hant_TW.Big5
OLD_FILES+=usr/share/locale/zh_Hant_HK.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_Hant_HK.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_Hant_HK.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_Hant_HK.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_Hant_HK.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_Hant_HK.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/zh_Hant_HK.UTF-8
OLD_FILES+=usr/share/locale/zh_Hans_CN.eucCN/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_Hans_CN.eucCN/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_Hans_CN.eucCN/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_Hans_CN.eucCN/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_Hans_CN.eucCN/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_Hans_CN.eucCN/LC_TIME
OLD_DIRS+=usr/share/locale/zh_Hans_CN.eucCN
OLD_FILES+=usr/share/locale/zh_Hans_CN.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_Hans_CN.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_Hans_CN.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_Hans_CN.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_Hans_CN.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_Hans_CN.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/zh_Hans_CN.UTF-8
OLD_FILES+=usr/share/locale/zh_Hans_CN.GBK/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_Hans_CN.GBK/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_Hans_CN.GBK/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_Hans_CN.GBK/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_Hans_CN.GBK/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_Hans_CN.GBK/LC_TIME
OLD_DIRS+=usr/share/locale/zh_Hans_CN.GBK
OLD_FILES+=usr/share/locale/zh_Hans_CN.GB2312/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_Hans_CN.GB2312/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_Hans_CN.GB2312/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_Hans_CN.GB2312/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_Hans_CN.GB2312/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_Hans_CN.GB2312/LC_TIME
OLD_DIRS+=usr/share/locale/zh_Hans_CN.GB2312
OLD_FILES+=usr/share/locale/zh_Hans_CN.GB18030/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_Hans_CN.GB18030/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_Hans_CN.GB18030/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_Hans_CN.GB18030/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_Hans_CN.GB18030/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_Hans_CN.GB18030/LC_TIME
OLD_DIRS+=usr/share/locale/zh_Hans_CN.GB18030
OLD_FILES+=usr/share/locale/sr_Latn_RS.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/sr_Latn_RS.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/sr_Latn_RS.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/sr_Latn_RS.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/sr_Latn_RS.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/sr_Latn_RS.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/sr_Latn_RS.UTF-8
OLD_FILES+=usr/share/locale/sr_Latn_RS.ISO8859-2/LC_COLLATE
OLD_FILES+=usr/share/locale/sr_Latn_RS.ISO8859-2/LC_CTYPE
OLD_FILES+=usr/share/locale/sr_Latn_RS.ISO8859-2/LC_MESSAGES
OLD_FILES+=usr/share/locale/sr_Latn_RS.ISO8859-2/LC_MONETARY
OLD_FILES+=usr/share/locale/sr_Latn_RS.ISO8859-2/LC_NUMERIC
OLD_FILES+=usr/share/locale/sr_Latn_RS.ISO8859-2/LC_TIME
OLD_DIRS+=usr/share/locale/sr_Latn_RS.ISO8859-2
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/sr_Cyrl_RS.UTF-8
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.ISO8859-5/LC_COLLATE
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.ISO8859-5/LC_CTYPE
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.ISO8859-5/LC_MESSAGES
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.ISO8859-5/LC_MONETARY
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.ISO8859-5/LC_NUMERIC
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.ISO8859-5/LC_TIME
OLD_DIRS+=usr/share/locale/sr_Cyrl_RS.ISO8859-5
OLD_FILES+=usr/share/locale/mn_Cyrl_MN.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/mn_Cyrl_MN.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/mn_Cyrl_MN.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/mn_Cyrl_MN.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/mn_Cyrl_MN.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/mn_Cyrl_MN.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/mn_Cyrl_MN.UTF-8
OLD_FILES+=usr/share/locale/kk_Cyrl_KZ.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/kk_Cyrl_KZ.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/kk_Cyrl_KZ.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/kk_Cyrl_KZ.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/kk_Cyrl_KZ.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/kk_Cyrl_KZ.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/kk_Cyrl_KZ.UTF-8
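# The variant-qualified locale directories above were superseded by their
# POSIX-named counterparts (e.g. zh_Hant_TW.UTF-8 -> zh_TW.UTF-8). A hedged
# sketch for listing any stale three-component locale directories left over:
# ls -d /usr/share/locale/*_*_*.* 2>/dev/null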
# 20160608: removed pam_verbose_error
OLD_LIBS+=usr/lib/libpam.so.5
OLD_LIBS+=usr/lib/pam_chroot.so.5
OLD_LIBS+=usr/lib/pam_deny.so.5
OLD_LIBS+=usr/lib/pam_echo.so.5
OLD_LIBS+=usr/lib/pam_exec.so.5
OLD_LIBS+=usr/lib/pam_ftpusers.so.5
OLD_LIBS+=usr/lib/pam_group.so.5
OLD_LIBS+=usr/lib/pam_guest.so.5
OLD_LIBS+=usr/lib/pam_krb5.so.5
OLD_LIBS+=usr/lib/pam_ksu.so.5
OLD_LIBS+=usr/lib/pam_lastlog.so.5
OLD_LIBS+=usr/lib/pam_login_access.so.5
OLD_LIBS+=usr/lib/pam_nologin.so.5
OLD_LIBS+=usr/lib/pam_opie.so.5
OLD_LIBS+=usr/lib/pam_opieaccess.so.5
OLD_LIBS+=usr/lib/pam_passwdqc.so.5
OLD_LIBS+=usr/lib/pam_permit.so.5
OLD_LIBS+=usr/lib/pam_radius.so.5
OLD_LIBS+=usr/lib/pam_rhosts.so.5
OLD_LIBS+=usr/lib/pam_rootok.so.5
OLD_LIBS+=usr/lib/pam_securetty.so.5
OLD_LIBS+=usr/lib/pam_self.so.5
OLD_LIBS+=usr/lib/pam_ssh.so.5
OLD_LIBS+=usr/lib/pam_tacplus.so.5
OLD_LIBS+=usr/lib/pam_unix.so.5
OLD_LIBS+=usr/lib32/libpam.so.5
OLD_LIBS+=usr/lib32/pam_chroot.so.5
OLD_LIBS+=usr/lib32/pam_deny.so.5
OLD_LIBS+=usr/lib32/pam_echo.so.5
OLD_LIBS+=usr/lib32/pam_exec.so.5
OLD_LIBS+=usr/lib32/pam_ftpusers.so.5
OLD_LIBS+=usr/lib32/pam_group.so.5
OLD_LIBS+=usr/lib32/pam_guest.so.5
OLD_LIBS+=usr/lib32/pam_krb5.so.5
OLD_LIBS+=usr/lib32/pam_ksu.so.5
OLD_LIBS+=usr/lib32/pam_lastlog.so.5
OLD_LIBS+=usr/lib32/pam_login_access.so.5
OLD_LIBS+=usr/lib32/pam_nologin.so.5
OLD_LIBS+=usr/lib32/pam_opie.so.5
OLD_LIBS+=usr/lib32/pam_opieaccess.so.5
OLD_LIBS+=usr/lib32/pam_passwdqc.so.5
OLD_LIBS+=usr/lib32/pam_permit.so.5
OLD_LIBS+=usr/lib32/pam_radius.so.5
OLD_LIBS+=usr/lib32/pam_rhosts.so.5
OLD_LIBS+=usr/lib32/pam_rootok.so.5
OLD_LIBS+=usr/lib32/pam_securetty.so.5
OLD_LIBS+=usr/lib32/pam_self.so.5
OLD_LIBS+=usr/lib32/pam_ssh.so.5
OLD_LIBS+=usr/lib32/pam_tacplus.so.5
OLD_LIBS+=usr/lib32/pam_unix.so.5
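# Stale OLD_LIBS entries such as the ones above are consumed by the
# delete-old-libs target. A minimal sketch of the usual cleanup after an
# upgrade (-DBATCH_DELETE_OLD_FILES answers the per-file prompts):
# cd /usr/src
# make check-old
# make -DBATCH_DELETE_OLD_FILES delete-old delete-old-libs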
# 20160523: remove extraneous ALTQ files
OLD_FILES+=usr/include/altq/altq_codel.h
OLD_FILES+=usr/include/altq/altq_fairq.h
# 20160519: remove DTrace Toolkit from base
OLD_FILES+=usr/sbin/dtruss
OLD_FILES+=usr/share/dtrace/toolkit/execsnoop
OLD_FILES+=usr/share/dtrace/toolkit/hotkernel
OLD_FILES+=usr/share/dtrace/toolkit/hotuser
OLD_FILES+=usr/share/dtrace/toolkit/opensnoop
OLD_FILES+=usr/share/dtrace/toolkit/procsystime
OLD_DIRS+=usr/share/dtrace/toolkit
OLD_FILES+=usr/share/man/man1/dtruss.1.gz
# 20160519: stale MLINK removed
OLD_FILES+=usr/share/man/man9/rman_await_resource.9.gz
# 20160517: ReiserFS removed
OLD_FILES+=usr/share/man/man5/reiserfs.5.gz
# 20160504: tests rework
OLD_FILES+=usr/tests/lib/libc/regex/data/README
# 20160430: kvm_getfiles(3) removed from kvm(3)
OLD_LIBS+=lib/libkvm.so.6
OLD_LIBS+=usr/lib32/libkvm.so.6
OLD_FILES+=usr/share/man/man3/kvm_getfiles.3.gz
# 20160423: remove mroute6d
OLD_FILES+=etc/rc.d/mroute6d
# 20160419: rename units.lib -> definitions.units
OLD_FILES+=usr/share/misc/units.lib
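# units(1) now reads definitions.units by default; a short sketch, assuming
# the standard -f flag to name the data file explicitly:
# units -f /usr/share/misc/definitions.units '2 furlongs' 'meters'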
# 20160419: remove Big5HKSCS locales
OLD_FILES+=usr/share/locale/zh_HK.Big5HKSCS/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_HK.Big5HKSCS/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_HK.Big5HKSCS/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_HK.Big5HKSCS/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_HK.Big5HKSCS/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_HK.Big5HKSCS/LC_TIME
OLD_DIRS+=usr/share/locale/zh_HK.Big5HKSCS
OLD_FILES+=usr/share/locale/zh_Hant_HK.Big5HKSCS/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_Hant_HK.Big5HKSCS/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_Hant_HK.Big5HKSCS/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_Hant_HK.Big5HKSCS/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_Hant_HK.Big5HKSCS/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_Hant_HK.Big5HKSCS/LC_TIME
OLD_DIRS+=usr/share/locale/zh_Hant_HK.Big5HKSCS
# 20160317: rman_res_t size bump to uintmax_t
OLD_LIBS+=usr/lib/libdevinfo.so.5
OLD_LIBS+=usr/lib32/libdevinfo.so.5
# 20160305: new clang import which bumps version from 3.7.1 to 3.8.0.
OLD_FILES+=usr/bin/macho-dump
OLD_FILES+=usr/bin/tblgen
OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/3.7.1/include/sanitizer
OLD_FILES+=usr/lib/clang/3.7.1/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/3.7.1/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/3.7.1/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/3.7.1/include/adxintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/altivec.h
OLD_FILES+=usr/lib/clang/3.7.1/include/ammintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/arm_acle.h
OLD_FILES+=usr/lib/clang/3.7.1/include/arm_neon.h
OLD_FILES+=usr/lib/clang/3.7.1/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/avxintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/cpuid.h
OLD_FILES+=usr/lib/clang/3.7.1/include/cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/3.7.1/include/emmintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/htmintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/immintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/3.7.1/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/3.7.1/include/mmintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/module.modulemap
OLD_FILES+=usr/lib/clang/3.7.1/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/s390intrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/shaintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/smmintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/vadefs.h
OLD_FILES+=usr/lib/clang/3.7.1/include/vecintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/x86intrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/xopintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/3.7.1/include
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/3.7.1/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.7.1/lib
OLD_DIRS+=usr/lib/clang/3.7.1
# 20160301: Remove taskqueue_enqueue_fast
OLD_FILES+=usr/share/man/man9/taskqueue_enqueue_fast.9.gz
# 20160225: Remove casperd and libcapsicum.
OLD_FILES+=sbin/casperd
OLD_FILES+=etc/rc.d/casperd
OLD_FILES+=usr/share/man/man8/casperd.8.gz
OLD_FILES+=usr/include/libcapsicum.h
OLD_FILES+=usr/include/libcapsicum_service.h
OLD_FILES+=usr/share/man/man3/libcapsicum.3.gz
OLD_FILES+=usr/include/libcapsicum_dns.h
OLD_FILES+=usr/include/libcapsicum_grp.h
OLD_FILES+=usr/include/libcapsicum_impl.h
OLD_FILES+=usr/include/libcapsicum_pwd.h
OLD_FILES+=usr/include/libcapsicum_random.h
OLD_FILES+=usr/include/libcapsicum_sysctl.h
OLD_FILES+=libexec/casper/dns
OLD_FILES+=libexec/casper/grp
OLD_FILES+=libexec/casper/pwd
OLD_FILES+=libexec/casper/random
OLD_FILES+=libexec/casper/sysctl
OLD_FILES+=libexec/casper/.debug/random.debug
OLD_FILES+=libexec/casper/.debug/dns.debug
OLD_FILES+=libexec/casper/.debug/sysctl.debug
OLD_FILES+=libexec/casper/.debug/pwd.debug
OLD_FILES+=libexec/casper/.debug/grp.debug
OLD_DIRS+=libexec/casper/.debug
OLD_DIRS+=libexec/casper
OLD_FILES+=usr/lib/libcapsicum.a
OLD_FILES+=usr/lib/libcapsicum.so
OLD_LIBS+=lib/libcapsicum.so.0
OLD_FILES+=usr/lib/libcapsicum_p.a
OLD_FILES+=usr/lib32/libcapsicum.a
OLD_FILES+=usr/lib32/libcapsicum.so
OLD_LIBS+=usr/lib32/libcapsicum.so.0
OLD_FILES+=usr/lib32/libcapsicum_p.a
# 20160223: functionality from mkulzma(1) merged into mkuzip(1)
OLD_FILES+=usr/bin/mkulzma
OLD_FILES+=usr/share/man/man4/geom_uncompress.4.gz
OLD_FILES+=usr/share/man/man8/mkulzma.8.gz
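# mkuzip(8) now covers the LZMA case itself; a hedged sketch of the
# replacement invocation, assuming the -L (LZMA) flag added by the merge:
# mkuzip -L -o /tmp/image.uzip /path/to/image.img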
# 20160211: Remove obsolete unbound-control-setup
OLD_FILES+=usr/sbin/unbound-control-setup
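# For the base-system resolver the local-unbound-setup(8) script remains;
# a hedged sketch of bootstrapping local_unbound with it:
# local-unbound-setup
# sysrc local_unbound_enable=YES
# service local_unbound start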
# 20160121: cc.h moved
OLD_FILES+=usr/include/netinet/cc.h
# 20160116: Update mandoc to cvs snapshot 20160116
OLD_FILES+=usr/share/mdocml/example.style.css
OLD_FILES+=usr/share/mdocml/style.css
OLD_DIRS+=usr/share/mdocml
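# With the shipped stylesheets gone, HTML output can name a local one
# instead; a sketch assuming mandoc's -O style output option (the
# stylesheet path is illustrative):
# mandoc -Thtml -O style=/usr/local/share/misc/style.css ls.1 > ls.html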
# 20160114: SA-16:06.snmpd
OLD_FILES+=usr/share/examples/etc/snmpd.config
# 20160107: GNU ld installed as ld.bfd and linked as ld
OLD_FILES+=usr/lib/debug/usr/bin/ld.debug
# 20151225: new clang import which bumps version from 3.7.0 to 3.7.1.
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/3.7.0/include/sanitizer
OLD_FILES+=usr/lib/clang/3.7.0/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/3.7.0/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/3.7.0/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/3.7.0/include/adxintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/altivec.h
OLD_FILES+=usr/lib/clang/3.7.0/include/ammintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/arm_acle.h
OLD_FILES+=usr/lib/clang/3.7.0/include/arm_neon.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avxintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/cpuid.h
OLD_FILES+=usr/lib/clang/3.7.0/include/cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/3.7.0/include/emmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/htmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/immintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/3.7.0/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/3.7.0/include/mmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/module.modulemap
OLD_FILES+=usr/lib/clang/3.7.0/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/s390intrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/shaintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/smmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/vadefs.h
OLD_FILES+=usr/lib/clang/3.7.0/include/vecintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/x86intrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/xopintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/3.7.0/include
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/3.7.0/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.7.0/lib
OLD_DIRS+=usr/lib/clang/3.7.0
# 20151130: libelf moved from /usr/lib to /lib (libkvm dependency in r291406)
OLD_LIBS+=usr/lib/libelf.so.2
# 20151115: Fix bad upgrade scheme
OLD_FILES+=usr/share/locale/zh_CN.GB18030/zh_Hans_CN.GB18030
OLD_FILES+=usr/share/locale/zh_CN.GB2312/zh_Hans_CN.GB2312
OLD_FILES+=usr/share/locale/zh_CN.GBK/zh_Hans_CN.GBK
OLD_FILES+=usr/share/locale/zh_CN.UTF-8/zh_Hans_CN.UTF-8
OLD_FILES+=usr/share/locale/zh_CN.eucCN/zh_Hans_CN.eucCN
OLD_FILES+=usr/share/locale/zh_TW.Big5/zh_Hant_TW.Big5
OLD_FILES+=usr/share/locale/zh_TW.UTF-8/zh_Hant_TW.UTF-8
# 20151107: String collation improvements
OLD_FILES+=usr/share/locale/UTF-8/LC_CTYPE
OLD_DIRS+=usr/share/locale/UTF-8
OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_COLLATE
OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_CTYPE
OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_MESSAGES
OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_MONETARY
OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_NUMERIC
OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_TIME
OLD_DIRS+=usr/share/locale/kk_KZ.PT154
OLD_FILES+=usr/share/locale/la_LN.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/la_LN.ISO8859-1
OLD_FILES+=usr/share/locale/la_LN.ISO8859-13/LC_COLLATE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-13/LC_CTYPE
OLD_DIRS+=usr/share/locale/la_LN.ISO8859-13
OLD_FILES+=usr/share/locale/la_LN.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/la_LN.ISO8859-15
OLD_FILES+=usr/share/locale/la_LN.ISO8859-2/LC_COLLATE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-2/LC_CTYPE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-2/LC_TIME
OLD_DIRS+=usr/share/locale/la_LN.ISO8859-2
OLD_FILES+=usr/share/locale/la_LN.ISO8859-4/LC_COLLATE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-4/LC_CTYPE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-4/LC_TIME
OLD_DIRS+=usr/share/locale/la_LN.ISO8859-4
OLD_FILES+=usr/share/locale/la_LN.US-ASCII/LC_COLLATE
OLD_FILES+=usr/share/locale/la_LN.US-ASCII/LC_CTYPE
OLD_FILES+=usr/share/locale/la_LN.US-ASCII/LC_TIME
OLD_DIRS+=usr/share/locale/la_LN.US-ASCII
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-4/LC_COLLATE
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-4/LC_CTYPE
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-4/LC_MESSAGES
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-4/LC_MONETARY
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-4/LC_NUMERIC
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-4/LC_TIME
OLD_DIRS+=usr/share/locale/lt_LT.ISO8859-4
OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/no_NO.ISO8859-1
OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/no_NO.ISO8859-15
OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/no_NO.UTF-8
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-2/LC_COLLATE
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-2/LC_CTYPE
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-2/LC_MESSAGES
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-2/LC_MONETARY
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-2/LC_NUMERIC
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-2/LC_TIME
OLD_DIRS+=usr/share/locale/sr_YU.ISO8859-2
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-5/LC_COLLATE
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-5/LC_CTYPE
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-5/LC_MESSAGES
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-5/LC_MONETARY
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-5/LC_NUMERIC
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-5/LC_TIME
OLD_DIRS+=usr/share/locale/sr_YU.ISO8859-5
OLD_FILES+=usr/share/locale/sr_YU.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/sr_YU.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/sr_YU.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/sr_YU.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/sr_YU.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/sr_YU.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/sr_YU.UTF-8
# 20151101: added missing _test suffix on multiple tests in lib/libc
OLD_FILES+=usr/tests/lib/libc/c063/faccessat
OLD_FILES+=usr/tests/lib/libc/c063/fchmodat
OLD_FILES+=usr/tests/lib/libc/c063/fchownat
OLD_FILES+=usr/tests/lib/libc/c063/fexecve
OLD_FILES+=usr/tests/lib/libc/c063/fstatat
OLD_FILES+=usr/tests/lib/libc/c063/linkat
OLD_FILES+=usr/tests/lib/libc/c063/mkdirat
OLD_FILES+=usr/tests/lib/libc/c063/mkfifoat
OLD_FILES+=usr/tests/lib/libc/c063/mknodat
OLD_FILES+=usr/tests/lib/libc/c063/openat
OLD_FILES+=usr/tests/lib/libc/c063/readlinkat
OLD_FILES+=usr/tests/lib/libc/c063/renameat
OLD_FILES+=usr/tests/lib/libc/c063/symlinkat
OLD_FILES+=usr/tests/lib/libc/c063/unlinkat
OLD_FILES+=usr/tests/lib/libc/c063/utimensat
OLD_FILES+=usr/tests/lib/libc/string/memchr
OLD_FILES+=usr/tests/lib/libc/string/memcpy
OLD_FILES+=usr/tests/lib/libc/string/memmem
OLD_FILES+=usr/tests/lib/libc/string/memset
OLD_FILES+=usr/tests/lib/libc/string/strcat
OLD_FILES+=usr/tests/lib/libc/string/strchr
OLD_FILES+=usr/tests/lib/libc/string/strcmp
OLD_FILES+=usr/tests/lib/libc/string/strcpy
OLD_FILES+=usr/tests/lib/libc/string/strcspn
OLD_FILES+=usr/tests/lib/libc/string/strerror
OLD_FILES+=usr/tests/lib/libc/string/strlen
OLD_FILES+=usr/tests/lib/libc/string/strpbrk
OLD_FILES+=usr/tests/lib/libc/string/strrchr
OLD_FILES+=usr/tests/lib/libc/string/strspn
OLD_FILES+=usr/tests/lib/libc/string/swab
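# The renamed test programs run under kyua(1) as before; a quick sketch of
# exercising one of them from its tests directory:
# cd /usr/tests/lib/libc/string
# kyua test strlen_test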
# 20151101: 430.status-rwho was renamed to 430.status-uptime
OLD_FILES+=etc/periodic/daily/430.status-rwho
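# The matching periodic.conf(5) knob follows the rename; a hedged sketch,
# assuming the new script keys on daily_status_uptime_enable:
# sysrc -f /etc/periodic.conf daily_status_uptime_enable=YES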
# 20151030: OpenSSL 1.0.2d import
OLD_FILES+=usr/share/openssl/man/man3/CMS_set1_signer_certs.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_PKEY_ctrl.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_PKEY_ctrl_str.3.gz
OLD_FILES+=usr/share/openssl/man/man3/d2i_509_CRL_fp.3.gz
OLD_LIBS+=lib/libcrypto.so.7
OLD_LIBS+=usr/lib/libssl.so.7
OLD_LIBS+=usr/lib32/libcrypto.so.7
OLD_LIBS+=usr/lib32/libssl.so.7
# 20151029: LinuxKPI moved to sys/compat/linuxkpi
OLD_FILES+=usr/include/dev/usb/usb_compat_linux.h
# 20151015: test symbols moved to /usr/lib/debug
OLD_DIRS+=usr/tests/lib/atf/libatf-c++/.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/atf_c++_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/build_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/check_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/config_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/macros_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/tests_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/utils_test.debug
OLD_DIRS+=usr/tests/lib/atf/libatf-c++/detail/.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/application_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/env_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/exceptions_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/fs_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/process_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/sanity_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/text_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/version_helper.debug
OLD_DIRS+=usr/tests/lib/atf/libatf-c/.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/atf_c_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/build_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/check_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/config_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/error_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/macros_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/tc_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/tp_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/utils_test.debug
OLD_DIRS+=usr/tests/lib/atf/libatf-c/detail/.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/dynstr_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/env_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/fs_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/list_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/map_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/process_helpers.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/process_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/sanity_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/text_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/user_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/version_helper.debug
OLD_DIRS+=usr/tests/lib/atf/test-programs/.debug
OLD_FILES+=usr/tests/lib/atf/test-programs/.debug/c_helpers.debug
OLD_FILES+=usr/tests/lib/atf/test-programs/.debug/cpp_helpers.debug
OLD_DIRS+=usr/tests/lib/libc/c063/.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/faccessat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/fchmodat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/fchownat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/fexecve.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/fstatat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/linkat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/mkdirat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/mkfifoat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/mknodat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/openat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/readlinkat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/renameat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/symlinkat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/unlinkat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/utimensat.debug
OLD_DIRS+=usr/tests/lib/libc/db/.debug
OLD_FILES+=usr/tests/lib/libc/db/.debug/h_db.debug
OLD_DIRS+=usr/tests/lib/libc/gen/.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/alarm_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/arc4random_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/assert_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/basedirname_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/dir_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/floatunditf_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/fnmatch_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/fpclassify2_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/fpclassify_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/fpsetmask_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/fpsetround_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/ftok_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/getcwd_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/getgrent_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/glob_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/humanize_number_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/isnan_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/nice_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/pause_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/raise_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/realpath_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/setdomainname_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/sethostname_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/sleep_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/syslog_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/time_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/ttyname_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/vis_test.debug
OLD_DIRS+=usr/tests/lib/libc/gen/execve/.debug
OLD_FILES+=usr/tests/lib/libc/gen/execve/.debug/execve_test.debug
OLD_DIRS+=usr/tests/lib/libc/gen/posix_spawn/.debug
OLD_FILES+=usr/tests/lib/libc/gen/posix_spawn/.debug/fileactions_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/posix_spawn/.debug/h_fileactions.debug
OLD_FILES+=usr/tests/lib/libc/gen/posix_spawn/.debug/h_spawn.debug
OLD_FILES+=usr/tests/lib/libc/gen/posix_spawn/.debug/h_spawnattr.debug
OLD_FILES+=usr/tests/lib/libc/gen/posix_spawn/.debug/spawn_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/posix_spawn/.debug/spawnattr_test.debug
OLD_DIRS+=usr/tests/lib/libc/hash/.debug
OLD_FILES+=usr/tests/lib/libc/hash/.debug/h_hash.debug
OLD_FILES+=usr/tests/lib/libc/hash/.debug/sha2_test.debug
OLD_DIRS+=usr/tests/lib/libc/inet/.debug
OLD_FILES+=usr/tests/lib/libc/inet/.debug/inet_network_test.debug
OLD_DIRS+=usr/tests/lib/libc/locale/.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/io_test.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/mbrtowc_test.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/mbsnrtowcs_test.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/mbstowcs_test.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/mbtowc_test.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/wcscspn_test.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/wcspbrk_test.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/wcsspn_test.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/wcstod_test.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/wctomb_test.debug
OLD_DIRS+=usr/tests/lib/libc/net/.debug
OLD_FILES+=usr/tests/lib/libc/net/.debug/ether_aton_test.debug
OLD_FILES+=usr/tests/lib/libc/net/.debug/getprotoent_test.debug
OLD_FILES+=usr/tests/lib/libc/net/.debug/h_dns_server.debug
OLD_FILES+=usr/tests/lib/libc/net/.debug/h_nsd_recurse.debug
OLD_FILES+=usr/tests/lib/libc/net/.debug/h_protoent.debug
OLD_FILES+=usr/tests/lib/libc/net/.debug/h_servent.debug
OLD_DIRS+=usr/tests/lib/libc/regex/.debug
OLD_FILES+=usr/tests/lib/libc/regex/.debug/exhaust_test.debug
OLD_FILES+=usr/tests/lib/libc/regex/.debug/h_regex.debug
OLD_FILES+=usr/tests/lib/libc/regex/.debug/regex_att_test.debug
OLD_DIRS+=usr/tests/lib/libc/ssp/.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_fgets.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_getcwd.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_gets.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_memcpy.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_memmove.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_memset.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_raw.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_read.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_readlink.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_snprintf.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_sprintf.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_stpcpy.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_stpncpy.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_strcat.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_strcpy.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_strncat.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_strncpy.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_vsnprintf.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_vsprintf.debug
OLD_DIRS+=usr/tests/lib/libc/stdio/.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/clearerr_test.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/fflush_test.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/fmemopen2_test.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/fmemopen_test.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/fopen_test.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/fputc_test.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/mktemp_test.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/popen_test.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/printf_test.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/scanf_test.debug
OLD_DIRS+=usr/tests/lib/libc/stdlib/.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/abs_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/atoi_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/div_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/exit_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/getenv_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/h_getopt.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/h_getopt_long.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/hsearch_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/posix_memalign_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/random_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/strtod_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/strtol_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/system_test.debug
OLD_DIRS+=usr/tests/lib/libc/string/.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/memchr.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/memcpy.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/memmem.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/memset.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strcat.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strchr.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strcmp.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strcpy.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strcspn.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strerror.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strlen.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strpbrk.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strrchr.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strspn.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/swab.debug
OLD_DIRS+=usr/tests/lib/libc/sys/.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/access_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/chroot_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/clock_gettime_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/connect_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/dup_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/fsync_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/getcontext_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/getgroups_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/getitimer_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/getlogin_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/getpid_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/getrusage_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/getsid_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/gettimeofday_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/issetugid_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/kevent_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/kill_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/link_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/listen_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/mincore_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/mkdir_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/mkfifo_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/mknod_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/mlock_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/mmap_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/mprotect_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/msgctl_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/msgget_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/msgrcv_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/msgsnd_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/msync_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/nanosleep_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/pipe2_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/pipe_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/poll_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/revoke_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/select_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/setrlimit_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/setuid_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/sigaction_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/sigqueue_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/sigtimedwait_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/socketpair_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/stat_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/timer_create_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/truncate_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/ucontext_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/umask_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/unlink_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/write_test.debug
OLD_DIRS+=usr/tests/lib/libc/termios/.debug
OLD_FILES+=usr/tests/lib/libc/termios/.debug/tcsetpgrp_test.debug
OLD_DIRS+=usr/tests/lib/libc/tls/.debug
OLD_FILES+=usr/tests/lib/libc/tls/.debug/h_tls_dlopen.so.debug
OLD_FILES+=usr/tests/lib/libc/tls/.debug/libh_tls_dynamic.so.1.debug
OLD_FILES+=usr/tests/lib/libc/tls/.debug/tls_dlopen_test.debug
OLD_FILES+=usr/tests/lib/libc/tls/.debug/tls_dynamic_test.debug
OLD_DIRS+=usr/tests/lib/libc/ttyio/.debug
OLD_FILES+=usr/tests/lib/libc/ttyio/.debug/ttyio_test.debug
OLD_DIRS+=usr/tests/lib/libcrypt/.debug
OLD_FILES+=usr/tests/lib/libcrypt/.debug/crypt_tests.debug
OLD_DIRS+=usr/tests/lib/libmp/.debug
OLD_FILES+=usr/tests/lib/libmp/.debug/legacy_test.debug
OLD_DIRS+=usr/tests/lib/libnv/.debug
OLD_FILES+=usr/tests/lib/libnv/.debug/dnv_tests.debug
OLD_FILES+=usr/tests/lib/libnv/.debug/nv_array_tests.debug
OLD_FILES+=usr/tests/lib/libnv/.debug/nv_tests.debug
OLD_FILES+=usr/tests/lib/libnv/.debug/nvlist_add_test.debug
OLD_FILES+=usr/tests/lib/libnv/.debug/nvlist_exists_test.debug
OLD_FILES+=usr/tests/lib/libnv/.debug/nvlist_free_test.debug
OLD_FILES+=usr/tests/lib/libnv/.debug/nvlist_get_test.debug
OLD_FILES+=usr/tests/lib/libnv/.debug/nvlist_move_test.debug
OLD_FILES+=usr/tests/lib/libnv/.debug/nvlist_send_recv_test.debug
OLD_DIRS+=usr/tests/lib/libpam/.debug
OLD_FILES+=usr/tests/lib/libpam/.debug/t_openpam_ctype.debug
OLD_FILES+=usr/tests/lib/libpam/.debug/t_openpam_readlinev.debug
OLD_FILES+=usr/tests/lib/libpam/.debug/t_openpam_readword.debug
OLD_DIRS+=usr/tests/lib/libproc/.debug
OLD_FILES+=usr/tests/lib/libproc/.debug/proc_test.debug
OLD_FILES+=usr/tests/lib/libproc/.debug/target_prog.debug
OLD_DIRS+=usr/tests/lib/librt/.debug
OLD_FILES+=usr/tests/lib/librt/.debug/sched_test.debug
OLD_FILES+=usr/tests/lib/librt/.debug/sem_test.debug
OLD_DIRS+=usr/tests/lib/libthr/.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/barrier_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/cond_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/condwait_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/detach_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/equal_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/fork_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/fpu_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/h_atexit.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/h_cancel.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/h_exit.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/h_resolv.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/join_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/kill_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/mutex_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/once_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/preempt_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/rwlock_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/sem_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/siglongjmp_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/sigmask_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/sigsuspend_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/sleep_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/swapcontext_test.debug
OLD_DIRS+=usr/tests/lib/libthr/dlopen/.debug
OLD_FILES+=usr/tests/lib/libthr/dlopen/.debug/dlopen_test.debug
OLD_FILES+=usr/tests/lib/libthr/dlopen/.debug/h_pthread_dlopen.so.1.debug
OLD_FILES+=usr/tests/lib/libthr/dlopen/.debug/main_pthread_create_test.debug
OLD_DIRS+=usr/tests/lib/libutil/.debug
OLD_FILES+=usr/tests/lib/libutil/.debug/flopen_test.debug
OLD_FILES+=usr/tests/lib/libutil/.debug/grp_test.debug
OLD_FILES+=usr/tests/lib/libutil/.debug/humanize_number_test.debug
OLD_FILES+=usr/tests/lib/libutil/.debug/pidfile_test.debug
OLD_FILES+=usr/tests/lib/libutil/.debug/trimdomain-nodomain_test.debug
OLD_FILES+=usr/tests/lib/libutil/.debug/trimdomain_test.debug
OLD_DIRS+=usr/tests/lib/libxo/.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/libenc_test.so.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_01.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_02.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_03.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_04.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_05.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_06.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_07.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_08.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_09.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_10.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_11.debug
OLD_DIRS+=usr/tests/lib/msun/.debug
OLD_FILES+=usr/tests/lib/msun/.debug/acos_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/asin_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/atan_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/cbrt_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/ceil_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/cos_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/cosh_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/erf_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/exp_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/fmod_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/infinity_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/ldexp_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/log_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/pow_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/precision_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/round_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/scalbn_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/sin_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/sinh_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/sqrt_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/tan_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/tanh_test.debug
OLD_DIRS+=usr/tests/libexec/rtld-elf/.debug
OLD_FILES+=usr/tests/libexec/rtld-elf/.debug/ld_library_pathfds.debug
OLD_FILES+=usr/tests/libexec/rtld-elf/.debug/libpythagoras.so.0.debug
OLD_FILES+=usr/tests/libexec/rtld-elf/.debug/target.debug
OLD_DIRS+=usr/tests/sbin/devd/.debug
OLD_FILES+=usr/tests/sbin/devd/.debug/client_test.debug
OLD_DIRS+=usr/tests/sbin/dhclient/.debug
OLD_FILES+=usr/tests/sbin/dhclient/.debug/option-domain-search_test.debug
OLD_DIRS+=usr/tests/share/examples/tests/atf/.debug
OLD_FILES+=usr/tests/share/examples/tests/atf/.debug/printf_test.debug
OLD_DIRS+=usr/tests/share/examples/tests/plain/.debug
OLD_FILES+=usr/tests/share/examples/tests/plain/.debug/printf_test.debug
OLD_DIRS+=usr/tests/sys/aio/.debug
OLD_FILES+=usr/tests/sys/aio/.debug/aio_kqueue_test.debug
OLD_FILES+=usr/tests/sys/aio/.debug/aio_test.debug
OLD_FILES+=usr/tests/sys/aio/.debug/lio_kqueue_test.debug
OLD_DIRS+=usr/tests/sys/fifo/.debug
OLD_FILES+=usr/tests/sys/fifo/.debug/fifo_create.debug
OLD_FILES+=usr/tests/sys/fifo/.debug/fifo_io.debug
OLD_FILES+=usr/tests/sys/fifo/.debug/fifo_misc.debug
OLD_FILES+=usr/tests/sys/fifo/.debug/fifo_open.debug
OLD_DIRS+=usr/tests/sys/file/.debug
OLD_FILES+=usr/tests/sys/file/.debug/closefrom_test.debug
OLD_FILES+=usr/tests/sys/file/.debug/dup_test.debug
OLD_FILES+=usr/tests/sys/file/.debug/fcntlflags_test.debug
OLD_FILES+=usr/tests/sys/file/.debug/flock_helper.debug
OLD_FILES+=usr/tests/sys/file/.debug/ftruncate_test.debug
OLD_FILES+=usr/tests/sys/file/.debug/newfileops_on_fork_test.debug
OLD_DIRS+=usr/tests/sys/kern/.debug
OLD_FILES+=usr/tests/sys/kern/.debug/kern_descrip_test.debug
OLD_FILES+=usr/tests/sys/kern/.debug/ptrace_test.debug
OLD_FILES+=usr/tests/sys/kern/.debug/unix_seqpacket_test.debug
OLD_DIRS+=usr/tests/sys/kern/execve/.debug
OLD_FILES+=usr/tests/sys/kern/execve/.debug/execve_helper.debug
OLD_FILES+=usr/tests/sys/kern/execve/.debug/good_aout.debug
OLD_DIRS+=usr/tests/sys/kqueue/.debug
OLD_FILES+=usr/tests/sys/kqueue/.debug/kqtest.debug
OLD_DIRS+=usr/tests/sys/mqueue/.debug
OLD_FILES+=usr/tests/sys/mqueue/.debug/mqtest1.debug
OLD_FILES+=usr/tests/sys/mqueue/.debug/mqtest2.debug
OLD_FILES+=usr/tests/sys/mqueue/.debug/mqtest3.debug
OLD_FILES+=usr/tests/sys/mqueue/.debug/mqtest4.debug
OLD_FILES+=usr/tests/sys/mqueue/.debug/mqtest5.debug
OLD_DIRS+=usr/tests/sys/netinet/.debug
OLD_FILES+=usr/tests/sys/netinet/.debug/udp_dontroute.debug
OLD_DIRS+=usr/tests/sys/pjdfstest/.debug
OLD_FILES+=usr/tests/sys/pjdfstest/.debug/pjdfstest.debug
OLD_DIRS+=usr/tests/sys/vm/.debug
OLD_FILES+=usr/tests/sys/vm/.debug/mmap_test.debug
# 20151015: Rename files due to file-installed-as-dir bug
OLD_FILES+=usr/share/doc/legal/realtek
OLD_FILES+=usr/share/doc/legal/realtek/LICENSE
OLD_DIRS+=usr/share/doc/legal/realtek
OLD_DIRS+=usr/share/doc/legal/intel_ipw
OLD_FILES+=usr/share/doc/legal/intel_ipw/LICENSE
OLD_FILES+=usr/share/doc/legal/intel_iwn
OLD_FILES+=usr/share/doc/legal/intel_iwn/LICENSE
OLD_DIRS+=usr/share/doc/legal/intel_iwn
OLD_DIRS+=usr/share/doc/legal/intel_iwi
OLD_FILES+=usr/share/doc/legal/intel_iwi/LICENSE
OLD_DIRS+=usr/share/doc/legal/intel_wpi
OLD_FILES+=usr/share/doc/legal/intel_wpi/LICENSE
# 20151006: new libc++ import
OLD_FILES+=usr/include/c++/__tuple_03
OLD_FILES+=usr/include/c++/v1/__tuple_03
OLD_FILES+=usr/include/c++/v1/tr1/__tuple_03
# 20151006: new clang import which bumps version from 3.6.1 to 3.7.0.
OLD_FILES+=usr/lib/clang/3.6.1/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/3.6.1/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/3.6.1/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/3.6.1/include/adxintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/altivec.h
OLD_FILES+=usr/lib/clang/3.6.1/include/ammintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/arm_acle.h
OLD_FILES+=usr/lib/clang/3.6.1/include/arm_neon.h
OLD_FILES+=usr/lib/clang/3.6.1/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/avxintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/cpuid.h
OLD_FILES+=usr/lib/clang/3.6.1/include/emmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/immintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/3.6.1/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/3.6.1/include/mmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/module.modulemap
OLD_FILES+=usr/lib/clang/3.6.1/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/shaintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/smmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/x86intrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/xopintrin.h
OLD_DIRS+=usr/lib/clang/3.6.1/include
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.san-i386.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.san-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.ubsan-i386.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.ubsan-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.ubsan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.ubsan_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/3.6.1/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.6.1/lib
OLD_DIRS+=usr/lib/clang/3.6.1
# 20150928: unused sgsmsg utility is removed
OLD_FILES+=usr/bin/sgsmsg
# 20150926: remove links to removed/unimplemented mbuf(9) macros
OLD_FILES+=usr/share/man/man9/MEXT_ADD_REF.9.gz
OLD_FILES+=usr/share/man/man9/MEXTFREE.9.gz
OLD_FILES+=usr/share/man/man9/MEXT_IS_REF.9.gz
OLD_FILES+=usr/share/man/man9/MEXT_REM_REF.9.gz
OLD_FILES+=usr/share/man/man9/MFREE.9.gz
# 20150818: *allocm() are gone in jemalloc 4.0.0
OLD_FILES+=usr/share/man/man3/allocm.3.gz
OLD_FILES+=usr/share/man/man3/dallocm.3.gz
OLD_FILES+=usr/share/man/man3/nallocm.3.gz
OLD_FILES+=usr/share/man/man3/rallocm.3.gz
OLD_FILES+=usr/share/man/man3/sallocm.3.gz
# 20150802: Remove NetBSD's test for pw(8)
OLD_FILES+=usr/tests/usr.sbin/pw/pw_test
# 20150719: Remove libarchive.pc
OLD_FILES+=usr/libdata/pkgconfig/libarchive.pc
# 20150705: Rename DTrace provider man pages.
OLD_FILES+=usr/share/man/man4/dtrace-io.4.gz
OLD_FILES+=usr/share/man/man4/dtrace-ip.4.gz
OLD_FILES+=usr/share/man/man4/dtrace-proc.4.gz
OLD_FILES+=usr/share/man/man4/dtrace-sched.4.gz
OLD_FILES+=usr/share/man/man4/dtrace-tcp.4.gz
OLD_FILES+=usr/share/man/man4/dtrace-udp.4.gz
# 20150704: nvlist private headers no longer installed
OLD_FILES+=usr/include/sys/nv_impl.h
OLD_FILES+=usr/include/sys/nvlist_impl.h
OLD_FILES+=usr/include/sys/nvpair_impl.h
# 20150624: libugidfw library version bump
OLD_LIBS+=usr/lib/libugidfw.so.4
OLD_LIBS+=usr/lib32/libugidfw.so.4
# 20150604: Move nvlist man pages to section 9.
OLD_FILES+=usr/share/man/man3/libnv.3.gz
OLD_FILES+=usr/share/man/man3/nv.3.gz
OLD_FILES+=usr/share/man/man3/nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_add_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_add_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_add_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_add_null.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_add_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_add_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_add_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_add_stringf.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_add_stringv.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_clone.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_create.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_destroy.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_dump.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_empty.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_error.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_exists.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_exists_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_exists_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_exists_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_exists_null.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_exists_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_exists_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_exists_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_exists_type.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_fdump.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_flags.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_free.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_free_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_free_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_free_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_free_null.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_free_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_free_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_free_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_free_type.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_get_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_get_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_get_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_get_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_get_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_get_parent.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_get_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_move_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_move_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_move_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_move_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_next.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_pack.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_recv.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_send.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_set_error.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_size.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_take_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_take_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_take_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_take_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_take_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_take_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_unpack.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_xfer.3.gz
# 20150702: Remove duplicated nvlist includes.
OLD_FILES+=usr/include/dnv.h
OLD_FILES+=usr/include/nv.h
# 20150528: PCI IOV device driver methods moved to a separate kobj interface.
OLD_FILES+=usr/share/man/man9/PCI_ADD_VF.9.gz
OLD_FILES+=usr/share/man/man9/PCI_INIT_IOV.9.gz
OLD_FILES+=usr/share/man/man9/PCI_UNINIT_IOV.9.gz
# 20150525: new clang import which bumps version from 3.6.0 to 3.6.1.
OLD_FILES+=usr/lib/clang/3.6.0/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/3.6.0/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/3.6.0/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/3.6.0/include/adxintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/altivec.h
OLD_FILES+=usr/lib/clang/3.6.0/include/ammintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/arm_acle.h
OLD_FILES+=usr/lib/clang/3.6.0/include/arm_neon.h
OLD_FILES+=usr/lib/clang/3.6.0/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/avxintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/cpuid.h
OLD_FILES+=usr/lib/clang/3.6.0/include/emmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/immintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/3.6.0/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/3.6.0/include/mmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/module.modulemap
OLD_FILES+=usr/lib/clang/3.6.0/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/shaintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/smmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/x86intrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/xopintrin.h
OLD_DIRS+=usr/lib/clang/3.6.0/include
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.san-i386.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.san-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.ubsan-i386.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.ubsan-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.ubsan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.ubsan_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/3.6.0/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.6.0/lib
OLD_DIRS+=usr/lib/clang/3.6.0
# 20150521: remove demandoc(1) and the mandoc(3) man pages
OLD_FILES+=usr/bin/demandoc
OLD_FILES+=usr/share/man/man1/demandoc.1.gz
OLD_FILES+=usr/share/man/man3/mandoc.3.gz
OLD_FILES+=usr/share/man/man3/mandoc_headers.3.gz
# 20150520: remove libheimsqlite
OLD_FILES+=usr/lib/libheimsqlite.a
OLD_FILES+=usr/lib/libheimsqlite.so
OLD_LIBS+=usr/lib/libheimsqlite.so.11
OLD_FILES+=usr/lib/libheimsqlite_p.a
OLD_FILES+=usr/lib32/libheimsqlite.a
OLD_FILES+=usr/lib32/libheimsqlite.so
OLD_LIBS+=usr/lib32/libheimsqlite.so.11
OLD_FILES+=usr/lib32/libheimsqlite_p.a
# 20150518: tzdata2015c update
OLD_FILES+=usr/share/zoneinfo/America/Montreal
# 20150506: remove NDHASGIANT(9)
OLD_FILES+=usr/share/man/man9/NDHASGIANT.9.gz
# 20150504: private libraries moved out of usr/lib/private
OLD_FILES+=usr/share/examples/etc/libmap32.conf
OLD_FILES+=usr/include/bsdstat.h
OLD_LIBS+=usr/lib32/private/libatf-c++.so.2
OLD_LIBS+=usr/lib32/private/libbsdstat.so.1
OLD_LIBS+=usr/lib32/private/libheimipcs.so.11
OLD_LIBS+=usr/lib32/private/libsqlite3.so.0
OLD_LIBS+=usr/lib32/private/libunbound.so.5
OLD_LIBS+=usr/lib32/private/libatf-c.so.1
OLD_LIBS+=usr/lib32/private/libheimipcc.so.11
OLD_LIBS+=usr/lib32/private/libldns.so.5
OLD_LIBS+=usr/lib32/private/libssh.so.5
OLD_LIBS+=usr/lib32/private/libucl.so.1
OLD_DIRS+=usr/lib32/private
OLD_LIBS+=usr/lib/private/libatf-c++.so.2
OLD_LIBS+=usr/lib/private/libbsdstat.so.1
OLD_LIBS+=usr/lib/private/libheimipcs.so.11
OLD_LIBS+=usr/lib/private/libsqlite3.so.0
OLD_LIBS+=usr/lib/private/libunbound.so.5
OLD_LIBS+=usr/lib/private/libatf-c.so.1
OLD_LIBS+=usr/lib/private/libheimipcc.so.11
OLD_LIBS+=usr/lib/private/libldns.so.5
OLD_LIBS+=usr/lib/private/libssh.so.5
OLD_LIBS+=usr/lib/private/libucl.so.1
OLD_DIRS+=usr/lib/private
# 20150501: soeliminate removed
OLD_FILES+=usr/bin/soeliminate
OLD_FILES+=usr/share/man/man1/soeliminate.1.gz
# 20150501: Remove the nvlist_.*[vf] function man pages.
OLD_FILES+=usr/share/man/man3/nvlist_addf_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addf_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addf_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addf_null.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addf_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addf_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addf_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addv_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addv_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addv_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addv_null.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addv_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addv_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addv_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsf.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsf_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsf_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsf_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsf_null.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsf_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsf_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsf_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsf_type.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsv.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsv_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsv_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsv_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsv_null.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsv_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsv_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsv_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsv_type.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freef.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freef_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freef_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freef_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freef_null.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freef_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freef_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freef_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freef_type.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freev.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freev_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freev_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freev_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freev_null.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freev_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freev_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freev_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freev_type.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getf_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getf_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getf_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getf_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getf_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getf_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getv_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getv_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getv_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getv_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getv_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getv_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_movef_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_movef_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_movef_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_movef_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_movev_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_movev_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_movev_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_movev_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takef_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takef_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takef_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takef_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takef_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takef_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takev_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takev_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takev_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takev_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takev_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takev_string.3.gz
# 20150429: remove never-written documentation
OLD_FILES+=usr/share/doc/papers/hwpmc.ascii.gz
# 20150427: tests/sys/kern/mmap_test moved to tests/sys/vm/mmap_test
OLD_FILES+=usr/tests/sys/kern/mmap_test
# 20150422: zlib.c moved from net to libkern
OLD_FILES+=usr/include/net/zlib.h
OLD_FILES+=usr/include/net/zutil.h
# 20150418: mount_oldnfs removed
OLD_FILES+=sbin/mount_oldnfs
OLD_FILES+=usr/share/man/man8/mount_oldnfs.8.gz
# 20150416: ALTQ moved to net/altq
OLD_FILES+=usr/include/altq/altq_rmclass_debug.h
OLD_FILES+=usr/include/altq/altq.h
OLD_FILES+=usr/include/altq/altq_cdnr.h
OLD_FILES+=usr/include/altq/altq_hfsc.h
OLD_FILES+=usr/include/altq/altq_priq.h
OLD_FILES+=usr/include/altq/altqconf.h
OLD_FILES+=usr/include/altq/altq_classq.h
OLD_FILES+=usr/include/altq/altq_red.h
OLD_FILES+=usr/include/altq/if_altq.h
OLD_FILES+=usr/include/altq/altq_var.h
OLD_FILES+=usr/include/altq/altq_rmclass.h
OLD_FILES+=usr/include/altq/altq_cbq.h
OLD_FILES+=usr/include/altq/altq_rio.h
OLD_DIRS+=usr/include/altq
# 20150330: ntp 4.2.8p1
OLD_FILES+=usr/share/doc/ntp/driver1.html
OLD_FILES+=usr/share/doc/ntp/driver10.html
OLD_FILES+=usr/share/doc/ntp/driver11.html
OLD_FILES+=usr/share/doc/ntp/driver12.html
OLD_FILES+=usr/share/doc/ntp/driver16.html
OLD_FILES+=usr/share/doc/ntp/driver18.html
OLD_FILES+=usr/share/doc/ntp/driver19.html
OLD_FILES+=usr/share/doc/ntp/driver2.html
OLD_FILES+=usr/share/doc/ntp/driver20.html
OLD_FILES+=usr/share/doc/ntp/driver22.html
OLD_FILES+=usr/share/doc/ntp/driver26.html
OLD_FILES+=usr/share/doc/ntp/driver27.html
OLD_FILES+=usr/share/doc/ntp/driver28.html
OLD_FILES+=usr/share/doc/ntp/driver29.html
OLD_FILES+=usr/share/doc/ntp/driver3.html
OLD_FILES+=usr/share/doc/ntp/driver30.html
OLD_FILES+=usr/share/doc/ntp/driver32.html
OLD_FILES+=usr/share/doc/ntp/driver33.html
OLD_FILES+=usr/share/doc/ntp/driver34.html
OLD_FILES+=usr/share/doc/ntp/driver35.html
OLD_FILES+=usr/share/doc/ntp/driver36.html
OLD_FILES+=usr/share/doc/ntp/driver37.html
OLD_FILES+=usr/share/doc/ntp/driver4.html
OLD_FILES+=usr/share/doc/ntp/driver5.html
OLD_FILES+=usr/share/doc/ntp/driver6.html
OLD_FILES+=usr/share/doc/ntp/driver7.html
OLD_FILES+=usr/share/doc/ntp/driver8.html
OLD_FILES+=usr/share/doc/ntp/driver9.html
OLD_FILES+=usr/share/doc/ntp/ldisc.html
OLD_FILES+=usr/share/doc/ntp/measure.html
OLD_FILES+=usr/share/doc/ntp/mx4200data.html
OLD_FILES+=usr/share/doc/ntp/notes.html
OLD_FILES+=usr/share/doc/ntp/patches.html
OLD_FILES+=usr/share/doc/ntp/porting.html
OLD_FILES+=usr/share/man/man1/sntp.1.gz
# 20150329: bootconfig.h removed on arm
.if ${TARGET_ARCH} == "arm"
OLD_FILES+=usr/include/bootconfig.h
.endif
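# Architecture-specific entries such as the one above are wrapped in make
# conditionals so that they only match on the relevant TARGET_ARCH.  A
# minimal sketch of the pattern (hypothetical header, shown commented out
# for illustration only):
# .if ${TARGET_ARCH} == "mips"
# OLD_FILES+=usr/include/hypothetical_old_header.h
# .endif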
# 20150326: pmcstudy.1 removed
OLD_FILES+=usr/share/man/man1/pmcstudy.1.gz
# 20150315: new clang import which bumps version from 3.5.1 to 3.6.0.
OLD_FILES+=usr/include/clang/3.5.1/__wmmintrin_aes.h
OLD_FILES+=usr/include/clang/3.5.1/__wmmintrin_pclmul.h
OLD_FILES+=usr/include/clang/3.5.1/altivec.h
OLD_FILES+=usr/include/clang/3.5.1/ammintrin.h
OLD_FILES+=usr/include/clang/3.5.1/arm_acle.h
OLD_FILES+=usr/include/clang/3.5.1/arm_neon.h
OLD_FILES+=usr/include/clang/3.5.1/avx2intrin.h
OLD_FILES+=usr/include/clang/3.5.1/avxintrin.h
OLD_FILES+=usr/include/clang/3.5.1/bmi2intrin.h
OLD_FILES+=usr/include/clang/3.5.1/bmiintrin.h
OLD_FILES+=usr/include/clang/3.5.1/cpuid.h
OLD_FILES+=usr/include/clang/3.5.1/emmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/f16cintrin.h
OLD_FILES+=usr/include/clang/3.5.1/fma4intrin.h
OLD_FILES+=usr/include/clang/3.5.1/fmaintrin.h
OLD_FILES+=usr/include/clang/3.5.1/ia32intrin.h
OLD_FILES+=usr/include/clang/3.5.1/immintrin.h
OLD_FILES+=usr/include/clang/3.5.1/lzcntintrin.h
OLD_FILES+=usr/include/clang/3.5.1/mm3dnow.h
OLD_FILES+=usr/include/clang/3.5.1/mm_malloc.h
OLD_FILES+=usr/include/clang/3.5.1/mmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/module.modulemap
OLD_FILES+=usr/include/clang/3.5.1/nmmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/pmmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/popcntintrin.h
OLD_FILES+=usr/include/clang/3.5.1/prfchwintrin.h
OLD_FILES+=usr/include/clang/3.5.1/rdseedintrin.h
OLD_FILES+=usr/include/clang/3.5.1/rtmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/shaintrin.h
OLD_FILES+=usr/include/clang/3.5.1/smmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/tbmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/tmmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/wmmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/x86intrin.h
OLD_FILES+=usr/include/clang/3.5.1/xmmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/xopintrin.h
OLD_DIRS+=usr/include/clang/3.5.1
OLD_DIRS+=usr/include/clang
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.san-i386.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.san-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.ubsan-i386.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.ubsan-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.ubsan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.ubsan_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/3.5.1/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.5.1/lib
OLD_DIRS+=usr/lib/clang/3.5.1
# 20150302: binutils documentation distributed as a manpage
OLD_FILES+=usr/share/doc/binutils/as.txt
OLD_FILES+=usr/share/doc/binutils/ld.txt
OLD_DIRS+=usr/share/doc/binutils
# 20150222: Removed bcd(6) and ppt(6)
OLD_FILES+=usr/bin/bcd
OLD_FILES+=usr/bin/ppt
OLD_FILES+=usr/share/man/man6/bcd.6.gz
OLD_FILES+=usr/share/man/man6/ppt.6.gz
# 20150217: Removed remnants of ar(4) driver
OLD_FILES+=usr/include/dev/ic/hd64570.h
# 20150212: /usr/games moved into /usr/bin
OLD_FILES+=usr/games/bcd
OLD_FILES+=usr/games/caesar
OLD_FILES+=usr/games/factor
OLD_FILES+=usr/games/fortune
OLD_FILES+=usr/games/grdc
OLD_FILES+=usr/games/morse
OLD_FILES+=usr/games/number
OLD_FILES+=usr/games/pom
OLD_FILES+=usr/games/ppt
OLD_FILES+=usr/games/primes
OLD_FILES+=usr/games/random
OLD_FILES+=usr/games/rot13
OLD_FILES+=usr/games/strfile
OLD_FILES+=usr/games/unstr
OLD_DIRS+=usr/games
# 20150209: liblzma header
OLD_FILES+=usr/include/lzma/lzma.h
# 20150124: spl.9 and friends
OLD_FILES+=usr/share/man/man9/spl.9.gz
OLD_FILES+=usr/share/man/man9/spl0.9.gz
OLD_FILES+=usr/share/man/man9/splbio.9.gz
OLD_FILES+=usr/share/man/man9/splclock.9.gz
OLD_FILES+=usr/share/man/man9/splhigh.9.gz
OLD_FILES+=usr/share/man/man9/splimp.9.gz
OLD_FILES+=usr/share/man/man9/splnet.9.gz
OLD_FILES+=usr/share/man/man9/splsoftclock.9.gz
OLD_FILES+=usr/share/man/man9/splsofttty.9.gz
OLD_FILES+=usr/share/man/man9/splstatclock.9.gz
OLD_FILES+=usr/share/man/man9/spltty.9.gz
OLD_FILES+=usr/share/man/man9/splvm.9.gz
OLD_FILES+=usr/share/man/man9/splx.9.gz
# 20150118: toeplitz.c moved from netinet to net
OLD_FILES+=usr/include/netinet/toeplitz.h
# 20150118: new clang import which bumps version from 3.5.0 to 3.5.1.
OLD_FILES+=usr/include/clang/3.5.0/__wmmintrin_aes.h
OLD_FILES+=usr/include/clang/3.5.0/__wmmintrin_pclmul.h
OLD_FILES+=usr/include/clang/3.5.0/altivec.h
OLD_FILES+=usr/include/clang/3.5.0/ammintrin.h
OLD_FILES+=usr/include/clang/3.5.0/arm_acle.h
OLD_FILES+=usr/include/clang/3.5.0/arm_neon.h
OLD_FILES+=usr/include/clang/3.5.0/avx2intrin.h
OLD_FILES+=usr/include/clang/3.5.0/avxintrin.h
OLD_FILES+=usr/include/clang/3.5.0/bmi2intrin.h
OLD_FILES+=usr/include/clang/3.5.0/bmiintrin.h
OLD_FILES+=usr/include/clang/3.5.0/cpuid.h
OLD_FILES+=usr/include/clang/3.5.0/emmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/f16cintrin.h
OLD_FILES+=usr/include/clang/3.5.0/fma4intrin.h
OLD_FILES+=usr/include/clang/3.5.0/fmaintrin.h
OLD_FILES+=usr/include/clang/3.5.0/ia32intrin.h
OLD_FILES+=usr/include/clang/3.5.0/immintrin.h
OLD_FILES+=usr/include/clang/3.5.0/lzcntintrin.h
OLD_FILES+=usr/include/clang/3.5.0/mm3dnow.h
OLD_FILES+=usr/include/clang/3.5.0/mm_malloc.h
OLD_FILES+=usr/include/clang/3.5.0/mmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/module.modulemap
OLD_FILES+=usr/include/clang/3.5.0/nmmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/pmmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/popcntintrin.h
OLD_FILES+=usr/include/clang/3.5.0/prfchwintrin.h
OLD_FILES+=usr/include/clang/3.5.0/rdseedintrin.h
OLD_FILES+=usr/include/clang/3.5.0/rtmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/shaintrin.h
OLD_FILES+=usr/include/clang/3.5.0/smmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/tbmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/tmmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/wmmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/x86intrin.h
OLD_FILES+=usr/include/clang/3.5.0/xmmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/xopintrin.h
OLD_DIRS+=usr/include/clang/3.5.0
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.san-i386.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.san-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.ubsan-i386.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.ubsan-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.ubsan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.ubsan_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/3.5.0/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.5.0/lib
OLD_DIRS+=usr/lib/clang/3.5.0
# 20150102: removal of asr(4)
OLD_FILES+=usr/share/man/man4/asr.4.gz
# 20150102: removal of texinfo
OLD_FILES+=usr/bin/info
OLD_FILES+=usr/bin/infokey
OLD_FILES+=usr/bin/install-info
OLD_FILES+=usr/bin/makeinfo
OLD_FILES+=usr/bin/texindex
OLD_FILES+=usr/share/info/am-utils.info.gz
OLD_FILES+=usr/share/info/as.info.gz
OLD_FILES+=usr/share/info/binutils.info.gz
OLD_FILES+=usr/share/info/com_err.info.gz
OLD_FILES+=usr/share/info/cpp.info.gz
OLD_FILES+=usr/share/info/cppinternals.info.gz
OLD_FILES+=usr/share/info/diff.info.gz
OLD_FILES+=usr/share/info/dir
OLD_FILES+=usr/share/info/gcc.info.gz
OLD_FILES+=usr/share/info/gccint.info.gz
OLD_FILES+=usr/share/info/gdb.info.gz
OLD_FILES+=usr/share/info/gdbint.info.gz
OLD_FILES+=usr/share/info/gperf.info.gz
OLD_FILES+=usr/share/info/grep.info.gz
OLD_FILES+=usr/share/info/groff.info.gz
OLD_FILES+=usr/share/info/heimdal.info.gz
OLD_FILES+=usr/share/info/history.info.gz
OLD_FILES+=usr/share/info/info-stnd.info.gz
OLD_FILES+=usr/share/info/info.info.gz
OLD_FILES+=usr/share/info/ld.info.gz
OLD_FILES+=usr/share/info/regex.info.gz
OLD_FILES+=usr/share/info/rluserman.info.gz
OLD_FILES+=usr/share/info/stabs.info.gz
OLD_FILES+=usr/share/info/texinfo.info.gz
OLD_FILES+=usr/share/man/man1/info.1.gz
OLD_FILES+=usr/share/man/man1/infokey.1.gz
OLD_FILES+=usr/share/man/man1/install-info.1.gz
OLD_FILES+=usr/share/man/man1/makeinfo.1.gz
OLD_FILES+=usr/share/man/man1/texindex.1.gz
OLD_FILES+=usr/share/man/man5/info.5.gz
OLD_FILES+=usr/share/man/man5/texinfo.5.gz
OLD_DIRS+=usr/share/info
# 20141231: new clang import which bumps version from 3.4.1 to 3.5.0.
OLD_FILES+=usr/include/clang/3.4.1/__wmmintrin_aes.h
OLD_FILES+=usr/include/clang/3.4.1/__wmmintrin_pclmul.h
OLD_FILES+=usr/include/clang/3.4.1/altivec.h
OLD_FILES+=usr/include/clang/3.4.1/ammintrin.h
OLD_FILES+=usr/include/clang/3.4.1/arm_neon.h
OLD_FILES+=usr/include/clang/3.4.1/avx2intrin.h
OLD_FILES+=usr/include/clang/3.4.1/avxintrin.h
OLD_FILES+=usr/include/clang/3.4.1/bmi2intrin.h
OLD_FILES+=usr/include/clang/3.4.1/bmiintrin.h
OLD_FILES+=usr/include/clang/3.4.1/cpuid.h
OLD_FILES+=usr/include/clang/3.4.1/emmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/f16cintrin.h
OLD_FILES+=usr/include/clang/3.4.1/fma4intrin.h
OLD_FILES+=usr/include/clang/3.4.1/fmaintrin.h
OLD_FILES+=usr/include/clang/3.4.1/immintrin.h
OLD_FILES+=usr/include/clang/3.4.1/lzcntintrin.h
OLD_FILES+=usr/include/clang/3.4.1/mm3dnow.h
OLD_FILES+=usr/include/clang/3.4.1/mm_malloc.h
OLD_FILES+=usr/include/clang/3.4.1/mmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/module.map
OLD_FILES+=usr/include/clang/3.4.1/nmmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/pmmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/popcntintrin.h
OLD_FILES+=usr/include/clang/3.4.1/prfchwintrin.h
OLD_FILES+=usr/include/clang/3.4.1/rdseedintrin.h
OLD_FILES+=usr/include/clang/3.4.1/rtmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/shaintrin.h
OLD_FILES+=usr/include/clang/3.4.1/smmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/tbmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/tmmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/wmmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/x86intrin.h
OLD_FILES+=usr/include/clang/3.4.1/xmmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/xopintrin.h
OLD_DIRS+=usr/include/clang/3.4.1
# 20141225: Remove gpib/ieee488
OLD_FILES+=usr/include/dev/ieee488/ibfoo_int.h
OLD_FILES+=usr/include/dev/ieee488/tnt4882.h
OLD_FILES+=usr/include/dev/ieee488/ugpib.h
OLD_FILES+=usr/include/dev/ieee488/upd7210.h
OLD_DIRS+=usr/include/dev/ieee488
OLD_FILES+=usr/include/gpib/gpib.h
OLD_DIRS+=usr/include/gpib
OLD_FILES+=usr/lib/libgpib.a
OLD_FILES+=usr/lib/libgpib_p.a
OLD_FILES+=usr/lib/libgpib.so
OLD_LIBS+=usr/lib/libgpib.so.3
OLD_FILES+=usr/lib32/libgpib.a
OLD_FILES+=usr/lib32/libgpib_p.a
OLD_FILES+=usr/lib32/libgpib.so
OLD_LIBS+=usr/lib32/libgpib.so.3
OLD_FILES+=usr/share/man/man3/gpib.3.gz
OLD_FILES+=usr/share/man/man3/ibclr.3.gz
OLD_FILES+=usr/share/man/man3/ibdev.3.gz
OLD_FILES+=usr/share/man/man3/ibdma.3.gz
OLD_FILES+=usr/share/man/man3/ibeos.3.gz
OLD_FILES+=usr/share/man/man3/ibeot.3.gz
OLD_FILES+=usr/share/man/man3/ibloc.3.gz
OLD_FILES+=usr/share/man/man3/ibonl.3.gz
OLD_FILES+=usr/share/man/man3/ibpad.3.gz
OLD_FILES+=usr/share/man/man3/ibrd.3.gz
OLD_FILES+=usr/share/man/man3/ibsad.3.gz
OLD_FILES+=usr/share/man/man3/ibsic.3.gz
OLD_FILES+=usr/share/man/man3/ibtmo.3.gz
OLD_FILES+=usr/share/man/man3/ibtrg.3.gz
OLD_FILES+=usr/share/man/man3/ibwrt.3.gz
OLD_FILES+=usr/share/man/man4/gpib.4.gz
OLD_FILES+=usr/share/man/man4/pcii.4.gz
OLD_FILES+=usr/share/man/man4/tnt4882.4.gz
# 20141224: libxo moved to /lib
OLD_LIBS+=usr/lib/libxo.so.0
# 20141223: remove in6_gif.h, in_gif.h and if_stf.h
OLD_FILES+=usr/include/net/if_stf.h
OLD_FILES+=usr/include/netinet/in_gif.h
OLD_FILES+=usr/include/netinet6/in6_gif.h
# 20141209: pw tests broken into a file per command
OLD_FILES+=usr/tests/usr.sbin/pw/pw_delete
OLD_FILES+=usr/tests/usr.sbin/pw/pw_modify
# 20141202: update to mandoc CVS 20141201
OLD_FILES+=usr/bin/preconv
OLD_FILES+=usr/share/man/man1/preconv.1.gz
# 20141129: mrouted rc.d script removed from base
OLD_FILES+=etc/rc.d/mrouted
# 20141126: convert sbin/mdconfig/tests to ATF format tests
OLD_FILES+=usr/tests/sbin/mdconfig/legacy_test
OLD_FILES+=usr/tests/sbin/mdconfig/mdconfig.test
OLD_FILES+=usr/tests/sbin/mdconfig/run.pl
# 20141126: remove xform_ipip decapsulation fallback
OLD_FILES+=usr/include/netipsec/ipip_var.h
# 20141122: mandoc updated to 1.13.1
OLD_FILES+=usr/share/mdocml/external.png
# 20141111: SF_KQUEUE code removed
OLD_FILES+=usr/include/sys/sf_base.h
OLD_FILES+=usr/include/sys/sf_sync.h
# 20141109: faith/faithd removal
OLD_FILES+=etc/rc.d/faith
OLD_FILES+=usr/share/man/man4/faith.4.gz
OLD_FILES+=usr/share/man/man4/if_faith.4.gz
OLD_FILES+=usr/sbin/faithd
OLD_FILES+=usr/share/man/man8/faithd.8.gz
# 20141107: overhaul if_gre(4)
OLD_FILES+=usr/include/netinet/ip_gre.h
# 20141102: postrandom obsoleted by new /dev/random code
OLD_FILES+=etc/rc.d/postrandom
# 20141031: initrandom obsoleted by new /dev/random code
OLD_FILES+=etc/rc.d/initrandom
# 20141030: atf 0.21 import
OLD_FILES+=usr/share/man/man3/atf-c++-api.3.gz
# 20141028: debug files accidentally installed as files named after the directory
OLD_FILES+=usr/lib/debug/usr/lib/i18n
OLD_FILES+=usr/lib/debug/usr/lib/private
OLD_FILES+=usr/lib/debug/usr/lib32/i18n
OLD_FILES+=usr/lib/debug/usr/lib32/private
# 20141015: OpenSSL 1.0.1j import
OLD_FILES+=usr/share/openssl/man/man3/CMS_sign_add1_signer.3.gz
# 20141003: libproc version bump
OLD_LIBS+=usr/lib/libproc.so.2
OLD_LIBS+=usr/lib32/libproc.so.2
# 20140922: sleepq_calc_signal_retval.9 and sleepq_catch_signals.9 removed
OLD_FILES+=usr/share/man/man9/sleepq_calc_signal_retval.9.gz
OLD_FILES+=usr/share/man/man9/sleepq_catch_signals.9.gz
# 20140917: hv_kvpd rc.d script removed in favor of devd configuration
OLD_FILES+=etc/rc.d/hv_kvpd
# 20140917: libnv was accidentally being installed to /usr/lib instead of /lib
OLD_LIBS+=usr/lib/libnv.so.0
# 20140829: rc.d/kerberos removed
OLD_FILES+=etc/rc.d/kerberos
# 20140827: tzdata2014f import
OLD_FILES+=usr/share/zoneinfo/Asia/Chongqing
OLD_FILES+=usr/share/zoneinfo/Asia/Harbin
OLD_FILES+=usr/share/zoneinfo/Asia/Kashgar
# 20140814: libopie version bump
OLD_LIBS+=usr/lib/libopie.so.7
OLD_LIBS+=usr/lib32/libopie.so.7
# 20140811: otp-sha renamed to otp-sha1
OLD_FILES+=usr/bin/otp-sha
OLD_FILES+=usr/share/man/man1/otp-sha.1.gz
# 20140807: Remove private lib files that should not be installed.
OLD_FILES+=usr/lib32/private/libatf-c.a
OLD_FILES+=usr/lib32/private/libatf-c.so
OLD_FILES+=usr/lib32/private/libatf-c_p.a
OLD_FILES+=usr/lib32/private/libatf-c++.a
OLD_FILES+=usr/lib32/private/libatf-c++.so
OLD_FILES+=usr/lib32/private/libatf-c++_p.a
OLD_FILES+=usr/lib32/private/libbsdstat.a
OLD_FILES+=usr/lib32/private/libbsdstat.so
OLD_FILES+=usr/lib32/private/libbsdstat_p.a
OLD_FILES+=usr/lib32/private/libheimipcc.a
OLD_FILES+=usr/lib32/private/libheimipcc.so
OLD_FILES+=usr/lib32/private/libheimipcc_p.a
OLD_FILES+=usr/lib32/private/libheimipcs.a
OLD_FILES+=usr/lib32/private/libheimipcs.so
OLD_FILES+=usr/lib32/private/libheimipcs_p.a
OLD_FILES+=usr/lib32/private/libldns.a
OLD_FILES+=usr/lib32/private/libldns.so
OLD_FILES+=usr/lib32/private/libldns_p.a
OLD_FILES+=usr/lib32/private/libssh.a
OLD_FILES+=usr/lib32/private/libssh.so
OLD_FILES+=usr/lib32/private/libssh_p.a
OLD_FILES+=usr/lib32/private/libunbound.a
OLD_FILES+=usr/lib32/private/libunbound.so
OLD_FILES+=usr/lib32/private/libunbound_p.a
OLD_FILES+=usr/lib32/private/libucl.a
OLD_FILES+=usr/lib32/private/libucl.so
OLD_FILES+=usr/lib32/private/libucl_p.a
OLD_FILES+=usr/lib/private/libatf-c.a
OLD_FILES+=usr/lib/private/libatf-c.so
OLD_FILES+=usr/lib/private/libatf-c_p.a
OLD_FILES+=usr/lib/private/libatf-c++.a
OLD_FILES+=usr/lib/private/libatf-c++.so
OLD_FILES+=usr/lib/private/libatf-c++_p.a
OLD_FILES+=usr/lib/private/libbsdstat.a
OLD_FILES+=usr/lib/private/libbsdstat.so
OLD_FILES+=usr/lib/private/libbsdstat_p.a
OLD_FILES+=usr/lib/private/libheimipcc.a
OLD_FILES+=usr/lib/private/libheimipcc.so
OLD_FILES+=usr/lib/private/libheimipcc_p.a
OLD_FILES+=usr/lib/private/libheimipcs.a
OLD_FILES+=usr/lib/private/libheimipcs.so
OLD_FILES+=usr/lib/private/libheimipcs_p.a
OLD_FILES+=usr/lib/private/libldns.a
OLD_FILES+=usr/lib/private/libldns.so
OLD_FILES+=usr/lib/private/libldns_p.a
OLD_FILES+=usr/lib/private/libssh.a
OLD_FILES+=usr/lib/private/libssh.so
OLD_FILES+=usr/lib/private/libssh_p.a
OLD_FILES+=usr/lib/private/libunbound.a
OLD_FILES+=usr/lib/private/libunbound.so
OLD_FILES+=usr/lib/private/libunbound_p.a
OLD_FILES+=usr/lib/private/libucl.a
OLD_FILES+=usr/lib/private/libucl.so
OLD_FILES+=usr/lib/private/libucl_p.a
# 20140803: Remove an obsolete man page
OLD_FILES+=usr/share/man/man9/pmap_change_wiring.9.gz
# 20140731: SYSCTL_ADD_OID(9) removed
OLD_FILES+=usr/share/man/man9/SYSCTL_ADD_OID.9.gz
# 20140728: libsbuf restored to old version.
OLD_LIBS+=lib/libsbuf.so.7
OLD_LIBS+=usr/lib32/libsbuf.so.7
# 20140728: Remove obsolete man pages
OLD_FILES+=usr/share/man/man9/VOP_GETVOBJECT.9.gz
OLD_FILES+=usr/share/man/man9/VOP_CREATEVOBJECT.9.gz
OLD_FILES+=usr/share/man/man9/VOP_DESTROYVOBJECT.9.gz
# 20140723: renamed to PCBGROUP.9
OLD_FILES+=usr/share/man/man9/PCBGROUPS.9.gz
# 20140722: browse_packages_ftp.sh removed
OLD_FILES+=usr/share/examples/bsdconfig/browse_packages_ftp.sh
# 20140718: Remove obsolete man pages
OLD_FILES+=usr/share/man/man9/zero_copy.9.gz
OLD_FILES+=usr/share/man/man9/zero_copy_sockets.9.gz
# 20140718: Remove an obsolete man page
OLD_FILES+=usr/share/man/man9/pmap_page_protect.9.gz
# 20140717: Remove an obsolete man page
OLD_FILES+=usr/share/man/man9/pmap_clear_reference.9.gz
# 20140716: Remove an incorrectly named man page
OLD_FILES+=usr/share/man/man9/pmap_ts_modified.9.gz
# 20140712: Removal of bsd.dtrace.mk
OLD_FILES+=usr/share/mk/bsd.dtrace.mk
# 20140705: turn libreadline into an internal lib
OLD_LIBS+=lib/libreadline.so.8
OLD_FILES+=usr/lib/libreadline.a
OLD_FILES+=usr/lib/libreadline_p.a
OLD_FILES+=usr/lib/libreadline.so
OLD_FILES+=usr/lib/libhistory.a
OLD_FILES+=usr/lib/libhistory_p.a
OLD_FILES+=usr/lib/libhistory.so
OLD_LIBS+=usr/lib/libhistory.so.8
OLD_FILES+=usr/lib32/libhistory.a
OLD_FILES+=usr/lib32/libhistory.so
OLD_LIBS+=usr/lib32/libhistory.so.8
OLD_FILES+=usr/lib32/libhistory_p.a
OLD_FILES+=usr/lib32/libreadline.a
OLD_FILES+=usr/lib32/libreadline.so
OLD_LIBS+=usr/lib32/libreadline.so.8
OLD_FILES+=usr/lib32/libreadline_p.a
OLD_FILES+=usr/include/readline/chardefs.h
OLD_FILES+=usr/include/readline/history.h
OLD_FILES+=usr/include/readline/keymaps.h
OLD_FILES+=usr/include/readline/readline.h
OLD_FILES+=usr/include/readline/tilde.h
OLD_FILES+=usr/include/readline/rlconf.h
OLD_FILES+=usr/include/readline/rlstdc.h
OLD_FILES+=usr/include/readline/rltypedefs.h
OLD_DIRS+=usr/include/readline
OLD_FILES+=usr/share/info/readline.info.gz
OLD_FILES+=usr/share/man/man3/readline.3.gz
OLD_FILES+=usr/share/man/man3/rlhistory.3.gz
# 20140625: csup removal
OLD_FILES+=usr/bin/csup
OLD_FILES+=usr/bin/cpasswd
OLD_FILES+=usr/share/man/man1/csup.1.gz
OLD_FILES+=usr/share/man/man1/cpasswd.1.gz
OLD_FILES+=usr/share/examples/cvsup/README
OLD_FILES+=usr/share/examples/cvsup/cvs-supfile
OLD_FILES+=usr/share/examples/cvsup/stable-supfile
OLD_FILES+=usr/share/examples/cvsup/standard-supfile
OLD_DIRS+=usr/share/examples/cvsup
# 20140614: send-pr removal
OLD_FILES+=usr/bin/sendbug
OLD_FILES+=usr/share/info/send-pr.info.gz
OLD_FILES+=usr/share/man/man1/send-pr.1.gz
OLD_FILES+=usr/share/man/man1/sendbug.1.gz
OLD_FILES+=etc/gnats/freefall
OLD_DIRS+=etc/gnats
# 20140512: new clang import which bumps version from 3.4 to 3.4.1.
OLD_FILES+=usr/include/clang/3.4/__wmmintrin_aes.h
OLD_FILES+=usr/include/clang/3.4/__wmmintrin_pclmul.h
OLD_FILES+=usr/include/clang/3.4/altivec.h
OLD_FILES+=usr/include/clang/3.4/ammintrin.h
OLD_FILES+=usr/include/clang/3.4/avx2intrin.h
OLD_FILES+=usr/include/clang/3.4/avxintrin.h
OLD_FILES+=usr/include/clang/3.4/bmi2intrin.h
OLD_FILES+=usr/include/clang/3.4/bmiintrin.h
OLD_FILES+=usr/include/clang/3.4/cpuid.h
OLD_FILES+=usr/include/clang/3.4/emmintrin.h
OLD_FILES+=usr/include/clang/3.4/f16cintrin.h
OLD_FILES+=usr/include/clang/3.4/fma4intrin.h
OLD_FILES+=usr/include/clang/3.4/fmaintrin.h
OLD_FILES+=usr/include/clang/3.4/immintrin.h
OLD_FILES+=usr/include/clang/3.4/lzcntintrin.h
OLD_FILES+=usr/include/clang/3.4/mm3dnow.h
OLD_FILES+=usr/include/clang/3.4/mm_malloc.h
OLD_FILES+=usr/include/clang/3.4/mmintrin.h
OLD_FILES+=usr/include/clang/3.4/module.map
OLD_FILES+=usr/include/clang/3.4/nmmintrin.h
OLD_FILES+=usr/include/clang/3.4/pmmintrin.h
OLD_FILES+=usr/include/clang/3.4/popcntintrin.h
OLD_FILES+=usr/include/clang/3.4/prfchwintrin.h
OLD_FILES+=usr/include/clang/3.4/rdseedintrin.h
OLD_FILES+=usr/include/clang/3.4/rtmintrin.h
OLD_FILES+=usr/include/clang/3.4/shaintrin.h
OLD_FILES+=usr/include/clang/3.4/smmintrin.h
OLD_FILES+=usr/include/clang/3.4/tbmintrin.h
OLD_FILES+=usr/include/clang/3.4/tmmintrin.h
OLD_FILES+=usr/include/clang/3.4/wmmintrin.h
OLD_FILES+=usr/include/clang/3.4/x86intrin.h
OLD_FILES+=usr/include/clang/3.4/xmmintrin.h
OLD_FILES+=usr/include/clang/3.4/xopintrin.h
OLD_FILES+=usr/include/clang/3.4/arm_neon.h
OLD_DIRS+=usr/include/clang/3.4
# 20140505: Bogusly installing src.opts.mk
OLD_FILES+=usr/share/mk/src.opts.mk
# 20140505: Reject PR kern/187551
OLD_FILES+=usr/tests/sbin/ifconfig/fibs_test
# 20140502: Removal of lindev(4)
OLD_FILES+=usr/share/man/man4/lindev.4.gz
# 20140425: profiled libssp and libstand removed
OLD_FILES+=usr/lib/libssp_p.a
OLD_FILES+=usr/lib/libstand_p.a
OLD_FILES+=usr/lib32/libssp_p.a
OLD_FILES+=usr/lib32/libstand_p.a
# 20140314: AppleTalk support removed
OLD_FILES+=usr/include/netatalk/aarp.h
OLD_FILES+=usr/include/netatalk/at.h
OLD_FILES+=usr/include/netatalk/at_extern.h
OLD_FILES+=usr/include/netatalk/at_var.h
OLD_FILES+=usr/include/netatalk/ddp.h
OLD_FILES+=usr/include/netatalk/ddp_pcb.h
OLD_FILES+=usr/include/netatalk/ddp_var.h
OLD_FILES+=usr/include/netatalk/endian.h
OLD_FILES+=usr/include/netatalk/phase2.h
OLD_DIRS+=usr/include/netatalk
# 20140314: Remove IPX/SPX
OLD_LIBS+=lib/libipx.so.5
OLD_FILES+=usr/include/netipx/ipx.h
OLD_FILES+=usr/include/netipx/ipx_if.h
OLD_FILES+=usr/include/netipx/ipx_pcb.h
OLD_FILES+=usr/include/netipx/ipx_var.h
OLD_FILES+=usr/include/netipx/spx.h
OLD_FILES+=usr/include/netipx/spx_debug.h
OLD_FILES+=usr/include/netipx/spx_timer.h
OLD_FILES+=usr/include/netipx/spx_var.h
OLD_DIRS+=usr/include/netipx
OLD_FILES+=usr/lib/libipx.a
OLD_FILES+=usr/lib/libipx.so
OLD_FILES+=usr/lib/libipx_p.a
OLD_FILES+=usr/lib32/libipx.a
OLD_FILES+=usr/lib32/libipx.so
OLD_LIBS+=usr/lib32/libipx.so.5
OLD_FILES+=usr/lib32/libipx_p.a
OLD_FILES+=usr/sbin/IPXrouted
OLD_FILES+=usr/share/man/man3/ipx.3.gz
OLD_FILES+=usr/share/man/man3/ipx_addr.3.gz
OLD_FILES+=usr/share/man/man3/ipx_ntoa.3.gz
OLD_FILES+=usr/share/man/man4/ef.4.gz
OLD_FILES+=usr/share/man/man4/if_ef.4.gz
OLD_FILES+=usr/share/man/man8/IPXrouted.8.gz
# 20140314: bsdconfig usermgmt rewrite
OLD_FILES+=usr/libexec/bsdconfig/070.usermgmt/userinput
# 20140307: bsdconfig groupmgmt rewrite
OLD_FILES+=usr/libexec/bsdconfig/070.usermgmt/groupinput
# 20140223: Remove libyaml
OLD_FILES+=usr/lib/private/libyaml.a
OLD_FILES+=usr/lib/private/libyaml.so
OLD_LIBS+=usr/lib/private/libyaml.so.1
OLD_FILES+=usr/lib/private/libyaml_p.a
OLD_FILES+=usr/lib32/private/libyaml.a
OLD_FILES+=usr/lib32/private/libyaml.so
OLD_LIBS+=usr/lib32/private/libyaml.so.1
OLD_FILES+=usr/lib32/private/libyaml_p.a
# 20140216: new clang import which bumps version from 3.3 to 3.4.
OLD_FILES+=usr/bin/llvm-prof
OLD_FILES+=usr/include/clang/3.3/__wmmintrin_aes.h
OLD_FILES+=usr/include/clang/3.3/__wmmintrin_pclmul.h
OLD_FILES+=usr/include/clang/3.3/altivec.h
OLD_FILES+=usr/include/clang/3.3/ammintrin.h
OLD_FILES+=usr/include/clang/3.3/avx2intrin.h
OLD_FILES+=usr/include/clang/3.3/avxintrin.h
OLD_FILES+=usr/include/clang/3.3/bmi2intrin.h
OLD_FILES+=usr/include/clang/3.3/bmiintrin.h
OLD_FILES+=usr/include/clang/3.3/cpuid.h
OLD_FILES+=usr/include/clang/3.3/emmintrin.h
OLD_FILES+=usr/include/clang/3.3/f16cintrin.h
OLD_FILES+=usr/include/clang/3.3/fma4intrin.h
OLD_FILES+=usr/include/clang/3.3/fmaintrin.h
OLD_FILES+=usr/include/clang/3.3/immintrin.h
OLD_FILES+=usr/include/clang/3.3/lzcntintrin.h
OLD_FILES+=usr/include/clang/3.3/mm3dnow.h
OLD_FILES+=usr/include/clang/3.3/mm_malloc.h
OLD_FILES+=usr/include/clang/3.3/mmintrin.h
OLD_FILES+=usr/include/clang/3.3/module.map
OLD_FILES+=usr/include/clang/3.3/nmmintrin.h
OLD_FILES+=usr/include/clang/3.3/pmmintrin.h
OLD_FILES+=usr/include/clang/3.3/popcntintrin.h
OLD_FILES+=usr/include/clang/3.3/prfchwintrin.h
OLD_FILES+=usr/include/clang/3.3/rdseedintrin.h
OLD_FILES+=usr/include/clang/3.3/rtmintrin.h
OLD_FILES+=usr/include/clang/3.3/smmintrin.h
OLD_FILES+=usr/include/clang/3.3/tmmintrin.h
OLD_FILES+=usr/include/clang/3.3/wmmintrin.h
OLD_FILES+=usr/include/clang/3.3/x86intrin.h
OLD_FILES+=usr/include/clang/3.3/xmmintrin.h
OLD_FILES+=usr/include/clang/3.3/xopintrin.h
OLD_FILES+=usr/share/man/man1/llvm-prof.1.gz
OLD_FILES+=usr/share/man/man1/llvm-ranlib.1.gz
OLD_DIRS+=usr/include/clang/3.3
# 20140216: nve(4) removed
OLD_FILES+=usr/share/man/man4/if_nve.4.gz
OLD_FILES+=usr/share/man/man4/nve.4.gz
# 20140205: Open Firmware device moved
OLD_FILES+=usr/include/dev/ofw/ofw_nexus.h
# 20140128: libelf and libdwarf import
OLD_LIBS+=usr/lib/libelf.so.1
OLD_LIBS+=usr/lib32/libelf.so.1
OLD_LIBS+=usr/lib/libdwarf.so.3
OLD_LIBS+=usr/lib32/libdwarf.so.3
# 20140123: apicvar header moved to x86
OLD_FILES+=usr/include/machine/apicvar.h
# 20131215: libcam version bumped
OLD_LIBS+=lib/libcam.so.6
OLD_LIBS+=usr/lib32/libcam.so.6
# 20131202: libcapsicum and libcasper moved to /lib/
OLD_LIBS+=usr/lib/libcapsicum.so.0
OLD_LIBS+=usr/lib/libcasper.so.0
# 20131109: extattr(2) mlinks fixed
OLD_FILES+=usr/share/man/man2/extattr_delete_list.2.gz
OLD_FILES+=usr/share/man/man2/extattr_get_list.2.gz
# 20131107: example files removed
OLD_FILES+=usr/share/examples/libusb20/aux.c
OLD_FILES+=usr/share/examples/libusb20/aux.h
# 20131105: tzdata 2013h import
OLD_FILES+=usr/share/zoneinfo/America/Shiprock
OLD_FILES+=usr/share/zoneinfo/Antarctica/South_Pole
# 20131103: WITH_LIBICONV_COMPAT removal
OLD_FILES+=usr/include/_libiconv_compat.h
OLD_FILES+=usr/lib/libiconv.a
OLD_FILES+=usr/lib/libiconv.so
OLD_FILES+=usr/lib/libiconv.so.3
OLD_FILES+=usr/lib/libiconv_p.a
OLD_FILES+=usr/lib32/libiconv.a
OLD_FILES+=usr/lib32/libiconv.so
OLD_FILES+=usr/lib32/libiconv.so.3
OLD_FILES+=usr/lib32/libiconv_p.a
# 20131103: removal of utxrm(8), use 'utx rm' instead.
OLD_FILES+=usr/sbin/utxrm
OLD_FILES+=usr/share/man/man8/utxrm.8.gz
# 20131031: pkg_install has been removed
OLD_FILES+=etc/periodic/daily/220.backup-pkgdb
OLD_FILES+=etc/periodic/daily/490.status-pkg-changes
OLD_FILES+=etc/periodic/security/460.chkportsum
OLD_FILES+=etc/periodic/weekly/400.status-pkg
OLD_FILES+=usr/sbin/pkg_add
OLD_FILES+=usr/sbin/pkg_create
OLD_FILES+=usr/sbin/pkg_delete
OLD_FILES+=usr/sbin/pkg_info
OLD_FILES+=usr/sbin/pkg_updating
OLD_FILES+=usr/sbin/pkg_version
OLD_FILES+=usr/share/man/man1/pkg_add.1.gz
OLD_FILES+=usr/share/man/man1/pkg_create.1.gz
OLD_FILES+=usr/share/man/man1/pkg_delete.1.gz
OLD_FILES+=usr/share/man/man1/pkg_info.1.gz
OLD_FILES+=usr/share/man/man1/pkg_updating.1.gz
OLD_FILES+=usr/share/man/man1/pkg_version.1.gz
# 20131030: /etc/keys moved to /usr/share/keys
OLD_FILES+=etc/keys/pkg/trusted/pkg.freebsd.org.2013102301
OLD_DIRS+=etc/keys/pkg/trusted
OLD_DIRS+=etc/keys/pkg/revoked
OLD_DIRS+=etc/keys/pkg
OLD_DIRS+=etc/keys
# 20131028: ng_fec(4) removed
OLD_FILES+=usr/include/netgraph/ng_fec.h
OLD_FILES+=usr/share/man/man4/ng_fec.4.gz
# 20131027: header moved
OLD_FILES+=usr/include/net/pf_mtag.h
# 20131023: remove never-used iscsi directory
OLD_DIRS+=usr/share/examples/iscsi
# 20131021: isf(4) removed
OLD_FILES+=usr/sbin/isfctl
OLD_FILES+=usr/share/man/man4/isf.4.gz
OLD_FILES+=usr/share/man/man8/isfctl.8.gz
# 20131014: libbsdyml becomes private
OLD_FILES+=usr/lib/libbsdyml.a
OLD_FILES+=usr/lib/libbsdyml.so
OLD_LIBS+=usr/lib/libbsdyml.so.0
OLD_FILES+=usr/lib/libbsdyml_p.a
OLD_FILES+=usr/lib32/libbsdyml.a
OLD_FILES+=usr/lib32/libbsdyml.so
OLD_LIBS+=usr/lib32/libbsdyml.so.0
OLD_FILES+=usr/lib32/libbsdyml_p.a
OLD_FILES+=usr/share/man/man3/libbsdyml.3.gz
OLD_FILES+=usr/include/bsdyml.h
# 20131013: Removal of the ATF tools
OLD_FILES+=etc/atf/FreeBSD.conf
OLD_FILES+=etc/atf/atf-run.hooks
OLD_FILES+=etc/atf/common.conf
OLD_FILES+=usr/bin/atf-config
OLD_FILES+=usr/bin/atf-report
OLD_FILES+=usr/bin/atf-run
OLD_FILES+=usr/bin/atf-version
OLD_FILES+=usr/share/atf/atf-run.hooks
OLD_FILES+=usr/share/examples/atf/atf-run.hooks
OLD_FILES+=usr/share/examples/atf/tests-results.css
OLD_FILES+=usr/share/man/man1/atf-config.1.gz
OLD_FILES+=usr/share/man/man1/atf-report.1.gz
OLD_FILES+=usr/share/man/man1/atf-run.1.gz
OLD_FILES+=usr/share/man/man1/atf-version.1.gz
OLD_FILES+=usr/share/man/man5/atf-formats.5.gz
OLD_FILES+=usr/share/xml/atf/tests-results.dtd
OLD_FILES+=usr/share/xsl/atf/tests-results.xsl
# 20131009: freebsd-version moved from /libexec to /bin
OLD_FILES+=libexec/freebsd-version
# 20131001: ar and ranlib from binutils not used
OLD_FILES+=usr/bin/gnu-ar
OLD_FILES+=usr/bin/gnu-ranlib
OLD_FILES+=usr/share/man/man1/gnu-ar.1.gz
OLD_FILES+=usr/share/man/man1/gnu-ranlib.1.gz
# 20130930: BIND removed from base
OLD_FILES+=etc/mtree/BIND.chroot.dist
OLD_FILES+=etc/namedb
OLD_FILES+=etc/periodic/daily/470.status-named
OLD_FILES+=usr/bin/dig
OLD_FILES+=usr/bin/nslookup
OLD_FILES+=usr/bin/nsupdate
OLD_FILES+=usr/include/lwres/context.h
OLD_FILES+=usr/include/lwres/int.h
OLD_FILES+=usr/include/lwres/ipv6.h
OLD_FILES+=usr/include/lwres/lang.h
OLD_FILES+=usr/include/lwres/list.h
OLD_FILES+=usr/include/lwres/lwbuffer.h
OLD_FILES+=usr/include/lwres/lwpacket.h
OLD_FILES+=usr/include/lwres/lwres.h
OLD_FILES+=usr/include/lwres/net.h
OLD_FILES+=usr/include/lwres/netdb.h
OLD_FILES+=usr/include/lwres/platform.h
OLD_FILES+=usr/include/lwres/result.h
OLD_FILES+=usr/include/lwres/string.h
OLD_FILES+=usr/include/lwres/version.h
OLD_DIRS+=usr/include/lwres
OLD_FILES+=usr/lib/liblwres.a
OLD_FILES+=usr/lib/liblwres.so
OLD_LIBS+=usr/lib/liblwres.so.90
OLD_FILES+=usr/lib/liblwres_p.a
OLD_FILES+=usr/sbin/arpaname
OLD_FILES+=usr/sbin/ddns-confgen
OLD_FILES+=usr/sbin/dnssec-dsfromkey
OLD_FILES+=usr/sbin/dnssec-keyfromlabel
OLD_FILES+=usr/sbin/dnssec-keygen
OLD_FILES+=usr/sbin/dnssec-revoke
OLD_FILES+=usr/sbin/dnssec-settime
OLD_FILES+=usr/sbin/dnssec-signzone
OLD_FILES+=usr/sbin/dnssec-verify
OLD_FILES+=usr/sbin/genrandom
OLD_FILES+=usr/sbin/isc-hmac-fixup
OLD_FILES+=usr/sbin/lwresd
OLD_FILES+=usr/sbin/named
OLD_FILES+=usr/sbin/named-checkconf
OLD_FILES+=usr/sbin/named-checkzone
OLD_FILES+=usr/sbin/named-compilezone
OLD_FILES+=usr/sbin/named-journalprint
OLD_FILES+=usr/sbin/named.reconfig
OLD_FILES+=usr/sbin/named.reload
OLD_FILES+=usr/sbin/nsec3hash
OLD_FILES+=usr/sbin/rndc
OLD_FILES+=usr/sbin/rndc-confgen
OLD_FILES+=usr/share/doc/bind9/CHANGES
OLD_FILES+=usr/share/doc/bind9/COPYRIGHT
OLD_FILES+=usr/share/doc/bind9/FAQ
OLD_FILES+=usr/share/doc/bind9/HISTORY
OLD_FILES+=usr/share/doc/bind9/README
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch01.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch02.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch03.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch04.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch05.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch06.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch07.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch08.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch09.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch10.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.pdf
OLD_FILES+=usr/share/doc/bind9/arm/man.arpaname.html
OLD_FILES+=usr/share/doc/bind9/arm/man.ddns-confgen.html
OLD_FILES+=usr/share/doc/bind9/arm/man.dig.html
OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-dsfromkey.html
OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-keyfromlabel.html
OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-keygen.html
OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-revoke.html
OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-settime.html
OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-signzone.html
OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-verify.html
OLD_FILES+=usr/share/doc/bind9/arm/man.genrandom.html
OLD_FILES+=usr/share/doc/bind9/arm/man.host.html
OLD_FILES+=usr/share/doc/bind9/arm/man.isc-hmac-fixup.html
OLD_FILES+=usr/share/doc/bind9/arm/man.named-checkconf.html
OLD_FILES+=usr/share/doc/bind9/arm/man.named-checkzone.html
OLD_FILES+=usr/share/doc/bind9/arm/man.named-journalprint.html
OLD_FILES+=usr/share/doc/bind9/arm/man.named.html
OLD_FILES+=usr/share/doc/bind9/arm/man.nsec3hash.html
OLD_FILES+=usr/share/doc/bind9/arm/man.nsupdate.html
OLD_FILES+=usr/share/doc/bind9/arm/man.rndc-confgen.html
OLD_FILES+=usr/share/doc/bind9/arm/man.rndc.conf.html
OLD_FILES+=usr/share/doc/bind9/arm/man.rndc.html
OLD_FILES+=usr/share/doc/bind9/misc/dnssec
OLD_FILES+=usr/share/doc/bind9/misc/format-options.pl
OLD_FILES+=usr/share/doc/bind9/misc/ipv6
OLD_FILES+=usr/share/doc/bind9/misc/migration
OLD_FILES+=usr/share/doc/bind9/misc/migration-4to9
OLD_FILES+=usr/share/doc/bind9/misc/options
OLD_FILES+=usr/share/doc/bind9/misc/rfc-compliance
OLD_FILES+=usr/share/doc/bind9/misc/roadmap
OLD_FILES+=usr/share/doc/bind9/misc/sdb
OLD_FILES+=usr/share/doc/bind9/misc/sort-options.pl
OLD_DIRS+=usr/share/doc/bind9/arm
OLD_DIRS+=usr/share/doc/bind9/misc
OLD_DIRS+=usr/share/doc/bind9
OLD_FILES+=usr/share/man/man1/arpaname.1.gz
OLD_FILES+=usr/share/man/man1/dig.1.gz
OLD_FILES+=usr/share/man/man1/nslookup.1.gz
OLD_FILES+=usr/share/man/man1/nsupdate.1.gz
OLD_FILES+=usr/share/man/man3/lwres.3.gz
OLD_FILES+=usr/share/man/man3/lwres_addr_parse.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_add.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_back.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_clear.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_first.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_forward.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_getmem.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_getuint16.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_getuint32.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_getuint8.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_init.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_invalidate.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_putmem.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_putuint16.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_putuint32.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_putuint8.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_subtract.3.gz
OLD_FILES+=usr/share/man/man3/lwres_conf_clear.3.gz
OLD_FILES+=usr/share/man/man3/lwres_conf_get.3.gz
OLD_FILES+=usr/share/man/man3/lwres_conf_init.3.gz
OLD_FILES+=usr/share/man/man3/lwres_conf_parse.3.gz
OLD_FILES+=usr/share/man/man3/lwres_conf_print.3.gz
OLD_FILES+=usr/share/man/man3/lwres_config.3.gz
OLD_FILES+=usr/share/man/man3/lwres_context.3.gz
OLD_FILES+=usr/share/man/man3/lwres_context_allocmem.3.gz
OLD_FILES+=usr/share/man/man3/lwres_context_create.3.gz
OLD_FILES+=usr/share/man/man3/lwres_context_destroy.3.gz
OLD_FILES+=usr/share/man/man3/lwres_context_freemem.3.gz
OLD_FILES+=usr/share/man/man3/lwres_context_initserial.3.gz
OLD_FILES+=usr/share/man/man3/lwres_context_nextserial.3.gz
OLD_FILES+=usr/share/man/man3/lwres_context_sendrecv.3.gz
OLD_FILES+=usr/share/man/man3/lwres_endhostent.3.gz
OLD_FILES+=usr/share/man/man3/lwres_endhostent_r.3.gz
OLD_FILES+=usr/share/man/man3/lwres_freeaddrinfo.3.gz
OLD_FILES+=usr/share/man/man3/lwres_freehostent.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gabn.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gabnrequest_free.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gabnrequest_parse.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gabnrequest_render.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gabnresponse_free.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gabnresponse_parse.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gabnresponse_render.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gai_strerror.3.gz
OLD_FILES+=usr/share/man/man3/lwres_getaddrinfo.3.gz
OLD_FILES+=usr/share/man/man3/lwres_getaddrsbyname.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gethostbyaddr.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gethostbyaddr_r.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gethostbyname.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gethostbyname2.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gethostbyname_r.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gethostent.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gethostent_r.3.gz
OLD_FILES+=usr/share/man/man3/lwres_getipnode.3.gz
OLD_FILES+=usr/share/man/man3/lwres_getipnodebyaddr.3.gz
OLD_FILES+=usr/share/man/man3/lwres_getipnodebyname.3.gz
OLD_FILES+=usr/share/man/man3/lwres_getnamebyaddr.3.gz
OLD_FILES+=usr/share/man/man3/lwres_getnameinfo.3.gz
OLD_FILES+=usr/share/man/man3/lwres_getrrsetbyname.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gnba.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gnbarequest_free.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gnbarequest_parse.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gnbarequest_render.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gnbaresponse_free.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gnbaresponse_parse.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gnbaresponse_render.3.gz
OLD_FILES+=usr/share/man/man3/lwres_herror.3.gz
OLD_FILES+=usr/share/man/man3/lwres_hstrerror.3.gz
OLD_FILES+=usr/share/man/man3/lwres_inetntop.3.gz
OLD_FILES+=usr/share/man/man3/lwres_lwpacket_parseheader.3.gz
OLD_FILES+=usr/share/man/man3/lwres_lwpacket_renderheader.3.gz
OLD_FILES+=usr/share/man/man3/lwres_net_ntop.3.gz
OLD_FILES+=usr/share/man/man3/lwres_noop.3.gz
OLD_FILES+=usr/share/man/man3/lwres_nooprequest_free.3.gz
OLD_FILES+=usr/share/man/man3/lwres_nooprequest_parse.3.gz
OLD_FILES+=usr/share/man/man3/lwres_nooprequest_render.3.gz
OLD_FILES+=usr/share/man/man3/lwres_noopresponse_free.3.gz
OLD_FILES+=usr/share/man/man3/lwres_noopresponse_parse.3.gz
OLD_FILES+=usr/share/man/man3/lwres_noopresponse_render.3.gz
OLD_FILES+=usr/share/man/man3/lwres_packet.3.gz
OLD_FILES+=usr/share/man/man3/lwres_resutil.3.gz
OLD_FILES+=usr/share/man/man3/lwres_sethostent.3.gz
OLD_FILES+=usr/share/man/man3/lwres_sethostent_r.3.gz
OLD_FILES+=usr/share/man/man3/lwres_string_parse.3.gz
OLD_FILES+=usr/share/man/man5/named.conf.5.gz
OLD_FILES+=usr/share/man/man5/rndc.conf.5.gz
OLD_FILES+=usr/share/man/man8/ddns-confgen.8.gz
OLD_FILES+=usr/share/man/man8/dnssec-dsfromkey.8.gz
OLD_FILES+=usr/share/man/man8/dnssec-keyfromlabel.8.gz
OLD_FILES+=usr/share/man/man8/dnssec-keygen.8.gz
OLD_FILES+=usr/share/man/man8/dnssec-revoke.8.gz
OLD_FILES+=usr/share/man/man8/dnssec-settime.8.gz
OLD_FILES+=usr/share/man/man8/dnssec-signzone.8.gz
OLD_FILES+=usr/share/man/man8/dnssec-verify.8.gz
OLD_FILES+=usr/share/man/man8/genrandom.8.gz
OLD_FILES+=usr/share/man/man8/isc-hmac-fixup.8.gz
OLD_FILES+=usr/share/man/man8/lwresd.8.gz
OLD_FILES+=usr/share/man/man8/named-checkconf.8.gz
OLD_FILES+=usr/share/man/man8/named-checkzone.8.gz
OLD_FILES+=usr/share/man/man8/named-compilezone.8.gz
OLD_FILES+=usr/share/man/man8/named-journalprint.8.gz
OLD_FILES+=usr/share/man/man8/named.8.gz
OLD_FILES+=usr/share/man/man8/named.reconfig.8.gz
OLD_FILES+=usr/share/man/man8/named.reload.8.gz
OLD_FILES+=usr/share/man/man8/nsec3hash.8.gz
OLD_FILES+=usr/share/man/man8/rndc-confgen.8.gz
OLD_FILES+=usr/share/man/man8/rndc.8.gz
OLD_FILES+=var/named/etc/namedb/master/empty.db
OLD_FILES+=var/named/etc/namedb/master/localhost-forward.db
OLD_FILES+=var/named/etc/namedb/master/localhost-reverse.db
OLD_DIRS+=var/named/etc/namedb/master
OLD_FILES+=var/named/etc/namedb/PROTO.localhost-v6.rev
OLD_FILES+=var/named/etc/namedb/PROTO.localhost.rev
OLD_FILES+=var/named/etc/namedb/make-localhost
#OLD_FILES+=var/named/etc/namedb/named.conf # intentionally left out (see note below)
OLD_FILES+=var/named/etc/namedb/named.root
OLD_DIRS+=var/named/etc/namedb/dynamic
OLD_DIRS+=var/named/etc/namedb/working
OLD_DIRS+=var/named/etc/namedb/slave
OLD_DIRS+=var/named/etc/namedb
OLD_DIRS+=var/named/etc
OLD_DIRS+=var/named/dev
OLD_DIRS+=var/named/var/run/named
OLD_DIRS+=var/named/var/run
OLD_DIRS+=var/named/var/dump
OLD_DIRS+=var/named/var/log
OLD_DIRS+=var/named/var/stats
OLD_DIRS+=var/named/var
OLD_DIRS+=var/run/named
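# Note on the commented-out named.conf entry above: prefixing an entry with
# "#" keeps the path on record without scheduling it for removal, presumably
# (this reason is an assumption) because the file may carry local
# configuration that an update must not delete.  A minimal sketch of the
# convention, with a hypothetical path:
#OLD_FILES+=etc/hypothetical_local.conf # intentionally left out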
# 20130923: example moved
OLD_FILES+=usr/share/examples/bsdconfig/browse_packages.sh
# 20130908: libssh becomes private
OLD_FILES+=usr/lib/libssh.a
OLD_FILES+=usr/lib/libssh.so
OLD_LIBS+=usr/lib/libssh.so.5
OLD_FILES+=usr/lib/libssh_p.a
OLD_FILES+=usr/lib32/libssh.a
OLD_FILES+=usr/lib32/libssh.so
OLD_LIBS+=usr/lib32/libssh.so.5
OLD_FILES+=usr/lib32/libssh_p.a
# 20130903: gnupatch is no more
OLD_FILES+=usr/bin/gnupatch
OLD_FILES+=usr/share/man/man1/gnupatch.1.gz
# 20130829: bsdpatch is patch unconditionally
OLD_FILES+=usr/bin/bsdpatch
OLD_FILES+=usr/share/man/man1/bsdpatch.1.gz
# 20130822: bind 9.9.3-P2 import
OLD_LIBS+=usr/lib/liblwres.so.80
# 20130814: vm_page_busy(9) rework removed these man pages
OLD_FILES+=usr/share/man/man9/vm_page_flash.9.gz
OLD_FILES+=usr/share/man/man9/vm_page_io.9.gz
OLD_FILES+=usr/share/man/man9/vm_page_io_finish.9.gz
OLD_FILES+=usr/share/man/man9/vm_page_io_start.9.gz
OLD_FILES+=usr/share/man/man9/vm_page_wakeup.9.gz
# 20130710: libkvm version bump
OLD_LIBS+=lib/libkvm.so.5
OLD_LIBS+=usr/lib32/libkvm.so.5
# 20130623: dialog update from 1.1 to 1.2
OLD_LIBS+=usr/lib/libdialog.so.7
OLD_LIBS+=usr/lib32/libdialog.so.7
# 20130616: vfs_mount.9 removed
OLD_FILES+=usr/share/man/man9/vfs_mount.9.gz
# 20130614: remove CVS from base
OLD_FILES+=usr/bin/cvs
OLD_FILES+=usr/bin/cvsbug
OLD_FILES+=usr/share/doc/psd/28.cvs/paper.ascii.gz
OLD_FILES+=usr/share/doc/psd/28.cvs/paper.ps.gz
OLD_DIRS+=usr/share/doc/psd/28.cvs
OLD_FILES+=usr/share/examples/cvs/contrib/README
OLD_FILES+=usr/share/examples/cvs/contrib/clmerge
OLD_FILES+=usr/share/examples/cvs/contrib/cln_hist
OLD_FILES+=usr/share/examples/cvs/contrib/commit_prep
OLD_FILES+=usr/share/examples/cvs/contrib/cvs2vendor
OLD_FILES+=usr/share/examples/cvs/contrib/cvs_acls
OLD_FILES+=usr/share/examples/cvs/contrib/cvscheck
OLD_FILES+=usr/share/examples/cvs/contrib/cvscheck.man
OLD_FILES+=usr/share/examples/cvs/contrib/cvshelp.man
OLD_FILES+=usr/share/examples/cvs/contrib/descend.man
OLD_FILES+=usr/share/examples/cvs/contrib/easy-import
OLD_FILES+=usr/share/examples/cvs/contrib/intro.doc
OLD_FILES+=usr/share/examples/cvs/contrib/log
OLD_FILES+=usr/share/examples/cvs/contrib/log_accum
OLD_FILES+=usr/share/examples/cvs/contrib/mfpipe
OLD_FILES+=usr/share/examples/cvs/contrib/rcs-to-cvs
OLD_FILES+=usr/share/examples/cvs/contrib/rcs2log
OLD_FILES+=usr/share/examples/cvs/contrib/rcslock
OLD_FILES+=usr/share/examples/cvs/contrib/sccs2rcs
OLD_DIRS+=usr/share/examples/cvs/contrib
OLD_DIRS+=usr/share/examples/cvs
OLD_FILES+=usr/share/info/cvs.info.gz
OLD_FILES+=usr/share/info/cvsclient.info.gz
OLD_FILES+=usr/share/man/man1/cvs.1.gz
OLD_FILES+=usr/share/man/man5/cvs.5.gz
OLD_FILES+=usr/share/man/man8/cvsbug.8.gz
# 20130607: WITH_DEBUG_FILES added
OLD_FILES+=lib/libufs.so.6.symbols
OLD_FILES+=usr/lib32/libufs.so.6.symbols
# 20130417: nfs fha moved from nfsserver to nfs
OLD_FILES+=usr/include/nfsserver/nfs_fha.h
# 20130411: new clang import which bumps version from 3.2 to 3.3.
OLD_FILES+=usr/include/clang/3.2/__wmmintrin_aes.h
OLD_FILES+=usr/include/clang/3.2/__wmmintrin_pclmul.h
OLD_FILES+=usr/include/clang/3.2/altivec.h
OLD_FILES+=usr/include/clang/3.2/ammintrin.h
OLD_FILES+=usr/include/clang/3.2/avx2intrin.h
OLD_FILES+=usr/include/clang/3.2/avxintrin.h
OLD_FILES+=usr/include/clang/3.2/bmi2intrin.h
OLD_FILES+=usr/include/clang/3.2/bmiintrin.h
OLD_FILES+=usr/include/clang/3.2/cpuid.h
OLD_FILES+=usr/include/clang/3.2/emmintrin.h
OLD_FILES+=usr/include/clang/3.2/f16cintrin.h
OLD_FILES+=usr/include/clang/3.2/fma4intrin.h
OLD_FILES+=usr/include/clang/3.2/fmaintrin.h
OLD_FILES+=usr/include/clang/3.2/immintrin.h
OLD_FILES+=usr/include/clang/3.2/lzcntintrin.h
OLD_FILES+=usr/include/clang/3.2/mm3dnow.h
OLD_FILES+=usr/include/clang/3.2/mm_malloc.h
OLD_FILES+=usr/include/clang/3.2/mmintrin.h
OLD_FILES+=usr/include/clang/3.2/module.map
OLD_FILES+=usr/include/clang/3.2/nmmintrin.h
OLD_FILES+=usr/include/clang/3.2/pmmintrin.h
OLD_FILES+=usr/include/clang/3.2/popcntintrin.h
OLD_FILES+=usr/include/clang/3.2/rtmintrin.h
OLD_FILES+=usr/include/clang/3.2/smmintrin.h
OLD_FILES+=usr/include/clang/3.2/tmmintrin.h
OLD_FILES+=usr/include/clang/3.2/wmmintrin.h
OLD_FILES+=usr/include/clang/3.2/x86intrin.h
OLD_FILES+=usr/include/clang/3.2/xmmintrin.h
OLD_FILES+=usr/include/clang/3.2/xopintrin.h
OLD_DIRS+=usr/include/clang/3.2
# 20130404: legacy ATA stack removed
OLD_FILES+=etc/periodic/daily/405.status-ata-raid
OLD_FILES+=rescue/atacontrol
OLD_FILES+=sbin/atacontrol
OLD_FILES+=usr/share/man/man8/atacontrol.8.gz
OLD_FILES+=usr/share/man/man4/atapicam.4.gz
OLD_FILES+=usr/share/man/man4/ataraid.4.gz
OLD_FILES+=usr/sbin/burncd
OLD_FILES+=usr/share/man/man8/burncd.8.gz
# 20130316: vinum.4 removed
OLD_FILES+=usr/share/man/man4/vinum.4.gz
# 20130312: fortunes-o removed
OLD_FILES+=usr/share/games/fortune/fortunes-o
OLD_FILES+=usr/share/games/fortune/fortunes-o.dat
# 20130311: Ports are no longer available via cvsup
OLD_FILES+=usr/share/examples/cvsup/ports-supfile
OLD_FILES+=usr/share/examples/cvsup/refuse
OLD_FILES+=usr/share/examples/cvsup/refuse.README
# 20130309: NWFS and NCP support removed
OLD_FILES+=usr/bin/ncplist
OLD_FILES+=usr/bin/ncplogin
OLD_FILES+=usr/bin/ncplogout
OLD_FILES+=usr/include/fs/nwfs/nwfs.h
OLD_FILES+=usr/include/fs/nwfs/nwfs_mount.h
OLD_FILES+=usr/include/fs/nwfs/nwfs_node.h
OLD_FILES+=usr/include/fs/nwfs/nwfs_subr.h
OLD_DIRS+=usr/include/fs/nwfs
OLD_FILES+=usr/include/netncp/ncp.h
OLD_FILES+=usr/include/netncp/ncp_cfg.h
OLD_FILES+=usr/include/netncp/ncp_conn.h
OLD_FILES+=usr/include/netncp/ncp_file.h
OLD_FILES+=usr/include/netncp/ncp_lib.h
OLD_FILES+=usr/include/netncp/ncp_ncp.h
OLD_FILES+=usr/include/netncp/ncp_nls.h
OLD_FILES+=usr/include/netncp/ncp_rcfile.h
OLD_FILES+=usr/include/netncp/ncp_rq.h
OLD_FILES+=usr/include/netncp/ncp_sock.h
OLD_FILES+=usr/include/netncp/ncp_subr.h
OLD_FILES+=usr/include/netncp/ncp_user.h
OLD_FILES+=usr/include/netncp/ncpio.h
OLD_FILES+=usr/include/netncp/nwerror.h
OLD_DIRS+=usr/include/netncp
OLD_FILES+=usr/lib/libncp.a
OLD_FILES+=usr/lib/libncp.so
OLD_LIBS+=usr/lib/libncp.so.4
OLD_FILES+=usr/lib/libncp_p.a
OLD_FILES+=usr/lib32/libncp.a
OLD_FILES+=usr/lib32/libncp.so
OLD_LIBS+=usr/lib32/libncp.so.4
OLD_FILES+=usr/lib32/libncp_p.a
OLD_FILES+=usr/sbin/mount_nwfs
OLD_FILES+=usr/share/examples/nwclient/dot.nwfsrc
OLD_FILES+=usr/share/examples/nwclient/nwfs.sh.sample
OLD_DIRS+=usr/share/examples/nwclient
OLD_FILES+=usr/share/man/man1/ncplist.1.gz
OLD_FILES+=usr/share/man/man1/ncplogin.1.gz
OLD_FILES+=usr/share/man/man1/ncplogout.1.gz
OLD_FILES+=usr/share/man/man8/mount_nwfs.8.gz
# 20130302: NTFS support removed
OLD_FILES+=rescue/mount_ntfs
OLD_FILES+=sbin/mount_ntfs
OLD_FILES+=usr/include/fs/ntfs/ntfs.h
OLD_FILES+=usr/include/fs/ntfs/ntfs_compr.h
OLD_FILES+=usr/include/fs/ntfs/ntfs_ihash.h
OLD_FILES+=usr/include/fs/ntfs/ntfs_inode.h
OLD_FILES+=usr/include/fs/ntfs/ntfs_subr.h
OLD_FILES+=usr/include/fs/ntfs/ntfs_vfsops.h
OLD_FILES+=usr/include/fs/ntfs/ntfsmount.h
OLD_DIRS+=usr/include/fs/ntfs
OLD_FILES+=usr/share/man/man8/mount_ntfs.8.gz
# 20130302: PORTALFS support removed
OLD_FILES+=usr/include/fs/portalfs/portal.h
OLD_DIRS+=usr/include/fs/portalfs
OLD_FILES+=usr/sbin/mount_portalfs
OLD_FILES+=usr/share/examples/portal/README
OLD_FILES+=usr/share/examples/portal/portal.conf
OLD_DIRS+=usr/share/examples/portal
OLD_FILES+=usr/share/man/man8/mount_portalfs.8.gz
# 20130302: CODAFS support removed
OLD_FILES+=usr/share/man/man4/coda.4.gz
# 20130302: XFS support removed
OLD_FILES+=usr/share/man/man5/xfs.5.gz
# 20130302: Capsicum overhaul
OLD_FILES+=usr/share/man/man2/cap_getrights.2.gz
OLD_FILES+=usr/share/man/man2/cap_new.2.gz
# 20130213: OpenSSL 1.0.1e import
OLD_FILES+=usr/share/openssl/man/man3/EVP_PKEY_verifyrecover.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_PKEY_verifyrecover_init.3.gz
# 20130116: removed long-unused directories for .1aout section man pages
OLD_FILES+=usr/share/man/en.ISO8859-1/man1aout
OLD_FILES+=usr/share/man/en.UTF-8/man1aout
OLD_DIRS+=usr/share/man/man1aout
OLD_DIRS+=usr/share/man/cat1aout
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat1aout
OLD_DIRS+=usr/share/man/en.UTF-8/cat1aout
# 20130110: bsd.compat.mk removed
OLD_FILES+=usr/share/mk/bsd.compat.mk
# 20130103: gnats-supfile removed
OLD_FILES+=usr/share/examples/cvsup/gnats-supfile
# 20121230: libdisk removed
OLD_FILES+=usr/share/man/man3/libdisk.3.gz usr/include/libdisk.h
OLD_FILES+=usr/lib/libdisk.a usr/lib32/libdisk.a
# 20121230: remove wrongly created directories for auditdistd
OLD_DIRS+=var/dist
OLD_DIRS+=var/remote
# 20121114: zpool-features manual page moved from section 5 to 7
OLD_FILES+=usr/share/man/man5/zpool-features.5.gz
# 20121022: remove harp, hfa and idt man pages
OLD_FILES+=usr/share/man/man4/harp.4.gz
OLD_FILES+=usr/share/man/man4/hfa.4.gz
OLD_FILES+=usr/share/man/man4/idt.4.gz
OLD_FILES+=usr/share/man/man4/if_idt.4.gz
# 20121022: VFS_LOCK_GIANT elimination
OLD_FILES+=usr/share/man/man9/VFS_LOCK_GIANT.9.gz
OLD_FILES+=usr/share/man/man9/VFS_UNLOCK_GIANT.9.gz
# 20121004: remove incomplete unwind.h
OLD_FILES+=usr/include/clang/3.2/unwind.h
# 20120910: NetBSD compat shims removed
OLD_FILES+=usr/include/cam/scsi/scsi_low_pisa.h
OLD_FILES+=usr/include/sys/device_port.h
# 20120909: doc and www supfiles removed
OLD_FILES+=usr/share/examples/cvsup/doc-supfile
OLD_FILES+=usr/share/examples/cvsup/www-supfile
# 20120908: pf cleanup
OLD_FILES+=usr/include/net/if_pflow.h
# 20120816: new clang import which bumps version from 3.1 to 3.2
OLD_FILES+=usr/bin/llvm-ld
OLD_FILES+=usr/bin/llvm-stub
OLD_FILES+=usr/include/clang/3.1/altivec.h
OLD_FILES+=usr/include/clang/3.1/avx2intrin.h
OLD_FILES+=usr/include/clang/3.1/avxintrin.h
OLD_FILES+=usr/include/clang/3.1/bmi2intrin.h
OLD_FILES+=usr/include/clang/3.1/bmiintrin.h
OLD_FILES+=usr/include/clang/3.1/cpuid.h
OLD_FILES+=usr/include/clang/3.1/emmintrin.h
OLD_FILES+=usr/include/clang/3.1/fma4intrin.h
OLD_FILES+=usr/include/clang/3.1/immintrin.h
OLD_FILES+=usr/include/clang/3.1/lzcntintrin.h
OLD_FILES+=usr/include/clang/3.1/mm3dnow.h
OLD_FILES+=usr/include/clang/3.1/mm_malloc.h
OLD_FILES+=usr/include/clang/3.1/mmintrin.h
OLD_FILES+=usr/include/clang/3.1/module.map
OLD_FILES+=usr/include/clang/3.1/nmmintrin.h
OLD_FILES+=usr/include/clang/3.1/pmmintrin.h
OLD_FILES+=usr/include/clang/3.1/popcntintrin.h
OLD_FILES+=usr/include/clang/3.1/smmintrin.h
OLD_FILES+=usr/include/clang/3.1/tmmintrin.h
OLD_FILES+=usr/include/clang/3.1/unwind.h
OLD_FILES+=usr/include/clang/3.1/wmmintrin.h
OLD_FILES+=usr/include/clang/3.1/x86intrin.h
OLD_FILES+=usr/include/clang/3.1/xmmintrin.h
OLD_DIRS+=usr/include/clang/3.1
OLD_FILES+=usr/share/man/man1/llvm-ld.1.gz
# 20120712: OpenSSL 1.0.1c import
OLD_LIBS+=lib/libcrypto.so.6
OLD_LIBS+=usr/lib/libssl.so.6
OLD_LIBS+=usr/lib32/libcrypto.so.6
OLD_LIBS+=usr/lib32/libssl.so.6
OLD_FILES+=usr/include/openssl/aes_locl.h
OLD_FILES+=usr/include/openssl/bio_lcl.h
OLD_FILES+=usr/include/openssl/e_os.h
OLD_FILES+=usr/include/openssl/fips.h
OLD_FILES+=usr/include/openssl/fips_rand.h
OLD_FILES+=usr/include/openssl/pq_compat.h
OLD_FILES+=usr/include/openssl/tmdiff.h
OLD_FILES+=usr/include/openssl/ui_locl.h
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_set_id_callback.3.gz
# 20120621: remove old man page
OLD_FILES+=usr/share/man/man8/vnconfig.8.gz
# 20120619: TOE support updated
OLD_FILES+=usr/include/netinet/toedev.h
# 20120613: auth.conf removed
OLD_FILES+=etc/auth.conf
OLD_FILES+=usr/share/examples/etc/auth.conf
OLD_FILES+=usr/share/man/man3/auth.3.gz
OLD_FILES+=usr/share/man/man3/auth_getval.3.gz
OLD_FILES+=usr/share/man/man5/auth.conf.5.gz
# 20120530: kde pam now lives in ports
OLD_FILES+=etc/pam.d/kde
# 20120521: byacc import
OLD_FILES+=usr/bin/yyfix
OLD_FILES+=usr/share/man/man1/yyfix.1.gz
# 20120505: new clang import installed a redundant internal header
OLD_FILES+=usr/include/clang/3.1/stdalign.h
# 20120428: MD2 removed from libmd
OLD_LIBS+=lib/libmd.so.5
OLD_FILES+=usr/include/md2.h
OLD_LIBS+=usr/lib32/libmd.so.5
OLD_FILES+=usr/share/man/man3/MD2Data.3.gz
OLD_FILES+=usr/share/man/man3/MD2End.3.gz
OLD_FILES+=usr/share/man/man3/MD2File.3.gz
OLD_FILES+=usr/share/man/man3/MD2FileChunk.3.gz
OLD_FILES+=usr/share/man/man3/MD2Final.3.gz
OLD_FILES+=usr/share/man/man3/MD2Init.3.gz
OLD_FILES+=usr/share/man/man3/MD2Update.3.gz
OLD_FILES+=usr/share/man/man3/md2.3.gz
# 20120425: libusb version bump (r234684)
OLD_LIBS+=usr/lib/libusb.so.2
OLD_LIBS+=usr/lib32/libusb.so.2
OLD_FILES+=usr/share/man/man3/libsub_get_active_config_descriptor.3.gz
# 20120415: new clang import which bumps version from 3.0 to 3.1
OLD_FILES+=usr/include/clang/3.0/altivec.h
OLD_FILES+=usr/include/clang/3.0/avxintrin.h
OLD_FILES+=usr/include/clang/3.0/emmintrin.h
OLD_FILES+=usr/include/clang/3.0/immintrin.h
OLD_FILES+=usr/include/clang/3.0/mm3dnow.h
OLD_FILES+=usr/include/clang/3.0/mm_malloc.h
OLD_FILES+=usr/include/clang/3.0/mmintrin.h
OLD_FILES+=usr/include/clang/3.0/nmmintrin.h
OLD_FILES+=usr/include/clang/3.0/pmmintrin.h
OLD_FILES+=usr/include/clang/3.0/smmintrin.h
OLD_FILES+=usr/include/clang/3.0/tmmintrin.h
OLD_FILES+=usr/include/clang/3.0/wmmintrin.h
OLD_FILES+=usr/include/clang/3.0/x86intrin.h
OLD_FILES+=usr/include/clang/3.0/xmmintrin.h
OLD_DIRS+=usr/include/clang/3.0
# 20120412: BIND 9.8.1 release notes removed
OLD_FILES+=usr/share/doc/bind9/RELEASE-NOTES-BIND-9.8.1.pdf
OLD_FILES+=usr/share/doc/bind9/RELEASE-NOTES-BIND-9.8.1.txt
OLD_FILES+=usr/share/doc/bind9/RELEASE-NOTES-BIND-9.8.1.html
OLD_FILES+=usr/share/doc/bind9/release-notes.css
# 20120330: legacy(4) moved to x86
OLD_FILES+=usr/include/machine/legacyvar.h
# 20120324: MPI headers updated
OLD_FILES+=usr/include/dev/mpt/mpilib/mpi_inb.h
# 20120322: hwpmc_mips24k.h removed
OLD_FILES+=usr/include/dev/hwpmc/hwpmc_mips24k.h
# 20120322: Update heimdal to 1.5.1.
OLD_FILES+=usr/include/krb5-v4compat.h \
usr/include/krb_err.h \
usr/include/hdb-private.h \
usr/share/man/man3/krb5_addresses.3.gz \
usr/share/man/man3/krb5_cc_cursor.3.gz \
usr/share/man/man3/krb5_cc_ops.3.gz \
usr/share/man/man3/krb5_config.3.gz \
usr/share/man/man3/krb5_config_get_int_default.3.gz \
usr/share/man/man3/krb5_context.3.gz \
usr/share/man/man3/krb5_data.3.gz \
usr/share/man/man3/krb5_err.3.gz \
usr/share/man/man3/krb5_errx.3.gz \
usr/share/man/man3/krb5_keyblock.3.gz \
usr/share/man/man3/krb5_keytab_entry.3.gz \
usr/share/man/man3/krb5_kt_cursor.3.gz \
usr/share/man/man3/krb5_kt_ops.3.gz \
usr/share/man/man3/krb5_set_warn_dest.3.gz \
usr/share/man/man3/krb5_verr.3.gz \
usr/share/man/man3/krb5_verrx.3.gz \
usr/share/man/man3/krb5_vwarnx.3.gz \
usr/share/man/man3/krb5_warn.3.gz \
usr/share/man/man3/krb5_warnx.3.gz
OLD_LIBS+=usr/lib/libasn1.so.10 \
usr/lib/libhdb.so.10 \
usr/lib/libheimntlm.so.10 \
usr/lib/libhx509.so.10 \
usr/lib/libkadm5clnt.so.10 \
usr/lib/libkadm5srv.so.10 \
usr/lib/libkafs5.so.10 \
usr/lib/libkrb5.so.10 \
usr/lib/libroken.so.10 \
usr/lib32/libasn1.so.10 \
usr/lib32/libhdb.so.10 \
usr/lib32/libheimntlm.so.10 \
usr/lib32/libhx509.so.10 \
usr/lib32/libkadm5clnt.so.10 \
usr/lib32/libkadm5srv.so.10 \
usr/lib32/libkafs5.so.10 \
usr/lib32/libkrb5.so.10 \
usr/lib32/libroken.so.10
# 20120309: Remove fifofs header files.
OLD_FILES+=usr/include/fs/fifofs/fifo.h
OLD_DIRS+=usr/include/fs/fifofs
# 20120304: xlocale cleanup
OLD_FILES+=usr/include/_xlocale_ctype.h
# 20120225: libarchive 3.0.3
OLD_FILES+=usr/share/man/man3/archive_read_data_into_buffer.3.gz \
usr/share/man/man3/archive_read_support_compression_all.3.gz \
usr/share/man/man3/archive_read_support_compression_bzip2.3.gz \
usr/share/man/man3/archive_read_support_compression_compress.3.gz \
usr/share/man/man3/archive_read_support_compression_gzip.3.gz \
usr/share/man/man3/archive_read_support_compression_lzma.3.gz \
usr/share/man/man3/archive_read_support_compression_none.3.gz \
usr/share/man/man3/archive_read_support_compression_program.3.gz \
usr/share/man/man3/archive_read_support_compression_program_signature.3.gz \
usr/share/man/man3/archive_read_support_compression_xz.3.gz \
usr/share/man/man3/archive_write_set_callbacks.3.gz \
usr/share/man/man3/archive_write_set_compression_bzip2.3.gz \
usr/share/man/man3/archive_write_set_compression_compress.3.gz \
usr/share/man/man3/archive_write_set_compression_gzip.3.gz \
usr/share/man/man3/archive_write_set_compression_none.3.gz \
usr/share/man/man3/archive_write_set_compression_program.3.gz
OLD_LIBS+=usr/lib/libarchive.so.5
OLD_LIBS+=usr/lib32/libarchive.so.5
# 20120113: removal of wtmpcvt(1)
OLD_FILES+=usr/bin/wtmpcvt
OLD_FILES+=usr/share/man/man1/wtmpcvt.1.gz
# 20111214: eventtimers(7) moved to eventtimers(4)
OLD_FILES+=usr/share/man/man7/eventtimers.7.gz
# 20111125: amd(4) removed
OLD_FILES+=usr/share/man/man4/amd.4.gz
# 20111125: libodialog removed
OLD_FILES+=usr/lib/libodialog.a
OLD_FILES+=usr/lib/libodialog.so
OLD_LIBS+=usr/lib/libodialog.so.7
OLD_FILES+=usr/lib/libodialog_p.a
OLD_FILES+=usr/lib32/libodialog.a
OLD_FILES+=usr/lib32/libodialog.so
OLD_LIBS+=usr/lib32/libodialog.so.7
OLD_FILES+=usr/lib32/libodialog_p.a
# 20110930: sysinstall removed
OLD_FILES+=usr/sbin/sysinstall
OLD_FILES+=usr/share/man/man8/sysinstall.8.gz
OLD_FILES+=usr/lib/libftpio.a
OLD_FILES+=usr/lib/libftpio.so
OLD_LIBS+=usr/lib/libftpio.so.8
OLD_FILES+=usr/lib/libftpio_p.a
OLD_FILES+=usr/lib32/libftpio.a
OLD_FILES+=usr/lib32/libftpio.so
OLD_LIBS+=usr/lib32/libftpio.so.8
OLD_FILES+=usr/lib32/libftpio_p.a
OLD_FILES+=usr/include/ftpio.h
OLD_FILES+=usr/share/man/man3/ftpio.3.gz
# 20110915: rename congestion control manpages
OLD_FILES+=usr/share/man/man9/cc.9.gz
# 20110831: atomic page flags operations
OLD_FILES+=usr/share/man/man9/vm_page_flag.9.gz
OLD_FILES+=usr/share/man/man9/vm_page_flag_clear.9.gz
OLD_FILES+=usr/share/man/man9/vm_page_flag_set.9.gz
# 20110828: library version bump for 9.0
OLD_LIBS+=lib/libcam.so.5
OLD_LIBS+=lib/libpcap.so.7
OLD_LIBS+=lib/libufs.so.5
OLD_LIBS+=usr/lib/libbsnmp.so.5
OLD_LIBS+=usr/lib/libdwarf.so.2
OLD_LIBS+=usr/lib/libopie.so.6
OLD_LIBS+=usr/lib/librtld_db.so.1
OLD_LIBS+=usr/lib/libtacplus.so.4
OLD_LIBS+=usr/lib32/libcam.so.5
OLD_LIBS+=usr/lib32/libpcap.so.7
OLD_LIBS+=usr/lib32/libufs.so.5
OLD_LIBS+=usr/lib32/libbsnmp.so.5
OLD_LIBS+=usr/lib32/libdwarf.so.2
OLD_LIBS+=usr/lib32/libopie.so.6
OLD_LIBS+=usr/lib32/librtld_db.so.1
OLD_LIBS+=usr/lib32/libtacplus.so.4
# 20110817: no more acd.4, ad.4, afd.4 and ast.4
OLD_FILES+=usr/share/man/man4/acd.4.gz
OLD_FILES+=usr/share/man/man4/ad.4.gz
OLD_FILES+=usr/share/man/man4/afd.4.gz
OLD_FILES+=usr/share/man/man4/ast.4.gz
# 20110718: no longer useful in the age of rc.d
OLD_FILES+=usr/sbin/named.reconfig
OLD_FILES+=usr/sbin/named.reload
OLD_FILES+=usr/share/man/man8/named.reconfig.8.gz
OLD_FILES+=usr/share/man/man8/named.reload.8.gz
# 20110716: bind 9.8.0 import
OLD_LIBS+=usr/lib/liblwres.so.50
OLD_FILES+=usr/share/doc/bind9/KNOWN-DEFECTS
OLD_FILES+=usr/share/doc/bind9/NSEC3-NOTES
OLD_FILES+=usr/share/doc/bind9/README.idnkit
OLD_FILES+=usr/share/doc/bind9/README.pkcs11
# 20110709: vm_map_clean.9 -> vm_map_sync.9
OLD_FILES+=usr/share/man/man9/vm_map_clean.9.gz
# 20110709: Catch up with removal of these functions.
OLD_FILES+=usr/share/man/man9/vm_page_copy.9.gz
OLD_FILES+=usr/share/man/man9/vm_page_protect.9.gz
OLD_FILES+=usr/share/man/man9/vm_page_zero_fill.9.gz
# 20110707: script no longer needed by /etc/rc.d/nfsd
OLD_FILES+=etc/rc.d/nfsserver
# 20110705: files moved so both NFS clients can share them
OLD_FILES+=usr/include/nfsclient/krpc.h
OLD_FILES+=usr/include/nfsclient/nfsdiskless.h
# 20110705: default NFS client switched to the new implementation
OLD_FILES+=sbin/mount_newnfs
OLD_FILES+=usr/share/man/man8/mount_newnfs.8.gz
OLD_FILES+=usr/include/nfsclient/nfs_kdtrace.h
# 20110628: calendar.msk removed
OLD_FILES+=usr/share/calendar/ru_RU.KOI8-R/calendar.msk
# 20110517: libpkg removed
OLD_FILES+=usr/include/pkg.h
OLD_FILES+=usr/lib/libpkg.a
OLD_FILES+=usr/lib/libpkg.so
OLD_LIBS+=usr/lib/libpkg.so.0
OLD_FILES+=usr/lib/libpkg_p.a
OLD_FILES+=usr/lib32/libpkg.a
OLD_FILES+=usr/lib32/libpkg.so
OLD_LIBS+=usr/lib32/libpkg.so.0
OLD_FILES+=usr/lib32/libpkg_p.a
# 20110517: libsbuf version bump
OLD_LIBS+=lib/libsbuf.so.5
OLD_LIBS+=usr/lib32/libsbuf.so.5
# 20110502: new clang import which bumps version from 2.9 to 3.0
OLD_FILES+=usr/include/clang/2.9/emmintrin.h
OLD_FILES+=usr/include/clang/2.9/mm_malloc.h
OLD_FILES+=usr/include/clang/2.9/mmintrin.h
OLD_FILES+=usr/include/clang/2.9/pmmintrin.h
OLD_FILES+=usr/include/clang/2.9/tmmintrin.h
OLD_FILES+=usr/include/clang/2.9/xmmintrin.h
OLD_DIRS+=usr/include/clang/2.9
# 20110417: removal of Objective-C support
OLD_FILES+=usr/include/objc/encoding.h
OLD_FILES+=usr/include/objc/hash.h
OLD_FILES+=usr/include/objc/NXConstStr.h
OLD_FILES+=usr/include/objc/objc-api.h
OLD_FILES+=usr/include/objc/objc-decls.h
OLD_FILES+=usr/include/objc/objc-list.h
OLD_FILES+=usr/include/objc/objc.h
OLD_FILES+=usr/include/objc/Object.h
OLD_FILES+=usr/include/objc/Protocol.h
OLD_FILES+=usr/include/objc/runtime.h
OLD_FILES+=usr/include/objc/sarray.h
OLD_FILES+=usr/include/objc/thr.h
OLD_FILES+=usr/include/objc/typedstream.h
OLD_FILES+=usr/lib/libobjc.a
OLD_FILES+=usr/lib/libobjc.so
OLD_FILES+=usr/lib/libobjc_p.a
OLD_FILES+=usr/libexec/cc1obj
OLD_LIBS+=usr/lib/libobjc.so.4
OLD_DIRS+=usr/include/objc
OLD_FILES+=usr/lib32/libobjc.a
OLD_FILES+=usr/lib32/libobjc.so
OLD_FILES+=usr/lib32/libobjc_p.a
OLD_LIBS+=usr/lib32/libobjc.so.4
# 20110331: firmware.img created at build time
OLD_FILES+=usr/share/examples/kld/firmware/fwimage/firmware.img
# 20110224: sticky.8 -> sticky.7
OLD_FILES+=usr/share/man/man8/sticky.8.gz
# 20110220: new clang import which bumps version from 2.8 to 2.9
OLD_FILES+=usr/include/clang/2.8/emmintrin.h
OLD_FILES+=usr/include/clang/2.8/mm_malloc.h
OLD_FILES+=usr/include/clang/2.8/mmintrin.h
OLD_FILES+=usr/include/clang/2.8/pmmintrin.h
OLD_FILES+=usr/include/clang/2.8/tmmintrin.h
OLD_FILES+=usr/include/clang/2.8/xmmintrin.h
OLD_DIRS+=usr/include/clang/2.8
# 20110119: netinet/sctp_cc_functions.h removed
OLD_FILES+=usr/include/netinet/sctp_cc_functions.h
# 20110119: Remove SYSCTL_*X* sysctl additions.
OLD_FILES+=usr/share/man/man9/SYSCTL_XINT.9.gz \
usr/share/man/man9/SYSCTL_XLONG.9.gz
# 20110112: Update dialog to a new version, rename old libdialog to libodialog,
# and remove associated man pages and header files.
OLD_FILES+=usr/share/man/man3/draw_shadow.3.gz \
usr/share/man/man3/draw_box.3.gz usr/share/man/man3/line_edit.3.gz \
usr/share/man/man3/strheight.3.gz usr/share/man/man3/strwidth.3.gz \
usr/share/man/man3/dialog_create_rc.3.gz \
usr/share/man/man3/dialog_yesno.3.gz usr/share/man/man3/dialog_noyes.3.gz \
usr/share/man/man3/dialog_prgbox.3.gz \
usr/share/man/man3/dialog_textbox.3.gz usr/share/man/man3/dialog_menu.3.gz \
usr/share/man/man3/dialog_checklist.3.gz \
usr/share/man/man3/dialog_radiolist.3.gz \
usr/share/man/man3/dialog_inputbox.3.gz \
usr/share/man/man3/dialog_clear_norefresh.3.gz \
usr/share/man/man3/dialog_clear.3.gz usr/share/man/man3/dialog_update.3.gz \
usr/share/man/man3/dialog_fselect.3.gz \
usr/share/man/man3/dialog_notify.3.gz \
usr/share/man/man3/dialog_mesgbox.3.gz \
usr/share/man/man3/dialog_gauge.3.gz usr/share/man/man3/init_dialog.3.gz \
usr/share/man/man3/end_dialog.3.gz usr/share/man/man3/use_helpfile.3.gz \
usr/share/man/man3/use_helpline.3.gz usr/share/man/man3/get_helpline.3.gz \
usr/share/man/man3/restore_helpline.3.gz \
usr/share/man/man3/dialog_msgbox.3.gz \
usr/share/man/man3/dialog_ftree.3.gz usr/share/man/man3/dialog_tree.3.gz \
usr/share/examples/dialog/README usr/share/examples/dialog/checklist \
usr/share/examples/dialog/ftreebox usr/share/examples/dialog/infobox \
usr/share/examples/dialog/inputbox usr/share/examples/dialog/menubox \
usr/share/examples/dialog/msgbox usr/share/examples/dialog/prgbox \
usr/share/examples/dialog/radiolist usr/share/examples/dialog/textbox \
usr/share/examples/dialog/treebox usr/share/examples/dialog/yesno \
usr/share/examples/libdialog/Makefile usr/share/examples/libdialog/check1.c\
usr/share/examples/libdialog/check2.c usr/share/examples/libdialog/check3.c\
usr/share/examples/libdialog/dselect.c \
usr/share/examples/libdialog/fselect.c \
usr/share/examples/libdialog/ftree1.c \
usr/share/examples/libdialog/ftree1.test \
usr/share/examples/libdialog/ftree2.c \
usr/share/examples/libdialog/ftree2.test \
usr/share/examples/libdialog/gauge.c usr/share/examples/libdialog/input1.c \
usr/share/examples/libdialog/input2.c usr/share/examples/libdialog/menu1.c \
usr/share/examples/libdialog/menu2.c usr/share/examples/libdialog/menu3.c \
usr/share/examples/libdialog/msg.c usr/share/examples/libdialog/prgbox.c \
usr/share/examples/libdialog/radio1.c usr/share/examples/libdialog/radio2.c\
usr/share/examples/libdialog/radio3.c usr/share/examples/libdialog/text.c \
usr/share/examples/libdialog/tree.c usr/share/examples/libdialog/yesno.c
OLD_DIRS+=usr/share/examples/libdialog usr/share/examples/dialog
# 20101114: Remove long-obsolete MAKEDEV.8
OLD_FILES+=usr/share/man/man8/MAKEDEV.8.gz
# 20101112: vgonel(9) was moved to the private API a while ago
OLD_FILES+=usr/share/man/man9/vgonel.9.gz
# 20101112: removed gasp.info
OLD_FILES+=usr/share/info/gasp.info.gz
# 20101109: machine/mutex.h removed
OLD_FILES+=usr/include/machine/mutex.h
# 20101109: headers moved from machine/ to x86/
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/include/machine/mptable.h
.endif
# 20101101: headers moved from machine/ to x86/
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/include/machine/apicreg.h
OLD_FILES+=usr/include/machine/mca.h
.endif
# 20101020: catch up with vm_page_sleep_if_busy rename
OLD_FILES+=usr/share/man/man9/vm_page_sleep_busy.9.gz
# 20101018: taskqueue(9) updates
OLD_FILES+=usr/share/man/man9/taskqueue_find.9.gz
# 20101011: removed subblock.h from liblzma
OLD_FILES+=usr/include/lzma/subblock.h
# 20101002: removed manpath.config
OLD_FILES+=etc/manpath.config
OLD_FILES+=usr/share/examples/etc/manpath.config
# 20100910: renamed sbuf_overflowed to sbuf_error
OLD_FILES+=usr/share/man/man9/sbuf_overflowed.9.gz
# 20100815: retired last traces of chooseproc(9)
OLD_FILES+=usr/share/man/man9/chooseproc.9.gz
# 20100806: removal of unused libcompat routines
OLD_FILES+=usr/share/man/man3/ascftime.3.gz
OLD_FILES+=usr/share/man/man3/cfree.3.gz
OLD_FILES+=usr/share/man/man3/cftime.3.gz
OLD_FILES+=usr/share/man/man3/getpw.3.gz
# 20100801: tzdata2010k import
OLD_FILES+=usr/share/zoneinfo/Pacific/Ponape
OLD_FILES+=usr/share/zoneinfo/Pacific/Truk
# 20100725: acpi_aiboost(4) removal.
OLD_FILES+=usr/share/man/man4/acpi_aiboost.4.gz
# 20100724: nfsclient/nfs_lock.h moved to nfs/nfs_lock.h
OLD_FILES+=usr/include/nfsclient/nfs_lock.h
# 20100720: new clang import which bumps version from 2.0 to 2.8
OLD_FILES+=usr/include/clang/2.0/emmintrin.h
OLD_FILES+=usr/include/clang/2.0/mm_malloc.h
OLD_FILES+=usr/include/clang/2.0/mmintrin.h
OLD_FILES+=usr/include/clang/2.0/pmmintrin.h
OLD_FILES+=usr/include/clang/2.0/tmmintrin.h
OLD_FILES+=usr/include/clang/2.0/xmmintrin.h
OLD_DIRS+=usr/include/clang/2.0
# 20100706: removed pc-sysinstall's detect-vmware.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/detect-vmware.sh
# 20100701: [powerpc] removed <machine/intr.h>
.if ${TARGET_ARCH} == "powerpc"
OLD_FILES+=usr/include/machine/intr.h
.endif
# 20100514: library version bump for versioned symbols in liblzma
OLD_LIBS+=usr/lib/liblzma.so.0
OLD_LIBS+=usr/lib32/liblzma.so.0
# 20100511: move GCC-specific headers to /usr/include/gcc
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/include/emmintrin.h
OLD_FILES+=usr/include/mm_malloc.h
OLD_FILES+=usr/include/pmmintrin.h
OLD_FILES+=usr/include/xmmintrin.h
.endif
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386" || ${TARGET_ARCH} == "arm"
OLD_FILES+=usr/include/mmintrin.h
.endif
.if ${TARGET_ARCH} == "powerpc"
OLD_FILES+=usr/include/altivec.h
OLD_FILES+=usr/include/ppc-asm.h
OLD_FILES+=usr/include/spe.h
.endif
# 20100416: [mips] removed <machine/psl.h>
.if ${TARGET_ARCH} == "mips"
OLD_FILES+=usr/include/machine/psl.h
.endif
# 20100415: [mips] removed unused headers
.if ${TARGET_ARCH} == "mips"
OLD_FILES+=usr/include/machine/archtype.h
OLD_FILES+=usr/include/machine/segments.h
OLD_FILES+=usr/include/machine/rm7000.h
OLD_FILES+=usr/include/machine/defs.h
OLD_FILES+=usr/include/machine/queue.h
.endif
# 20100326: gcpio removal
OLD_FILES+=usr/bin/gcpio
OLD_FILES+=usr/share/info/cpio.info.gz
OLD_FILES+=usr/share/man/man1/gcpio.1.gz
# 20100322: libz update
OLD_LIBS+=lib/libz.so.5
OLD_LIBS+=usr/lib32/libz.so.5
# 20100314: removal of regexp.h
OLD_FILES+=usr/include/regexp.h
OLD_FILES+=usr/share/man/man3/regexp.3.gz
OLD_FILES+=usr/share/man/man3/regsub.3.gz
# 20100303: actual removal of utmp.h
OLD_FILES+=usr/include/utmp.h
# 20100208: man pages moved
.if ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/share/man/man4/i386/alpm.4.gz
OLD_FILES+=usr/share/man/man4/i386/amdpm.4.gz
OLD_FILES+=usr/share/man/man4/i386/mcd.4.gz
OLD_FILES+=usr/share/man/man4/i386/padlock.4.gz
OLD_FILES+=usr/share/man/man4/i386/pcf.4.gz
OLD_FILES+=usr/share/man/man4/i386/scd.4.gz
OLD_FILES+=usr/share/man/man4/i386/viapm.4.gz
.endif
# 20100122: move BSDL bc/dc USD documents to /usr/share/doc/usd
OLD_FILES+=usr/share/doc/papers/bc.ascii.gz
OLD_FILES+=usr/share/doc/papers/dc.ascii.gz
# 20100120: replacing GNU bc/dc with BSDL versions
OLD_FILES+=usr/share/examples/bc/ckbook.b
OLD_FILES+=usr/share/examples/bc/pi.b
OLD_FILES+=usr/share/examples/bc/primes.b
OLD_FILES+=usr/share/examples/bc/twins.b
OLD_FILES+=usr/share/info/dc.info.gz
OLD_DIRS+=usr/share/examples/bc
# 20100114: removal of ttyslot(3)
OLD_FILES+=usr/share/man/man3/ttyslot.3.gz
# 20100113: remove utmp.h, replace it with utmpx.h
OLD_FILES+=usr/share/man/man3/login.3.gz
OLD_FILES+=usr/share/man/man3/logout.3.gz
OLD_FILES+=usr/share/man/man3/logwtmp.3.gz
OLD_FILES+=usr/share/man/man3/ulog_endutxent.3.gz
OLD_FILES+=usr/share/man/man3/ulog_getutxent.3.gz
OLD_FILES+=usr/share/man/man3/ulog_getutxline.3.gz
OLD_FILES+=usr/share/man/man3/ulog_getutxuser.3.gz
OLD_FILES+=usr/share/man/man3/ulog_pututxline.3.gz
OLD_FILES+=usr/share/man/man3/ulog_setutxent.3.gz
OLD_FILES+=usr/share/man/man3/ulog_setutxfile.3.gz
OLD_FILES+=usr/share/man/man5/lastlog.5.gz
OLD_FILES+=usr/share/man/man5/utmp.5.gz
OLD_FILES+=usr/share/man/man5/wtmp.5.gz
OLD_LIBS+=lib/libutil.so.8
OLD_LIBS+=usr/lib32/libutil.so.8
# 20100105: new userland semaphore implementation
OLD_FILES+=usr/include/sys/semaphore.h
# 20100103: ntptrace(8) removed
OLD_FILES+=usr/sbin/ntptrace
OLD_FILES+=usr/share/man/man8/ntptrace.8.gz
# 20091229: remove no longer relevant examples
OLD_FILES+=usr/share/examples/pppd/auth-down.sample
OLD_FILES+=usr/share/examples/pppd/auth-up.sample
OLD_FILES+=usr/share/examples/pppd/chap-secrets.sample
OLD_FILES+=usr/share/examples/pppd/chat.sh.sample
OLD_FILES+=usr/share/examples/pppd/ip-down.sample
OLD_FILES+=usr/share/examples/pppd/ip-up.sample
OLD_FILES+=usr/share/examples/pppd/options.sample
OLD_FILES+=usr/share/examples/pppd/pap-secrets.sample
OLD_FILES+=usr/share/examples/pppd/ppp.deny.sample
OLD_FILES+=usr/share/examples/pppd/ppp.shells.sample
OLD_DIRS+=usr/share/examples/pppd
OLD_FILES+=usr/share/examples/slattach/unit-command.sh
OLD_DIRS+=usr/share/examples/slattach
OLD_FILES+=usr/share/examples/sliplogin/slip.hosts
OLD_FILES+=usr/share/examples/sliplogin/slip.login
OLD_FILES+=usr/share/examples/sliplogin/slip.logout
OLD_FILES+=usr/share/examples/sliplogin/slip.slparms
OLD_DIRS+=usr/share/examples/sliplogin
OLD_FILES+=usr/share/examples/startslip/sldown.sh
OLD_FILES+=usr/share/examples/startslip/slip.sh
OLD_FILES+=usr/share/examples/startslip/slup.sh
OLD_DIRS+=usr/share/examples/startslip
# 20091202: unify rc.firewall and rc.firewall6.
OLD_FILES+=etc/rc.d/ip6fw
OLD_FILES+=etc/rc.firewall6
OLD_FILES+=usr/share/examples/etc/rc.firewall6
# 20091117: removal of rc.early(8) link
OLD_FILES+=usr/share/man/man8/rc.early.8.gz
# 20091117: usr/share/zoneinfo/GMT link removed
OLD_FILES+=usr/share/zoneinfo/GMT
# 20091027: pselect.3 implemented as a syscall
OLD_FILES+=usr/share/man/man3/pselect.3.gz
# 20091005: fusword.9 and susword.9 removed
OLD_FILES+=usr/share/man/man9/fusword.9.gz
OLD_FILES+=usr/share/man/man9/susword.9.gz
# 20090909: vesa and dpms promoted to be i386/amd64 common
OLD_FILES+=usr/include/machine/pc/vesa.h
OLD_FILES+=usr/share/man/man4/i386/dpms.4.gz
# 20090904: remove lukemftpd
OLD_FILES+=usr/libexec/lukemftpd
OLD_FILES+=usr/share/man/man5/ftpd.conf.5.gz
OLD_FILES+=usr/share/man/man5/ftpusers.5.gz
OLD_FILES+=usr/share/man/man8/lukemftpd.8.gz
# 20090902: BSD.{x11,x11-4}.dist are dead and BSD.local.dist lives in ports/
OLD_FILES+=etc/mtree/BSD.local.dist
OLD_FILES+=etc/mtree/BSD.x11.dist
OLD_FILES+=etc/mtree/BSD.x11-4.dist
# 20090812: net80211 documentation overhaul
OLD_FILES+=usr/share/man/man9/ieee80211_add_rates.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_add_xrates.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_alloc_node.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_attach.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_begin_scan.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_cfgget.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_cfgset.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_chan2ieee.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_chan2mode.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_create_ibss.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_crypto_attach.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_crypto_detach.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_decap.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_dump_pkt.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_dup_bss.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_encap.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_end_scan.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_find_node.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_fix_rate.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_free_allnodes.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_ieee2mhz.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_ioctl.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_lookup_node.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_media2rate.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_media_change.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_media_init.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_media_status.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_mhz2ieee.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_next_scan.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_node_attach.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_node_detach.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_node_lateattach.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_print_essid.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_proto_attach.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_proto_detach.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_rate2media.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_recv_mgmt.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_send_mgmt.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_setmode.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_timeout_nodes.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_watchdog.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_wep_crypt.9.gz
# 20090801: vimage.h removed in favour of vnet.h
OLD_FILES+=usr/include/sys/vimage.h
# 20101208: libbsnmp was moved to usr/lib
OLD_LIBS+=lib/libbsnmp.so.5
# 20090719: library version bump for 8.0
OLD_LIBS+=lib/libalias.so.6
OLD_LIBS+=lib/libavl.so.1
OLD_LIBS+=lib/libbegemot.so.3
OLD_LIBS+=lib/libbsdxml.so.3
OLD_LIBS+=lib/libbsnmp.so.4
OLD_LIBS+=lib/libcam.so.4
OLD_LIBS+=lib/libcrypt.so.4
OLD_LIBS+=lib/libcrypto.so.5
OLD_LIBS+=lib/libctf.so.1
OLD_LIBS+=lib/libdevstat.so.6
OLD_LIBS+=lib/libdtrace.so.1
OLD_LIBS+=lib/libedit.so.6
OLD_LIBS+=lib/libgeom.so.4
OLD_LIBS+=lib/libipsec.so.3
OLD_LIBS+=lib/libipx.so.4
OLD_LIBS+=lib/libkiconv.so.3
OLD_LIBS+=lib/libkvm.so.4
OLD_LIBS+=lib/libmd.so.4
OLD_LIBS+=lib/libncurses.so.7
OLD_LIBS+=lib/libncursesw.so.7
OLD_LIBS+=lib/libnvpair.so.1
OLD_LIBS+=lib/libpcap.so.6
OLD_LIBS+=lib/libreadline.so.7
OLD_LIBS+=lib/libsbuf.so.4
OLD_LIBS+=lib/libufs.so.4
OLD_LIBS+=lib/libumem.so.1
OLD_LIBS+=lib/libutil.so.7
OLD_LIBS+=lib/libuutil.so.1
OLD_LIBS+=lib/libz.so.4
OLD_LIBS+=lib/libzfs.so.1
OLD_LIBS+=lib/libzpool.so.1
OLD_LIBS+=usr/lib/libarchive.so.4
OLD_LIBS+=usr/lib/libauditd.so.4
OLD_LIBS+=usr/lib/libbluetooth.so.3
OLD_LIBS+=usr/lib/libbsm.so.2
OLD_LIBS+=usr/lib/libbz2.so.3
OLD_LIBS+=usr/lib/libcalendar.so.4
OLD_LIBS+=usr/lib/libcom_err.so.4
OLD_LIBS+=usr/lib/libdevinfo.so.4
OLD_LIBS+=usr/lib/libdialog.so.6
OLD_LIBS+=usr/lib/libdwarf.so.1
OLD_LIBS+=usr/lib/libfetch.so.5
OLD_LIBS+=usr/lib/libform.so.4
OLD_LIBS+=usr/lib/libformw.so.4
OLD_LIBS+=usr/lib/libftpio.so.7
OLD_LIBS+=usr/lib/libgnuregex.so.4
OLD_LIBS+=usr/lib/libgpib.so.2
OLD_LIBS+=usr/lib/libhistory.so.7
OLD_LIBS+=usr/lib/libmagic.so.3
OLD_LIBS+=usr/lib/libmemstat.so.2
OLD_LIBS+=usr/lib/libmenu.so.4
OLD_LIBS+=usr/lib/libmenuw.so.4
OLD_LIBS+=usr/lib/libmilter.so.4
OLD_LIBS+=usr/lib/libncp.so.3
OLD_LIBS+=usr/lib/libnetgraph.so.3
OLD_LIBS+=usr/lib/libngatm.so.3
OLD_LIBS+=usr/lib/libobjc.so.3
OLD_LIBS+=usr/lib/libopie.so.5
OLD_LIBS+=usr/lib/libpam.so.4
OLD_LIBS+=usr/lib/libpanel.so.4
OLD_LIBS+=usr/lib/libpanelw.so.4
OLD_LIBS+=usr/lib/libpmc.so.4
OLD_LIBS+=usr/lib/libproc.so.1
OLD_LIBS+=usr/lib/libradius.so.3
OLD_LIBS+=usr/lib/librpcsvc.so.4
OLD_LIBS+=usr/lib/libsdp.so.3
OLD_LIBS+=usr/lib/libsmb.so.3
OLD_LIBS+=usr/lib/libssh.so.4
OLD_LIBS+=usr/lib/libssl.so.5
OLD_LIBS+=usr/lib/libtacplus.so.3
OLD_LIBS+=usr/lib/libugidfw.so.3
OLD_LIBS+=usr/lib/libusb.so.1
OLD_LIBS+=usr/lib/libusbhid.so.3
OLD_LIBS+=usr/lib/libvgl.so.5
OLD_LIBS+=usr/lib/libwrap.so.5
OLD_LIBS+=usr/lib/libypclnt.so.3
OLD_LIBS+=usr/lib/pam_chroot.so.4
OLD_LIBS+=usr/lib/pam_deny.so.4
OLD_LIBS+=usr/lib/pam_echo.so.4
OLD_LIBS+=usr/lib/pam_exec.so.4
OLD_LIBS+=usr/lib/pam_ftpusers.so.4
OLD_LIBS+=usr/lib/pam_group.so.4
OLD_LIBS+=usr/lib/pam_guest.so.4
OLD_LIBS+=usr/lib/pam_krb5.so.4
OLD_LIBS+=usr/lib/pam_ksu.so.4
OLD_LIBS+=usr/lib/pam_lastlog.so.4
OLD_LIBS+=usr/lib/pam_login_access.so.4
OLD_LIBS+=usr/lib/pam_nologin.so.4
OLD_LIBS+=usr/lib/pam_opie.so.4
OLD_LIBS+=usr/lib/pam_opieaccess.so.4
OLD_LIBS+=usr/lib/pam_passwdqc.so.4
OLD_LIBS+=usr/lib/pam_permit.so.4
OLD_LIBS+=usr/lib/pam_radius.so.4
OLD_LIBS+=usr/lib/pam_rhosts.so.4
OLD_LIBS+=usr/lib/pam_rootok.so.4
OLD_LIBS+=usr/lib/pam_securetty.so.4
OLD_LIBS+=usr/lib/pam_self.so.4
OLD_LIBS+=usr/lib/pam_ssh.so.4
OLD_LIBS+=usr/lib/pam_tacplus.so.4
OLD_LIBS+=usr/lib/pam_unix.so.4
OLD_LIBS+=usr/lib/snmp_atm.so.5
OLD_LIBS+=usr/lib/snmp_bridge.so.5
OLD_LIBS+=usr/lib/snmp_hostres.so.5
OLD_LIBS+=usr/lib/snmp_mibII.so.5
OLD_LIBS+=usr/lib/snmp_netgraph.so.5
OLD_LIBS+=usr/lib/snmp_pf.so.5
OLD_LIBS+=usr/lib32/libalias.so.6
OLD_LIBS+=usr/lib32/libarchive.so.4
OLD_LIBS+=usr/lib32/libauditd.so.4
OLD_LIBS+=usr/lib32/libavl.so.1
OLD_LIBS+=usr/lib32/libbegemot.so.3
OLD_LIBS+=usr/lib32/libbluetooth.so.3
OLD_LIBS+=usr/lib32/libbsdxml.so.3
OLD_LIBS+=usr/lib32/libbsm.so.2
OLD_LIBS+=usr/lib32/libbsnmp.so.4
OLD_LIBS+=usr/lib32/libbz2.so.3
OLD_LIBS+=usr/lib32/libcalendar.so.4
OLD_LIBS+=usr/lib32/libcam.so.4
OLD_LIBS+=usr/lib32/libcom_err.so.4
OLD_LIBS+=usr/lib32/libcrypt.so.4
OLD_LIBS+=usr/lib32/libcrypto.so.5
OLD_LIBS+=usr/lib32/libctf.so.1
OLD_LIBS+=usr/lib32/libdevinfo.so.4
OLD_LIBS+=usr/lib32/libdevstat.so.6
OLD_LIBS+=usr/lib32/libdialog.so.6
OLD_LIBS+=usr/lib32/libdtrace.so.1
OLD_LIBS+=usr/lib32/libdwarf.so.1
OLD_LIBS+=usr/lib32/libedit.so.6
OLD_LIBS+=usr/lib32/libfetch.so.5
OLD_LIBS+=usr/lib32/libform.so.4
OLD_LIBS+=usr/lib32/libformw.so.4
OLD_LIBS+=usr/lib32/libftpio.so.7
OLD_LIBS+=usr/lib32/libgeom.so.4
OLD_LIBS+=usr/lib32/libgnuregex.so.4
OLD_LIBS+=usr/lib32/libgpib.so.2
OLD_LIBS+=usr/lib32/libhistory.so.7
OLD_LIBS+=usr/lib32/libipsec.so.3
OLD_LIBS+=usr/lib32/libipx.so.4
OLD_LIBS+=usr/lib32/libkiconv.so.3
OLD_LIBS+=usr/lib32/libkvm.so.4
OLD_LIBS+=usr/lib32/libmagic.so.3
OLD_LIBS+=usr/lib32/libmd.so.4
OLD_LIBS+=usr/lib32/libmemstat.so.2
OLD_LIBS+=usr/lib32/libmenu.so.4
OLD_LIBS+=usr/lib32/libmenuw.so.4
OLD_LIBS+=usr/lib32/libmilter.so.4
OLD_LIBS+=usr/lib32/libncp.so.3
OLD_LIBS+=usr/lib32/libncurses.so.7
OLD_LIBS+=usr/lib32/libncursesw.so.7
OLD_LIBS+=usr/lib32/libnetgraph.so.3
OLD_LIBS+=usr/lib32/libngatm.so.3
OLD_LIBS+=usr/lib32/libnvpair.so.1
OLD_LIBS+=usr/lib32/libobjc.so.3
OLD_LIBS+=usr/lib32/libopie.so.5
OLD_LIBS+=usr/lib32/libpam.so.4
OLD_LIBS+=usr/lib32/libpanel.so.4
OLD_LIBS+=usr/lib32/libpanelw.so.4
OLD_LIBS+=usr/lib32/libpcap.so.6
OLD_LIBS+=usr/lib32/libpmc.so.4
OLD_LIBS+=usr/lib32/libproc.so.1
OLD_LIBS+=usr/lib32/libradius.so.3
OLD_LIBS+=usr/lib32/libreadline.so.7
OLD_LIBS+=usr/lib32/librpcsvc.so.4
OLD_LIBS+=usr/lib32/libsbuf.so.4
OLD_LIBS+=usr/lib32/libsdp.so.3
OLD_LIBS+=usr/lib32/libsmb.so.3
OLD_LIBS+=usr/lib32/libssh.so.4
OLD_LIBS+=usr/lib32/libssl.so.5
OLD_LIBS+=usr/lib32/libtacplus.so.3
OLD_LIBS+=usr/lib32/libufs.so.4
OLD_LIBS+=usr/lib32/libugidfw.so.3
OLD_LIBS+=usr/lib32/libumem.so.1
OLD_LIBS+=usr/lib32/libusb.so.1
OLD_LIBS+=usr/lib32/libusbhid.so.3
OLD_LIBS+=usr/lib32/libutil.so.7
OLD_LIBS+=usr/lib32/libuutil.so.1
OLD_LIBS+=usr/lib32/libvgl.so.5
OLD_LIBS+=usr/lib32/libwrap.so.5
OLD_LIBS+=usr/lib32/libypclnt.so.3
OLD_LIBS+=usr/lib32/libz.so.4
OLD_LIBS+=usr/lib32/libzfs.so.1
OLD_LIBS+=usr/lib32/libzpool.so.1
OLD_LIBS+=usr/lib32/pam_chroot.so.4
OLD_LIBS+=usr/lib32/pam_deny.so.4
OLD_LIBS+=usr/lib32/pam_echo.so.4
OLD_LIBS+=usr/lib32/pam_exec.so.4
OLD_LIBS+=usr/lib32/pam_ftpusers.so.4
OLD_LIBS+=usr/lib32/pam_group.so.4
OLD_LIBS+=usr/lib32/pam_guest.so.4
OLD_LIBS+=usr/lib32/pam_krb5.so.4
OLD_LIBS+=usr/lib32/pam_ksu.so.4
OLD_LIBS+=usr/lib32/pam_lastlog.so.4
OLD_LIBS+=usr/lib32/pam_login_access.so.4
OLD_LIBS+=usr/lib32/pam_nologin.so.4
OLD_LIBS+=usr/lib32/pam_opie.so.4
OLD_LIBS+=usr/lib32/pam_opieaccess.so.4
OLD_LIBS+=usr/lib32/pam_passwdqc.so.4
OLD_LIBS+=usr/lib32/pam_permit.so.4
OLD_LIBS+=usr/lib32/pam_radius.so.4
OLD_LIBS+=usr/lib32/pam_rhosts.so.4
OLD_LIBS+=usr/lib32/pam_rootok.so.4
OLD_LIBS+=usr/lib32/pam_securetty.so.4
OLD_LIBS+=usr/lib32/pam_self.so.4
OLD_LIBS+=usr/lib32/pam_ssh.so.4
OLD_LIBS+=usr/lib32/pam_tacplus.so.4
OLD_LIBS+=usr/lib32/pam_unix.so.4
# 20090718: the gdm pam.d file is no longer required.
OLD_FILES+=etc/pam.d/gdm
# 20090714: net_add_domain(9) renamed to domain_add(9)
OLD_FILES+=usr/share/man/man9/net_add_domain.9.gz
# 20090713: vimage container structs removed.
OLD_FILES+=usr/include/netinet/vinet.h
OLD_FILES+=usr/include/netinet6/vinet6.h
OLD_FILES+=usr/include/netipsec/vipsec.h
# 20090712: ieee80211.4 -> net80211.4
OLD_FILES+=usr/share/man/man4/ieee80211.4.gz
# 20090711: typo fixed, kproc_resume,.9 -> kproc_resume.9
OLD_FILES+=usr/share/man/man9/kproc_resume,.9.gz
# 20090709: msgctl.3 msgget.3 msgrcv.3 msgsnd.3 manual pages moved
OLD_FILES+=usr/share/man/man3/msgctl.3.gz
OLD_FILES+=usr/share/man/man3/msgget.3.gz
OLD_FILES+=usr/share/man/man3/msgrcv.3.gz
OLD_FILES+=usr/share/man/man3/msgsnd.3.gz
# 20090630: old kernel RPC implementation removal
OLD_FILES+=usr/include/nfs/rpcv2.h
# 20090624: update usbdi(9)
OLD_FILES+=usr/share/man/man9/usbd_abort_default_pipe.9.gz
OLD_FILES+=usr/share/man/man9/usbd_abort_pipe.9.gz
OLD_FILES+=usr/share/man/man9/usbd_alloc_buffer.9.gz
OLD_FILES+=usr/share/man/man9/usbd_alloc_xfer.9.gz
OLD_FILES+=usr/share/man/man9/usbd_clear_endpoint_stall.9.gz
OLD_FILES+=usr/share/man/man9/usbd_clear_endpoint_stall_async.9.gz
OLD_FILES+=usr/share/man/man9/usbd_clear_endpoint_toggle.9.gz
OLD_FILES+=usr/share/man/man9/usbd_close_pipe.9.gz
OLD_FILES+=usr/share/man/man9/usbd_device2interface_handle.9.gz
OLD_FILES+=usr/share/man/man9/usbd_do_request_async.9.gz
OLD_FILES+=usr/share/man/man9/usbd_do_request_flags_pipe.9.gz
OLD_FILES+=usr/share/man/man9/usbd_endpoint_count.9.gz
OLD_FILES+=usr/share/man/man9/usbd_find_edesc.9.gz
OLD_FILES+=usr/share/man/man9/usbd_find_idesc.9.gz
OLD_FILES+=usr/share/man/man9/usbd_free_buffer.9.gz
OLD_FILES+=usr/share/man/man9/usbd_free_xfer.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_buffer.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_config.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_config_desc.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_config_desc_full.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_config_descriptor.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_device_descriptor.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_endpoint_descriptor.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_interface_altindex.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_interface_descriptor.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_no_alts.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_quirks.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_speed.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_string.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_string_desc.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_xfer_status.9.gz
OLD_FILES+=usr/share/man/man9/usbd_interface2device_handle.9.gz
OLD_FILES+=usr/share/man/man9/usbd_interface2endpoint_descriptor.9.gz
OLD_FILES+=usr/share/man/man9/usbd_interface_count.9.gz
OLD_FILES+=usr/share/man/man9/usbd_open_pipe.9.gz
OLD_FILES+=usr/share/man/man9/usbd_open_pipe_intr.9.gz
OLD_FILES+=usr/share/man/man9/usbd_pipe2device_handle.9.gz
OLD_FILES+=usr/share/man/man9/usbd_set_config_index.9.gz
OLD_FILES+=usr/share/man/man9/usbd_set_config_no.9.gz
OLD_FILES+=usr/share/man/man9/usbd_set_interface.9.gz
OLD_FILES+=usr/share/man/man9/usbd_setup_default_xfer.9.gz
OLD_FILES+=usr/share/man/man9/usbd_setup_isoc_xfer.9.gz
OLD_FILES+=usr/share/man/man9/usbd_setup_xfer.9.gz
OLD_FILES+=usr/share/man/man9/usbd_sync_transfer.9.gz
OLD_FILES+=usr/share/man/man9/usbd_transfer.9.gz
OLD_FILES+=usr/share/man/man9/usb_find_desc.9.gz
# 20090623: number of headers needed for a USB driver reduced
OLD_FILES+=usr/include/dev/usb/usb_defs.h
OLD_FILES+=usr/include/dev/usb/usb_error.h
OLD_FILES+=usr/include/dev/usb/usb_handle_request.h
OLD_FILES+=usr/include/dev/usb/usb_hid.h
OLD_FILES+=usr/include/dev/usb/usb_lookup.h
OLD_FILES+=usr/include/dev/usb/usb_mfunc.h
OLD_FILES+=usr/include/dev/usb/usb_parse.h
OLD_FILES+=usr/include/dev/usb/usb_revision.h
# 20090609: devclass_add_driver is no longer public
OLD_FILES+=usr/share/man/man9/devclass_add_driver.9.gz
OLD_FILES+=usr/share/man/man9/devclass_delete_driver.9.gz
OLD_FILES+=usr/share/man/man9/devclass_find_driver.9.gz
# 20090605: removal of clists
OLD_FILES+=usr/include/sys/clist.h
# 20090602: removal of window(1)
OLD_FILES+=usr/bin/window
OLD_FILES+=usr/share/man/man1/window.1.gz
# 20090531: bind 9.6.1rc1 import
OLD_LIBS+=usr/lib/liblwres.so.30
# 20090530: removal of early.sh
OLD_FILES+=etc/rc.d/early.sh
# 20090527: renaming of S{LIST,TAILQ}_REMOVE_NEXT() to _REMOVE_AFTER()
OLD_FILES+=usr/share/man/man3/SLIST_REMOVE_NEXT.3.gz
OLD_FILES+=usr/share/man/man3/STAILQ_REMOVE_NEXT.3.gz
# 20090527: removal of legacy USB stack
OLD_FILES+=usr/include/legacy/dev/usb/dsbr100io.h
OLD_FILES+=usr/include/legacy/dev/usb/ehcireg.h
OLD_FILES+=usr/include/legacy/dev/usb/ehcivar.h
OLD_FILES+=usr/include/legacy/dev/usb/hid.h
OLD_FILES+=usr/include/legacy/dev/usb/if_urtwreg.h
OLD_FILES+=usr/include/legacy/dev/usb/if_urtwvar.h
OLD_FILES+=usr/include/legacy/dev/usb/ohcireg.h
OLD_FILES+=usr/include/legacy/dev/usb/ohcivar.h
OLD_FILES+=usr/include/legacy/dev/usb/rio500_usb.h
OLD_FILES+=usr/include/legacy/dev/usb/rt2573_ucode.h
OLD_FILES+=usr/include/legacy/dev/usb/sl811hsreg.h
OLD_FILES+=usr/include/legacy/dev/usb/sl811hsvar.h
OLD_FILES+=usr/include/legacy/dev/usb/ubser.h
OLD_FILES+=usr/include/legacy/dev/usb/ucomvar.h
OLD_FILES+=usr/include/legacy/dev/usb/udbp.h
OLD_FILES+=usr/include/legacy/dev/usb/uftdireg.h
OLD_FILES+=usr/include/legacy/dev/usb/ugraphire_rdesc.h
OLD_FILES+=usr/include/legacy/dev/usb/uhcireg.h
OLD_FILES+=usr/include/legacy/dev/usb/uhcivar.h
OLD_FILES+=usr/include/legacy/dev/usb/usb.h
OLD_FILES+=usr/include/legacy/dev/usb/usb_mem.h
OLD_FILES+=usr/include/legacy/dev/usb/usb_port.h
OLD_FILES+=usr/include/legacy/dev/usb/usb_quirks.h
OLD_FILES+=usr/include/legacy/dev/usb/usbcdc.h
OLD_FILES+=usr/include/legacy/dev/usb/usbdi.h
OLD_FILES+=usr/include/legacy/dev/usb/usbdi_util.h
OLD_FILES+=usr/include/legacy/dev/usb/usbdivar.h
OLD_FILES+=usr/include/legacy/dev/usb/usbhid.h
OLD_FILES+=usr/include/legacy/dev/usb/uxb360gp_rdesc.h
OLD_DIRS+=usr/include/legacy/dev/usb
OLD_DIRS+=usr/include/legacy/dev
OLD_DIRS+=usr/include/legacy
# 20090526: removal of makekey(8)
OLD_FILES+=usr/libexec/makekey
OLD_FILES+=usr/share/man/man8/makekey.8.gz
# 20090522: removal of University of Michigan NFSv4 client
OLD_FILES+=etc/rc.d/idmapd
OLD_FILES+=sbin/idmapd
OLD_FILES+=sbin/mount_nfs4
OLD_FILES+=usr/share/man/man8/idmapd.8.gz
OLD_FILES+=usr/share/man/man8/mount_nfs4.8.gz
# 20090513: removal of legacy versions of USB network interface drivers
OLD_FILES+=usr/include/legacy/dev/usb/if_upgtvar.h
OLD_FILES+=usr/include/legacy/dev/usb/usb_ethersubr.h
# 20090417: removal of legacy versions of USB network interface drivers
OLD_FILES+=usr/include/legacy/dev/usb/if_auereg.h
OLD_FILES+=usr/include/legacy/dev/usb/if_axereg.h
OLD_FILES+=usr/include/legacy/dev/usb/if_cdcereg.h
OLD_FILES+=usr/include/legacy/dev/usb/if_cuereg.h
OLD_FILES+=usr/include/legacy/dev/usb/if_kuereg.h
OLD_FILES+=usr/include/legacy/dev/usb/if_ruereg.h
OLD_FILES+=usr/include/legacy/dev/usb/if_rumreg.h
OLD_FILES+=usr/include/legacy/dev/usb/if_rumvar.h
OLD_FILES+=usr/include/legacy/dev/usb/if_udavreg.h
OLD_FILES+=usr/include/legacy/dev/usb/if_uralreg.h
OLD_FILES+=usr/include/legacy/dev/usb/if_uralvar.h
OLD_FILES+=usr/include/legacy/dev/usb/if_zydfw.h
OLD_FILES+=usr/include/legacy/dev/usb/if_zydreg.h
OLD_FILES+=usr/include/legacy/dev/usb/kue_fw.h
# 20090416: removal of ar(4), ray(4), sr(4), raycontrol(8)
OLD_FILES+=usr/sbin/raycontrol
OLD_FILES+=usr/share/man/man4/i386/ar.4.gz
OLD_FILES+=usr/share/man/man4/i386/ray.4.gz
OLD_FILES+=usr/share/man/man4/i386/sr.4.gz
OLD_FILES+=usr/share/man/man8/raycontrol.8.gz
# 20090410: VOP_LEASE.9 removed
OLD_FILES+=usr/share/man/man9/VOP_LEASE.9.gz
# 20090406: usb_sw_transfer.h removed
OLD_FILES+=usr/include/dev/usb/usb_sw_transfer.h
# 20090405: removal of if_ppp(4) and if_sl(4)
OLD_FILES+=sbin/slattach rescue/slattach
OLD_FILES+=sbin/startslip rescue/startslip
OLD_FILES+=usr/include/net/if_ppp.h
OLD_FILES+=usr/include/net/if_pppvar.h
OLD_FILES+=usr/include/net/if_slvar.h
OLD_FILES+=usr/include/net/ppp_comp.h
OLD_FILES+=usr/include/net/slip.h
OLD_FILES+=usr/sbin/sliplogin
OLD_FILES+=usr/sbin/slstat
OLD_FILES+=usr/sbin/pppd
OLD_FILES+=usr/sbin/pppstats
OLD_FILES+=usr/share/man/man1/startslip.1.gz
OLD_FILES+=usr/share/man/man4/if_ppp.4.gz
OLD_FILES+=usr/share/man/man4/if_sl.4.gz
OLD_FILES+=usr/share/man/man4/ppp.4.gz
OLD_FILES+=usr/share/man/man4/sl.4.gz
OLD_FILES+=usr/share/man/man8/pppd.8.gz
OLD_FILES+=usr/share/man/man8/pppstats.8.gz
OLD_FILES+=usr/share/man/man8/slattach.8.gz
OLD_FILES+=usr/share/man/man8/slip.8.gz
OLD_FILES+=usr/share/man/man8/sliplogin.8.gz
OLD_FILES+=usr/share/man/man8/slstat.8.gz
# 20090321: libpcap upgraded to 1.0.0
OLD_LIBS+=lib/libpcap.so.5
OLD_LIBS+=usr/lib32/libpcap.so.5
# 20090319: uscanner(4) has been removed
OLD_FILES+=usr/share/man/man4/uscanner.4.gz
# 20090313: k8temp(4) renamed to amdtemp(4)
OLD_FILES+=usr/share/man/man4/k8temp.4.gz
# 20090308: libusb.so.1 renamed
OLD_LIBS+=usr/lib/libusb20.so.1
OLD_FILES+=usr/lib/libusb20.a
OLD_FILES+=usr/lib/libusb20.so
OLD_FILES+=usr/lib/libusb20_p.a
OLD_FILES+=usr/include/libusb20_compat01.h
OLD_FILES+=usr/include/libusb20_compat10.h
OLD_LIBS+=usr/lib32/libusb20.so.1
OLD_FILES+=usr/lib32/libusb20.a
OLD_FILES+=usr/lib32/libusb20.so
OLD_FILES+=usr/lib32/libusb20_p.a
# 20090226: libmp(3) functions renamed
OLD_LIBS+=usr/lib/libmp.so.6
OLD_LIBS+=usr/lib32/libmp.so.6
# 20090223: changeover of USB stacks
OLD_FILES+=usr/include/dev/usb2/include/ufm2_ioctl.h
OLD_FILES+=usr/include/dev/usb2/include/urio2_ioctl.h
OLD_FILES+=usr/include/dev/usb2/include/usb2_cdc.h
OLD_FILES+=usr/include/dev/usb2/include/usb2_defs.h
OLD_FILES+=usr/include/dev/usb2/include/usb2_devid.h
OLD_FILES+=usr/include/dev/usb2/include/usb2_devtable.h
OLD_FILES+=usr/include/dev/usb2/include/usb2_endian.h
OLD_FILES+=usr/include/dev/usb2/include/usb2_error.h
OLD_FILES+=usr/include/dev/usb2/include/usb2_hid.h
OLD_FILES+=usr/include/dev/usb2/include/usb2_ioctl.h
OLD_FILES+=usr/include/dev/usb2/include/usb2_mfunc.h
OLD_FILES+=usr/include/dev/usb2/include/usb2_revision.h
OLD_FILES+=usr/include/dev/usb2/include/usb2_standard.h
OLD_DIRS+=usr/include/dev/usb2/include
OLD_DIRS+=usr/include/dev/usb2
OLD_FILES+=usr/include/dev/usb/dsbr100io.h
OLD_FILES+=usr/include/dev/usb/ehcireg.h
OLD_FILES+=usr/include/dev/usb/ehcivar.h
OLD_FILES+=usr/include/dev/usb/hid.h
OLD_FILES+=usr/include/dev/usb/if_auereg.h
OLD_FILES+=usr/include/dev/usb/if_axereg.h
OLD_FILES+=usr/include/dev/usb/if_cdcereg.h
OLD_FILES+=usr/include/dev/usb/if_cuereg.h
OLD_FILES+=usr/include/dev/usb/if_kuereg.h
OLD_FILES+=usr/include/dev/usb/if_ruereg.h
OLD_FILES+=usr/include/dev/usb/if_rumreg.h
OLD_FILES+=usr/include/dev/usb/if_rumvar.h
OLD_FILES+=usr/include/dev/usb/if_udavreg.h
OLD_FILES+=usr/include/dev/usb/if_upgtvar.h
OLD_FILES+=usr/include/dev/usb/if_uralreg.h
OLD_FILES+=usr/include/dev/usb/if_uralvar.h
OLD_FILES+=usr/include/dev/usb/if_urtwreg.h
OLD_FILES+=usr/include/dev/usb/if_urtwvar.h
OLD_FILES+=usr/include/dev/usb/if_zydfw.h
OLD_FILES+=usr/include/dev/usb/if_zydreg.h
OLD_FILES+=usr/include/dev/usb/kue_fw.h
OLD_FILES+=usr/include/dev/usb/ohcireg.h
OLD_FILES+=usr/include/dev/usb/ohcivar.h
OLD_FILES+=usr/include/dev/usb/rio500_usb.h
OLD_FILES+=usr/include/dev/usb/rt2573_ucode.h
OLD_FILES+=usr/include/dev/usb/sl811hsreg.h
OLD_FILES+=usr/include/dev/usb/sl811hsvar.h
OLD_FILES+=usr/include/dev/usb/ubser.h
OLD_FILES+=usr/include/dev/usb/ucomvar.h
OLD_FILES+=usr/include/dev/usb/udbp.h
OLD_FILES+=usr/include/dev/usb/uftdireg.h
OLD_FILES+=usr/include/dev/usb/ugraphire_rdesc.h
OLD_FILES+=usr/include/dev/usb/uhcireg.h
OLD_FILES+=usr/include/dev/usb/uhcivar.h
OLD_FILES+=usr/include/dev/usb/usb_ethersubr.h
OLD_FILES+=usr/include/dev/usb/usb_mem.h
OLD_FILES+=usr/include/dev/usb/usb_port.h
OLD_FILES+=usr/include/dev/usb/usb_quirks.h
OLD_FILES+=usr/include/dev/usb/usbcdc.h
OLD_FILES+=usr/include/dev/usb/usbdivar.h
OLD_FILES+=usr/include/dev/usb/uxb360gp_rdesc.h
OLD_FILES+=usr/sbin/usbdevs
OLD_FILES+=usr/share/man/man8/usbdevs.8.gz
# 20090203: removal of pccard header files
OLD_FILES+=usr/include/pccard/cardinfo.h
OLD_FILES+=usr/include/pccard/cis.h
OLD_DIRS+=usr/include/pccard
# 20090203: adding_user.8 moved to adding_user.7
OLD_FILES+=usr/share/man/man8/adding_user.8.gz
# 20090122: tzdata2009a import
OLD_FILES+=usr/share/zoneinfo/Asia/Katmandu
# 20090102: file 4.26 import
OLD_FILES+=usr/share/misc/magic.mime
OLD_FILES+=usr/share/misc/magic.mime.mgc
# 20081223: bind 9.4.3 import, nsupdate.8 moved to nsupdate.1
OLD_FILES+=usr/share/man/man8/nsupdate.8.gz
# 20081223: ipprotosw.h removed
OLD_FILES+=usr/include/netinet/ipprotosw.h
# 20081123: vfs_mountedon.9 removed
OLD_FILES+=usr/share/man/man9/vfs_mountedon.9.gz
# 20081023: FREE.9 and MALLOC.9 removed
OLD_FILES+=usr/share/man/man9/FREE.9.gz
OLD_FILES+=usr/share/man/man9/MALLOC.9.gz
# 20080928: removal of inaccurate device_ids(9) manual page
OLD_FILES+=usr/share/man/man9/device_ids.9.gz
OLD_FILES+=usr/share/man/man9/major.9.gz
OLD_FILES+=usr/share/man/man9/minor.9.gz
OLD_FILES+=usr/share/man/man9/umajor.9.gz
OLD_FILES+=usr/share/man/man9/uminor.9.gz
# 20080917: removal of man pages for the axed kernel primitive suser(9)
OLD_FILES+=usr/share/man/man9/suser.9.gz
OLD_FILES+=usr/share/man/man9/suser_cred.9.gz
# 20080913: pax removed from rescue
OLD_FILES+=rescue/pax
# 20080823: removal of pt_chown, no longer needed to implement grantpt(3)
OLD_FILES+=usr/libexec/pt_chown
# 20080822: ntp 4.2.4p5 import
OLD_FILES+=usr/share/doc/ntp/driver23.html
OLD_FILES+=usr/share/doc/ntp/driver24.html
# 20080821: several man pages moved from man4.i386 to man4
.if ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/share/man/man4/i386/acpi_aiboost.4.gz
OLD_FILES+=usr/share/man/man4/i386/acpi_asus.4.gz
OLD_FILES+=usr/share/man/man4/i386/acpi_fujitsu.4.gz
OLD_FILES+=usr/share/man/man4/i386/acpi_ibm.4.gz
OLD_FILES+=usr/share/man/man4/i386/acpi_panasonic.4.gz
OLD_FILES+=usr/share/man/man4/i386/acpi_sony.4.gz
OLD_FILES+=usr/share/man/man4/i386/acpi_toshiba.4.gz
OLD_FILES+=usr/share/man/man4/i386/ichwd.4.gz
OLD_FILES+=usr/share/man/man4/i386/if_ndis.4.gz
OLD_FILES+=usr/share/man/man4/i386/io.4.gz
OLD_FILES+=usr/share/man/man4/i386/linux.4.gz
OLD_FILES+=usr/share/man/man4/i386/ndis.4.gz
.endif
# 20080820: MPSAFE TTY layer integrated
OLD_FILES+=usr/include/sys/linedisc.h
OLD_FILES+=usr/share/man/man3/posix_openpt.3.gz
# 20080725: sgtty.h removed
OLD_FILES+=usr/include/sgtty.h
# 20080706: bsdlabel(8) removed on powerpc
.if ${TARGET_ARCH} == "powerpc"
OLD_FILES+=sbin/bsdlabel
OLD_FILES+=usr/share/man/man8/bsdlabel.8.gz
.endif
# 20080704: sbsh(4) removed
OLD_FILES+=usr/share/man/man4/if_sbsh.4.gz
OLD_FILES+=usr/share/man/man4/sbsh.4.gz
# 20080704: cnw(4) removed
OLD_FILES+=usr/share/man/man4/if_cnw.4.gz
OLD_FILES+=usr/share/man/man4/cnw.4.gz
# 20080704: oltr(4) removed
.if ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/share/man/man4/i386/if_oltr.4.gz
OLD_FILES+=usr/share/man/man4/i386/oltr.4.gz
.endif
# 20080704: arl(4) removed
.if ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/sbin/arlcontrol
OLD_FILES+=usr/share/man/man4/i386/arl.4.gz
OLD_FILES+=usr/share/man/man8/arlcontrol.8.gz
.endif
# 20080703: sunlabel only for sparc64
.if ${TARGET_ARCH} != "sparc64"
OLD_FILES+=sbin/sunlabel
OLD_FILES+=usr/share/man/man8/sunlabel.8.gz
.endif
# 20080701: wpa_supplicant.conf moved to share/examples/etc/
OLD_FILES+=usr/share/examples/wpa_supplicant/wpa_supplicant.conf
OLD_DIRS+=usr/share/examples/wpa_supplicant
# 20080614: pecoff image activator removed
.if ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/include/machine/pecoff_machdep.h
.endif
# 20080614: sgtty removed
OLD_FILES+=usr/include/sys/ttychars.h
OLD_FILES+=usr/include/sys/ttydev.h
OLD_FILES+=usr/share/man/man3/gtty.3.gz
OLD_FILES+=usr/share/man/man3/stty.3.gz
# 20080609: gpt(8) removed
OLD_FILES+=sbin/gpt
OLD_FILES+=usr/share/man/man8/gpt.8.gz
# 20080525: I4B removed
OLD_FILES+=etc/isdn/answer
OLD_FILES+=etc/isdn/isdntel
OLD_FILES+=etc/isdn/record
OLD_FILES+=etc/isdn/tell
OLD_FILES+=etc/isdn/tell-record
OLD_FILES+=etc/isdn/unknown_incoming
OLD_FILES+=etc/isdn/holidays.D
OLD_FILES+=etc/isdn/isdnd.rates.A
OLD_FILES+=etc/isdn/isdnd.rates.D
OLD_FILES+=etc/isdn/isdnd.rates.F
OLD_FILES+=etc/isdn/isdnd.rates.L
OLD_FILES+=etc/isdn/isdnd.rates.UK.BT
OLD_FILES+=etc/isdn/isdnd.rc.sample
OLD_FILES+=etc/isdn/isdntel.alias.sample
OLD_DIRS+=etc/isdn
OLD_FILES+=etc/rc.d/isdnd
OLD_FILES+=usr/include/i4b/i4b_cause.h
OLD_FILES+=usr/include/i4b/i4b_debug.h
OLD_FILES+=usr/include/i4b/i4b_ioctl.h
OLD_FILES+=usr/include/i4b/i4b_rbch_ioctl.h
OLD_FILES+=usr/include/i4b/i4b_tel_ioctl.h
OLD_FILES+=usr/include/i4b/i4b_trace.h
OLD_DIRS+=usr/include/i4b
OLD_FILES+=usr/sbin/dtmfdecode
OLD_FILES+=usr/sbin/g711conv
OLD_FILES+=usr/sbin/isdnd
OLD_FILES+=usr/sbin/isdndebug
OLD_FILES+=usr/sbin/isdndecode
OLD_FILES+=usr/sbin/isdnmonitor
OLD_FILES+=usr/sbin/isdnphone
OLD_FILES+=usr/sbin/isdntel
OLD_FILES+=usr/sbin/isdntelctl
OLD_FILES+=usr/sbin/isdntrace
OLD_FILES+=usr/share/isdn/0.al
OLD_FILES+=usr/share/isdn/1.al
OLD_FILES+=usr/share/isdn/2.al
OLD_FILES+=usr/share/isdn/3.al
OLD_FILES+=usr/share/isdn/4.al
OLD_FILES+=usr/share/isdn/5.al
OLD_FILES+=usr/share/isdn/6.al
OLD_FILES+=usr/share/isdn/7.al
OLD_FILES+=usr/share/isdn/8.al
OLD_FILES+=usr/share/isdn/9.al
OLD_FILES+=usr/share/isdn/beep.al
OLD_FILES+=usr/share/isdn/msg.al
OLD_DIRS+=usr/share/isdn
OLD_FILES+=usr/share/man/man1/dtmfdecode.1.gz
OLD_FILES+=usr/share/man/man1/g711conv.1.gz
OLD_FILES+=usr/share/man/man4/i4b.4.gz
OLD_FILES+=usr/share/man/man4/i4bcapi.4.gz
OLD_FILES+=usr/share/man/man4/i4bctl.4.gz
OLD_FILES+=usr/share/man/man4/i4bing.4.gz
OLD_FILES+=usr/share/man/man4/i4bipr.4.gz
OLD_FILES+=usr/share/man/man4/i4bisppp.4.gz
OLD_FILES+=usr/share/man/man4/i4bq921.4.gz
OLD_FILES+=usr/share/man/man4/i4bq931.4.gz
OLD_FILES+=usr/share/man/man4/i4brbch.4.gz
OLD_FILES+=usr/share/man/man4/i4btel.4.gz
OLD_FILES+=usr/share/man/man4/i4btrc.4.gz
OLD_FILES+=usr/share/man/man4/iavc.4.gz
OLD_FILES+=usr/share/man/man4/isic.4.gz
OLD_FILES+=usr/share/man/man4/ifpi.4.gz
OLD_FILES+=usr/share/man/man4/ifpi2.4.gz
OLD_FILES+=usr/share/man/man4/ifpnp.4.gz
OLD_FILES+=usr/share/man/man4/ihfc.4.gz
OLD_FILES+=usr/share/man/man4/itjc.4.gz
OLD_FILES+=usr/share/man/man4/iwic.4.gz
OLD_FILES+=usr/share/man/man5/isdnd.rc.5.gz
OLD_FILES+=usr/share/man/man5/isdnd.rates.5.gz
OLD_FILES+=usr/share/man/man5/isdnd.acct.5.gz
OLD_FILES+=usr/share/man/man8/isdnd.8.gz
OLD_FILES+=usr/share/man/man8/isdndebug.8.gz
OLD_FILES+=usr/share/man/man8/isdndecode.8.gz
OLD_FILES+=usr/share/man/man8/isdnmonitor.8.gz
OLD_FILES+=usr/share/man/man8/isdnphone.8.gz
OLD_FILES+=usr/share/man/man8/isdntel.8.gz
OLD_FILES+=usr/share/man/man8/isdntelctl.8.gz
OLD_FILES+=usr/share/man/man8/isdntrace.8.gz
OLD_FILES+=usr/share/examples/isdn/contrib/README
OLD_FILES+=usr/share/examples/isdn/contrib/anleitung.ppp
OLD_FILES+=usr/share/examples/isdn/contrib/answer.c
OLD_FILES+=usr/share/examples/isdn/contrib/answer.sh
OLD_FILES+=usr/share/examples/isdn/contrib/convert.sh
OLD_FILES+=usr/share/examples/isdn/contrib/hplay.c
OLD_FILES+=usr/share/examples/isdn/contrib/i4b-ppp-newbie.txt
OLD_FILES+=usr/share/examples/isdn/contrib/isdnctl
OLD_FILES+=usr/share/examples/isdn/contrib/isdnd_acct
OLD_FILES+=usr/share/examples/isdn/contrib/isdnd_acct.pl
OLD_FILES+=usr/share/examples/isdn/contrib/isdntelmux.c
OLD_FILES+=usr/share/examples/isdn/contrib/mrtg-isp0.sh
OLD_FILES+=usr/share/examples/isdn/i4brunppp/Makefile
OLD_FILES+=usr/share/examples/isdn/i4brunppp/README
OLD_FILES+=usr/share/examples/isdn/i4brunppp/i4brunppp-isdnd.rc
OLD_FILES+=usr/share/examples/isdn/i4brunppp/i4brunppp.8
OLD_FILES+=usr/share/examples/isdn/i4brunppp/i4brunppp.c
OLD_FILES+=usr/share/examples/isdn/v21/Makefile
OLD_FILES+=usr/share/examples/isdn/v21/README
OLD_FILES+=usr/share/examples/isdn/v21/v21modem.c
OLD_FILES+=usr/share/examples/isdn/FAQ
OLD_FILES+=usr/share/examples/isdn/KERNEL
OLD_FILES+=usr/share/examples/isdn/Overview
OLD_FILES+=usr/share/examples/isdn/README
OLD_FILES+=usr/share/examples/isdn/ROADMAP
OLD_FILES+=usr/share/examples/isdn/ReleaseNotes
OLD_FILES+=usr/share/examples/isdn/Resources
OLD_FILES+=usr/share/examples/isdn/SupportedCards
OLD_FILES+=usr/share/examples/isdn/ThankYou
OLD_DIRS+=usr/share/examples/isdn/contrib
OLD_DIRS+=usr/share/examples/isdn/i4brunppp
OLD_DIRS+=usr/share/examples/isdn/v21
OLD_DIRS+=usr/share/examples/isdn
OLD_FILES+=usr/share/examples/ppp/isdnd.rc
OLD_FILES+=usr/share/examples/ppp/ppp.conf.isdn
# 20080525: ng_atmpif removed
OLD_FILES+=usr/include/netgraph/atm/ng_atmpif.h
OLD_FILES+=usr/share/man/man4/ng_atmpif.4.gz
# 20080522: pmap_addr_hint removed
OLD_FILES+=usr/share/man/man9/pmap_addr_hint.9.gz
# 20080517: ipsec_osdep.h removed
OLD_FILES+=usr/include/netipsec/ipsec_osdep.h
# 20080507: heimdal 1.1 import
OLD_LIBS+=usr/lib/libasn1.so.9
OLD_LIBS+=usr/lib/libgssapi.so.9
OLD_LIBS+=usr/lib/libgssapi_krb5.so.9
OLD_LIBS+=usr/lib/libhdb.so.9
OLD_LIBS+=usr/lib/libkadm5clnt.so.9
OLD_LIBS+=usr/lib/libkadm5srv.so.9
OLD_LIBS+=usr/lib/libkafs5.so.9
OLD_LIBS+=usr/lib/libkrb5.so.9
OLD_LIBS+=usr/lib/libroken.so.9
OLD_LIBS+=usr/lib32/libgssapi.so.9
# 20080420: Symbol card support dropped
OLD_FILES+=usr/include/dev/wi/spectrum24t_cf.h
# 20080420: awi removal
OLD_FILES+=usr/share/man/man4/awi.4.gz
OLD_FILES+=usr/share/man/man4/if_awi.4.gz
# 20080331: pkg_sign and pkg_check have been removed
OLD_FILES+=usr/sbin/pkg_check
OLD_FILES+=usr/sbin/pkg_sign
OLD_FILES+=usr/share/man/man1/pkg_check.1.gz
OLD_FILES+=usr/share/man/man1/pkg_sign.1.gz
# 20080325: tzdata2008b import
OLD_FILES+=usr/share/zoneinfo/Asia/Calcutta
OLD_FILES+=usr/share/zoneinfo/Asia/Saigon
# 20080314: stack_print(9) mlink fixed
OLD_FILES+=usr/share/man/man9/stack_printf.9.gz
# 20080312: libkse removal
OLD_FILES+=usr/include/sys/kse.h
OLD_FILES+=usr/lib/libkse.so
OLD_LIBS+=usr/lib/libkse.so.3
OLD_FILES+=usr/share/man/man2/kse.2.gz
OLD_FILES+=usr/share/man/man2/kse_create.2.gz
OLD_FILES+=usr/share/man/man2/kse_exit.2.gz
OLD_FILES+=usr/share/man/man2/kse_release.2.gz
OLD_FILES+=usr/share/man/man2/kse_switchin.2.gz
OLD_FILES+=usr/share/man/man2/kse_thr_interrupt.2.gz
OLD_FILES+=usr/share/man/man2/kse_wakeup.2.gz
OLD_FILES+=usr/lib32/libkse.so
OLD_LIBS+=usr/lib32/libkse.so.3
# 20080225: bsdar/bsdranlib renamed to ar/ranlib
OLD_FILES+=usr/bin/bsdar
OLD_FILES+=usr/bin/bsdranlib
OLD_FILES+=usr/share/man/man1/bsdar.1.gz
OLD_FILES+=usr/share/man/man1/bsdranlib.1.gz
# 20080220: geom_lvm renamed to geom_linux_lvm
OLD_FILES+=usr/share/man/man4/geom_lvm.4.gz
# 20080126: oldcard.4 removal
OLD_FILES+=usr/share/man/man4/card.4.gz
OLD_FILES+=usr/share/man/man4/oldcard.4.gz
# 20080122: BUF_REFCNT(9) removed from the tree
OLD_FILES+=usr/share/man/man9/BUF_REFCNT.9.gz
# 20080108: shm_open and shm_unlink moved to section 2
OLD_FILES+=usr/share/man/man3/shm_open.3.gz
OLD_FILES+=usr/share/man/man3/shm_unlink.3.gz
# 20071207: Merged with fortunes-o.real
OLD_FILES+=usr/share/games/fortune/fortunes2-o
OLD_FILES+=usr/share/games/fortune/fortunes2-o.dat
# 20071201: Removal of XRPU driver
OLD_FILES+=usr/include/sys/xrpuio.h
# 20071129: Disabled static versions of libkse by default
OLD_FILES+=usr/lib/libkse.a
OLD_FILES+=usr/lib/libkse_p.a
OLD_FILES+=usr/lib/libkse_pic.a
OLD_FILES+=usr/lib32/libkse.a
OLD_FILES+=usr/lib32/libkse_p.a
OLD_FILES+=usr/lib32/libkse_pic.a
# 20071129: Removed a Solaris compatibility header
OLD_FILES+=usr/include/sys/_elf_solaris.h
# 20071125: Renamed to pmc_get_msr()
OLD_FILES+=usr/share/man/man3/pmc_x86_get_msr.3.gz
# 20071108: Removed very crufty OLDCARD support file
OLD_FILES+=etc/defaults/pccard.conf
# 20071025: rc.d/nfslocking superseded by rc.d/lockd and rc.d/statd
OLD_FILES+=etc/rc.d/nfslocking
# 20070930: rename of cached to nscd
OLD_FILES+=etc/cached.conf
OLD_FILES+=etc/rc.d/cached
OLD_FILES+=usr/sbin/cached
OLD_FILES+=usr/share/man/man5/cached.conf.5.gz
OLD_FILES+=usr/share/man/man8/cached.8.gz
# 20070807: removal of PowerPC-specific header file.
.if ${TARGET_ARCH} == "powerpc"
OLD_FILES+=usr/include/machine/interruptvar.h
.endif
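# Note: architecture-specific entries such as the block above are wrapped in
# .if ${TARGET_ARCH} conditionals, so they are only scheduled for removal on
# the matching architecture. A sketch of how to confirm that an entry is
# picked up for the intended architecture only (assumes a source tree in
# /usr/src; adjust TARGET_ARCH and the grep pattern as needed):
# cd /usr/src && __MAKE_CONF=/dev/null make -f Makefile.inc1 \
#     TARGET_ARCH=powerpc -V OLD_FILES | xargs -n1 | grep interruptvar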
# 20070801: fast_ipsec.4 gone
OLD_FILES+=usr/share/man/man4/fast_ipsec.4.gz
# 20070715: netatm temporarily disconnected (removed 20080525)
OLD_FILES+=rescue/atm
OLD_FILES+=rescue/fore_dnld
OLD_FILES+=rescue/ilmid
OLD_FILES+=sbin/atm
OLD_FILES+=sbin/fore_dnld
OLD_FILES+=sbin/ilmid
OLD_FILES+=usr/include/libatm.h
OLD_FILES+=usr/include/netatm/atm.h
OLD_FILES+=usr/include/netatm/atm_cm.h
OLD_FILES+=usr/include/netatm/atm_if.h
OLD_FILES+=usr/include/netatm/atm_ioctl.h
OLD_FILES+=usr/include/netatm/atm_pcb.h
OLD_FILES+=usr/include/netatm/atm_sap.h
OLD_FILES+=usr/include/netatm/atm_sigmgr.h
OLD_FILES+=usr/include/netatm/atm_stack.h
OLD_FILES+=usr/include/netatm/atm_sys.h
OLD_FILES+=usr/include/netatm/atm_var.h
OLD_FILES+=usr/include/netatm/atm_vc.h
OLD_FILES+=usr/include/netatm/ipatm/ipatm.h
OLD_FILES+=usr/include/netatm/ipatm/ipatm_serv.h
OLD_FILES+=usr/include/netatm/ipatm/ipatm_var.h
OLD_FILES+=usr/include/netatm/port.h
OLD_FILES+=usr/include/netatm/queue.h
OLD_FILES+=usr/include/netatm/sigpvc/sigpvc_var.h
OLD_FILES+=usr/include/netatm/spans/spans_cls.h
OLD_FILES+=usr/include/netatm/spans/spans_kxdr.h
OLD_FILES+=usr/include/netatm/spans/spans_var.h
OLD_FILES+=usr/include/netatm/uni/sscf_uni.h
OLD_FILES+=usr/include/netatm/uni/sscf_uni_var.h
OLD_FILES+=usr/include/netatm/uni/sscop.h
OLD_FILES+=usr/include/netatm/uni/sscop_misc.h
OLD_FILES+=usr/include/netatm/uni/sscop_pdu.h
OLD_FILES+=usr/include/netatm/uni/sscop_var.h
OLD_FILES+=usr/include/netatm/uni/uni.h
OLD_FILES+=usr/include/netatm/uni/uniip_var.h
OLD_FILES+=usr/include/netatm/uni/unisig.h
OLD_FILES+=usr/include/netatm/uni/unisig_decode.h
OLD_FILES+=usr/include/netatm/uni/unisig_mbuf.h
OLD_FILES+=usr/include/netatm/uni/unisig_msg.h
OLD_FILES+=usr/include/netatm/uni/unisig_print.h
OLD_FILES+=usr/include/netatm/uni/unisig_var.h
OLD_FILES+=usr/lib/libatm.a
OLD_FILES+=usr/lib/libatm_p.a
OLD_FILES+=usr/sbin/atmarpd
OLD_FILES+=usr/sbin/scspd
OLD_FILES+=usr/share/man/en.ISO8859-1/man8/atm.8.gz
OLD_FILES+=usr/share/man/en.ISO8859-1/man8/atmarpd.8.gz
OLD_FILES+=usr/share/man/en.ISO8859-1/man8/fore_dnld.8.gz
OLD_FILES+=usr/share/man/en.ISO8859-1/man8/ilmid.8.gz
OLD_FILES+=usr/share/man/en.ISO8859-1/man8/scspd.8.gz
OLD_FILES+=usr/share/man/man8/atm.8.gz
OLD_FILES+=usr/share/man/man8/atmarpd.8.gz
OLD_FILES+=usr/share/man/man8/fore_dnld.8.gz
OLD_FILES+=usr/share/man/man8/ilmid.8.gz
OLD_FILES+=usr/share/man/man8/scspd.8.gz
OLD_FILES+=usr/share/examples/atm/NOTES
OLD_FILES+=usr/share/examples/atm/README
OLD_FILES+=usr/share/examples/atm/Startup
OLD_FILES+=usr/share/examples/atm/atm-config.sh
OLD_FILES+=usr/share/examples/atm/atm-sockets.txt
OLD_FILES+=usr/share/examples/atm/cpcs-design.txt
OLD_FILES+=usr/share/examples/atm/fore-microcode.txt
OLD_FILES+=usr/share/examples/atm/sscf-design.txt
OLD_FILES+=usr/share/examples/atm/sscop-design.txt
OLD_LIBS+=lib/libatm.so.5
OLD_LIBS+=usr/lib/libatm.so
OLD_DIRS+=usr/include/netatm/sigpvc
OLD_DIRS+=usr/include/netatm/spans
OLD_DIRS+=usr/include/netatm/ipatm
OLD_DIRS+=usr/include/netatm/uni
OLD_DIRS+=usr/include/netatm
OLD_DIRS+=usr/share/examples/atm
OLD_FILES+=usr/lib32/libatm.a
OLD_FILES+=usr/lib32/libatm.so
OLD_LIBS+=usr/lib32/libatm.so.5
OLD_FILES+=usr/lib32/libatm_p.a
# 20070705: I4B headers repo-copied to include/i4b/
.if ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/include/machine/i4b_cause.h
OLD_FILES+=usr/include/machine/i4b_debug.h
OLD_FILES+=usr/include/machine/i4b_ioctl.h
OLD_FILES+=usr/include/machine/i4b_rbch_ioctl.h
OLD_FILES+=usr/include/machine/i4b_tel_ioctl.h
OLD_FILES+=usr/include/machine/i4b_trace.h
.endif
# 20070703: pf 4.1 import
OLD_FILES+=usr/libexec/ftp-proxy
# 20070701: KAME IPSec removal
OLD_FILES+=usr/include/netinet6/ah.h
OLD_FILES+=usr/include/netinet6/ah6.h
OLD_FILES+=usr/include/netinet6/ah_aesxcbcmac.h
OLD_FILES+=usr/include/netinet6/esp.h
OLD_FILES+=usr/include/netinet6/esp6.h
OLD_FILES+=usr/include/netinet6/esp_aesctr.h
OLD_FILES+=usr/include/netinet6/esp_camellia.h
OLD_FILES+=usr/include/netinet6/esp_rijndael.h
OLD_FILES+=usr/include/netinet6/ipsec.h
OLD_FILES+=usr/include/netinet6/ipsec6.h
OLD_FILES+=usr/include/netinet6/ipcomp.h
OLD_FILES+=usr/include/netinet6/ipcomp6.h
OLD_FILES+=usr/include/netkey/key.h
OLD_FILES+=usr/include/netkey/key_debug.h
OLD_FILES+=usr/include/netkey/key_var.h
OLD_FILES+=usr/include/netkey/keydb.h
OLD_FILES+=usr/include/netkey/keysock.h
OLD_DIRS+=usr/include/netkey
# 20070701: remove wicontrol
OLD_FILES+=usr/sbin/wicontrol
OLD_FILES+=usr/share/man/man8/wicontrol.8.gz
# 20070625: umapfs removal
OLD_FILES+=rescue/mount_umapfs
OLD_FILES+=sbin/mount_umapfs
OLD_FILES+=usr/include/fs/umapfs/umap.h
OLD_FILES+=usr/share/man/man8/mount_umapfs.8.gz
OLD_DIRS+=usr/include/fs/umapfs
# 20070618: Removal of the PROTO.localhost* files
OLD_FILES+=etc/namedb/PROTO.localhost-v6.rev
OLD_FILES+=etc/namedb/PROTO.localhost.rev
OLD_FILES+=etc/namedb/make-localhost
# 20070618: shared library version bump
OLD_LIBS+=lib/libalias.so.5
OLD_LIBS+=lib/libbsnmp.so.3
OLD_LIBS+=lib/libncurses.so.6
OLD_LIBS+=lib/libncursesw.so.6
OLD_LIBS+=lib/libreadline.so.6
OLD_LIBS+=usr/lib/libdialog.so.5
OLD_LIBS+=usr/lib/libgnuregex.so.3
OLD_LIBS+=usr/lib/libhistory.so.6
OLD_LIBS+=usr/lib/libpam.so.3
OLD_LIBS+=usr/lib/libssh.so.3
OLD_LIBS+=usr/lib/pam_chroot.so.3
OLD_LIBS+=usr/lib/pam_deny.so.3
OLD_LIBS+=usr/lib/pam_echo.so.3
OLD_LIBS+=usr/lib/pam_exec.so.3
OLD_LIBS+=usr/lib/pam_ftpusers.so.3
OLD_LIBS+=usr/lib/pam_group.so.3
OLD_LIBS+=usr/lib/pam_guest.so.3
OLD_LIBS+=usr/lib/pam_krb5.so.3
OLD_LIBS+=usr/lib/pam_ksu.so.3
OLD_LIBS+=usr/lib/pam_lastlog.so.3
OLD_LIBS+=usr/lib/pam_login_access.so.3
OLD_LIBS+=usr/lib/pam_nologin.so.3
OLD_LIBS+=usr/lib/pam_opie.so.3
OLD_LIBS+=usr/lib/pam_opieaccess.so.3
OLD_LIBS+=usr/lib/pam_passwdqc.so.3
OLD_LIBS+=usr/lib/pam_permit.so.3
OLD_LIBS+=usr/lib/pam_radius.so.3
OLD_LIBS+=usr/lib/pam_rhosts.so.3
OLD_LIBS+=usr/lib/pam_rootok.so.3
OLD_LIBS+=usr/lib/pam_securetty.so.3
OLD_LIBS+=usr/lib/pam_self.so.3
OLD_LIBS+=usr/lib/pam_ssh.so.3
OLD_LIBS+=usr/lib/pam_tacplus.so.3
OLD_LIBS+=usr/lib/pam_unix.so.3
OLD_LIBS+=usr/lib/snmp_atm.so.4
OLD_LIBS+=usr/lib/snmp_bridge.so.4
OLD_LIBS+=usr/lib/snmp_hostres.so.4
OLD_LIBS+=usr/lib/snmp_mibII.so.4
OLD_LIBS+=usr/lib/snmp_netgraph.so.4
OLD_LIBS+=usr/lib/snmp_pf.so.4
OLD_LIBS+=usr/lib32/libalias.so.5
OLD_LIBS+=usr/lib32/libbsnmp.so.3
OLD_LIBS+=usr/lib32/libdialog.so.5
OLD_LIBS+=usr/lib32/libgnuregex.so.3
OLD_LIBS+=usr/lib32/libhistory.so.6
OLD_LIBS+=usr/lib32/libncurses.so.6
OLD_LIBS+=usr/lib32/libncursesw.so.6
OLD_LIBS+=usr/lib32/libpam.so.3
OLD_LIBS+=usr/lib32/libreadline.so.6
OLD_LIBS+=usr/lib32/libssh.so.3
OLD_LIBS+=usr/lib32/pam_chroot.so.3
OLD_LIBS+=usr/lib32/pam_deny.so.3
OLD_LIBS+=usr/lib32/pam_echo.so.3
OLD_LIBS+=usr/lib32/pam_exec.so.3
OLD_LIBS+=usr/lib32/pam_ftpusers.so.3
OLD_LIBS+=usr/lib32/pam_group.so.3
OLD_LIBS+=usr/lib32/pam_guest.so.3
OLD_LIBS+=usr/lib32/pam_krb5.so.3
OLD_LIBS+=usr/lib32/pam_ksu.so.3
OLD_LIBS+=usr/lib32/pam_lastlog.so.3
OLD_LIBS+=usr/lib32/pam_login_access.so.3
OLD_LIBS+=usr/lib32/pam_nologin.so.3
OLD_LIBS+=usr/lib32/pam_opie.so.3
OLD_LIBS+=usr/lib32/pam_opieaccess.so.3
OLD_LIBS+=usr/lib32/pam_passwdqc.so.3
OLD_LIBS+=usr/lib32/pam_permit.so.3
OLD_LIBS+=usr/lib32/pam_radius.so.3
OLD_LIBS+=usr/lib32/pam_rhosts.so.3
OLD_LIBS+=usr/lib32/pam_rootok.so.3
OLD_LIBS+=usr/lib32/pam_securetty.so.3
OLD_LIBS+=usr/lib32/pam_self.so.3
OLD_LIBS+=usr/lib32/pam_ssh.so.3
OLD_LIBS+=usr/lib32/pam_tacplus.so.3
OLD_LIBS+=usr/lib32/pam_unix.so.3
# 20070613: IPX over IP tunnel removal
OLD_FILES+=usr/include/netipx/ipx_ip.h
# 20070605: sched_core removal
OLD_FILES+=usr/share/man/man4/sched_core.4.gz
# 20070603: BIND 9.4.1 import
OLD_LIBS+=usr/lib/liblwres.so.10
# 20070521: shared library version bump
OLD_LIBS+=lib/libatm.so.4
OLD_LIBS+=lib/libbegemot.so.2
OLD_LIBS+=lib/libbsdxml.so.2
OLD_LIBS+=lib/libcam.so.3
OLD_LIBS+=lib/libcrypt.so.3
OLD_LIBS+=lib/libdevstat.so.5
OLD_LIBS+=lib/libedit.so.5
OLD_LIBS+=lib/libgeom.so.3
OLD_LIBS+=lib/libipsec.so.2
OLD_LIBS+=lib/libipx.so.3
OLD_LIBS+=lib/libkiconv.so.2
OLD_LIBS+=lib/libkse.so.2
OLD_LIBS+=lib/libkvm.so.3
OLD_LIBS+=lib/libm.so.4
OLD_LIBS+=lib/libmd.so.3
OLD_LIBS+=lib/libpcap.so.4
OLD_LIBS+=lib/libpthread.so.2
OLD_LIBS+=lib/libsbuf.so.3
OLD_LIBS+=lib/libthr.so.2
OLD_LIBS+=lib/libufs.so.3
OLD_LIBS+=lib/libutil.so.6
OLD_LIBS+=lib/libz.so.3
OLD_LIBS+=usr/lib/libbluetooth.so.2
OLD_LIBS+=usr/lib/libbsm.so.1
OLD_LIBS+=usr/lib/libbz2.so.2
OLD_LIBS+=usr/lib/libcalendar.so.3
OLD_LIBS+=usr/lib/libcom_err.so.3
OLD_LIBS+=usr/lib/libdevinfo.so.3
OLD_LIBS+=usr/lib/libfetch.so.4
OLD_LIBS+=usr/lib/libform.so.3
OLD_LIBS+=usr/lib/libformw.so.3
OLD_LIBS+=usr/lib/libftpio.so.6
OLD_LIBS+=usr/lib/libgpib.so.1
OLD_LIBS+=usr/lib/libkse.so.2
OLD_LIBS+=usr/lib/libmagic.so.2
OLD_LIBS+=usr/lib/libmemstat.so.1
OLD_LIBS+=usr/lib/libmenu.so.3
OLD_LIBS+=usr/lib/libmenuw.so.3
OLD_LIBS+=usr/lib/libmilter.so.3
OLD_LIBS+=usr/lib/libmp.so.5
OLD_LIBS+=usr/lib/libncp.so.2
OLD_LIBS+=usr/lib/libnetgraph.so.2
OLD_LIBS+=usr/lib/libngatm.so.2
OLD_LIBS+=usr/lib/libopie.so.4
OLD_LIBS+=usr/lib/libpanel.so.3
OLD_LIBS+=usr/lib/libpanelw.so.3
OLD_LIBS+=usr/lib/libpmc.so.3
OLD_LIBS+=usr/lib/libradius.so.2
OLD_LIBS+=usr/lib/librpcsvc.so.3
OLD_LIBS+=usr/lib/libsdp.so.2
OLD_LIBS+=usr/lib/libsmb.so.2
OLD_LIBS+=usr/lib/libstdc++.so.5
OLD_LIBS+=usr/lib/libtacplus.so.2
OLD_LIBS+=usr/lib/libthr.so.2
OLD_LIBS+=usr/lib/libthread_db.so.2
OLD_LIBS+=usr/lib/libugidfw.so.2
OLD_LIBS+=usr/lib/libusbhid.so.2
OLD_LIBS+=usr/lib/libvgl.so.4
OLD_LIBS+=usr/lib/libwrap.so.4
OLD_LIBS+=usr/lib/libypclnt.so.2
OLD_LIBS+=usr/lib/snmp_bridge.so.3
OLD_LIBS+=usr/lib/snmp_hostres.so.3
OLD_LIBS+=usr/lib32/libatm.so.4
OLD_LIBS+=usr/lib32/libbegemot.so.2
OLD_LIBS+=usr/lib32/libbluetooth.so.2
OLD_LIBS+=usr/lib32/libbsdxml.so.2
OLD_LIBS+=usr/lib32/libbsm.so.1
OLD_LIBS+=usr/lib32/libbz2.so.2
OLD_LIBS+=usr/lib32/libcalendar.so.3
OLD_LIBS+=usr/lib32/libcam.so.3
OLD_LIBS+=usr/lib32/libcom_err.so.3
OLD_LIBS+=usr/lib32/libcrypt.so.3
OLD_LIBS+=usr/lib32/libdevinfo.so.3
OLD_LIBS+=usr/lib32/libdevstat.so.5
OLD_LIBS+=usr/lib32/libedit.so.5
OLD_LIBS+=usr/lib32/libfetch.so.4
OLD_LIBS+=usr/lib32/libform.so.3
OLD_LIBS+=usr/lib32/libformw.so.3
OLD_LIBS+=usr/lib32/libftpio.so.6
OLD_LIBS+=usr/lib32/libgeom.so.3
OLD_LIBS+=usr/lib32/libgpib.so.1
OLD_LIBS+=usr/lib32/libipsec.so.2
OLD_LIBS+=usr/lib32/libipx.so.3
OLD_LIBS+=usr/lib32/libkiconv.so.2
OLD_LIBS+=usr/lib32/libkse.so.2
OLD_LIBS+=usr/lib32/libkvm.so.3
OLD_LIBS+=usr/lib32/libm.so.4
OLD_LIBS+=usr/lib32/libmagic.so.2
OLD_LIBS+=usr/lib32/libmd.so.3
OLD_LIBS+=usr/lib32/libmemstat.so.1
OLD_LIBS+=usr/lib32/libmenu.so.3
OLD_LIBS+=usr/lib32/libmenuw.so.3
OLD_LIBS+=usr/lib32/libmilter.so.3
OLD_LIBS+=usr/lib32/libmp.so.5
OLD_LIBS+=usr/lib32/libncp.so.2
OLD_LIBS+=usr/lib32/libnetgraph.so.2
OLD_LIBS+=usr/lib32/libngatm.so.2
OLD_LIBS+=usr/lib32/libopie.so.4
OLD_LIBS+=usr/lib32/libpanel.so.3
OLD_LIBS+=usr/lib32/libpanelw.so.3
OLD_LIBS+=usr/lib32/libpcap.so.4
OLD_LIBS+=usr/lib32/libpmc.so.3
OLD_LIBS+=usr/lib32/libpthread.so.2
OLD_LIBS+=usr/lib32/libradius.so.2
OLD_LIBS+=usr/lib32/librpcsvc.so.3
OLD_LIBS+=usr/lib32/libsbuf.so.3
OLD_LIBS+=usr/lib32/libsdp.so.2
OLD_LIBS+=usr/lib32/libsmb.so.2
OLD_LIBS+=usr/lib32/libstdc++.so.5
OLD_LIBS+=usr/lib32/libtacplus.so.2
OLD_LIBS+=usr/lib32/libthr.so.2
OLD_LIBS+=usr/lib32/libthread_db.so.2
OLD_LIBS+=usr/lib32/libufs.so.3
OLD_LIBS+=usr/lib32/libugidfw.so.2
OLD_LIBS+=usr/lib32/libusbhid.so.2
OLD_LIBS+=usr/lib32/libutil.so.6
OLD_LIBS+=usr/lib32/libvgl.so.4
OLD_LIBS+=usr/lib32/libwrap.so.4
OLD_LIBS+=usr/lib32/libypclnt.so.2
OLD_LIBS+=usr/lib32/libz.so.3
# 20070519: GCC 4.2
OLD_FILES+=usr/bin/f77
OLD_FILES+=usr/bin/protoize
OLD_FILES+=usr/include/g2c.h
OLD_FILES+=usr/libexec/f771
OLD_FILES+=usr/share/info/g77.info.gz
OLD_FILES+=usr/share/man/man1/f77.1.gz
OLD_FILES+=usr/include/c++/3.4/algorithm
OLD_FILES+=usr/include/c++/3.4/backward/algo.h
OLD_FILES+=usr/include/c++/3.4/backward/algobase.h
OLD_FILES+=usr/include/c++/3.4/backward/alloc.h
OLD_FILES+=usr/include/c++/3.4/backward/backward_warning.h
OLD_FILES+=usr/include/c++/3.4/backward/bvector.h
OLD_FILES+=usr/include/c++/3.4/backward/complex.h
OLD_FILES+=usr/include/c++/3.4/backward/defalloc.h
OLD_FILES+=usr/include/c++/3.4/backward/deque.h
OLD_FILES+=usr/include/c++/3.4/backward/fstream.h
OLD_FILES+=usr/include/c++/3.4/backward/function.h
OLD_FILES+=usr/include/c++/3.4/backward/hash_map.h
OLD_FILES+=usr/include/c++/3.4/backward/hash_set.h
OLD_FILES+=usr/include/c++/3.4/backward/hashtable.h
OLD_FILES+=usr/include/c++/3.4/backward/heap.h
OLD_FILES+=usr/include/c++/3.4/backward/iomanip.h
OLD_FILES+=usr/include/c++/3.4/backward/iostream.h
OLD_FILES+=usr/include/c++/3.4/backward/istream.h
OLD_FILES+=usr/include/c++/3.4/backward/iterator.h
OLD_FILES+=usr/include/c++/3.4/backward/list.h
OLD_FILES+=usr/include/c++/3.4/backward/map.h
OLD_FILES+=usr/include/c++/3.4/backward/multimap.h
OLD_FILES+=usr/include/c++/3.4/backward/multiset.h
OLD_FILES+=usr/include/c++/3.4/backward/new.h
OLD_FILES+=usr/include/c++/3.4/backward/ostream.h
OLD_FILES+=usr/include/c++/3.4/backward/pair.h
OLD_FILES+=usr/include/c++/3.4/backward/queue.h
OLD_FILES+=usr/include/c++/3.4/backward/rope.h
OLD_FILES+=usr/include/c++/3.4/backward/set.h
OLD_FILES+=usr/include/c++/3.4/backward/slist.h
OLD_FILES+=usr/include/c++/3.4/backward/stack.h
OLD_FILES+=usr/include/c++/3.4/backward/stream.h
OLD_FILES+=usr/include/c++/3.4/backward/streambuf.h
OLD_FILES+=usr/include/c++/3.4/backward/strstream
OLD_FILES+=usr/include/c++/3.4/backward/tempbuf.h
OLD_FILES+=usr/include/c++/3.4/backward/tree.h
OLD_FILES+=usr/include/c++/3.4/backward/vector.h
OLD_FILES+=usr/include/c++/3.4/bits/allocator.h
OLD_FILES+=usr/include/c++/3.4/bits/atomic_word.h
OLD_FILES+=usr/include/c++/3.4/bits/atomicity.h
OLD_FILES+=usr/include/c++/3.4/bits/basic_file.h
OLD_FILES+=usr/include/c++/3.4/bits/basic_ios.h
OLD_FILES+=usr/include/c++/3.4/bits/basic_ios.tcc
OLD_FILES+=usr/include/c++/3.4/bits/basic_string.h
OLD_FILES+=usr/include/c++/3.4/bits/basic_string.tcc
OLD_FILES+=usr/include/c++/3.4/bits/boost_concept_check.h
OLD_FILES+=usr/include/c++/3.4/bits/c++allocator.h
OLD_FILES+=usr/include/c++/3.4/bits/c++config.h
OLD_FILES+=usr/include/c++/3.4/bits/c++io.h
OLD_FILES+=usr/include/c++/3.4/bits/c++locale.h
OLD_FILES+=usr/include/c++/3.4/bits/c++locale_internal.h
OLD_FILES+=usr/include/c++/3.4/bits/char_traits.h
OLD_FILES+=usr/include/c++/3.4/bits/cmath.tcc
OLD_FILES+=usr/include/c++/3.4/bits/codecvt.h
OLD_FILES+=usr/include/c++/3.4/bits/codecvt_specializations.h
OLD_FILES+=usr/include/c++/3.4/bits/concept_check.h
OLD_FILES+=usr/include/c++/3.4/bits/concurrence.h
OLD_FILES+=usr/include/c++/3.4/bits/cpp_type_traits.h
OLD_FILES+=usr/include/c++/3.4/bits/ctype_base.h
OLD_FILES+=usr/include/c++/3.4/bits/ctype_inline.h
OLD_FILES+=usr/include/c++/3.4/bits/ctype_noninline.h
OLD_FILES+=usr/include/c++/3.4/bits/deque.tcc
OLD_FILES+=usr/include/c++/3.4/bits/fstream.tcc
OLD_FILES+=usr/include/c++/3.4/bits/functexcept.h
OLD_FILES+=usr/include/c++/3.4/bits/gslice.h
OLD_FILES+=usr/include/c++/3.4/bits/gslice_array.h
OLD_FILES+=usr/include/c++/3.4/bits/gthr-default.h
OLD_FILES+=usr/include/c++/3.4/bits/gthr-posix.h
OLD_FILES+=usr/include/c++/3.4/bits/gthr-single.h
OLD_FILES+=usr/include/c++/3.4/bits/gthr.h
OLD_FILES+=usr/include/c++/3.4/bits/indirect_array.h
OLD_FILES+=usr/include/c++/3.4/bits/ios_base.h
OLD_FILES+=usr/include/c++/3.4/bits/istream.tcc
OLD_FILES+=usr/include/c++/3.4/bits/list.tcc
OLD_FILES+=usr/include/c++/3.4/bits/locale_classes.h
OLD_FILES+=usr/include/c++/3.4/bits/locale_facets.h
OLD_FILES+=usr/include/c++/3.4/bits/locale_facets.tcc
OLD_FILES+=usr/include/c++/3.4/bits/localefwd.h
OLD_FILES+=usr/include/c++/3.4/bits/mask_array.h
OLD_FILES+=usr/include/c++/3.4/bits/messages_members.h
OLD_FILES+=usr/include/c++/3.4/bits/os_defines.h
OLD_FILES+=usr/include/c++/3.4/bits/ostream.tcc
OLD_FILES+=usr/include/c++/3.4/bits/postypes.h
OLD_FILES+=usr/include/c++/3.4/bits/slice_array.h
OLD_FILES+=usr/include/c++/3.4/bits/sstream.tcc
OLD_FILES+=usr/include/c++/3.4/bits/stl_algo.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_algobase.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_bvector.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_construct.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_deque.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_function.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_heap.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_iterator.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_iterator_base_funcs.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_iterator_base_types.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_list.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_map.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_multimap.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_multiset.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_numeric.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_pair.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_queue.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_raw_storage_iter.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_relops.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_set.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_stack.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_tempbuf.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_threads.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_tree.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_uninitialized.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_vector.h
OLD_FILES+=usr/include/c++/3.4/bits/stream_iterator.h
OLD_FILES+=usr/include/c++/3.4/bits/streambuf.tcc
OLD_FILES+=usr/include/c++/3.4/bits/streambuf_iterator.h
OLD_FILES+=usr/include/c++/3.4/bits/stringfwd.h
OLD_FILES+=usr/include/c++/3.4/bits/time_members.h
OLD_FILES+=usr/include/c++/3.4/bits/type_traits.h
OLD_FILES+=usr/include/c++/3.4/bits/valarray_after.h
OLD_FILES+=usr/include/c++/3.4/bits/valarray_array.h
OLD_FILES+=usr/include/c++/3.4/bits/valarray_array.tcc
OLD_FILES+=usr/include/c++/3.4/bits/valarray_before.h
OLD_FILES+=usr/include/c++/3.4/bits/vector.tcc
OLD_FILES+=usr/include/c++/3.4/bitset
OLD_FILES+=usr/include/c++/3.4/cassert
OLD_FILES+=usr/include/c++/3.4/cctype
OLD_FILES+=usr/include/c++/3.4/cerrno
OLD_FILES+=usr/include/c++/3.4/cfloat
OLD_FILES+=usr/include/c++/3.4/ciso646
OLD_FILES+=usr/include/c++/3.4/climits
OLD_FILES+=usr/include/c++/3.4/clocale
OLD_FILES+=usr/include/c++/3.4/cmath
OLD_FILES+=usr/include/c++/3.4/complex
OLD_FILES+=usr/include/c++/3.4/csetjmp
OLD_FILES+=usr/include/c++/3.4/csignal
OLD_FILES+=usr/include/c++/3.4/cstdarg
OLD_FILES+=usr/include/c++/3.4/cstddef
OLD_FILES+=usr/include/c++/3.4/cstdio
OLD_FILES+=usr/include/c++/3.4/cstdlib
OLD_FILES+=usr/include/c++/3.4/cstring
OLD_FILES+=usr/include/c++/3.4/ctime
OLD_FILES+=usr/include/c++/3.4/cwchar
OLD_FILES+=usr/include/c++/3.4/cwctype
OLD_FILES+=usr/include/c++/3.4/cxxabi.h
OLD_FILES+=usr/include/c++/3.4/debug/bitset
OLD_FILES+=usr/include/c++/3.4/debug/debug.h
OLD_FILES+=usr/include/c++/3.4/debug/deque
OLD_FILES+=usr/include/c++/3.4/debug/formatter.h
OLD_FILES+=usr/include/c++/3.4/debug/hash_map
OLD_FILES+=usr/include/c++/3.4/debug/hash_map.h
OLD_FILES+=usr/include/c++/3.4/debug/hash_multimap.h
OLD_FILES+=usr/include/c++/3.4/debug/hash_multiset.h
OLD_FILES+=usr/include/c++/3.4/debug/hash_set
OLD_FILES+=usr/include/c++/3.4/debug/hash_set.h
OLD_FILES+=usr/include/c++/3.4/debug/list
OLD_FILES+=usr/include/c++/3.4/debug/map
OLD_FILES+=usr/include/c++/3.4/debug/map.h
OLD_FILES+=usr/include/c++/3.4/debug/multimap.h
OLD_FILES+=usr/include/c++/3.4/debug/multiset.h
OLD_FILES+=usr/include/c++/3.4/debug/safe_base.h
OLD_FILES+=usr/include/c++/3.4/debug/safe_iterator.h
OLD_FILES+=usr/include/c++/3.4/debug/safe_iterator.tcc
OLD_FILES+=usr/include/c++/3.4/debug/safe_sequence.h
OLD_FILES+=usr/include/c++/3.4/debug/set
OLD_FILES+=usr/include/c++/3.4/debug/set.h
OLD_FILES+=usr/include/c++/3.4/debug/string
OLD_FILES+=usr/include/c++/3.4/debug/vector
OLD_FILES+=usr/include/c++/3.4/deque
OLD_FILES+=usr/include/c++/3.4/exception
OLD_FILES+=usr/include/c++/3.4/exception_defines.h
OLD_FILES+=usr/include/c++/3.4/ext/algorithm
OLD_FILES+=usr/include/c++/3.4/ext/bitmap_allocator.h
OLD_FILES+=usr/include/c++/3.4/ext/debug_allocator.h
OLD_FILES+=usr/include/c++/3.4/ext/enc_filebuf.h
OLD_FILES+=usr/include/c++/3.4/ext/functional
OLD_FILES+=usr/include/c++/3.4/ext/hash_fun.h
OLD_FILES+=usr/include/c++/3.4/ext/hash_map
OLD_FILES+=usr/include/c++/3.4/ext/hash_set
OLD_FILES+=usr/include/c++/3.4/ext/hashtable.h
OLD_FILES+=usr/include/c++/3.4/ext/iterator
OLD_FILES+=usr/include/c++/3.4/ext/malloc_allocator.h
OLD_FILES+=usr/include/c++/3.4/ext/memory
OLD_FILES+=usr/include/c++/3.4/ext/mt_allocator.h
OLD_FILES+=usr/include/c++/3.4/ext/new_allocator.h
OLD_FILES+=usr/include/c++/3.4/ext/numeric
OLD_FILES+=usr/include/c++/3.4/ext/pod_char_traits.h
OLD_FILES+=usr/include/c++/3.4/ext/pool_allocator.h
OLD_FILES+=usr/include/c++/3.4/ext/rb_tree
OLD_FILES+=usr/include/c++/3.4/ext/rope
OLD_FILES+=usr/include/c++/3.4/ext/ropeimpl.h
OLD_FILES+=usr/include/c++/3.4/ext/slist
OLD_FILES+=usr/include/c++/3.4/ext/stdio_filebuf.h
OLD_FILES+=usr/include/c++/3.4/ext/stdio_sync_filebuf.h
OLD_FILES+=usr/include/c++/3.4/fstream
OLD_FILES+=usr/include/c++/3.4/functional
OLD_FILES+=usr/include/c++/3.4/iomanip
OLD_FILES+=usr/include/c++/3.4/ios
OLD_FILES+=usr/include/c++/3.4/iosfwd
OLD_FILES+=usr/include/c++/3.4/iostream
OLD_FILES+=usr/include/c++/3.4/istream
OLD_FILES+=usr/include/c++/3.4/iterator
OLD_FILES+=usr/include/c++/3.4/limits
OLD_FILES+=usr/include/c++/3.4/list
OLD_FILES+=usr/include/c++/3.4/locale
OLD_FILES+=usr/include/c++/3.4/map
OLD_FILES+=usr/include/c++/3.4/memory
OLD_FILES+=usr/include/c++/3.4/new
OLD_FILES+=usr/include/c++/3.4/numeric
OLD_FILES+=usr/include/c++/3.4/ostream
OLD_FILES+=usr/include/c++/3.4/queue
OLD_FILES+=usr/include/c++/3.4/set
OLD_FILES+=usr/include/c++/3.4/sstream
OLD_FILES+=usr/include/c++/3.4/stack
OLD_FILES+=usr/include/c++/3.4/stdexcept
OLD_FILES+=usr/include/c++/3.4/streambuf
OLD_FILES+=usr/include/c++/3.4/string
OLD_FILES+=usr/include/c++/3.4/typeinfo
OLD_FILES+=usr/include/c++/3.4/utility
OLD_FILES+=usr/include/c++/3.4/valarray
OLD_FILES+=usr/include/c++/3.4/vector
OLD_DIRS+=usr/include/c++/3.4/backward
OLD_DIRS+=usr/include/c++/3.4/bits
OLD_DIRS+=usr/include/c++/3.4/debug
OLD_DIRS+=usr/include/c++/3.4/ext
OLD_DIRS+=usr/include/c++/3.4
# 20070510: zpool/zfs moved to /sbin
OLD_FILES+=usr/sbin/zfs
OLD_FILES+=usr/sbin/zpool
# 20070423: rc.bluetooth (examples) removed
OLD_FILES+=usr/share/examples/netgraph/bluetooth/rc.bluetooth
OLD_DIRS+=usr/share/examples/netgraph/bluetooth
# 20070421: worm.4 removed
OLD_FILES+=usr/share/man/man4/worm.4.gz
# 20070417: trunk(4) renamed to lagg(4)
OLD_FILES+=usr/include/net/if_trunk.h
# 20070409: uuidgen moved to /bin/
OLD_FILES+=usr/bin/uuidgen
# 20070328: bzip2 1.0.4
OLD_FILES+=usr/share/info/bzip2.info.gz
# 20070303: libarchive 2.0
OLD_LIBS+=usr/lib/libarchive.so.3
OLD_LIBS+=usr/lib32/libarchive.so.3
# 20070301: remove addr2ascii and ascii2addr
OLD_FILES+=usr/share/man/man3/addr2ascii.3.gz
OLD_FILES+=usr/share/man/man3/ascii2addr.3.gz
# 20070225: vm_page_unmanage() removed
OLD_FILES+=usr/share/man/man9/vm_page_unmanage.9.gz
# 20070216: VFS_VPTOFH(9) -> VOP_VPTOFH(9)
OLD_FILES+=usr/share/man/man9/VFS_VPTOFH.9.gz
# 20070212: kame.4 removed
OLD_FILES+=usr/share/man/man4/kame.4.gz
# 20070201: remove libmytinfo link
OLD_FILES+=usr/lib/libmytinfo.a
OLD_FILES+=usr/lib/libmytinfo.so
OLD_FILES+=usr/lib/libmytinfo_p.a
OLD_FILES+=usr/lib/libmytinfow.a
OLD_FILES+=usr/lib/libmytinfow.so
OLD_FILES+=usr/lib/libmytinfow_p.a
OLD_FILES+=usr/lib32/libmytinfo.a
OLD_FILES+=usr/lib32/libmytinfo.so
OLD_FILES+=usr/lib32/libmytinfo_p.a
OLD_FILES+=usr/lib32/libmytinfow.a
OLD_FILES+=usr/lib32/libmytinfow.so
OLD_FILES+=usr/lib32/libmytinfow_p.a
# 20070128: remove vnconfig
OLD_FILES+=usr/sbin/vnconfig
# 20070127: remove bpf_compat.h
OLD_FILES+=usr/include/net/bpf_compat.h
# 20070125: objformat bites the dust
OLD_FILES+=usr/bin/objformat
OLD_FILES+=usr/share/man/man1/objformat.1.gz
OLD_FILES+=usr/include/objformat.h
OLD_FILES+=usr/share/man/man3/getobjformat.3.gz
# 20061201: remove symlinks to *.so.4 libalias modules
OLD_FILES+=usr/lib/libalias_cuseeme.so
OLD_FILES+=usr/lib/libalias_dummy.so
OLD_FILES+=usr/lib/libalias_ftp.so
OLD_FILES+=usr/lib/libalias_irc.so
OLD_FILES+=usr/lib/libalias_nbt.so
OLD_FILES+=usr/lib/libalias_pptp.so
OLD_FILES+=usr/lib/libalias_skinny.so
OLD_FILES+=usr/lib/libalias_smedia.so
# 20061201: remove old *.so.4 libalias modules
OLD_FILES+=lib/libalias_cuseeme.so.4
OLD_FILES+=lib/libalias_dummy.so.4
OLD_FILES+=lib/libalias_ftp.so.4
OLD_FILES+=lib/libalias_irc.so.4
OLD_FILES+=lib/libalias_nbt.so.4
OLD_FILES+=lib/libalias_pptp.so.4
OLD_FILES+=lib/libalias_skinny.so.4
OLD_FILES+=lib/libalias_smedia.so.4
# 20061126: remove old man page
OLD_FILES+=usr/share/man/man3/archive_read_set_bytes_per_block.3.gz
# 20061125: remove old man page
OLD_FILES+=usr/share/man/man9/devsw.9.gz
# 20061122: remove obsolete mount programs
OLD_FILES+=sbin/mount_devfs
OLD_FILES+=sbin/mount_ext2fs
OLD_FILES+=sbin/mount_fdescfs
OLD_FILES+=sbin/mount_linprocfs
OLD_FILES+=sbin/mount_procfs
OLD_FILES+=sbin/mount_std
OLD_FILES+=rescue/mount_devfs
OLD_FILES+=rescue/mount_ext2fs
OLD_FILES+=rescue/mount_fdescfs
OLD_FILES+=rescue/mount_linprocfs
OLD_FILES+=rescue/mount_procfs
OLD_FILES+=rescue/mount_std
OLD_FILES+=usr/share/man/man8/mount_devfs.8.gz
OLD_FILES+=usr/share/man/man8/mount_ext2fs.8.gz
OLD_FILES+=usr/share/man/man8/mount_fdescfs.8.gz
OLD_FILES+=usr/share/man/man8/mount_linprocfs.8.gz
OLD_FILES+=usr/share/man/man8/mount_procfs.8.gz
OLD_FILES+=usr/share/man/man8/mount_std.8.gz
# 20061116: uhidev.4 removed
OLD_FILES+=usr/share/man/man4/uhidev.4.gz
# 20061106: archive_write_prepare.3 removed
OLD_FILES+=usr/share/man/man3/archive_write_prepare.3.gz
# 20061018: pccardc removed
OLD_FILES+=usr/sbin/pccardc
OLD_FILES+=usr/share/man/man8/pccardc.8.gz
# 20060930: demangle.h removed from contrib/libstdc++/include/ext/
OLD_FILES+=usr/include/c++/3.4/ext/demangle.h
# 20060929: mrouted removed
OLD_FILES+=usr/sbin/map-mbone
OLD_FILES+=usr/sbin/mrinfo
OLD_FILES+=usr/sbin/mrouted
OLD_FILES+=usr/sbin/mtrace
OLD_FILES+=usr/share/man/man8/map-mbone.8.gz
OLD_FILES+=usr/share/man/man8/mrinfo.8.gz
OLD_FILES+=usr/share/man/man8/mrouted.8.gz
OLD_FILES+=usr/share/man/man8/mtrace.8.gz
# 20060924: tcpslice removed
OLD_FILES+=usr/sbin/tcpslice
OLD_FILES+=usr/share/man/man1/tcpslice.1.gz
# 20060829: kvmdb cleanup script removed
OLD_FILES+=etc/periodic/weekly/120.clean-kvmdb
# 20060822: ramdisk{,-own} have been replaced by mdconfig{,2}
OLD_FILES+=etc/rc.d/ramdisk
OLD_FILES+=etc/rc.d/ramdisk-own
# 20060729: OpenSSL 0.9.7e -> 0.9.8b upgrade
OLD_FILES+=usr/include/openssl/eng_int.h
OLD_FILES+=usr/include/openssl/hw_4758_cca_err.h
OLD_FILES+=usr/include/openssl/hw_aep_err.h
OLD_FILES+=usr/include/openssl/hw_atalla_err.h
OLD_FILES+=usr/include/openssl/hw_cswift_err.h
OLD_FILES+=usr/include/openssl/hw_ncipher_err.h
OLD_FILES+=usr/include/openssl/hw_nuron_err.h
OLD_FILES+=usr/include/openssl/hw_sureware_err.h
OLD_FILES+=usr/include/openssl/hw_ubsec_err.h
# 20060713: mount_linsysfs(8) never existed in 7.x
OLD_FILES+=sbin/mount_linsysfs
OLD_FILES+=usr/share/man/man8/mount_linsysfs.8.gz
# 20060704: KAME compat file net_osdep.h removed
OLD_FILES+=usr/include/net/net_osdep.h
# 20060605: man page links removed by OpenBSM 1.0 alpha 6 import
OLD_FILES+=usr/share/man/man3/au_to_socket.3.gz
OLD_FILES+=usr/share/man/man3/au_to_socket_ex_128.3.gz
OLD_FILES+=usr/share/man/man3/au_to_socket_ex_32.3.gz
# 20060517: pcvt removed
OLD_FILES+=usr/share/pcvt/README.FIRST
OLD_FILES+=usr/share/pcvt/Etc/xmodmap-german
OLD_FILES+=usr/share/pcvt/Etc/pcvt.sh
OLD_FILES+=usr/share/pcvt/Etc/pcvt.el
OLD_FILES+=usr/share/pcvt/Etc/Terminfo
OLD_FILES+=usr/share/pcvt/Etc/Termcap
OLD_DIRS+=usr/share/pcvt/Etc
OLD_FILES+=usr/share/pcvt/Doc/NotesAndHints
OLD_FILES+=usr/share/pcvt/Doc/Keyboard.VT
OLD_FILES+=usr/share/pcvt/Doc/Keyboard.HP
OLD_FILES+=usr/share/pcvt/Doc/EscapeSequences
OLD_FILES+=usr/share/pcvt/Doc/Charsets
OLD_FILES+=usr/share/pcvt/Doc/CharGen
OLD_FILES+=usr/share/pcvt/Doc/Bibliography
OLD_FILES+=usr/share/pcvt/Doc/Acknowledgements
OLD_DIRS+=usr/share/pcvt/Doc
OLD_DIRS+=usr/share/pcvt
OLD_FILES+=usr/share/misc/pcvtfonts/vt220l.816
OLD_FILES+=usr/share/misc/pcvtfonts/vt220l.814
OLD_FILES+=usr/share/misc/pcvtfonts/vt220l.810
OLD_FILES+=usr/share/misc/pcvtfonts/vt220l.808
OLD_FILES+=usr/share/misc/pcvtfonts/vt220h.816
OLD_FILES+=usr/share/misc/pcvtfonts/vt220h.814
OLD_FILES+=usr/share/misc/pcvtfonts/vt220h.810
OLD_FILES+=usr/share/misc/pcvtfonts/vt220h.808
OLD_DIRS+=usr/share/misc/pcvtfonts
OLD_FILES+=usr/share/misc/keycap.pcvt
OLD_FILES+=usr/share/man/man8/ispcvt.8.gz
OLD_FILES+=usr/share/man/man5/keycap.5.gz
OLD_FILES+=usr/share/man/man4/pcvt.4.gz
OLD_FILES+=usr/share/man/man3/kgetstr.3.gz
OLD_FILES+=usr/share/man/man3/kgetnum.3.gz
OLD_FILES+=usr/share/man/man3/kgetflag.3.gz
OLD_FILES+=usr/share/man/man3/kgetent.3.gz
OLD_FILES+=usr/share/man/man3/keycap.3.gz
OLD_FILES+=usr/share/man/man1/vt220keys.1.gz
OLD_FILES+=usr/share/man/man1/scon.1.gz
OLD_FILES+=usr/share/man/man1/loadfont.1.gz
OLD_FILES+=usr/share/man/man1/kcon.1.gz
OLD_FILES+=usr/share/man/man1/fontedit.1.gz
OLD_FILES+=usr/share/man/man1/cursor.1.gz
OLD_FILES+=usr/sbin/vt220keys
OLD_FILES+=usr/sbin/scon
OLD_FILES+=usr/sbin/loadfont
OLD_FILES+=usr/sbin/kcon
OLD_FILES+=usr/sbin/ispcvt
OLD_FILES+=usr/sbin/fontedit
OLD_FILES+=usr/sbin/cursor
OLD_FILES+=usr/lib/libkeycap_p.a
OLD_FILES+=usr/lib/libkeycap.a
OLD_FILES+=usr/include/machine/pcvt_ioctl.h
# 20060514: lnc(4) replaced by le(4)
OLD_FILES+=usr/share/man/man4/i386/lnc.4.gz
# 20060512: remove ip6fw
OLD_FILES+=etc/periodic/security/600.ip6fwdenied
OLD_FILES+=etc/periodic/security/650.ip6fwlimit
OLD_FILES+=sbin/ip6fw
OLD_FILES+=usr/include/netinet6/ip6_fw.h
OLD_FILES+=usr/share/man/man8/ip6fw.8.gz
# 20060424: sab(4) removed
OLD_FILES+=usr/share/man/man4/sab.4.gz
# 20060328: remove redundant rc.d script
OLD_FILES+=etc/rc.d/ike
# 20060127: revert libdisk to static-only
OLD_FILES+=usr/lib/libdisk.so
# 20060115: sys/pccard includes cleanup
OLD_FILES+=usr/include/pccard/driver.h
OLD_FILES+=usr/include/pccard/i82365.h
OLD_FILES+=usr/include/pccard/meciareg.h
OLD_FILES+=usr/include/pccard/pccard_nbk.h
OLD_FILES+=usr/include/pccard/pcic_pci.h
OLD_FILES+=usr/include/pccard/pcicvar.h
OLD_FILES+=usr/include/pccard/slot.h
# 20051215: rescue/nextboot.sh renamed to rescue/nextboot
OLD_FILES+=rescue/nextboot.sh
# 20051214: usbd(8) removed
OLD_FILES+=etc/rc.d/usbd
OLD_FILES+=etc/usbd.conf
OLD_FILES+=usr/sbin/usbd
OLD_FILES+=usr/share/man/man8/usbd.8.gz
# 20051029: rc.d/ppp-user renamed to rc.d/ppp for convenience
OLD_FILES+=etc/rc.d/ppp-user
# 20051012: setkey(8) moved to /sbin/
OLD_FILES+=usr/sbin/setkey
# 20050930: pccardd(8) removed
OLD_FILES+=usr/sbin/pccardd
OLD_FILES+=usr/share/man/man5/pccard.conf.5.gz
OLD_FILES+=usr/share/man/man8/pccardd.8.gz
# 20050927: bridge(4) replaced by if_bridge(4)
OLD_FILES+=usr/include/net/bridge.h
# 20050831: not implemented
OLD_FILES+=usr/share/man/man3/getino.3.gz
OLD_FILES+=usr/share/man/man3/putino.3.gz
# 20050825: T/TCP retired several months ago
OLD_FILES+=usr/share/man/man4/ttcp.4.gz
# 20050805: tn3270 retired long ago
OLD_FILES+=usr/share/misc/map3270
# 20050801: too old to be interesting here
OLD_FILES+=usr/share/doc/papers/px.ps.gz
# 20050721: moved to ports
OLD_FILES+=usr/sbin/vttest
OLD_FILES+=usr/share/man/man1/vttest.1.gz
# 20050617: wpa man pages moved to section 8
OLD_FILES+=usr/share/man/man1/hostapd.1.gz
OLD_FILES+=usr/share/man/man1/hostapd_cli.1.gz
OLD_FILES+=usr/share/man/man1/wpa_cli.1.gz
OLD_FILES+=usr/share/man/man1/wpa_supplicant.1.gz
# 20050610: rexecd (insecure by design)
OLD_FILES+=etc/pam.d/rexecd
OLD_FILES+=usr/share/man/man8/rexecd.8.gz
OLD_FILES+=usr/libexec/rexecd
# 20050606: OpenBSD dhclient replaces ISC one
OLD_FILES+=bin/omshell
OLD_FILES+=sbin/omshell
OLD_FILES+=usr/share/man/man1/omshell.1.gz
OLD_FILES+=usr/share/man/man5/dhcp-eval.5.gz
# 200504XX: ipf tools moved from /usr to /
OLD_FILES+=rescue/ipfs
OLD_FILES+=rescue/ipfstat
OLD_FILES+=rescue/ipmon
OLD_FILES+=rescue/ipnat
OLD_FILES+=usr/sbin/ipftest
OLD_FILES+=usr/sbin/ipresend
OLD_FILES+=usr/sbin/ipsend
OLD_FILES+=usr/sbin/iptest
OLD_FILES+=usr/share/man/man1/ipnat.1.gz
OLD_FILES+=usr/share/man/man1/ipsend.1.gz
OLD_FILES+=usr/share/man/man1/iptest.1.gz
OLD_FILES+=usr/share/man/man5/ipsend.5.gz
# 200503XX: bsdtar takes over gtar
OLD_FILES+=usr/bin/gtar
OLD_FILES+=usr/share/man/man1/gtar.1.gz
# 200503XX
OLD_FILES+=usr/share/man/man3/exp10.3.gz
OLD_FILES+=usr/share/man/man3/exp10f.3.gz
OLD_FILES+=usr/share/man/man3/fpsetsticky.3.gz
# 20050324: updated release infrastructure
OLD_FILES+=usr/share/man/man5/drivers.conf.5.gz
# 20050317: removed from BIND 9 distribution
OLD_FILES+=usr/share/doc/bind9/KNOWN_DEFECTS
# 2005XXXX:
OLD_FILES+=sbin/mount_autofs
OLD_FILES+=usr/lib/libautofs.a
OLD_FILES+=usr/lib/libautofs.so
OLD_FILES+=usr/share/man/man8/mount_autofs.8.gz
# 20050203: Merged with fortunes
OLD_FILES+=usr/share/games/fortune/fortunes2
OLD_FILES+=usr/share/games/fortune/fortunes2.dat
# 200501XX:
OLD_FILES+=usr/libexec/getNAME
# 200411XX: gvinum replaces vinum
OLD_FILES+=bin/vinum
OLD_FILES+=rescue/vinum
OLD_FILES+=sbin/vinum
OLD_FILES+=usr/share/man/man8/vinum.8.gz
# 200411XX: libxpg4 removal
OLD_FILES+=usr/lib/libxpg4.a
OLD_FILES+=usr/lib/libxpg4.so
OLD_FILES+=usr/lib/libxpg4_p.a
# 20041109: replaced by em(4)
OLD_FILES+=usr/share/man/man4/gx.4.gz
OLD_FILES+=usr/share/man/man4/if_gx.4.gz
# 20041017: rune interface removed
OLD_FILES+=usr/include/rune.h
OLD_FILES+=usr/share/man/man3/fgetrune.3.gz
OLD_FILES+=usr/share/man/man3/fputrune.3.gz
OLD_FILES+=usr/share/man/man3/fungetrune.3.gz
OLD_FILES+=usr/share/man/man3/mbrrune.3.gz
OLD_FILES+=usr/share/man/man3/mbrune.3.gz
OLD_FILES+=usr/share/man/man3/rune.3.gz
OLD_FILES+=usr/share/man/man3/setinvalidrune.3.gz
OLD_FILES+=usr/share/man/man3/sgetrune.3.gz
OLD_FILES+=usr/share/man/man3/sputrune.3.gz
# 20040925: bind9 import
OLD_FILES+=usr/bin/dnskeygen
OLD_FILES+=usr/bin/dnsquery
OLD_FILES+=usr/lib/libisc.a
OLD_FILES+=usr/lib/libisc.so
OLD_FILES+=usr/lib/libisc_p.a
OLD_FILES+=usr/libexec/named-xfer
OLD_FILES+=usr/sbin/named.restart
OLD_FILES+=usr/sbin/ndc
OLD_FILES+=usr/sbin/nslookup
OLD_FILES+=usr/sbin/nsupdate
OLD_FILES+=usr/share/doc/bind/html/acl.html
OLD_FILES+=usr/share/doc/bind/html/address_list.html
OLD_FILES+=usr/share/doc/bind/html/comments.html
OLD_FILES+=usr/share/doc/bind/html/config.html
OLD_FILES+=usr/share/doc/bind/html/controls.html
OLD_FILES+=usr/share/doc/bind/html/docdef.html
OLD_FILES+=usr/share/doc/bind/html/example.html
OLD_FILES+=usr/share/doc/bind/html/include.html
OLD_FILES+=usr/share/doc/bind/html/index.html
OLD_FILES+=usr/share/doc/bind/html/key.html
OLD_FILES+=usr/share/doc/bind/html/logging.html
OLD_FILES+=usr/share/doc/bind/html/master.html
OLD_FILES+=usr/share/doc/bind/html/options.html
OLD_FILES+=usr/share/doc/bind/html/server.html
OLD_FILES+=usr/share/doc/bind/html/trusted-keys.html
OLD_FILES+=usr/share/doc/bind/html/zone.html
OLD_FILES+=usr/share/doc/bind/misc/DynamicUpdate
OLD_FILES+=usr/share/doc/bind/misc/FAQ.1of2
OLD_FILES+=usr/share/doc/bind/misc/FAQ.2of2
OLD_FILES+=usr/share/doc/bind/misc/rfc2317-notes.txt
OLD_FILES+=usr/share/doc/bind/misc/style.txt
OLD_FILES+=usr/share/man/man1/dnskeygen.1.gz
OLD_FILES+=usr/share/man/man1/dnsquery.1.gz
OLD_FILES+=usr/share/man/man8/named-bootconf.8.gz
OLD_FILES+=usr/share/man/man8/named-xfer.8.gz
OLD_FILES+=usr/share/man/man8/named.restart.8.gz
OLD_FILES+=usr/share/man/man8/ndc.8.gz
OLD_FILES+=usr/share/man/man8/nslookup.8.gz
# 200409XX
OLD_FILES+=usr/share/man/man3/ENSURE.3.gz
OLD_FILES+=usr/share/man/man3/ENSURE_ERR.3.gz
OLD_FILES+=usr/share/man/man3/INSIST.3.gz
OLD_FILES+=usr/share/man/man3/INSIST_ERR.3.gz
OLD_FILES+=usr/share/man/man3/INVARIANT.3.gz
OLD_FILES+=usr/share/man/man3/INVARIANT_ERR.3.gz
OLD_FILES+=usr/share/man/man3/REQUIRE.3.gz
OLD_FILES+=usr/share/man/man3/REQUIRE_ERR.3.gz
OLD_FILES+=usr/share/man/man3/assertion_type_to_text.3.gz
OLD_FILES+=usr/share/man/man3/assertions.3.gz
OLD_FILES+=usr/share/man/man3/bitncmp.3.gz
OLD_FILES+=usr/share/man/man3/evAddTime.3.gz
OLD_FILES+=usr/share/man/man3/evCancelConn.3.gz
OLD_FILES+=usr/share/man/man3/evCancelRW.3.gz
OLD_FILES+=usr/share/man/man3/evClearIdleTimer.3.gz
OLD_FILES+=usr/share/man/man3/evClearTimer.3.gz
OLD_FILES+=usr/share/man/man3/evCmpTime.3.gz
OLD_FILES+=usr/share/man/man3/evConnFunc.3.gz
OLD_FILES+=usr/share/man/man3/evConnect.3.gz
OLD_FILES+=usr/share/man/man3/evConsIovec.3.gz
OLD_FILES+=usr/share/man/man3/evConsTime.3.gz
OLD_FILES+=usr/share/man/man3/evCreate.3.gz
OLD_FILES+=usr/share/man/man3/evDefer.3.gz
OLD_FILES+=usr/share/man/man3/evDeselectFD.3.gz
OLD_FILES+=usr/share/man/man3/evDestroy.3.gz
OLD_FILES+=usr/share/man/man3/evDispatch.3.gz
OLD_FILES+=usr/share/man/man3/evDo.3.gz
OLD_FILES+=usr/share/man/man3/evDrop.3.gz
OLD_FILES+=usr/share/man/man3/evFileFunc.3.gz
OLD_FILES+=usr/share/man/man3/evGetNext.3.gz
OLD_FILES+=usr/share/man/man3/evHold.3.gz
OLD_FILES+=usr/share/man/man3/evInitID.3.gz
OLD_FILES+=usr/share/man/man3/evLastEventTime.3.gz
OLD_FILES+=usr/share/man/man3/evListen.3.gz
OLD_FILES+=usr/share/man/man3/evMainLoop.3.gz
OLD_FILES+=usr/share/man/man3/evNowTime.3.gz
OLD_FILES+=usr/share/man/man3/evPrintf.3.gz
OLD_FILES+=usr/share/man/man3/evRead.3.gz
OLD_FILES+=usr/share/man/man3/evResetTimer.3.gz
OLD_FILES+=usr/share/man/man3/evSelectFD.3.gz
OLD_FILES+=usr/share/man/man3/evSetDebug.3.gz
OLD_FILES+=usr/share/man/man3/evSetIdleTimer.3.gz
OLD_FILES+=usr/share/man/man3/evSetTimer.3.gz
OLD_FILES+=usr/share/man/man3/evStreamFunc.3.gz
OLD_FILES+=usr/share/man/man3/evSubTime.3.gz
OLD_FILES+=usr/share/man/man3/evTestID.3.gz
OLD_FILES+=usr/share/man/man3/evTimeRW.3.gz
OLD_FILES+=usr/share/man/man3/evTimeSpec.3.gz
OLD_FILES+=usr/share/man/man3/evTimeVal.3.gz
OLD_FILES+=usr/share/man/man3/evTimerFunc.3.gz
OLD_FILES+=usr/share/man/man3/evTouchIdleTimer.3.gz
OLD_FILES+=usr/share/man/man3/evTryAccept.3.gz
OLD_FILES+=usr/share/man/man3/evUnhold.3.gz
OLD_FILES+=usr/share/man/man3/evUntimeRW.3.gz
OLD_FILES+=usr/share/man/man3/evUnwait.3.gz
OLD_FILES+=usr/share/man/man3/evWaitFor.3.gz
OLD_FILES+=usr/share/man/man3/evWaitFunc.3.gz
OLD_FILES+=usr/share/man/man3/evWrite.3.gz
OLD_FILES+=usr/share/man/man3/eventlib.3.gz
OLD_FILES+=usr/share/man/man3/heap.3.gz
OLD_FILES+=usr/share/man/man3/heap_decreased.3.gz
OLD_FILES+=usr/share/man/man3/heap_delete.3.gz
OLD_FILES+=usr/share/man/man3/heap_element.3.gz
OLD_FILES+=usr/share/man/man3/heap_for_each.3.gz
OLD_FILES+=usr/share/man/man3/heap_free.3.gz
OLD_FILES+=usr/share/man/man3/heap_increased.3.gz
OLD_FILES+=usr/share/man/man3/heap_insert.3.gz
OLD_FILES+=usr/share/man/man3/heap_new.3.gz
OLD_FILES+=usr/share/man/man3/log_add_channel.3.gz
OLD_FILES+=usr/share/man/man3/log_category_is_active.3.gz
OLD_FILES+=usr/share/man/man3/log_close_stream.3.gz
OLD_FILES+=usr/share/man/man3/log_dec_references.3.gz
OLD_FILES+=usr/share/man/man3/log_free_channel.3.gz
OLD_FILES+=usr/share/man/man3/log_free_context.3.gz
OLD_FILES+=usr/share/man/man3/log_get_filename.3.gz
OLD_FILES+=usr/share/man/man3/log_get_stream.3.gz
OLD_FILES+=usr/share/man/man3/log_inc_references.3.gz
OLD_FILES+=usr/share/man/man3/log_new_context.3.gz
OLD_FILES+=usr/share/man/man3/log_new_file_channel.3.gz
OLD_FILES+=usr/share/man/man3/log_new_null_channel.3.gz
OLD_FILES+=usr/share/man/man3/log_new_syslog_channel.3.gz
OLD_FILES+=usr/share/man/man3/log_open_stream.3.gz
OLD_FILES+=usr/share/man/man3/log_option.3.gz
OLD_FILES+=usr/share/man/man3/log_remove_channel.3.gz
OLD_FILES+=usr/share/man/man3/log_set_file_owner.3.gz
OLD_FILES+=usr/share/man/man3/log_vwrite.3.gz
OLD_FILES+=usr/share/man/man3/log_write.3.gz
OLD_FILES+=usr/share/man/man3/logging.3.gz
OLD_FILES+=usr/share/man/man3/memcluster.3.gz
OLD_FILES+=usr/share/man/man3/memget.3.gz
OLD_FILES+=usr/share/man/man3/memput.3.gz
OLD_FILES+=usr/share/man/man3/memstats.3.gz
OLD_FILES+=usr/share/man/man3/set_assertion_failure_callback.3.gz
OLD_FILES+=usr/share/man/man3/sigwait.3.gz
OLD_FILES+=usr/share/man/man3/tree_add.3.gz
OLD_FILES+=usr/share/man/man3/tree_delete.3.gz
OLD_FILES+=usr/share/man/man3/tree_init.3.gz
OLD_FILES+=usr/share/man/man3/tree_mung.3.gz
OLD_FILES+=usr/share/man/man3/tree_srch.3.gz
OLD_FILES+=usr/share/man/man3/tree_trav.3.gz
# 2004XXYY: OS-internal libs; no ports use them, so no need to use OLD_LIBS
OLD_FILES+=lib/geom/geom_concat.so.1
OLD_FILES+=lib/geom/geom_label.so.1
OLD_FILES+=lib/geom/geom_nop.so.1
OLD_FILES+=lib/geom/geom_stripe.so.1
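# The geom module entries above are plain OLD_FILES rather than OLD_LIBS
# because no ports are known to link against them. On a modern system with
# pkg(8), one way to sanity-check such a claim for a given library (a
# sketch, not part of the update procedure):
# pkg shlib -R geom_concat.so.1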
# 20040728: GCC 3.4.2
OLD_DIRS+=usr/include/c++/3.3
OLD_FILES+=usr/include/c++/3.3/FlexLexer.h
OLD_FILES+=usr/include/c++/3.3/algorithm
OLD_FILES+=usr/include/c++/3.3/backward/algo.h
OLD_FILES+=usr/include/c++/3.3/backward/algobase.h
OLD_FILES+=usr/include/c++/3.3/backward/alloc.h
OLD_FILES+=usr/include/c++/3.3/backward/backward_warning.h
OLD_FILES+=usr/include/c++/3.3/backward/bvector.h
OLD_FILES+=usr/include/c++/3.3/backward/complex.h
OLD_FILES+=usr/include/c++/3.3/backward/defalloc.h
OLD_FILES+=usr/include/c++/3.3/backward/deque.h
OLD_FILES+=usr/include/c++/3.3/backward/fstream.h
OLD_FILES+=usr/include/c++/3.3/backward/function.h
OLD_FILES+=usr/include/c++/3.3/backward/hash_map.h
OLD_FILES+=usr/include/c++/3.3/backward/hash_set.h
OLD_FILES+=usr/include/c++/3.3/backward/hashtable.h
OLD_FILES+=usr/include/c++/3.3/backward/heap.h
OLD_FILES+=usr/include/c++/3.3/backward/iomanip.h
OLD_FILES+=usr/include/c++/3.3/backward/iostream.h
OLD_FILES+=usr/include/c++/3.3/backward/istream.h
OLD_FILES+=usr/include/c++/3.3/backward/iterator.h
OLD_FILES+=usr/include/c++/3.3/backward/list.h
OLD_FILES+=usr/include/c++/3.3/backward/map.h
OLD_FILES+=usr/include/c++/3.3/backward/multimap.h
OLD_FILES+=usr/include/c++/3.3/backward/multiset.h
OLD_FILES+=usr/include/c++/3.3/backward/new.h
OLD_FILES+=usr/include/c++/3.3/backward/ostream.h
OLD_FILES+=usr/include/c++/3.3/backward/pair.h
OLD_FILES+=usr/include/c++/3.3/backward/queue.h
OLD_FILES+=usr/include/c++/3.3/backward/rope.h
OLD_FILES+=usr/include/c++/3.3/backward/set.h
OLD_FILES+=usr/include/c++/3.3/backward/slist.h
OLD_FILES+=usr/include/c++/3.3/backward/stack.h
OLD_FILES+=usr/include/c++/3.3/backward/stream.h
OLD_FILES+=usr/include/c++/3.3/backward/streambuf.h
OLD_FILES+=usr/include/c++/3.3/backward/strstream
OLD_FILES+=usr/include/c++/3.3/backward/strstream.h
OLD_FILES+=usr/include/c++/3.3/backward/tempbuf.h
OLD_FILES+=usr/include/c++/3.3/backward/tree.h
OLD_FILES+=usr/include/c++/3.3/backward/vector.h
OLD_DIRS+=usr/include/c++/3.3/backward
OLD_FILES+=usr/include/c++/3.3/bits/atomicity.h
OLD_FILES+=usr/include/c++/3.3/bits/basic_file.h
OLD_FILES+=usr/include/c++/3.3/bits/basic_ios.h
OLD_FILES+=usr/include/c++/3.3/bits/basic_ios.tcc
OLD_FILES+=usr/include/c++/3.3/bits/basic_string.h
OLD_FILES+=usr/include/c++/3.3/bits/basic_string.tcc
OLD_FILES+=usr/include/c++/3.3/bits/boost_concept_check.h
OLD_FILES+=usr/include/c++/3.3/bits/c++config.h
OLD_FILES+=usr/include/c++/3.3/bits/c++io.h
OLD_FILES+=usr/include/c++/3.3/bits/c++locale.h
OLD_FILES+=usr/include/c++/3.3/bits/c++locale_internal.h
OLD_FILES+=usr/include/c++/3.3/bits/char_traits.h
OLD_FILES+=usr/include/c++/3.3/bits/cmath.tcc
OLD_FILES+=usr/include/c++/3.3/bits/codecvt.h
OLD_FILES+=usr/include/c++/3.3/bits/codecvt_specializations.h
OLD_FILES+=usr/include/c++/3.3/bits/concept_check.h
OLD_FILES+=usr/include/c++/3.3/bits/cpp_type_traits.h
OLD_FILES+=usr/include/c++/3.3/bits/ctype_base.h
OLD_FILES+=usr/include/c++/3.3/bits/ctype_inline.h
OLD_FILES+=usr/include/c++/3.3/bits/ctype_noninline.h
OLD_FILES+=usr/include/c++/3.3/bits/deque.tcc
OLD_FILES+=usr/include/c++/3.3/bits/fpos.h
OLD_FILES+=usr/include/c++/3.3/bits/fstream.tcc
OLD_FILES+=usr/include/c++/3.3/bits/functexcept.h
OLD_FILES+=usr/include/c++/3.3/bits/generic_shadow.h
OLD_FILES+=usr/include/c++/3.3/bits/gslice.h
OLD_FILES+=usr/include/c++/3.3/bits/gslice_array.h
OLD_FILES+=usr/include/c++/3.3/bits/gthr-default.h
OLD_FILES+=usr/include/c++/3.3/bits/gthr-posix.h
OLD_FILES+=usr/include/c++/3.3/bits/gthr-single.h
OLD_FILES+=usr/include/c++/3.3/bits/gthr.h
OLD_FILES+=usr/include/c++/3.3/bits/indirect_array.h
OLD_FILES+=usr/include/c++/3.3/bits/ios_base.h
OLD_FILES+=usr/include/c++/3.3/bits/istream.tcc
OLD_FILES+=usr/include/c++/3.3/bits/list.tcc
OLD_FILES+=usr/include/c++/3.3/bits/locale_classes.h
OLD_FILES+=usr/include/c++/3.3/bits/locale_facets.h
OLD_FILES+=usr/include/c++/3.3/bits/locale_facets.tcc
OLD_FILES+=usr/include/c++/3.3/bits/localefwd.h
OLD_FILES+=usr/include/c++/3.3/bits/mask_array.h
OLD_FILES+=usr/include/c++/3.3/bits/messages_members.h
OLD_FILES+=usr/include/c++/3.3/bits/os_defines.h
OLD_FILES+=usr/include/c++/3.3/bits/ostream.tcc
OLD_FILES+=usr/include/c++/3.3/bits/pthread_allocimpl.h
OLD_FILES+=usr/include/c++/3.3/bits/slice.h
OLD_FILES+=usr/include/c++/3.3/bits/slice_array.h
OLD_FILES+=usr/include/c++/3.3/bits/sstream.tcc
OLD_FILES+=usr/include/c++/3.3/bits/stl_algo.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_algobase.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_alloc.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_bvector.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_construct.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_deque.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_function.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_heap.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_iterator.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_iterator_base_funcs.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_iterator_base_types.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_list.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_map.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_multimap.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_multiset.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_numeric.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_pair.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_pthread_alloc.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_queue.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_raw_storage_iter.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_relops.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_set.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_stack.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_tempbuf.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_threads.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_tree.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_uninitialized.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_vector.h
OLD_FILES+=usr/include/c++/3.3/bits/stream_iterator.h
OLD_FILES+=usr/include/c++/3.3/bits/streambuf.tcc
OLD_FILES+=usr/include/c++/3.3/bits/streambuf_iterator.h
OLD_FILES+=usr/include/c++/3.3/bits/stringfwd.h
OLD_FILES+=usr/include/c++/3.3/bits/time_members.h
OLD_FILES+=usr/include/c++/3.3/bits/type_traits.h
OLD_FILES+=usr/include/c++/3.3/bits/valarray_array.h
OLD_FILES+=usr/include/c++/3.3/bits/valarray_array.tcc
OLD_FILES+=usr/include/c++/3.3/bits/valarray_meta.h
OLD_FILES+=usr/include/c++/3.3/bits/vector.tcc
OLD_DIRS+=usr/include/c++/3.3/bits
OLD_FILES+=usr/include/c++/3.3/bitset
OLD_FILES+=usr/include/c++/3.3/cassert
OLD_FILES+=usr/include/c++/3.3/cctype
OLD_FILES+=usr/include/c++/3.3/cerrno
OLD_FILES+=usr/include/c++/3.3/cfloat
OLD_FILES+=usr/include/c++/3.3/ciso646
OLD_FILES+=usr/include/c++/3.3/climits
OLD_FILES+=usr/include/c++/3.3/clocale
OLD_FILES+=usr/include/c++/3.3/cmath
OLD_FILES+=usr/include/c++/3.3/complex
OLD_FILES+=usr/include/c++/3.3/csetjmp
OLD_FILES+=usr/include/c++/3.3/csignal
OLD_FILES+=usr/include/c++/3.3/cstdarg
OLD_FILES+=usr/include/c++/3.3/cstddef
OLD_FILES+=usr/include/c++/3.3/cstdio
OLD_FILES+=usr/include/c++/3.3/cstdlib
OLD_FILES+=usr/include/c++/3.3/cstring
OLD_FILES+=usr/include/c++/3.3/ctime
OLD_FILES+=usr/include/c++/3.3/cwchar
OLD_FILES+=usr/include/c++/3.3/cwctype
OLD_FILES+=usr/include/c++/3.3/cxxabi.h
OLD_FILES+=usr/include/c++/3.3/deque
OLD_FILES+=usr/include/c++/3.3/exception
OLD_FILES+=usr/include/c++/3.3/exception_defines.h
OLD_FILES+=usr/include/c++/3.3/ext/algorithm
OLD_FILES+=usr/include/c++/3.3/ext/enc_filebuf.h
OLD_FILES+=usr/include/c++/3.3/ext/functional
OLD_FILES+=usr/include/c++/3.3/ext/hash_map
OLD_FILES+=usr/include/c++/3.3/ext/hash_set
OLD_FILES+=usr/include/c++/3.3/ext/iterator
OLD_FILES+=usr/include/c++/3.3/ext/memory
OLD_FILES+=usr/include/c++/3.3/ext/numeric
OLD_FILES+=usr/include/c++/3.3/ext/rb_tree
OLD_FILES+=usr/include/c++/3.3/ext/rope
OLD_FILES+=usr/include/c++/3.3/ext/ropeimpl.h
OLD_FILES+=usr/include/c++/3.3/ext/slist
OLD_FILES+=usr/include/c++/3.3/ext/stdio_filebuf.h
OLD_FILES+=usr/include/c++/3.3/ext/stl_hash_fun.h
OLD_FILES+=usr/include/c++/3.3/ext/stl_hashtable.h
OLD_FILES+=usr/include/c++/3.3/ext/stl_rope.h
OLD_DIRS+=usr/include/c++/3.3/ext
OLD_FILES+=usr/include/c++/3.3/fstream
OLD_FILES+=usr/include/c++/3.3/functional
OLD_FILES+=usr/include/c++/3.3/iomanip
OLD_FILES+=usr/include/c++/3.3/ios
OLD_FILES+=usr/include/c++/3.3/iosfwd
OLD_FILES+=usr/include/c++/3.3/iostream
OLD_FILES+=usr/include/c++/3.3/istream
OLD_FILES+=usr/include/c++/3.3/iterator
OLD_FILES+=usr/include/c++/3.3/limits
OLD_FILES+=usr/include/c++/3.3/list
OLD_FILES+=usr/include/c++/3.3/locale
OLD_FILES+=usr/include/c++/3.3/map
OLD_FILES+=usr/include/c++/3.3/memory
OLD_FILES+=usr/include/c++/3.3/new
OLD_FILES+=usr/include/c++/3.3/numeric
OLD_FILES+=usr/include/c++/3.3/ostream
OLD_FILES+=usr/include/c++/3.3/queue
OLD_FILES+=usr/include/c++/3.3/set
OLD_FILES+=usr/include/c++/3.3/sstream
OLD_FILES+=usr/include/c++/3.3/stack
OLD_FILES+=usr/include/c++/3.3/stdexcept
OLD_FILES+=usr/include/c++/3.3/streambuf
OLD_FILES+=usr/include/c++/3.3/string
OLD_FILES+=usr/include/c++/3.3/typeinfo
OLD_FILES+=usr/include/c++/3.3/utility
OLD_FILES+=usr/include/c++/3.3/valarray
OLD_FILES+=usr/include/c++/3.3/vector
# 20040713: fla(4) removed.
OLD_FILES+=usr/share/man/man4/fla.4.gz
# 200407XX
OLD_FILES+=usr/sbin/kernbb
OLD_FILES+=usr/sbin/ntp-genkeys
OLD_FILES+=usr/sbin/ntptimeset
OLD_FILES+=usr/share/man/man8/kernbb.8.gz
OLD_FILES+=usr/share/man/man8/ntp-genkeys.8.gz
# 20040627: usbdevs.h and usbdevs_data.h removal
OLD_FILES+=usr/include/dev/usb/usbdevs.h
OLD_FILES+=usr/include/dev/usb/usbdevs_data.h
# 200406XX
OLD_FILES+=usr/bin/gasp
OLD_FILES+=usr/bin/gdbreplay
OLD_FILES+=usr/share/man/man1/gasp.1.gz
OLD_FILES+=sbin/mountd
OLD_FILES+=sbin/mount_fdesc
OLD_FILES+=sbin/mount_umap
OLD_FILES+=sbin/mount_union
OLD_FILES+=sbin/mount_msdos
OLD_FILES+=sbin/mount_null
OLD_FILES+=sbin/mount_kernfs
# 200405XX: arl
OLD_FILES+=usr/sbin/arlconfig
OLD_FILES+=usr/share/man/man8/arlconfig.8.gz
# 200403XX
OLD_FILES+=bin/raidctl
OLD_FILES+=sbin/raidctl
OLD_FILES+=usr/bin/sasc
OLD_FILES+=usr/sbin/sgsc
OLD_FILES+=usr/sbin/stlload
OLD_FILES+=usr/sbin/stlstats
OLD_FILES+=usr/share/man/man1/sasc.1.gz
OLD_FILES+=usr/share/man/man1/sgsc.1.gz
OLD_FILES+=usr/share/man/man4/i386/stl.4.gz
OLD_FILES+=usr/share/man/man8/raidctl.8.gz
# 20040229: clean_environment() was removed after 3 days
OLD_FILES+=usr/share/man/man3/clean_environment.3.gz
# 20040119: installed as `isdntel' in newer systems
OLD_FILES+=etc/isdn/isdntel.sh
# 200XYYZZ: /lib transition glitches
OLD_FILES+=lib/libalias.so
OLD_FILES+=lib/libatm.so
OLD_FILES+=lib/libbsdxml.so
OLD_FILES+=lib/libc.so
OLD_FILES+=lib/libcam.so
OLD_FILES+=lib/libcrypt.so
OLD_FILES+=lib/libcrypto.so
OLD_FILES+=lib/libdevstat.so
OLD_FILES+=lib/libedit.so
OLD_FILES+=lib/libgeom.so
OLD_FILES+=lib/libipsec.so
OLD_FILES+=lib/libipx.so
OLD_FILES+=lib/libkvm.so
OLD_FILES+=lib/libm.so
OLD_FILES+=lib/libmd.so
OLD_FILES+=lib/libncurses.so
OLD_FILES+=lib/libreadline.so
OLD_FILES+=lib/libsbuf.so
OLD_FILES+=lib/libufs.so
OLD_FILES+=lib/libz.so
# 200312XX
OLD_FILES+=bin/cxconfig
OLD_FILES+=sbin/cxconfig
OLD_FILES+=usr/share/man/man8/cxconfig.8.gz
# 20031016: MULTI_DRIVER_MODULE macro removed
OLD_FILES+=usr/share/man/man9/MULTI_DRIVER_MODULE.9.gz
# 200309XX
OLD_FILES+=usr/bin/symorder
OLD_FILES+=usr/share/man/man1/symorder.1.gz
# 200308XX
OLD_FILES+=usr/sbin/amldb
OLD_FILES+=usr/share/man/man8/amldb.8.gz
# 200307XX
OLD_FILES+=sbin/mount_nwfs
OLD_FILES+=sbin/mount_portalfs
OLD_FILES+=sbin/mount_smbfs
# 200306XX
OLD_FILES+=usr/sbin/dev_mkdb
OLD_FILES+=usr/share/man/man8/dev_mkdb.8.gz
# 200304XX
OLD_FILES+=usr/lib/libcipher.a
OLD_FILES+=usr/lib/libcipher.so
OLD_FILES+=usr/lib/libcipher_p.a
OLD_FILES+=usr/lib/libgmp.a
OLD_FILES+=usr/lib/libgmp.so
OLD_FILES+=usr/lib/libgmp_p.a
OLD_FILES+=usr/lib/libperl.a
OLD_FILES+=usr/lib/libperl.so
OLD_FILES+=usr/lib/libperl_p.a
OLD_FILES+=usr/lib/libposix1e.a
OLD_FILES+=usr/lib/libposix1e.so
OLD_FILES+=usr/lib/libposix1e_p.a
OLD_FILES+=usr/lib/libskey.a
OLD_FILES+=usr/lib/libskey.so
OLD_FILES+=usr/lib/libskey_p.a
OLD_FILES+=usr/libexec/tradcpp0
OLD_FILES+=usr/libexec/cpp0
# 200304XX: removal of xten
OLD_FILES+=usr/libexec/xtend
OLD_FILES+=usr/sbin/xten
OLD_FILES+=usr/share/man/man1/xten.1.gz
OLD_FILES+=usr/share/man/man8/xtend.8.gz
# 200303XX
OLD_FILES+=usr/lib/libacl.so
OLD_FILES+=usr/lib/libdescrypt.so
OLD_FILES+=usr/lib/libf2c.so
OLD_FILES+=usr/lib/libg++.so
OLD_FILES+=usr/lib/libkdb.so
OLD_FILES+=usr/lib/librsaINTL.so
OLD_FILES+=usr/lib/libscrypt.so
OLD_FILES+=usr/lib/libss.so
# 200302XX
OLD_FILES+=usr/lib/libacl.a
OLD_FILES+=usr/lib/libacl_p.a
OLD_FILES+=usr/lib/libkadm.a
OLD_FILES+=usr/lib/libkadm.so
OLD_FILES+=usr/lib/libkadm_p.a
OLD_FILES+=usr/lib/libkafs.a
OLD_FILES+=usr/lib/libkafs.so
OLD_FILES+=usr/lib/libkafs_p.a
OLD_FILES+=usr/lib/libkdb.a
OLD_FILES+=usr/lib/libkdb_p.a
OLD_FILES+=usr/lib/libkrb.a
OLD_FILES+=usr/lib/libkrb.so
OLD_FILES+=usr/lib/libkrb_p.a
OLD_FILES+=usr/share/man/man3/SSL_CIPHER_get_name.3.gz
OLD_FILES+=usr/share/man/man3/SSL_COMP_add_compression_method.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_add_extra_chain_cert.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_add_session.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_ctrl.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_flush_sessions.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_free.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_get_verify_mode.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_load_verify_locations.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_new.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_sess_number.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_sess_set_cache_size.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_sess_set_get_cb.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_sessions.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_cert_store.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_cert_verify_callback.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_cipher_list.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_client_CA_list.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_client_cert_cb.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_default_passwd_cb.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_generate_session_id.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_info_callback.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_max_cert_list.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_mode.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_msg_callback.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_options.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_quiet_shutdown.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_session_cache_mode.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_session_id_context.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_ssl_version.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_timeout.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_tmp_dh_callback.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_tmp_rsa_callback.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_verify.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_use_certificate.3.gz
OLD_FILES+=usr/share/man/man3/SSL_SESSION_free.3.gz
OLD_FILES+=usr/share/man/man3/SSL_SESSION_get_ex_new_index.3.gz
OLD_FILES+=usr/share/man/man3/SSL_SESSION_get_time.3.gz
OLD_FILES+=usr/share/man/man3/SSL_accept.3.gz
OLD_FILES+=usr/share/man/man3/SSL_alert_type_string.3.gz
OLD_FILES+=usr/share/man/man3/SSL_clear.3.gz
OLD_FILES+=usr/share/man/man3/SSL_connect.3.gz
OLD_FILES+=usr/share/man/man3/SSL_do_handshake.3.gz
OLD_FILES+=usr/share/man/man3/SSL_free.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_SSL_CTX.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_ciphers.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_client_CA_list.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_current_cipher.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_default_timeout.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_error.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_ex_data_X509_STORE_CTX_idx.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_ex_new_index.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_fd.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_peer_cert_chain.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_peer_certificate.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_rbio.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_session.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_verify_result.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_version.3.gz
OLD_FILES+=usr/share/man/man3/SSL_library_init.3.gz
OLD_FILES+=usr/share/man/man3/SSL_load_client_CA_file.3.gz
OLD_FILES+=usr/share/man/man3/SSL_new.3.gz
OLD_FILES+=usr/share/man/man3/SSL_pending.3.gz
OLD_FILES+=usr/share/man/man3/SSL_read.3.gz
OLD_FILES+=usr/share/man/man3/SSL_rstate_string.3.gz
OLD_FILES+=usr/share/man/man3/SSL_session_reused.3.gz
OLD_FILES+=usr/share/man/man3/SSL_set_bio.3.gz
OLD_FILES+=usr/share/man/man3/SSL_set_connect_state.3.gz
OLD_FILES+=usr/share/man/man3/SSL_set_fd.3.gz
OLD_FILES+=usr/share/man/man3/SSL_set_session.3.gz
OLD_FILES+=usr/share/man/man3/SSL_set_shutdown.3.gz
OLD_FILES+=usr/share/man/man3/SSL_set_verify_result.3.gz
OLD_FILES+=usr/share/man/man3/SSL_shutdown.3.gz
OLD_FILES+=usr/share/man/man3/SSL_state_string.3.gz
OLD_FILES+=usr/share/man/man3/SSL_want.3.gz
OLD_FILES+=usr/share/man/man3/SSL_write.3.gz
OLD_FILES+=usr/share/man/man3/d2i_SSL_SESSION.3.gz
# 200301XX
OLD_FILES+=usr/share/man/man3/des_3cbc_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/des_3ecb_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/des_cbc_cksum.3.gz
OLD_FILES+=usr/share/man/man3/des_cbc_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/des_cfb_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/des_ecb_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/des_enc_read.3.gz
OLD_FILES+=usr/share/man/man3/des_enc_write.3.gz
OLD_FILES+=usr/share/man/man3/des_is_weak_key.3.gz
OLD_FILES+=usr/share/man/man3/des_key_sched.3.gz
OLD_FILES+=usr/share/man/man3/des_ofb_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/des_pcbc_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/des_quad_cksum.3.gz
OLD_FILES+=usr/share/man/man3/des_random_key.3.gz
OLD_FILES+=usr/share/man/man3/des_read_2password.3.gz
OLD_FILES+=usr/share/man/man3/des_read_password.3.gz
OLD_FILES+=usr/share/man/man3/des_read_pw_string.3.gz
OLD_FILES+=usr/share/man/man3/des_set_key.3.gz
OLD_FILES+=usr/share/man/man3/des_set_odd_parity.3.gz
OLD_FILES+=usr/share/man/man3/des_string_to_2key.3.gz
OLD_FILES+=usr/share/man/man3/des_string_to_key.3.gz
# 200212XX
OLD_FILES+=usr/sbin/kenv
OLD_FILES+=usr/bin/kenv
OLD_FILES+=usr/sbin/elf2aout
# 200210XX
OLD_FILES+=usr/include/libusbhid.h
OLD_FILES+=usr/share/man/man3/All_FreeBSD.3.gz
OLD_FILES+=usr/share/man/man3/CheckRules.3.gz
OLD_FILES+=usr/share/man/man3/ChunkCanBeRoot.3.gz
OLD_FILES+=usr/share/man/man3/Clone_Disk.3.gz
OLD_FILES+=usr/share/man/man3/Collapse_Chunk.3.gz
OLD_FILES+=usr/share/man/man3/Collapse_Disk.3.gz
OLD_FILES+=usr/share/man/man3/Create_Chunk.3.gz
OLD_FILES+=usr/share/man/man3/Create_Chunk_DWIM.3.gz
OLD_FILES+=usr/share/man/man3/Cyl_Aligned.3.gz
OLD_FILES+=usr/share/man/man3/Debug_Disk.3.gz
OLD_FILES+=usr/share/man/man3/Delete_Chunk.3.gz
OLD_FILES+=usr/share/man/man3/Disk_Names.3.gz
OLD_FILES+=usr/share/man/man3/Free_Disk.3.gz
OLD_FILES+=usr/share/man/man3/MakeDev.3.gz
OLD_FILES+=usr/share/man/man3/MakeDevDisk.3.gz
OLD_FILES+=usr/share/man/man3/Next_Cyl_Aligned.3.gz
OLD_FILES+=usr/share/man/man3/Next_Track_Aligned.3.gz
OLD_FILES+=usr/share/man/man3/Open_Disk.3.gz
OLD_FILES+=usr/share/man/man3/Prev_Cyl_Aligned.3.gz
OLD_FILES+=usr/share/man/man3/Prev_Track_Aligned.3.gz
OLD_FILES+=usr/share/man/man3/Set_Bios_Geom.3.gz
OLD_FILES+=usr/share/man/man3/Set_Boot_Blocks.3.gz
OLD_FILES+=usr/share/man/man3/Set_Boot_Mgr.3.gz
OLD_FILES+=usr/share/man/man3/ShowChunkFlags.3.gz
OLD_FILES+=usr/share/man/man3/Track_Aligned.3.gz
OLD_FILES+=usr/share/man/man3/Write_Disk.3.gz
OLD_FILES+=usr/share/man/man3/slice_type_name.3.gz
# 200210XX: most games moved to ports
OLD_FILES+=usr/share/man/man6/adventure.6.gz
OLD_FILES+=usr/share/man/man6/arithmetic.6.gz
OLD_FILES+=usr/share/man/man6/atc.6.gz
OLD_FILES+=usr/share/man/man6/backgammon.6.gz
OLD_FILES+=usr/share/man/man6/battlestar.6.gz
OLD_FILES+=usr/share/man/man6/bs.6.gz
OLD_FILES+=usr/share/man/man6/canfield.6.gz
OLD_FILES+=usr/share/man/man6/cfscores.6.gz
OLD_FILES+=usr/share/man/man6/cribbage.6.gz
OLD_FILES+=usr/share/man/man6/fish.6.gz
OLD_FILES+=usr/share/man/man6/hack.6.gz
OLD_FILES+=usr/share/man/man6/hangman.6.gz
OLD_FILES+=usr/share/man/man6/larn.6.gz
OLD_FILES+=usr/share/man/man6/mille.6.gz
OLD_FILES+=usr/share/man/man6/phantasia.6.gz
OLD_FILES+=usr/share/man/man6/piano.6.gz
OLD_FILES+=usr/share/man/man6/pig.6.gz
OLD_FILES+=usr/share/man/man6/quiz.6.gz
OLD_FILES+=usr/share/man/man6/rain.6.gz
OLD_FILES+=usr/share/man/man6/robots.6.gz
OLD_FILES+=usr/share/man/man6/rogue.6.gz
OLD_FILES+=usr/share/man/man6/sail.6.gz
OLD_FILES+=usr/share/man/man6/snake.6.gz
OLD_FILES+=usr/share/man/man6/snscore.6.gz
OLD_FILES+=usr/share/man/man6/trek.6.gz
OLD_FILES+=usr/share/man/man6/wargames.6.gz
OLD_FILES+=usr/share/man/man6/worm.6.gz
OLD_FILES+=usr/share/man/man6/worms.6.gz
OLD_FILES+=usr/share/man/man6/wump.6.gz
# 200207XX
OLD_FILES+=usr/share/man/man1aout/ar.1aout.gz
OLD_FILES+=usr/share/man/man1aout/as.1aout.gz
OLD_FILES+=usr/share/man/man1aout/ld.1aout.gz
OLD_FILES+=usr/share/man/man1aout/nm.1aout.gz
OLD_FILES+=usr/share/man/man1aout/ranlib.1aout.gz
OLD_FILES+=usr/share/man/man1aout/size.1aout.gz
OLD_FILES+=usr/share/man/man1aout/strings.1aout.gz
OLD_FILES+=usr/share/man/man1aout/strip.1aout.gz
OLD_FILES+=bin/mountd
OLD_FILES+=bin/nfsd
# 20020707 sbin/nfsd -> usr.sbin/nfsd
OLD_FILES+=sbin/nfsd
# 200206XX
OLD_FILES+=usr/lib/libpam_ssh.a
OLD_FILES+=usr/lib/libpam_ssh_p.a
OLD_FILES+=usr/bin/help
OLD_FILES+=usr/bin/sccs
.if ${TARGET_ARCH} != "amd64" && ${TARGET} != "arm" && ${TARGET_ARCH} != "i386" && ${TARGET} != "powerpc"
OLD_FILES+=usr/bin/gdbserver
.endif
OLD_FILES+=usr/bin/ssh-keysign
OLD_FILES+=usr/sbin/gifconfig
OLD_FILES+=usr/sbin/prefix
# 200205XX
OLD_FILES+=usr/bin/doscmd
# 200204XX
OLD_FILES+=usr/bin/a2p
OLD_FILES+=usr/bin/ptx
OLD_FILES+=usr/bin/pod2text
OLD_FILES+=usr/bin/pod2man
OLD_FILES+=usr/bin/pod2latex
OLD_FILES+=usr/bin/pod2html
OLD_FILES+=usr/bin/h2ph
OLD_FILES+=usr/bin/dprofpp
OLD_FILES+=usr/bin/c2ph
OLD_FILES+=usr/bin/h2xs
OLD_FILES+=usr/bin/pl2pm
OLD_FILES+=usr/bin/splain
OLD_FILES+=usr/bin/s2p
OLD_FILES+=usr/bin/find2perl
OLD_FILES+=usr/sbin/pkg_update
OLD_FILES+=usr/sbin/scriptdump
# 20020409 GC kget(1), userconfig is long dead.
OLD_FILES+=sbin/kget
OLD_FILES+=usr/share/man/man8/kget.8.gz
# 200203XX
OLD_FILES+=usr/lib/libss.a
OLD_FILES+=usr/lib/libss_p.a
OLD_FILES+=usr/lib/libtelnet.a
OLD_FILES+=usr/lib/libtelnet_p.a
OLD_FILES+=usr/sbin/diskpart
# 200202XX
OLD_FILES+=usr/bin/gprof4
# 200201XX
OLD_FILES+=usr/sbin/linux
# 2001XXXX
OLD_FILES+=usr/bin/joy
OLD_FILES+=usr/sbin/ibcs2
OLD_FILES+=usr/sbin/svr4
OLD_FILES+=usr/bin/chflags
OLD_FILES+=usr/sbin/uuconv
OLD_FILES+=usr/sbin/uuchk
OLD_FILES+=usr/sbin/portmap
OLD_FILES+=usr/sbin/pmap_set
OLD_FILES+=usr/sbin/pmap_dump
OLD_FILES+=usr/sbin/mcon
OLD_FILES+=usr/sbin/stlstty
OLD_FILES+=usr/sbin/ispppcontrol
OLD_FILES+=usr/sbin/rndcontrol
# 20011001: UUCP migration to ports
OLD_FILES+=usr/bin/uucp
OLD_FILES+=usr/bin/uulog
OLD_FILES+=usr/bin/uuname
OLD_FILES+=usr/bin/uupick
OLD_FILES+=usr/bin/uusched
OLD_FILES+=usr/bin/uustat
OLD_FILES+=usr/bin/uuto
OLD_FILES+=usr/bin/uux
OLD_FILES+=usr/libexec/uucp/uucico
OLD_FILES+=usr/libexec/uucp/uuxqt
OLD_FILES+=usr/libexec/uucpd
OLD_FILES+=usr/share/man/man1/uuconv.1.gz
OLD_FILES+=usr/share/man/man1/uucp.1.gz
OLD_FILES+=usr/share/man/man1/uulog.1.gz
OLD_FILES+=usr/share/man/man1/uuname.1.gz
OLD_FILES+=usr/share/man/man1/uupick.1.gz
OLD_FILES+=usr/share/man/man1/uustat.1.gz
OLD_FILES+=usr/share/man/man1/uuto.1.gz
OLD_FILES+=usr/share/man/man1/uux.1.gz
OLD_FILES+=usr/share/man/man8/uuchk.8.gz
OLD_FILES+=usr/share/man/man8/uucico.8.gz
OLD_FILES+=usr/share/man/man8/uucpd.8.gz
OLD_FILES+=usr/share/man/man8/uusched.8.gz
OLD_FILES+=usr/share/man/man8/uuxqt.8.gz
# 20010523 mount_portal -> mount_portalfs
OLD_FILES+=sbin/mount_portal
OLD_FILES+=usr/share/man/man8/mount_portal.8.gz
# 200104XX
OLD_FILES+=usr/lib/libdescrypt.a
OLD_FILES+=usr/lib/libscrypt.a
OLD_FILES+=usr/lib/libscrypt_p.a
OLD_FILES+=usr/sbin/pim6stat
OLD_FILES+=usr/sbin/pim6sd
OLD_FILES+=usr/sbin/pim6dd
# 20010217
OLD_FILES+=usr/share/doc/bind/misc/dns-setup
# 200012XX
OLD_FILES+=usr/lib/libgcc_r_pic.a
# 20001006
OLD_FILES+=usr/bin/miniperl
# 200009XX
OLD_FILES+=usr/lib/libRSAglue.a
OLD_FILES+=usr/lib/libRSAglue.so
OLD_FILES+=usr/lib/librsaINTL.a
OLD_FILES+=usr/lib/librsaUSA.a
OLD_FILES+=usr/lib/librsaUSA.so
# 20000810
OLD_FILES+=usr/bin/sperl
# 200002XX ?
OLD_FILES+=usr/lib/libf2c.a
OLD_FILES+=usr/lib/libf2c_p.a
OLD_FILES+=usr/lib/libg++.a
OLD_FILES+=usr/lib/libg++_p.a
# 200001XX
OLD_FILES+=usr/sbin/apmconf
# 199911XX
OLD_FILES+=usr/sbin/ipfstat
OLD_FILES+=usr/sbin/ipmon
OLD_FILES+=usr/sbin/ipnat
OLD_FILES+=usr/sbin/bad144
OLD_FILES+=usr/sbin/wormcontrol
OLD_FILES+=usr/sbin/named-bootconf
OLD_FILES+=usr/sbin/kvm_mkdb
OLD_FILES+=usr/sbin/keyadmin
# 199909XX
OLD_FILES+=usr/lib/libdescrypt_p.a
OLD_FILES+=sbin/ft
# 199903XX
OLD_FILES+=sbin/modload
OLD_FILES+=sbin/modunload
OLD_FILES+=usr/sbin/natd
# 199812XX
OLD_FILES+=sbin/dset
# 199809XX
OLD_FILES+=sbin/scsi
OLD_FILES+=sbin/scsiformat
OLD_FILES+=usr/sbin/ncrcontrol
OLD_FILES+=usr/sbin/tickadj
# 199806XX
OLD_FILES+=usr/sbin/mkdosfs
# 199801XX
OLD_FILES+=sbin/mount_lfs
OLD_FILES+=sbin/newlfs
OLD_FILES+=sbin/dumplfs
OLD_FILES+=usr/sbin/qcamcontrol
OLD_FILES+=usr/sbin/supscan
# 1997XXXX
OLD_FILES+=usr/sbin/sysctl
OLD_FILES+=usr/sbin/ctm_scan
OLD_FILES+=usr/sbin/addgroup
OLD_FILES+=usr/sbin/rmgroup
# 1996XXXX
OLD_FILES+=sbin/rdisc
OLD_FILES+=usr/sbin/cdplay
OLD_FILES+=usr/sbin/supfilesrv
OLD_FILES+=usr/sbin/routed
OLD_FILES+=usr/sbin/lsdev
OLD_FILES+=usr/sbin/yppasswdd
## unsorted
# do we still support aout builds?
#OLD_FILES+=usr/lib/aout/c++rt0.o
#OLD_FILES+=usr/lib/aout/crt0.o
#OLD_FILES+=usr/lib/aout/gcrt0.o
#OLD_FILES+=usr/lib/aout/scrt0.o
#OLD_FILES+=usr/lib/aout/sgcrt0.o
OLD_FILES+=usr/bin/sperl5
OLD_FILES+=usr/bin/perl5.6.0
OLD_FILES+=usr/bin/sperl5.6.0
OLD_FILES+=usr/bin/perlbc
OLD_FILES+=usr/bin/perl5.00503
OLD_FILES+=usr/bin/sperl5.00503
OLD_FILES+=usr/bin/perlbug
OLD_FILES+=usr/bin/perlcc
OLD_FILES+=usr/bin/perldoc
OLD_FILES+=usr/bin/suidperl
OLD_FILES+=usr/lib/pam_ftp.so
OLD_FILES+=usr/libdata/perl/5.00503/CGI/Apache.pm
OLD_FILES+=usr/libdata/perl/5.00503/CGI/Carp.pm
OLD_FILES+=usr/libdata/perl/5.00503/CGI/Cookie.pm
OLD_FILES+=usr/libdata/perl/5.00503/CGI/Fast.pm
OLD_FILES+=usr/libdata/perl/5.00503/CGI/Push.pm
OLD_FILES+=usr/libdata/perl/5.00503/CGI/Switch.pm
OLD_FILES+=usr/libdata/perl/5.00503/CPAN/FirstTime.pm
OLD_FILES+=usr/libdata/perl/5.00503/CPAN/Nox.pm
OLD_FILES+=usr/libdata/perl/5.00503/Class/Struct.pm
OLD_FILES+=usr/libdata/perl/5.00503/Devel/SelfStubber.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Command.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Embed.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Install.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Installed.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Liblist.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/MM_OS2.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/MM_Unix.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/MM_VMS.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/MM_Win32.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/MakeMaker.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Manifest.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Mkbootstrap.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Mksymlists.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Packlist.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/inst
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/testlib.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/typemap
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/xsubpp
OLD_FILES+=usr/libdata/perl/5.00503/File/Spec/Mac.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Spec/OS2.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Spec/Unix.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Spec/VMS.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Spec/Win32.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Basename.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/CheckTree.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Compare.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Copy.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/DosGlob.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Find.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Path.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Spec.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/stat.pm
OLD_FILES+=usr/libdata/perl/5.00503/Getopt/Long.pm
OLD_FILES+=usr/libdata/perl/5.00503/Getopt/Std.pm
OLD_FILES+=usr/libdata/perl/5.00503/I18N/Collate.pm
OLD_FILES+=usr/libdata/perl/5.00503/IPC/Open2.pm
OLD_FILES+=usr/libdata/perl/5.00503/IPC/Open3.pm
OLD_FILES+=usr/libdata/perl/5.00503/Math/BigFloat.pm
OLD_FILES+=usr/libdata/perl/5.00503/Math/BigInt.pm
OLD_FILES+=usr/libdata/perl/5.00503/Math/Complex.pm
OLD_FILES+=usr/libdata/perl/5.00503/Math/Trig.pm
OLD_FILES+=usr/libdata/perl/5.00503/Net/Ping.pm
OLD_FILES+=usr/libdata/perl/5.00503/Net/hostent.pm
OLD_FILES+=usr/libdata/perl/5.00503/Net/netent.pm
OLD_FILES+=usr/libdata/perl/5.00503/Net/protoent.pm
OLD_FILES+=usr/libdata/perl/5.00503/Net/servent.pm
OLD_FILES+=usr/libdata/perl/5.00503/Pod/Functions.pm
OLD_FILES+=usr/libdata/perl/5.00503/Pod/Html.pm
OLD_FILES+=usr/libdata/perl/5.00503/Pod/Text.pm
OLD_FILES+=usr/libdata/perl/5.00503/Search/Dict.pm
OLD_FILES+=usr/libdata/perl/5.00503/Sys/Hostname.pm
OLD_FILES+=usr/libdata/perl/5.00503/Sys/Syslog.pm
OLD_FILES+=usr/libdata/perl/5.00503/Term/Cap.pm
OLD_FILES+=usr/libdata/perl/5.00503/Term/Complete.pm
OLD_FILES+=usr/libdata/perl/5.00503/Term/ReadLine.pm
OLD_FILES+=usr/libdata/perl/5.00503/Test/Harness.pm
OLD_FILES+=usr/libdata/perl/5.00503/Text/Abbrev.pm
OLD_FILES+=usr/libdata/perl/5.00503/Text/ParseWords.pm
OLD_FILES+=usr/libdata/perl/5.00503/Text/Soundex.pm
OLD_FILES+=usr/libdata/perl/5.00503/Text/Tabs.pm
OLD_FILES+=usr/libdata/perl/5.00503/Text/Wrap.pm
OLD_FILES+=usr/libdata/perl/5.00503/Tie/Array.pm
OLD_FILES+=usr/libdata/perl/5.00503/Tie/Handle.pm
OLD_FILES+=usr/libdata/perl/5.00503/Tie/Hash.pm
OLD_FILES+=usr/libdata/perl/5.00503/Tie/RefHash.pm
OLD_FILES+=usr/libdata/perl/5.00503/Tie/Scalar.pm
OLD_FILES+=usr/libdata/perl/5.00503/Tie/SubstrHash.pm
OLD_FILES+=usr/libdata/perl/5.00503/Time/Local.pm
OLD_FILES+=usr/libdata/perl/5.00503/Time/gmtime.pm
OLD_FILES+=usr/libdata/perl/5.00503/Time/localtime.pm
OLD_FILES+=usr/libdata/perl/5.00503/Time/tm.pm
OLD_FILES+=usr/libdata/perl/5.00503/User/grent.pm
OLD_FILES+=usr/libdata/perl/5.00503/User/pwent.pm
OLD_FILES+=usr/libdata/perl/5.00503/auto/Getopt/Long/GetOptions.al
OLD_FILES+=usr/libdata/perl/5.00503/auto/Getopt/Long/FindOption.al
OLD_FILES+=usr/libdata/perl/5.00503/auto/Getopt/Long/Configure.al
OLD_FILES+=usr/libdata/perl/5.00503/auto/Getopt/Long/config.al
OLD_FILES+=usr/libdata/perl/5.00503/auto/Getopt/Long/Croak.al
OLD_FILES+=usr/libdata/perl/5.00503/auto/Getopt/Long/autosplit.ix
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Deparse.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/CC.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Debug.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Showlex.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/makeliblinks
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Bblock.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/cc_harness
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Bytecode.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Stackobj.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Xref.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Lint.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Asmdata.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Assembler.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Disassembler.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/disassemble
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/assemble
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Terse.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/C.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/EXTERN.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/INTERN.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/XSUB.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/XSlock.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/av.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/bytecode.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/byterun.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/cc_runtime.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/config.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/cop.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/cv.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/dosish.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/embed.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/embedvar.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/fakethr.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/form.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/gv.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/handy.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/hv.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/intrpvar.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/iperlsys.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/keywords.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/mg.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/nostdio.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/objXSUB.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/objpp.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/op.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/opcode.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/patchlevel.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/perl.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/perlio.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/perlsdio.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/perlsfio.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/perlvars.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/perly.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/pp.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/pp_proto.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/proto.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/regcomp.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/regexp.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/regnodes.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/scope.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/sv.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/thrdvar.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/thread.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/unixish.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/util.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/Data/Dumper.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IO/File.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IO/Select.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IO/Socket.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IO/Handle.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IO/Seekable.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IO/Pipe.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IPC/SysV.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IPC/Msg.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IPC/Semaphore.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/B/B.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/B/B.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/B/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DB_File/autosplit.ix
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DB_File/DB_File.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DB_File/DB_File.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DB_File/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Data/Dumper/Dumper.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Data/Dumper/Dumper.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Data/Dumper/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DynaLoader/.exists
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DynaLoader/dl_findfile.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DynaLoader/dl_expandspec.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DynaLoader/dl_find_symbol_anywhere.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DynaLoader/autosplit.ix
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DynaLoader/DynaLoader.a
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DynaLoader/extralibs.ld
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Fcntl/Fcntl.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Fcntl/Fcntl.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Fcntl/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/IO/IO.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/IO/IO.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/IO/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/IPC/SysV/SysV.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/IPC/SysV/SysV.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/IPC/SysV/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/NDBM_File/NDBM_File.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/NDBM_File/NDBM_File.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/NDBM_File/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Opcode/Opcode.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Opcode/Opcode.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Opcode/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/assert.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/tolower.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/toupper.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/closedir.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/opendir.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/readdir.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/rewinddir.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/errno.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/creat.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fcntl.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getgrgid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getgrnam.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/atan2.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/cos.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/exp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fabs.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/log.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/pow.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/sin.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/sqrt.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getpwnam.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getpwuid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/longjmp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/setjmp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/kill.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/feof.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/siglongjmp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/sigsetjmp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/raise.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/offsetof.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/clearerr.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fclose.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fdopen.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fgetc.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fgets.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fileno.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fopen.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fprintf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fputc.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fputs.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fread.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/freopen.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fscanf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fseek.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/ferror.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fflush.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fgetpos.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fsetpos.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/ftell.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fwrite.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getc.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getchar.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/gets.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/perror.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/printf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/putc.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/putchar.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/puts.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/remove.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/rename.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/rewind.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/scanf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/sprintf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/sscanf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/tmpfile.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/ungetc.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/vfprintf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/vprintf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/vsprintf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/abs.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/atexit.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/atof.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/atoi.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/atol.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/bsearch.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/calloc.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/div.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/exit.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/free.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getenv.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/labs.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/ldiv.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/malloc.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/qsort.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/rand.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/realloc.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/srand.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/system.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/memchr.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/memcmp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/memcpy.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/memmove.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/memset.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strcat.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strchr.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strcmp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strcpy.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strcspn.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strerror.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strlen.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strncat.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strncmp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strncpy.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strpbrk.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strrchr.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strspn.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strstr.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strtok.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/chmod.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fstat.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/mkdir.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/stat.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/umask.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/wait.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/waitpid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/gmtime.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/localtime.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/time.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/alarm.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/chdir.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/chown.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/execl.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/execle.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/execlp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/execv.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/execve.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/execvp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fork.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getcwd.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getegid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/geteuid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getgid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getgroups.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getlogin.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getpgrp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getpid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getppid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getuid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/isatty.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/link.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/rmdir.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/setbuf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/setgid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/setuid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/setvbuf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/sleep.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/unlink.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/utime.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/autosplit.ix
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/POSIX.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/POSIX.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/SDBM_File/SDBM_File.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/SDBM_File/SDBM_File.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/SDBM_File/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Socket/Socket.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Socket/Socket.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Socket/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/attrs/attrs.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/attrs/attrs.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/attrs/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/re/re.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/re/re.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/re/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/sdbm/extralibs.ld
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Errno/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/Config.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/O.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/perllocal.pod
OLD_FILES+=usr/libdata/perl/5.00503/mach/DB_File.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/Errno.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/Fcntl.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IO.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/NDBM_File.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/Safe.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/Opcode.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/ops.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/POSIX.pod
OLD_FILES+=usr/libdata/perl/5.00503/mach/POSIX.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/SDBM_File.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/Socket.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/attrs.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/re.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/_h2ph_pre.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/a.out.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_ccb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_debug.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_extend.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_periph.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_queue.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_sim.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_xpt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_xpt_periph.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_xpt_sim.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/aio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/alias.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/assert.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/bitstring.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/calendar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/camlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/com_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/com_right.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ctype.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/curses.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/db.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/des.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/devstat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/dialog.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/dirent.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/disktab.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/dlfcn.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/elf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/errno.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/eti.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/fcntl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/fetch.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/float.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/floatingpoint.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/fnmatch.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/form.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/fstab.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ftpio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/fts.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/glob.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/gmp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/gnuregex.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/grp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/histedit.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ieeefp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ifaddrs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/inttypes.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/iso646.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/kvm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/libatm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/libdisk.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/libgen.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/libusb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/libutil.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/limits.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/link.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/linker_set.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/locale.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/login_cap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/malloc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/FlexLexer.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/PlotFile.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/SFile.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/_G_config.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/algo.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/algobase.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/alloc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/builtinbuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/bvector.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/complex.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/defalloc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/deque.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/editbuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/floatio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/fstream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/function.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/hash_map.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/hash_set.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/hashtable.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/heap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/indstream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/iolibio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/iomanip.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/list.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/iostdio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/iostream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/iostreamP.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/istream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/iterator.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/libio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/libioP.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/map.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/multimap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/multiset.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/new.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/ostream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/pair.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/parsestream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/pfstream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/procbuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/pthread_alloc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/rope.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/ropeimpl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/set.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/slist.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stack.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stdiostream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_algo.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/tree.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_algobase.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_alloc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_bvector.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_config.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_construct.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_deque.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_function.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_hash_fun.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_hash_map.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_hash_set.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_hashtable.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_heap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_iterator.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_list.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_map.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_multimap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_multiset.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_numeric.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_pair.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_queue.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_raw_storage_iter.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_relops.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_rope.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_set.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_slist.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_stack.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_tempbuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_tree.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_uninitialized.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_vector.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/streambuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/strfile.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/strstream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/tempbuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/type_traits.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/vector.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/math.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/md2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/md4.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/md5.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/memory.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/menu.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/mp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/mpool.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/mqueue.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ncurses.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ndbm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netdb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nl_types.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nlist.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objformat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/opie.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/osreldate.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/panel.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/paths.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pcap-int.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pcap-namedb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pcap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/poll.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pthread.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pthread_np.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pwd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/radlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ranlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/regex.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/regexp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/resolv.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ripemd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rune.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/runetype.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sched.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/search.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/semaphore.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/setjmp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sgtty.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sha.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/signal.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/skey.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/stab.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/stand.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/stdarg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/stddef.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/stdio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/stdlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/strhash.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/string.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/stringlist.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/strings.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/struct.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sysexits.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/syslog.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/taclib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/tar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/tcpd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/term.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/termcap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/termios.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/time.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/timers.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ttyent.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ucontext.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/unctrl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/unistd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/utime.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/utmp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/values.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/varargs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vgl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vis.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/zconf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/zlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/arpa/ftp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/arpa/inet.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/arpa/nameser.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/arpa/nameser_compat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/arpa/telnet.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/arpa/tftp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/assertions.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/ctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/dst.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/eventlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/heap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/irpmarshall.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/logging.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/memcluster.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/misc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/tree.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/list.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ansi.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/apic.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/apm_bios.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/apm_segments.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/asc_ioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/asm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/asmacros.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/asnames.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/atomic.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bootinfo.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bus.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bus_at386.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bus_memio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bus_pc98.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bus_pio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bus_pio_ind.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/cdk.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/clock.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/comstats.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/console.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/cpu.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/cpufunc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/cputypes.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/cronyx.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/db_machdep.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/dvcfg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/elf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/endian.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/exec.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/float.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/floatingpoint.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/frame.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/globaldata.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/globals.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/gsc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/i4b_cause.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/i4b_debug.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/i4b_ioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/i4b_rbch_ioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/i4b_tel_ioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/i4b_trace.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ieeefp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/if_wavelan_ieee.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/if_wl_wavelan.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/iic.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/in_cksum.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ioctl_bt848.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ioctl_ctx.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ioctl_fd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ioctl_meteor.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ipl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/joystick.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/limits.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/lock.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/md_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/mouse.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/mpapic.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/mtpr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bus_dma.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/npx.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/param.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/pcaudioio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/pcb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/pcb_ext.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/pcvt_ioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/perfmon.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/physio_proc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/pmap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/proc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/profile.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/psl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ptrace.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/reg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/reloc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/resource.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/segments.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/setjmp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/sigframe.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/signal.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/smb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/smp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/smptests.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/soundcard.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/speaker.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/specialreg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/spigot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/stdarg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/sysarch.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/trap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/tss.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/types.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/uc_device.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ucontext.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ultrasound.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/varargs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/vm86.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/vmparam.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/wtio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/i4b_isppp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/pci_cfgreg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/msdosfs/bootsect.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/msdosfs/bpb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/msdosfs/denode.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/msdosfs/direntry.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/msdosfs/fat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/msdosfs/msdosfsmount.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/bpf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/bpf_compat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/bpfdesc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/bridge.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/ethernet.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/hostcache.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_arp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_atm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_dl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_gif.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_ieee80211.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_llc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_media.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_mib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_ppp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_pppvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_slvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_sppp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_stf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_tap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_tapvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_tun.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/slip.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_tunvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_types.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_vlan_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/intrq.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/iso88025.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/net_osdep.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/netisr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/pfkeyv2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/ppp_comp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/ppp_defs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/radix.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/raw_cb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/route.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/slcompress.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/zlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_faith.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_arc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_gre.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/krpc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsdiskless.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsm_subs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsmount.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsnode.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsproto.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsrtt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsrvcache.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsv2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nqnfs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/rpcv2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/xdr_subs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/aarp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/at.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/at_extern.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/at_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/ddp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/ddp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/endian.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/phase2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_cm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_if.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_ioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_pcb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_sap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_sigmgr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_stack.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_sys.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_vc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/kern_include.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/port.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/queue.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/netgraph.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_UI.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_async.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_bpf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_bridge.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_cisco.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_echo.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_ether.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_frame_relay.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_hole.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_iface.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_ksocket.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_lmi.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_message.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_mppc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_one2many.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_parse.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_ppp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_pppoe.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_pptpgre.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_rfc1490.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_sample.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_socket.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_socketvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_tee.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_tty.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_vjc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_eiface.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_etf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_device.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_l2tp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_fec.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/icmp6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/icmp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/if_atm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/if_ether.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/if_fddi.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/igmp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/igmp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/in.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/in_gif.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/in_hostcache.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/in_pcb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/in_systm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/in_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_auth.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_compat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_dummynet.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_ecn.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_encap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_fil.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_flow.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_frag.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_fw.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_icmp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_mroute.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_nat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_proxy.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_state.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ipl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ipprotosw.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/tcp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/tcp_debug.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/tcp_fsm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/tcp_seq.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/tcp_timer.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/tcp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/tcpip.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/udp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/udp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_fw2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_gre.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ah.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ah6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/esp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/esp6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/icmp6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/in6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/in6_gif.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/in6_ifattach.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/in6_pcb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/in6_prefix.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/in6_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ip6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ip6_ecn.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ip6_fw.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ip6_mroute.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ip6_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ip6protosw.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ipcomp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ipcomp6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ipsec.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ipsec6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/mld6_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/nd6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/pim6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/pim6_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/scope6_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/tcp6_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/udp6_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/esp_rijndael.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/raw_ip6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/ipx.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/ipx_if.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/ipx_ip.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/ipx_pcb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/ipx_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/spx.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/spx_debug.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/spx_timer.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/spx_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netkey/key.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netkey/key_debug.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netkey/key_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netkey/keydb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netkey/keysock.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netnatm/natm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_cfg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_conn.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_file.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_lib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_ncp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_nls.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_rcfile.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_rq.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_sock.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_subr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_user.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/nwerror.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/idp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/idp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/ns.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/ns_error.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/ns_if.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/ns_pcb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/sp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/spidp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/spp_debug.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/spp_timer.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/spp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ntfs/ntfs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ntfs/ntfs_compr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ntfs/ntfs_ihash.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ntfs/ntfs_inode.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ntfs/ntfs_subr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ntfs/ntfs_vfsops.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ntfs/ntfsmount.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nwfs/nwfs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nwfs/nwfs_mount.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nwfs/nwfs_node.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nwfs/nwfs_subr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/NXConstStr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/Object.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/Protocol.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/encoding.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/hash.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/objc-api.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/objc-list.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/objc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/runtime.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/sarray.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/thr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/typedstream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/asn1.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/asn1_mac.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/bio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/blowfish.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/bn.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/buffer.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/cast.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/comp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/conf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/conf_api.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/crypto.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/des.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/dh.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/dsa.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/dso.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/e_os.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/e_os2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ebcdic.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/evp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hmac.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/lhash.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/md2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/md4.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/md5.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/mdc2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/obj_mac.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/objects.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/opensslconf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/opensslv.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/pem.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/pem2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/pkcs12.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/pkcs7.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/rand.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/rc2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/rc4.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/rc5.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ripemd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/rsa.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/safestack.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/sha.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ssl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ssl2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ssl23.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ssl3.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ssl_locl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/stack.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/symhacks.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/tls1.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/tmdiff.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/txt_db.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/x509.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/x509_vfy.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/x509v3.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/idea.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/aes.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/asn1t.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/cryptlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/des_old.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ec.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/engine.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/krb5_asn.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/kssl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ocsp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ossl_typ.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ui.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ui_compat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/aes_locl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/eng_int.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_4758_cca_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_aep_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_atalla_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_cswift_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_ncipher_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_nuron_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_sureware_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_ubsec_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ui_locl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/cardinfo.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/cis.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/driver.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/i82365.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/pccard_nbk.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/slot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/meciareg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/pcic_pci.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/pcicvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/posix4/aio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/posix4/mqueue.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/posix4/posix4.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/posix4/sched.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/posix4/semaphore.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/protocols/dumprestore.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/protocols/routed.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/protocols/rwhod.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/protocols/talkd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/protocols/timed.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readline/chardefs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readline/history.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readline/keymaps.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readline/readline.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readline/rlconf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readline/rlstdc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readline/tilde.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/auth.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/auth_des.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/auth_unix.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/clnt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/des.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/des_crypt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/key_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/pmap_clnt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/pmap_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/pmap_rmt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/rpc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/rpc_com.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/rpc_msg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/svc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/svc_auth.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/types.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/xdr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/bootparam_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/crypt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/key_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/klm_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/mount.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nfs_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nis.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nis_cache.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nis_callback.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nis_db.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nis_tags.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nislib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nlm_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/rex.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/rnusers.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/rquota.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/rstat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/rwall.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/sm_inter.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/spray.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/yp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/yp_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/ypclnt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/yppasswd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/ypupdate_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/ypxfrd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/_pam_compat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/_pam_macros.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/_pam_types.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/pam_appl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/pam_malloc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/pam_misc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/pam_mod_misc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/pam_modules.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ss/mit-sipb-copyright.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ss/ss.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ss/ss_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/_posix.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ata.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/acct.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/acl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/agpio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/aio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/assym.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/blist.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/buf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/bus.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/bus_private.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/callout.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ccdvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/cdefs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/cdio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/cdrio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/chio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/clist.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/endian.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/conf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/cons.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/consio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/copyright.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ctype.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/dir.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/dataacq.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/link_elf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/device_port.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/devicestat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/dirent.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/disk.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/disklabel.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/diskslice.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/dkstat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/dmap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/domain.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/dvdio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/elf32.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/elf64.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/elf_common.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/elf_generic.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/errno.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/event.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/eventhandler.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/eventvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/exec.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/extattr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/fbio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/fcntl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/file.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/filedesc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/filio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/gmon.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/imgact.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/imgact_aout.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/imgact_elf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/inflate.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/interrupt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/inttypes.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ioccom.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ioctl_compat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ipc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/jail.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/joystick.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/kbio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/kernel.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/kthread.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ktrace.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/libkern.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/linker.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/linker_set.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/lock.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/lockf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/malloc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/mbuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/md5.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/memrange.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/mman.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/module.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/mount.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/msg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/msgbuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/mtio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/namei.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/param.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/pciio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/pioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/pipe.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/poll.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/proc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/procfs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/protosw.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ptio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ptrace.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/queue.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/random.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/reboot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/resource.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/resourcevar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/rman.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/rtprio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/sbuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/select.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/sem.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/shm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/signal.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/signalvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/snoop.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/socket.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/socketvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/sockio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/soundcard.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/stat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/syscall-hide.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/syscall.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/sysctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/sysent.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/syslimits.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/syslog.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/sysproto.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/systm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/taskqueue.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/termios.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/time.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/timeb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/timepps.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/timers.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/times.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/timex.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/tprintf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/tty.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ttychars.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ttycom.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ttydefaults.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ttydev.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/types.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ucontext.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ucred.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/uio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/un.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/unistd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/unpcb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/user.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/utsname.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/vmmeter.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/vnioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/vnode.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/wait.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/wormio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/xrpuio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/kobj.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/link_aout.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/nlist_aout.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/mchain.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/fnv_hash.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/iconv.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/md4.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/pmap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/swap_pager.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_extern.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_kern.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_map.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_object.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_page.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_pageout.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_pager.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_param.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_zone.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vnode_pager.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/complex.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/stdbool.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/langinfo.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/netbios.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/smb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/smb_conn.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/smb_dev.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/smb_rq.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/smb_subr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/smb_tran.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/smb_trantcp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g2c.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/telnet.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/elf-hints.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/libusbhid.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/radlib_vs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readpassphrase.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/wchar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/wctype.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/cast.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/castsb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/cryptodev.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/cryptosoft.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/deflate.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/rijndael.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/rmd160.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/skipjack.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/xform.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/ah.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/ah_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/esp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/esp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/ipcomp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/ipcomp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/ipip_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/ipsec.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/ipsec6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/key.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/key_debug.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/key_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/keydb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/keysock.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/xform.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/bzlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/pod/perl.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perl5004delta.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlapio.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlbook.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlbot.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlcall.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perldata.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perldebug.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perldelta.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perldiag.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perldsc.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlembed.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq1.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq2.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq3.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq4.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq5.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq6.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq7.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlipc.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq8.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq9.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlform.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfunc.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlguts.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlhist.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perllocale.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perllol.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlmod.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlmodinstall.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlmodlib.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlobj.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlop.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlopentut.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlpod.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlport.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlre.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlref.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlreftut.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlrun.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlsec.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlstyle.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlsub.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlsyn.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlthrtut.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perltie.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perltoc.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perltoot.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perltrap.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlvar.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlxs.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlxstut.pod
OLD_FILES+=usr/libdata/perl/5.00503/AnyDBM_File.pm
OLD_FILES+=usr/libdata/perl/5.00503/AutoLoader.pm
OLD_FILES+=usr/libdata/perl/5.00503/AutoSplit.pm
OLD_FILES+=usr/libdata/perl/5.00503/Benchmark.pm
OLD_FILES+=usr/libdata/perl/5.00503/CGI.pm
OLD_FILES+=usr/libdata/perl/5.00503/CPAN.pm
OLD_FILES+=usr/libdata/perl/5.00503/Carp.pm
OLD_FILES+=usr/libdata/perl/5.00503/Cwd.pm
OLD_FILES+=usr/libdata/perl/5.00503/DirHandle.pm
OLD_FILES+=usr/libdata/perl/5.00503/Dumpvalue.pm
OLD_FILES+=usr/libdata/perl/5.00503/English.pm
OLD_FILES+=usr/libdata/perl/5.00503/Env.pm
OLD_FILES+=usr/libdata/perl/5.00503/Exporter.pm
OLD_FILES+=usr/libdata/perl/5.00503/Fatal.pm
OLD_FILES+=usr/libdata/perl/5.00503/FileCache.pm
OLD_FILES+=usr/libdata/perl/5.00503/FileHandle.pm
OLD_FILES+=usr/libdata/perl/5.00503/FindBin.pm
OLD_FILES+=usr/libdata/perl/5.00503/SelectSaver.pm
OLD_FILES+=usr/libdata/perl/5.00503/SelfLoader.pm
OLD_FILES+=usr/libdata/perl/5.00503/Shell.pm
OLD_FILES+=usr/libdata/perl/5.00503/Symbol.pm
OLD_FILES+=usr/libdata/perl/5.00503/Test.pm
OLD_FILES+=usr/libdata/perl/5.00503/abbrev.pl
OLD_FILES+=usr/libdata/perl/5.00503/UNIVERSAL.pm
OLD_FILES+=usr/libdata/perl/5.00503/assert.pl
OLD_FILES+=usr/libdata/perl/5.00503/autouse.pm
OLD_FILES+=usr/libdata/perl/5.00503/base.pm
OLD_FILES+=usr/libdata/perl/5.00503/bigfloat.pl
OLD_FILES+=usr/libdata/perl/5.00503/bigint.pl
OLD_FILES+=usr/libdata/perl/5.00503/bigrat.pl
OLD_FILES+=usr/libdata/perl/5.00503/blib.pm
OLD_FILES+=usr/libdata/perl/5.00503/cacheout.pl
OLD_FILES+=usr/libdata/perl/5.00503/chat2.pl
OLD_FILES+=usr/libdata/perl/5.00503/complete.pl
OLD_FILES+=usr/libdata/perl/5.00503/constant.pm
OLD_FILES+=usr/libdata/perl/5.00503/ctime.pl
OLD_FILES+=usr/libdata/perl/5.00503/diagnostics.pm
OLD_FILES+=usr/libdata/perl/5.00503/dotsh.pl
OLD_FILES+=usr/libdata/perl/5.00503/dumpvar.pl
OLD_FILES+=usr/libdata/perl/5.00503/exceptions.pl
OLD_FILES+=usr/libdata/perl/5.00503/fastcwd.pl
OLD_FILES+=usr/libdata/perl/5.00503/fields.pm
OLD_FILES+=usr/libdata/perl/5.00503/find.pl
OLD_FILES+=usr/libdata/perl/5.00503/finddepth.pl
OLD_FILES+=usr/libdata/perl/5.00503/flush.pl
OLD_FILES+=usr/libdata/perl/5.00503/ftp.pl
OLD_FILES+=usr/libdata/perl/5.00503/getcwd.pl
OLD_FILES+=usr/libdata/perl/5.00503/getopt.pl
OLD_FILES+=usr/libdata/perl/5.00503/getopts.pl
OLD_FILES+=usr/libdata/perl/5.00503/hostname.pl
OLD_FILES+=usr/libdata/perl/5.00503/importenv.pl
OLD_FILES+=usr/libdata/perl/5.00503/integer.pm
OLD_FILES+=usr/libdata/perl/5.00503/less.pm
OLD_FILES+=usr/libdata/perl/5.00503/lib.pm
OLD_FILES+=usr/libdata/perl/5.00503/locale.pm
OLD_FILES+=usr/libdata/perl/5.00503/look.pl
OLD_FILES+=usr/libdata/perl/5.00503/newgetopt.pl
OLD_FILES+=usr/libdata/perl/5.00503/open2.pl
OLD_FILES+=usr/libdata/perl/5.00503/open3.pl
OLD_FILES+=usr/libdata/perl/5.00503/overload.pm
OLD_FILES+=usr/libdata/perl/5.00503/perl5db.pl
OLD_FILES+=usr/libdata/perl/5.00503/pwd.pl
OLD_FILES+=usr/libdata/perl/5.00503/shellwords.pl
OLD_FILES+=usr/libdata/perl/5.00503/sigtrap.pm
OLD_FILES+=usr/libdata/perl/5.00503/stat.pl
OLD_FILES+=usr/libdata/perl/5.00503/strict.pm
OLD_FILES+=usr/libdata/perl/5.00503/subs.pm
OLD_FILES+=usr/libdata/perl/5.00503/syslog.pl
OLD_FILES+=usr/libdata/perl/5.00503/tainted.pl
OLD_FILES+=usr/libdata/perl/5.00503/termcap.pl
OLD_FILES+=usr/libdata/perl/5.00503/timelocal.pl
OLD_FILES+=usr/libdata/perl/5.00503/validate.pl
OLD_FILES+=usr/libdata/perl/5.00503/vars.pm
OLD_FILES+=usr/libdata/perl/5.00503/re.pm
OLD_FILES+=usr/libdata/perl/5.00503/Config.pm
OLD_FILES+=usr/libdata/perl/5.00503/.exists
OLD_FILES+=usr/libdata/perl/5.00503/DynaLoader.pm
OLD_FILES+=usr/share/perl/man/man3/AnyDBM_File.3.gz
OLD_FILES+=usr/share/perl/man/man3/AutoLoader.3.gz
OLD_FILES+=usr/share/perl/man/man3/AutoSplit.3.gz
OLD_FILES+=usr/share/perl/man/man3/B.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Asmdata.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Assembler.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Bblock.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Bytecode.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::C.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::CC.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Debug.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Deparse.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Disassembler.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Lint.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Showlex.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Stackobj.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Terse.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Xref.3.gz
OLD_FILES+=usr/share/perl/man/man3/Benchmark.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI::Apache.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI::Carp.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI::Cookie.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI::Fast.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI::Push.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI::Switch.3.gz
OLD_FILES+=usr/share/perl/man/man3/CPAN.3.gz
OLD_FILES+=usr/share/perl/man/man3/CPAN::FirstTime.3.gz
OLD_FILES+=usr/share/perl/man/man3/CPAN::Nox.3.gz
OLD_FILES+=usr/share/perl/man/man3/Carp.3.gz
OLD_FILES+=usr/share/perl/man/man3/Class::Struct.3.gz
OLD_FILES+=usr/share/perl/man/man3/Config.3.gz
OLD_FILES+=usr/share/perl/man/man3/Cwd.3.gz
OLD_FILES+=usr/share/perl/man/man3/DB_File.3.gz
OLD_FILES+=usr/share/perl/man/man3/Data::Dumper.3.gz
OLD_FILES+=usr/share/perl/man/man3/Devel::SelfStubber.3.gz
OLD_FILES+=usr/share/perl/man/man3/DirHandle.3.gz
OLD_FILES+=usr/share/perl/man/man3/Dumpvalue.3.gz
OLD_FILES+=usr/share/perl/man/man3/DynaLoader.3.gz
OLD_FILES+=usr/share/perl/man/man3/English.3.gz
OLD_FILES+=usr/share/perl/man/man3/Env.3.gz
OLD_FILES+=usr/share/perl/man/man3/Exporter.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Command.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Embed.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Install.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Installed.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Liblist.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::MM_OS2.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::MM_Unix.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::MM_VMS.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::MM_Win32.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::MakeMaker.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Manifest.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Mkbootstrap.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Mksymlists.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Packlist.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::testlib.3.gz
OLD_FILES+=usr/share/perl/man/man3/Fatal.3.gz
OLD_FILES+=usr/share/perl/man/man3/Fcntl.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Basename.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::CheckTree.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Compare.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Copy.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::DosGlob.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Find.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Path.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Spec.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Spec::Mac.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Spec::OS2.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Spec::Unix.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Spec::VMS.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Spec::Win32.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::stat.3.gz
OLD_FILES+=usr/share/perl/man/man3/FileCache.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO.3.gz
OLD_FILES+=usr/share/perl/man/man3/FileHandle.3.gz
OLD_FILES+=usr/share/perl/man/man3/FindBin.3.gz
OLD_FILES+=usr/share/perl/man/man3/GDBM_File.3.gz
OLD_FILES+=usr/share/perl/man/man3/Getopt::Long.3.gz
OLD_FILES+=usr/share/perl/man/man3/Getopt::Std.3.gz
OLD_FILES+=usr/share/perl/man/man3/I18N::Collate.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::File.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Handle.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Pipe.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Seekable.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Select.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Socket.3.gz
OLD_FILES+=usr/share/perl/man/man3/IPC::Msg.3.gz
OLD_FILES+=usr/share/perl/man/man3/IPC::Open2.3.gz
OLD_FILES+=usr/share/perl/man/man3/IPC::Open3.3.gz
OLD_FILES+=usr/share/perl/man/man3/IPC::Semaphore.3.gz
OLD_FILES+=usr/share/perl/man/man3/IPC::SysV.3.gz
OLD_FILES+=usr/share/perl/man/man3/Math::BigFloat.3.gz
OLD_FILES+=usr/share/perl/man/man3/Math::BigInt.3.gz
OLD_FILES+=usr/share/perl/man/man3/Math::Complex.3.gz
OLD_FILES+=usr/share/perl/man/man3/Math::Trig.3.gz
OLD_FILES+=usr/share/perl/man/man3/NDBM_File.3.gz
OLD_FILES+=usr/share/perl/man/man3/Net::Ping.3.gz
OLD_FILES+=usr/share/perl/man/man3/Net::hostent.3.gz
OLD_FILES+=usr/share/perl/man/man3/Net::netent.3.gz
OLD_FILES+=usr/share/perl/man/man3/Net::protoent.3.gz
OLD_FILES+=usr/share/perl/man/man3/Net::servent.3.gz
OLD_FILES+=usr/share/perl/man/man3/O.3.gz
OLD_FILES+=usr/share/perl/man/man3/ODBM_File.3.gz
OLD_FILES+=usr/share/perl/man/man3/Opcode.3.gz
OLD_FILES+=usr/share/perl/man/man3/POSIX.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Html.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Text.3.gz
OLD_FILES+=usr/share/perl/man/man3/SDBM_File.3.gz
OLD_FILES+=usr/share/perl/man/man3/Safe.3.gz
OLD_FILES+=usr/share/perl/man/man3/Search::Dict.3.gz
OLD_FILES+=usr/share/perl/man/man3/SelectSaver.3.gz
OLD_FILES+=usr/share/perl/man/man3/SelfLoader.3.gz
OLD_FILES+=usr/share/perl/man/man3/Shell.3.gz
OLD_FILES+=usr/share/perl/man/man3/Socket.3.gz
OLD_FILES+=usr/share/perl/man/man3/Symbol.3.gz
OLD_FILES+=usr/share/perl/man/man3/re.3.gz
OLD_FILES+=usr/share/perl/man/man3/Sys::Hostname.3.gz
OLD_FILES+=usr/share/perl/man/man3/Sys::Syslog.3.gz
OLD_FILES+=usr/share/perl/man/man3/Term::Cap.3.gz
OLD_FILES+=usr/share/perl/man/man3/Term::Complete.3.gz
OLD_FILES+=usr/share/perl/man/man3/Term::ReadLine.3.gz
OLD_FILES+=usr/share/perl/man/man3/Test.3.gz
OLD_FILES+=usr/share/perl/man/man3/Test::Harness.3.gz
OLD_FILES+=usr/share/perl/man/man3/Text::Abbrev.3.gz
OLD_FILES+=usr/share/perl/man/man3/Text::ParseWords.3.gz
OLD_FILES+=usr/share/perl/man/man3/Text::Soundex.3.gz
OLD_FILES+=usr/share/perl/man/man3/Text::Tabs.3.gz
OLD_FILES+=usr/share/perl/man/man3/Text::Wrap.3.gz
OLD_FILES+=usr/share/perl/man/man3/Thread.3.gz
OLD_FILES+=usr/share/perl/man/man3/Thread::Queue.3.gz
OLD_FILES+=usr/share/perl/man/man3/Thread::Semaphore.3.gz
OLD_FILES+=usr/share/perl/man/man3/Thread::Signal.3.gz
OLD_FILES+=usr/share/perl/man/man3/Thread::Specific.3.gz
OLD_FILES+=usr/share/perl/man/man3/Tie::Array.3.gz
OLD_FILES+=usr/share/perl/man/man3/Tie::Handle.3.gz
OLD_FILES+=usr/share/perl/man/man3/Tie::Hash.3.gz
OLD_FILES+=usr/share/perl/man/man3/Tie::RefHash.3.gz
OLD_FILES+=usr/share/perl/man/man3/Tie::Scalar.3.gz
OLD_FILES+=usr/share/perl/man/man3/Tie::SubstrHash.3.gz
OLD_FILES+=usr/share/perl/man/man3/Time::Local.3.gz
OLD_FILES+=usr/share/perl/man/man3/Time::gmtime.3.gz
OLD_FILES+=usr/share/perl/man/man3/Time::localtime.3.gz
OLD_FILES+=usr/share/perl/man/man3/Time::tm.3.gz
OLD_FILES+=usr/share/perl/man/man3/UNIVERSAL.3.gz
OLD_FILES+=usr/share/perl/man/man3/User::grent.3.gz
OLD_FILES+=usr/share/perl/man/man3/User::pwent.3.gz
OLD_FILES+=usr/share/perl/man/man3/attrs.3.gz
OLD_FILES+=usr/share/perl/man/man3/autouse.3.gz
OLD_FILES+=usr/share/perl/man/man3/base.3.gz
OLD_FILES+=usr/share/perl/man/man3/blib.3.gz
OLD_FILES+=usr/share/perl/man/man3/constant.3.gz
OLD_FILES+=usr/share/perl/man/man3/diagnostics.3.gz
OLD_FILES+=usr/share/perl/man/man3/fields.3.gz
OLD_FILES+=usr/share/perl/man/man3/integer.3.gz
OLD_FILES+=usr/share/perl/man/man3/less.3.gz
OLD_FILES+=usr/share/perl/man/man3/lib.3.gz
OLD_FILES+=usr/share/perl/man/man3/locale.3.gz
OLD_FILES+=usr/share/perl/man/man3/ops.3.gz
OLD_FILES+=usr/share/perl/man/man3/overload.3.gz
OLD_FILES+=usr/share/perl/man/man3/sigtrap.3.gz
OLD_FILES+=usr/share/perl/man/man3/strict.3.gz
OLD_FILES+=usr/share/perl/man/man3/subs.3.gz
OLD_FILES+=usr/share/perl/man/man3/vars.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Stash.3.gz
OLD_FILES+=usr/share/perl/man/man3/ByteLoader.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI::Pretty.3.gz
OLD_FILES+=usr/share/perl/man/man3/Carp::Heavy.3.gz
OLD_FILES+=usr/share/perl/man/man3/DB.3.gz
OLD_FILES+=usr/share/perl/man/man3/DProf::DProf.3.gz
OLD_FILES+=usr/share/perl/man/man3/Exporter::Heavy.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::MM_Cygwin.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Glob.3.gz
OLD_FILES+=usr/share/perl/man/man3/Glob::Glob.3.gz
OLD_FILES+=usr/share/perl/man/man3/Hostname::Hostname.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Dir.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Poll.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Socket::INET.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Socket::UNIX.3.gz
OLD_FILES+=usr/share/perl/man/man3/Peek::Peek.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Checker.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Find.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::InputObjects.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Man.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::ParseUtils.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Parser.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Plainer.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Select.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Text::Color.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Text::Termcap.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Usage.3.gz
OLD_FILES+=usr/share/perl/man/man3/Syslog::Syslog.3.gz
OLD_FILES+=usr/share/perl/man/man3/Term::ANSIColor.3.gz
OLD_FILES+=usr/share/perl/man/man3/XSLoader.3.gz
OLD_FILES+=usr/share/perl/man/man3/attributes.3.gz
OLD_FILES+=usr/share/perl/man/man3/bytes.3.gz
OLD_FILES+=usr/share/perl/man/man3/charnames.3.gz
OLD_FILES+=usr/share/perl/man/man3/filetest.3.gz
OLD_FILES+=usr/share/perl/man/man3/open.3.gz
OLD_FILES+=usr/share/perl/man/man3/utf8.3.gz
OLD_FILES+=usr/share/perl/man/man3/warnings.3.gz
OLD_FILES+=usr/share/perl/man/man3/warnings::register.3.gz
OLD_FILES+=usr/share/perl/man/whatis
OLD_FILES+=usr/share/man/man1/CA.pl.1.gz
OLD_FILES+=usr/share/man/man1/asn1parse.1.gz
OLD_FILES+=usr/share/man/man1/ca.1.gz
OLD_FILES+=usr/share/man/man1/ciphers.1.gz
OLD_FILES+=usr/share/man/man1/config.1.gz
OLD_FILES+=usr/share/man/man1/crl.1.gz
OLD_FILES+=usr/share/man/man1/crl2pkcs7.1.gz
OLD_FILES+=usr/share/man/man1/dgst.1.gz
OLD_FILES+=usr/share/man/man1/dhparam.1.gz
OLD_FILES+=usr/share/man/man1/doscmd.1.gz
OLD_FILES+=usr/share/man/man1/dsa.1.gz
OLD_FILES+=usr/share/man/man1/dsaparam.1.gz
OLD_FILES+=usr/share/man/man1/enc.1.gz
OLD_FILES+=usr/share/man/man1/gendsa.1.gz
OLD_FILES+=usr/share/man/man1/genrsa.1.gz
OLD_FILES+=usr/share/man/man1/getNAME.1.gz
OLD_FILES+=usr/share/man/man1/nseq.1.gz
OLD_FILES+=usr/share/man/man1/ocsp.1.gz
OLD_FILES+=usr/share/man/man1/openssl.1.gz
OLD_FILES+=usr/share/man/man1/perl.1.gz
OLD_FILES+=usr/share/man/man1/perl5004delta.1.gz
OLD_FILES+=usr/share/man/man1/perlapio.1.gz
OLD_FILES+=usr/share/man/man1/perlbook.1.gz
OLD_FILES+=usr/share/man/man1/perlbot.1.gz
OLD_FILES+=usr/share/man/man1/perlcall.1.gz
OLD_FILES+=usr/share/man/man1/perldata.1.gz
OLD_FILES+=usr/share/man/man1/perldebug.1.gz
OLD_FILES+=usr/share/man/man1/perldelta.1.gz
OLD_FILES+=usr/share/man/man1/perldiag.1.gz
OLD_FILES+=usr/share/man/man1/perldsc.1.gz
OLD_FILES+=usr/share/man/man1/perlembed.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq1.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq2.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq3.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq4.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq5.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq6.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq7.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq8.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq9.1.gz
OLD_FILES+=usr/share/man/man1/perlform.1.gz
OLD_FILES+=usr/share/man/man1/perlfunc.1.gz
OLD_FILES+=usr/share/man/man1/perlguts.1.gz
OLD_FILES+=usr/share/man/man1/perlhist.1.gz
OLD_FILES+=usr/share/man/man1/perlipc.1.gz
OLD_FILES+=usr/share/man/man1/perllocale.1.gz
OLD_FILES+=usr/share/man/man1/perllol.1.gz
OLD_FILES+=usr/share/man/man1/perlmod.1.gz
OLD_FILES+=usr/share/man/man1/perlmodinstall.1.gz
OLD_FILES+=usr/share/man/man1/perlmodlib.1.gz
OLD_FILES+=usr/share/man/man1/perlobj.1.gz
OLD_FILES+=usr/share/man/man1/perlop.1.gz
OLD_FILES+=usr/share/man/man1/perlopentut.1.gz
OLD_FILES+=usr/share/man/man1/perlpod.1.gz
OLD_FILES+=usr/share/man/man1/perlport.1.gz
OLD_FILES+=usr/share/man/man1/perlre.1.gz
OLD_FILES+=usr/share/man/man1/perlref.1.gz
OLD_FILES+=usr/share/man/man1/perlreftut.1.gz
OLD_FILES+=usr/share/man/man1/perlrun.1.gz
OLD_FILES+=usr/share/man/man1/perlsec.1.gz
OLD_FILES+=usr/share/man/man1/perlstyle.1.gz
OLD_FILES+=usr/share/man/man1/perlsub.1.gz
OLD_FILES+=usr/share/man/man1/perlsyn.1.gz
OLD_FILES+=usr/share/man/man1/perlthrtut.1.gz
OLD_FILES+=usr/share/man/man1/perltie.1.gz
OLD_FILES+=usr/share/man/man1/perltoc.1.gz
OLD_FILES+=usr/share/man/man1/perltoot.1.gz
OLD_FILES+=usr/share/man/man1/perltrap.1.gz
OLD_FILES+=usr/share/man/man1/perlvar.1.gz
OLD_FILES+=usr/share/man/man1/perlxs.1.gz
OLD_FILES+=usr/share/man/man1/perlxstut.1.gz
OLD_FILES+=usr/share/man/man1/perlbug.1.gz
OLD_FILES+=usr/share/man/man1/perlcc.1.gz
OLD_FILES+=usr/share/man/man1/perldoc.1.gz
OLD_FILES+=usr/share/man/man1/perl5005delta.1.gz
OLD_FILES+=usr/share/man/man1/perlfork.1.gz
OLD_FILES+=usr/share/man/man1/perlboot.1.gz
OLD_FILES+=usr/share/man/man1/perltootc.1.gz
OLD_FILES+=usr/share/man/man1/perldbmfilter.1.gz
OLD_FILES+=usr/share/man/man1/perldebguts.1.gz
OLD_FILES+=usr/share/man/man1/perlnumber.1.gz
OLD_FILES+=usr/share/man/man1/perlcompile.1.gz
OLD_FILES+=usr/share/man/man1/perltodo.1.gz
OLD_FILES+=usr/share/man/man1/perlapi.1.gz
OLD_FILES+=usr/share/man/man1/perlintern.1.gz
OLD_FILES+=usr/share/man/man1/perlhack.1.gz
OLD_FILES+=usr/share/man/man1/perlbc.1.gz
OLD_FILES+=usr/share/man/man1/pkcs12.1.gz
OLD_FILES+=usr/share/man/man1/pkcs7.1.gz
OLD_FILES+=usr/share/man/man1/pkcs8.1.gz
OLD_FILES+=usr/share/man/man1/rand.1.gz
OLD_FILES+=usr/share/man/man1/req.1.gz
OLD_FILES+=usr/share/man/man1/rsa.1.gz
OLD_FILES+=usr/share/man/man1/rsautl.1.gz
OLD_FILES+=usr/share/man/man1/s_client.1.gz
OLD_FILES+=usr/share/man/man1/s_server.1.gz
OLD_FILES+=usr/share/man/man1/sess_id.1.gz
OLD_FILES+=usr/share/man/man1/smime.1.gz
OLD_FILES+=usr/share/man/man1/speed.1.gz
OLD_FILES+=usr/share/man/man1/spkac.1.gz
OLD_FILES+=usr/share/man/man1/verify.1.gz
OLD_FILES+=usr/share/man/man1/version.1.gz
OLD_FILES+=usr/share/man/man1/x509.1.gz
OLD_FILES+=usr/share/man/man3/SSL_COMP_add_compression_method.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_get_ex_new_index.3.gz
OLD_FILES+=usr/share/man/man3/archive_entry_dup.3.gz
OLD_FILES+=usr/share/man/man3/archive_entry_set_tartype.3.gz
OLD_FILES+=usr/share/man/man3/archive_entry_tartype.3.gz
OLD_FILES+=usr/share/man/man3/archive_read_data_into_file.3.gz
OLD_FILES+=usr/share/man/man3/archive_read_open_tar.3.gz
OLD_FILES+=usr/share/man/man3/archive_read_support_format_gnutar.3.gz
OLD_FILES+=usr/share/man/man3/cipher.3.gz
OLD_FILES+=usr/share/man/man3/des_cipher.3.gz
OLD_FILES+=usr/share/man/man3/des_setkey.3.gz
OLD_FILES+=usr/share/man/man3/encrypt.3.gz
OLD_FILES+=usr/share/man/man3/endvfsent.3.gz
OLD_FILES+=usr/share/man/man3/getvfsbytype.3.gz
OLD_FILES+=usr/share/man/man3/getvfsent.3.gz
OLD_FILES+=usr/share/man/man3/isnanf.3.gz
OLD_FILES+=usr/share/man/man3/libautofs.3.gz
OLD_FILES+=usr/share/man/man3/pthread_attr_setsstack.3.gz
OLD_FILES+=usr/share/man/man3/pthread_getcancelstate.3.gz
OLD_FILES+=usr/share/man/man3/pthread_mutexattr_getpshared.3.gz
OLD_FILES+=usr/share/man/man3/pthread_mutexattr_setpshared.3.gz
OLD_FILES+=usr/share/man/man3/set_assertion_failure_callback.3.gz
OLD_FILES+=usr/share/man/man3/setkey.3.gz
OLD_FILES+=usr/share/man/man3/setvfsent.3.gz
OLD_FILES+=usr/share/man/man3/ssl.3.gz
OLD_FILES+=usr/share/man/man3/vfsisloadable.3.gz
OLD_FILES+=usr/share/man/man3/vfsload.3.gz
OLD_FILES+=usr/share/man/man4/als4000.4.gz
OLD_FILES+=usr/share/man/man4/csa.4.gz
OLD_FILES+=usr/share/man/man4/emu10k1.4.gz
OLD_FILES+=usr/share/man/man4/euc.4.gz
OLD_FILES+=usr/share/man/man4/gusc.4.gz
OLD_FILES+=usr/share/man/man4/if_fwp.4.gz
OLD_FILES+=usr/share/man/man4/lomac.4.gz
OLD_FILES+=usr/share/man/man4/maestro3.4.gz
OLD_FILES+=usr/share/man/man4/raid.4.gz
OLD_FILES+=usr/share/man/man4/sbc.4.gz
OLD_FILES+=usr/share/man/man4/sd.4.gz
OLD_FILES+=usr/share/man/man4/snc.4.gz
OLD_FILES+=usr/share/man/man4/st.4.gz
OLD_FILES+=usr/share/man/man4/uaudio.4.gz
OLD_FILES+=usr/share/man/man4/utf2.4.gz
OLD_FILES+=usr/share/man/man4/vinumdebug.4.gz
OLD_FILES+=usr/share/man/man5/disklabel.5.gz
OLD_FILES+=usr/share/man/man5/dm.conf.5.gz
OLD_FILES+=usr/share/man/man5/ranlib.5.gz
OLD_FILES+=usr/share/man/man5/utf2.5.gz
OLD_FILES+=usr/share/man/man7/groff_mwww.7.gz
OLD_FILES+=usr/share/man/man7/mmroff.7.gz
OLD_FILES+=usr/share/man/man7/mwww.7.gz
OLD_FILES+=usr/share/man/man7/style.perl.7.gz
OLD_FILES+=usr/share/man/man8/apm.8.gz
OLD_FILES+=usr/share/man/man8/apmconf.8.gz
OLD_FILES+=usr/share/man/man8/apmd.8.gz
OLD_FILES+=usr/share/man/man8/dm.8.gz
OLD_FILES+=usr/share/man/man8/pam_ftp.8.gz
OLD_FILES+=usr/share/man/man8/pam_wheel.8.gz
OLD_FILES+=usr/share/man/man8/sconfig.8.gz
OLD_FILES+=usr/share/man/man8/ssl.8.gz
OLD_FILES+=usr/share/man/man8/wlconfig.8.gz
OLD_FILES+=usr/share/man/man9/CURSIG.9.gz
OLD_FILES+=usr/share/man/man9/VFS_INIT.9.gz
OLD_FILES+=usr/share/man/man9/at_exit.9.gz
OLD_FILES+=usr/share/man/man9/at_fork.9.gz
OLD_FILES+=usr/share/man/man9/cdevsw_add.9.gz
OLD_FILES+=usr/share/man/man9/cdevsw_remove.9.gz
OLD_FILES+=usr/share/man/man9/cv_waitq_empty.9.gz
OLD_FILES+=usr/share/man/man9/cv_waitq_remove.9.gz
OLD_FILES+=usr/share/man/man9/endtsleep.9.gz
OLD_FILES+=usr/share/man/man9/jumbo.9.gz
OLD_FILES+=usr/share/man/man9/jumbo_freem.9.gz
OLD_FILES+=usr/share/man/man9/jumbo_pg_alloc.9.gz
OLD_FILES+=usr/share/man/man9/jumbo_pg_free.9.gz
OLD_FILES+=usr/share/man/man9/jumbo_pg_steal.9.gz
OLD_FILES+=usr/share/man/man9/jumbo_phys_to_kva.9.gz
OLD_FILES+=usr/share/man/man9/jumbo_vm_init.9.gz
OLD_FILES+=usr/share/man/man9/mac_biba.9.gz
OLD_FILES+=usr/share/man/man9/mac_bsdextended.9.gz
OLD_FILES+=usr/share/man/man9/mono_time.9.gz
OLD_FILES+=usr/share/man/man9/p1003_1b.9.gz
OLD_FILES+=usr/share/man/man9/pmap_prefault.9.gz
OLD_FILES+=usr/share/man/man9/posix4.9.gz
OLD_FILES+=usr/share/man/man9/resource_query_name.9.gz
OLD_FILES+=usr/share/man/man9/resource_query_string.9.gz
OLD_FILES+=usr/share/man/man9/resource_query_unit.9.gz
OLD_FILES+=usr/share/man/man9/rm_at_exit.9.gz
OLD_FILES+=usr/share/man/man9/rm_at_fork.9.gz
OLD_FILES+=usr/share/man/man9/runtime.9.gz
OLD_FILES+=usr/share/man/man9/sleepinit.9.gz
OLD_FILES+=usr/share/man/man9/unsleep.9.gz
OLD_FILES+=usr/share/man/ja/man1/perl.1.gz
OLD_FILES+=usr/share/games/atc/Game_List
OLD_FILES+=usr/share/games/atc/Killer
OLD_FILES+=usr/share/games/atc/crossover
OLD_FILES+=usr/share/games/atc/default
OLD_FILES+=usr/share/games/atc/easy
OLD_FILES+=usr/share/games/atc/game_2
OLD_FILES+=usr/share/games/larn/larnmaze
OLD_FILES+=usr/share/games/larn/larnopts
OLD_FILES+=usr/share/games/larn/larn.help
OLD_FILES+=usr/share/games/quiz.db/africa
OLD_FILES+=usr/share/games/quiz.db/america
OLD_FILES+=usr/share/games/quiz.db/areas
OLD_FILES+=usr/share/games/quiz.db/arith
OLD_FILES+=usr/share/games/quiz.db/asia
OLD_FILES+=usr/share/games/quiz.db/babies
OLD_FILES+=usr/share/games/quiz.db/bard
OLD_FILES+=usr/share/games/quiz.db/chinese
OLD_FILES+=usr/share/games/quiz.db/collectives
OLD_FILES+=usr/share/games/quiz.db/ed
OLD_FILES+=usr/share/games/quiz.db/elements
OLD_FILES+=usr/share/games/quiz.db/europe
OLD_FILES+=usr/share/games/quiz.db/flowers
OLD_FILES+=usr/share/games/quiz.db/greek
OLD_FILES+=usr/share/games/quiz.db/inca
OLD_FILES+=usr/share/games/quiz.db/index
OLD_FILES+=usr/share/games/quiz.db/latin
OLD_FILES+=usr/share/games/quiz.db/locomotive
OLD_FILES+=usr/share/games/quiz.db/midearth
OLD_FILES+=usr/share/games/quiz.db/morse
OLD_FILES+=usr/share/games/quiz.db/murders
OLD_FILES+=usr/share/games/quiz.db/poetry
OLD_FILES+=usr/share/games/quiz.db/posneg
OLD_FILES+=usr/share/games/quiz.db/pres
OLD_FILES+=usr/share/games/quiz.db/province
OLD_FILES+=usr/share/games/quiz.db/seq-easy
OLD_FILES+=usr/share/games/quiz.db/seq-hard
OLD_FILES+=usr/share/games/quiz.db/sexes
OLD_FILES+=usr/share/games/quiz.db/sov
OLD_FILES+=usr/share/games/quiz.db/spell
OLD_FILES+=usr/share/games/quiz.db/state
OLD_FILES+=usr/share/games/quiz.db/trek
OLD_FILES+=usr/share/games/quiz.db/ucc
OLD_FILES+=usr/share/games/cribbage.instr
OLD_FILES+=usr/share/games/fish.instr
OLD_FILES+=usr/share/games/wump.info
OLD_FILES+=usr/games/hide/adventure
OLD_FILES+=usr/games/hide/arithmetic
OLD_FILES+=usr/games/hide/atc
OLD_FILES+=usr/games/hide/backgammon
OLD_FILES+=usr/games/hide/teachgammon
OLD_FILES+=usr/games/hide/battlestar
OLD_FILES+=usr/games/hide/bs
OLD_FILES+=usr/games/hide/canfield
OLD_FILES+=usr/games/hide/cribbage
OLD_FILES+=usr/games/hide/fish
OLD_FILES+=usr/games/hide/hack
OLD_FILES+=usr/games/hide/hangman
OLD_FILES+=usr/games/hide/larn
OLD_FILES+=usr/games/hide/mille
OLD_FILES+=usr/games/hide/phantasia
OLD_FILES+=usr/games/hide/quiz
OLD_FILES+=usr/games/hide/robots
OLD_FILES+=usr/games/hide/rogue
OLD_FILES+=usr/games/hide/sail
OLD_FILES+=usr/games/hide/snake
OLD_FILES+=usr/games/hide/trek
OLD_FILES+=usr/games/hide/worm
OLD_FILES+=usr/games/hide/wump
OLD_FILES+=usr/games/adventure
OLD_FILES+=usr/games/arithmetic
OLD_FILES+=usr/games/atc
OLD_FILES+=usr/games/backgammon
OLD_FILES+=usr/games/teachgammon
OLD_FILES+=usr/games/battlestar
OLD_FILES+=usr/games/bs
OLD_FILES+=usr/games/canfield
OLD_FILES+=usr/games/cfscores
OLD_FILES+=usr/games/cribbage
OLD_FILES+=usr/games/dm
OLD_FILES+=usr/games/fish
OLD_FILES+=usr/games/hack
OLD_FILES+=usr/games/hangman
OLD_FILES+=usr/games/larn
OLD_FILES+=usr/games/mille
OLD_FILES+=usr/games/phantasia
OLD_FILES+=usr/games/piano
OLD_FILES+=usr/games/pig
OLD_FILES+=usr/games/quiz
OLD_FILES+=usr/games/rain
OLD_FILES+=usr/games/robots
OLD_FILES+=usr/games/rogue
OLD_FILES+=usr/games/sail
OLD_FILES+=usr/games/snake
OLD_FILES+=usr/games/snscore
OLD_FILES+=usr/games/trek
OLD_FILES+=usr/games/wargames
OLD_FILES+=usr/games/worm
OLD_FILES+=usr/games/worms
OLD_FILES+=usr/games/wump
OLD_FILES+=sbin/mount_reiserfs
OLD_FILES+=usr/include/cam/cam_extend.h
OLD_FILES+=usr/include/dev/wi/wi_hostap.h
OLD_FILES+=usr/include/disktab.h
OLD_FILES+=usr/include/g++/FlexLexer.h
OLD_FILES+=usr/include/g++/PlotFile.h
OLD_FILES+=usr/include/g++/SFile.h
OLD_FILES+=usr/include/g++/_G_config.h
OLD_FILES+=usr/include/g++/algo.h
OLD_FILES+=usr/include/g++/algobase.h
OLD_FILES+=usr/include/g++/algorithm
OLD_FILES+=usr/include/g++/alloc.h
OLD_FILES+=usr/include/g++/bitset
OLD_FILES+=usr/include/g++/builtinbuf.h
OLD_FILES+=usr/include/g++/bvector.h
OLD_FILES+=usr/include/g++/cassert
OLD_FILES+=usr/include/g++/cctype
OLD_FILES+=usr/include/g++/cerrno
OLD_FILES+=usr/include/g++/cfloat
OLD_FILES+=usr/include/g++/ciso646
OLD_FILES+=usr/include/g++/climits
OLD_FILES+=usr/include/g++/clocale
OLD_FILES+=usr/include/g++/cmath
OLD_FILES+=usr/include/g++/complex
OLD_FILES+=usr/include/g++/complex.h
OLD_FILES+=usr/include/g++/csetjmp
OLD_FILES+=usr/include/g++/csignal
OLD_FILES+=usr/include/g++/cstdarg
OLD_FILES+=usr/include/g++/cstddef
OLD_FILES+=usr/include/g++/cstdio
OLD_FILES+=usr/include/g++/cstdlib
OLD_FILES+=usr/include/g++/cstring
OLD_FILES+=usr/include/g++/ctime
OLD_FILES+=usr/include/g++/cwchar
OLD_FILES+=usr/include/g++/cwctype
OLD_FILES+=usr/include/g++/defalloc.h
OLD_FILES+=usr/include/g++/deque
OLD_FILES+=usr/include/g++/deque.h
OLD_FILES+=usr/include/g++/editbuf.h
OLD_FILES+=usr/include/g++/exception
OLD_FILES+=usr/include/g++/floatio.h
OLD_FILES+=usr/include/g++/fstream
OLD_FILES+=usr/include/g++/fstream.h
OLD_FILES+=usr/include/g++/function.h
OLD_FILES+=usr/include/g++/functional
OLD_FILES+=usr/include/g++/hash_map
OLD_FILES+=usr/include/g++/hash_map.h
OLD_FILES+=usr/include/g++/hash_set
OLD_FILES+=usr/include/g++/hash_set.h
OLD_FILES+=usr/include/g++/hashtable.h
OLD_FILES+=usr/include/g++/heap.h
OLD_FILES+=usr/include/g++/indstream.h
OLD_FILES+=usr/include/g++/iolibio.h
OLD_FILES+=usr/include/g++/iomanip
OLD_FILES+=usr/include/g++/iomanip.h
OLD_FILES+=usr/include/g++/iosfwd
OLD_FILES+=usr/include/g++/iostdio.h
OLD_FILES+=usr/include/g++/iostream
OLD_FILES+=usr/include/g++/iostream.h
OLD_FILES+=usr/include/g++/iostreamP.h
OLD_FILES+=usr/include/g++/istream.h
OLD_FILES+=usr/include/g++/iterator
OLD_FILES+=usr/include/g++/iterator.h
OLD_FILES+=usr/include/g++/libio.h
OLD_FILES+=usr/include/g++/libioP.h
OLD_FILES+=usr/include/g++/list
OLD_FILES+=usr/include/g++/list.h
OLD_FILES+=usr/include/g++/map
OLD_FILES+=usr/include/g++/map.h
OLD_FILES+=usr/include/g++/memory
OLD_FILES+=usr/include/g++/multimap.h
OLD_FILES+=usr/include/g++/multiset.h
OLD_FILES+=usr/include/g++/new
OLD_FILES+=usr/include/g++/new.h
OLD_FILES+=usr/include/g++/numeric
OLD_FILES+=usr/include/g++/ostream.h
OLD_FILES+=usr/include/g++/pair.h
OLD_FILES+=usr/include/g++/parsestream.h
OLD_FILES+=usr/include/g++/pfstream.h
OLD_FILES+=usr/include/g++/procbuf.h
OLD_FILES+=usr/include/g++/pthread_alloc
OLD_FILES+=usr/include/g++/pthread_alloc.h
OLD_FILES+=usr/include/g++/queue
OLD_FILES+=usr/include/g++/rope
OLD_FILES+=usr/include/g++/rope.h
OLD_FILES+=usr/include/g++/ropeimpl.h
OLD_FILES+=usr/include/g++/set
OLD_FILES+=usr/include/g++/set.h
OLD_FILES+=usr/include/g++/slist
OLD_FILES+=usr/include/g++/slist.h
OLD_FILES+=usr/include/g++/sstream
OLD_FILES+=usr/include/g++/stack
OLD_FILES+=usr/include/g++/stack.h
OLD_FILES+=usr/include/g++/std/bastring.cc
OLD_FILES+=usr/include/g++/std/bastring.h
OLD_FILES+=usr/include/g++/std/complext.cc
OLD_FILES+=usr/include/g++/std/complext.h
OLD_FILES+=usr/include/g++/std/dcomplex.h
OLD_FILES+=usr/include/g++/std/fcomplex.h
OLD_FILES+=usr/include/g++/std/gslice.h
OLD_FILES+=usr/include/g++/std/gslice_array.h
OLD_FILES+=usr/include/g++/std/indirect_array.h
OLD_FILES+=usr/include/g++/std/ldcomplex.h
OLD_FILES+=usr/include/g++/std/mask_array.h
OLD_FILES+=usr/include/g++/std/slice.h
OLD_FILES+=usr/include/g++/std/slice_array.h
OLD_FILES+=usr/include/g++/std/std_valarray.h
OLD_FILES+=usr/include/g++/std/straits.h
OLD_FILES+=usr/include/g++/std/valarray_array.h
OLD_FILES+=usr/include/g++/std/valarray_array.tcc
OLD_FILES+=usr/include/g++/std/valarray_meta.h
OLD_FILES+=usr/include/g++/stdexcept
OLD_FILES+=usr/include/g++/stdiostream.h
OLD_FILES+=usr/include/g++/stl.h
OLD_FILES+=usr/include/g++/stl_algo.h
OLD_FILES+=usr/include/g++/stl_algobase.h
OLD_FILES+=usr/include/g++/stl_alloc.h
OLD_FILES+=usr/include/g++/stl_bvector.h
OLD_FILES+=usr/include/g++/stl_config.h
OLD_FILES+=usr/include/g++/stl_construct.h
OLD_FILES+=usr/include/g++/stl_deque.h
OLD_FILES+=usr/include/g++/stl_function.h
OLD_FILES+=usr/include/g++/stl_hash_fun.h
OLD_FILES+=usr/include/g++/stl_hash_map.h
OLD_FILES+=usr/include/g++/stl_hash_set.h
OLD_FILES+=usr/include/g++/stl_hashtable.h
OLD_FILES+=usr/include/g++/stl_heap.h
OLD_FILES+=usr/include/g++/stl_iterator.h
OLD_FILES+=usr/include/g++/stl_list.h
OLD_FILES+=usr/include/g++/stl_map.h
OLD_FILES+=usr/include/g++/stl_multimap.h
OLD_FILES+=usr/include/g++/stl_multiset.h
OLD_FILES+=usr/include/g++/stl_numeric.h
OLD_FILES+=usr/include/g++/stl_pair.h
OLD_FILES+=usr/include/g++/stl_queue.h
OLD_FILES+=usr/include/g++/stl_raw_storage_iter.h
OLD_FILES+=usr/include/g++/stl_relops.h
OLD_FILES+=usr/include/g++/stl_rope.h
OLD_FILES+=usr/include/g++/stl_set.h
OLD_FILES+=usr/include/g++/stl_slist.h
OLD_FILES+=usr/include/g++/stl_stack.h
OLD_FILES+=usr/include/g++/stl_tempbuf.h
OLD_FILES+=usr/include/g++/stl_tree.h
OLD_FILES+=usr/include/g++/stl_uninitialized.h
OLD_FILES+=usr/include/g++/stl_vector.h
OLD_FILES+=usr/include/g++/stream.h
OLD_FILES+=usr/include/g++/streambuf.h
OLD_FILES+=usr/include/g++/strfile.h
OLD_FILES+=usr/include/g++/string
OLD_FILES+=usr/include/g++/strstream
OLD_FILES+=usr/include/g++/strstream.h
OLD_FILES+=usr/include/g++/tempbuf.h
OLD_FILES+=usr/include/g++/tree.h
OLD_FILES+=usr/include/g++/type_traits.h
OLD_FILES+=usr/include/g++/typeinfo
OLD_FILES+=usr/include/g++/utility
OLD_FILES+=usr/include/g++/valarray
OLD_FILES+=usr/include/g++/vector
OLD_FILES+=usr/include/g++/vector.h
OLD_FILES+=usr/include/gmp.h
OLD_FILES+=usr/include/isc/assertions.h
OLD_FILES+=usr/include/isc/ctl.h
OLD_FILES+=usr/include/isc/dst.h
OLD_FILES+=usr/include/isc/eventlib.h
OLD_FILES+=usr/include/isc/heap.h
OLD_FILES+=usr/include/isc/irpmarshall.h
OLD_FILES+=usr/include/isc/list.h
OLD_FILES+=usr/include/isc/logging.h
OLD_FILES+=usr/include/isc/memcluster.h
OLD_FILES+=usr/include/isc/misc.h
OLD_FILES+=usr/include/isc/tree.h
OLD_FILES+=usr/include/machine/ansi.h
OLD_FILES+=usr/include/machine/apic.h
OLD_FILES+=usr/include/machine/asc_ioctl.h
OLD_FILES+=usr/include/machine/asnames.h
OLD_FILES+=usr/include/machine/bus_at386.h
OLD_FILES+=usr/include/machine/bus_memio.h
OLD_FILES+=usr/include/machine/bus_pc98.h
OLD_FILES+=usr/include/machine/bus_pio.h
OLD_FILES+=usr/include/machine/cdk.h
OLD_FILES+=usr/include/machine/comstats.h
OLD_FILES+=usr/include/machine/console.h
OLD_FILES+=usr/include/machine/critical.h
OLD_FILES+=usr/include/machine/cronyx.h
OLD_FILES+=usr/include/machine/dvcfg.h
OLD_FILES+=usr/include/machine/globaldata.h
OLD_FILES+=usr/include/machine/globals.h
OLD_FILES+=usr/include/machine/gsc.h
OLD_FILES+=usr/include/machine/i4b_isppp.h
OLD_FILES+=usr/include/machine/if_wavelan_ieee.h
OLD_FILES+=usr/include/machine/iic.h
OLD_FILES+=usr/include/machine/ioctl_ctx.h
OLD_FILES+=usr/include/machine/ioctl_fd.h
OLD_FILES+=usr/include/machine/ipl.h
OLD_FILES+=usr/include/machine/lock.h
OLD_FILES+=usr/include/machine/mouse.h
OLD_FILES+=usr/include/machine/mpapic.h
OLD_FILES+=usr/include/machine/mtpr.h
OLD_FILES+=usr/include/machine/pc/msdos.h
OLD_FILES+=usr/include/machine/physio_proc.h
OLD_FILES+=usr/include/machine/smb.h
OLD_FILES+=usr/include/machine/spigot.h
OLD_FILES+=usr/include/machine/types.h
OLD_FILES+=usr/include/machine/uc_device.h
OLD_FILES+=usr/include/machine/ultrasound.h
OLD_FILES+=usr/include/machine/wtio.h
OLD_FILES+=usr/include/msdosfs/bootsect.h
OLD_FILES+=usr/include/msdosfs/bpb.h
OLD_FILES+=usr/include/msdosfs/denode.h
OLD_FILES+=usr/include/msdosfs/direntry.h
OLD_FILES+=usr/include/msdosfs/fat.h
OLD_FILES+=usr/include/msdosfs/msdosfsmount.h
OLD_FILES+=usr/include/net/hostcache.h
OLD_FILES+=usr/include/net/if_faith.h
OLD_FILES+=usr/include/net/if_ieee80211.h
OLD_FILES+=usr/include/net/if_tunvar.h
OLD_FILES+=usr/include/net/intrq.h
OLD_FILES+=usr/include/netatm/kern_include.h
OLD_FILES+=usr/include/netinet/if_fddi.h
OLD_FILES+=usr/include/netinet/in_hostcache.h
OLD_FILES+=usr/include/netinet/ip_flow.h
OLD_FILES+=usr/include/netinet/ip_fw2.h
OLD_FILES+=usr/include/netinet6/in6_prefix.h
OLD_FILES+=usr/include/netns/idp.h
OLD_FILES+=usr/include/netns/idp_var.h
OLD_FILES+=usr/include/netns/ns.h
OLD_FILES+=usr/include/netns/ns_error.h
OLD_FILES+=usr/include/netns/ns_if.h
OLD_FILES+=usr/include/netns/ns_pcb.h
OLD_FILES+=usr/include/netns/sp.h
OLD_FILES+=usr/include/netns/spidp.h
OLD_FILES+=usr/include/netns/spp_debug.h
OLD_FILES+=usr/include/netns/spp_timer.h
OLD_FILES+=usr/include/netns/spp_var.h
OLD_FILES+=usr/include/nfs/nfs.h
OLD_FILES+=usr/include/nfs/nfsm_subs.h
OLD_FILES+=usr/include/nfs/nfsmount.h
OLD_FILES+=usr/include/nfs/nfsnode.h
OLD_FILES+=usr/include/nfs/nfsrtt.h
OLD_FILES+=usr/include/nfs/nfsrvcache.h
OLD_FILES+=usr/include/nfs/nfsv2.h
OLD_FILES+=usr/include/nfs/nqnfs.h
OLD_FILES+=usr/include/ntfs/ntfs.h
OLD_FILES+=usr/include/ntfs/ntfs_compr.h
OLD_FILES+=usr/include/ntfs/ntfs_ihash.h
OLD_FILES+=usr/include/ntfs/ntfs_inode.h
OLD_FILES+=usr/include/ntfs/ntfs_subr.h
OLD_FILES+=usr/include/ntfs/ntfs_vfsops.h
OLD_FILES+=usr/include/ntfs/ntfsmount.h
OLD_FILES+=usr/include/nwfs/nwfs.h
OLD_FILES+=usr/include/nwfs/nwfs_mount.h
OLD_FILES+=usr/include/nwfs/nwfs_node.h
OLD_FILES+=usr/include/nwfs/nwfs_subr.h
OLD_FILES+=usr/include/posix4/_semaphore.h
OLD_FILES+=usr/include/posix4/aio.h
OLD_FILES+=usr/include/posix4/ksem.h
OLD_FILES+=usr/include/posix4/mqueue.h
OLD_FILES+=usr/include/posix4/posix4.h
OLD_FILES+=usr/include/posix4/sched.h
OLD_FILES+=usr/include/posix4/semaphore.h
OLD_DIRS+=usr/include/posix4
OLD_FILES+=usr/include/security/_pam_compat.h
OLD_FILES+=usr/include/security/_pam_macros.h
OLD_FILES+=usr/include/security/_pam_types.h
OLD_FILES+=usr/include/security/pam_malloc.h
OLD_FILES+=usr/include/security/pam_misc.h
OLD_FILES+=usr/include/skey.h
OLD_FILES+=usr/include/strhash.h
OLD_FILES+=usr/include/struct.h
OLD_FILES+=usr/include/sys/_label.h
OLD_FILES+=usr/include/sys/_posix.h
OLD_FILES+=usr/include/sys/bus_private.h
OLD_FILES+=usr/include/sys/ccdvar.h
OLD_FILES+=usr/include/sys/diskslice.h
OLD_FILES+=usr/include/sys/dmap.h
OLD_FILES+=usr/include/sys/inttypes.h
OLD_FILES+=usr/include/sys/jumbo.h
OLD_FILES+=usr/include/sys/mac_policy.h
OLD_FILES+=usr/include/sys/pbioio.h
OLD_FILES+=usr/include/sys/syscall-hide.h
OLD_FILES+=usr/include/sys/tprintf.h
OLD_FILES+=usr/include/sys/vnioctl.h
OLD_FILES+=usr/include/sys/wormio.h
OLD_FILES+=usr/include/telnet.h
OLD_FILES+=usr/include/ufs/mfs/mfs_extern.h
OLD_FILES+=usr/include/ufs/mfs/mfsnode.h
OLD_FILES+=usr/include/values.h
OLD_FILES+=usr/include/vm/vm_zone.h
OLD_FILES+=usr/share/examples/etc/usbd.conf
OLD_FILES+=usr/share/examples/meteor/README
OLD_FILES+=usr/share/examples/meteor/rgb16.c
OLD_FILES+=usr/share/examples/meteor/rgb24.c
OLD_FILES+=usr/share/examples/meteor/test-n.c
OLD_FILES+=usr/share/examples/meteor/yuvpk.c
OLD_FILES+=usr/share/examples/meteor/yuvpl.c
OLD_FILES+=usr/share/examples/worm/README
OLD_FILES+=usr/share/examples/worm/makecdfs.sh
OLD_FILES+=usr/share/groff_font/devlj4/Makefile
OLD_FILES+=usr/share/groff_font/devlj4/text.map
OLD_FILES+=usr/share/groff_font/devlj4/special.map
OLD_FILES+=usr/share/misc/nslookup.help
OLD_FILES+=usr/share/sendmail/cf/feature/nodns.m4
OLD_FILES+=usr/share/syscons/keymaps/lat-amer.kbd
OLD_FILES+=usr/share/vi/catalog/ru_SU.KOI8-R
OLD_FILES+=usr/share/zoneinfo/Africa/Timbuktu
OLD_FILES+=usr/share/zoneinfo/Africa/Asmera
OLD_FILES+=usr/share/zoneinfo/America/Buenos_Aires
OLD_FILES+=usr/share/zoneinfo/America/Cordoba
OLD_FILES+=usr/share/zoneinfo/America/Jujuy
OLD_FILES+=usr/share/zoneinfo/America/Catamarca
OLD_FILES+=usr/share/zoneinfo/America/Mendoza
OLD_FILES+=usr/share/zoneinfo/America/Indianapolis
OLD_FILES+=usr/share/zoneinfo/America/Louisville
OLD_FILES+=usr/share/zoneinfo/America/Argentina/ComodRivadavia
OLD_FILES+=usr/share/zoneinfo/Atlantic/Faeroe
OLD_FILES+=usr/share/zoneinfo/Europe/Belfast
OLD_FILES+=usr/share/zoneinfo/Pacific/Yap
OLD_FILES+=usr/share/zoneinfo/SystemV/YST9
OLD_FILES+=usr/share/zoneinfo/SystemV/PST8
OLD_FILES+=usr/share/zoneinfo/SystemV/EST5EDT
OLD_FILES+=usr/share/zoneinfo/SystemV/CST6CDT
OLD_FILES+=usr/share/zoneinfo/SystemV/MST7MDT
OLD_FILES+=usr/share/zoneinfo/SystemV/PST8PDT
OLD_FILES+=usr/share/zoneinfo/SystemV/YST9YDT
OLD_FILES+=usr/share/zoneinfo/SystemV/HST10
OLD_FILES+=usr/share/zoneinfo/SystemV/MST7
OLD_FILES+=usr/share/zoneinfo/SystemV/EST5
OLD_FILES+=usr/share/zoneinfo/SystemV/AST4ADT
OLD_FILES+=usr/share/zoneinfo/SystemV/CST6
OLD_FILES+=usr/share/zoneinfo/SystemV/AST4
OLD_FILES+=usr/share/doc/ntp/accopt.htm
OLD_FILES+=usr/share/doc/ntp/assoc.htm
OLD_FILES+=usr/share/doc/ntp/audio.htm
OLD_FILES+=usr/share/doc/ntp/authopt.htm
OLD_FILES+=usr/share/doc/ntp/biblio.htm
OLD_FILES+=usr/share/doc/ntp/build.htm
OLD_FILES+=usr/share/doc/ntp/clockopt.htm
OLD_FILES+=usr/share/doc/ntp/config.htm
OLD_FILES+=usr/share/doc/ntp/confopt.htm
OLD_FILES+=usr/share/doc/ntp/copyright.htm
OLD_FILES+=usr/share/doc/ntp/debug.htm
OLD_FILES+=usr/share/doc/ntp/driver1.htm
OLD_FILES+=usr/share/doc/ntp/driver10.htm
OLD_FILES+=usr/share/doc/ntp/driver11.htm
OLD_FILES+=usr/share/doc/ntp/driver12.htm
OLD_FILES+=usr/share/doc/ntp/driver16.htm
OLD_FILES+=usr/share/doc/ntp/driver18.htm
OLD_FILES+=usr/share/doc/ntp/driver19.htm
OLD_FILES+=usr/share/doc/ntp/driver2.htm
OLD_FILES+=usr/share/doc/ntp/driver20.htm
OLD_FILES+=usr/share/doc/ntp/driver22.htm
OLD_FILES+=usr/share/doc/ntp/driver23.htm
OLD_FILES+=usr/share/doc/ntp/driver24.htm
OLD_FILES+=usr/share/doc/ntp/driver26.htm
OLD_FILES+=usr/share/doc/ntp/driver27.htm
OLD_FILES+=usr/share/doc/ntp/driver28.htm
OLD_FILES+=usr/share/doc/ntp/driver29.htm
OLD_FILES+=usr/share/doc/ntp/driver3.htm
OLD_FILES+=usr/share/doc/ntp/driver30.htm
OLD_FILES+=usr/share/doc/ntp/driver32.htm
OLD_FILES+=usr/share/doc/ntp/driver33.htm
OLD_FILES+=usr/share/doc/ntp/driver34.htm
OLD_FILES+=usr/share/doc/ntp/driver35.htm
OLD_FILES+=usr/share/doc/ntp/driver36.htm
OLD_FILES+=usr/share/doc/ntp/driver37.htm
OLD_FILES+=usr/share/doc/ntp/driver4.htm
OLD_FILES+=usr/share/doc/ntp/driver5.htm
OLD_FILES+=usr/share/doc/ntp/driver6.htm
OLD_FILES+=usr/share/doc/ntp/driver7.htm
OLD_FILES+=usr/share/doc/ntp/driver8.htm
OLD_FILES+=usr/share/doc/ntp/driver9.htm
OLD_FILES+=usr/share/doc/ntp/exec.htm
OLD_FILES+=usr/share/doc/ntp/extern.htm
OLD_FILES+=usr/share/doc/ntp/gadget.htm
OLD_FILES+=usr/share/doc/ntp/hints.htm
OLD_FILES+=usr/share/doc/ntp/howto.htm
OLD_FILES+=usr/share/doc/ntp/htmlprimer.htm
OLD_FILES+=usr/share/doc/ntp/index.htm
OLD_FILES+=usr/share/doc/ntp/kern.htm
OLD_FILES+=usr/share/doc/ntp/kernpps.htm
OLD_FILES+=usr/share/doc/ntp/ldisc.htm
OLD_FILES+=usr/share/doc/ntp/measure.htm
OLD_FILES+=usr/share/doc/ntp/miscopt.htm
OLD_FILES+=usr/share/doc/ntp/monopt.htm
OLD_FILES+=usr/share/doc/ntp/mx4200data.htm
OLD_FILES+=usr/share/doc/ntp/notes.htm
OLD_FILES+=usr/share/doc/ntp/ntpd.htm
OLD_FILES+=usr/share/doc/ntp/ntpdate.htm
OLD_FILES+=usr/share/doc/ntp/ntpdc.htm
OLD_FILES+=usr/share/doc/ntp/ntpq.htm
OLD_FILES+=usr/share/doc/ntp/ntptime.htm
OLD_FILES+=usr/share/doc/ntp/ntptrace.htm
OLD_FILES+=usr/share/doc/ntp/parsedata.htm
OLD_FILES+=usr/share/doc/ntp/parsenew.htm
OLD_FILES+=usr/share/doc/ntp/patches.htm
OLD_FILES+=usr/share/doc/ntp/porting.htm
OLD_FILES+=usr/share/doc/ntp/pps.htm
OLD_FILES+=usr/share/doc/ntp/prefer.htm
OLD_FILES+=usr/share/doc/ntp/qth.htm
OLD_FILES+=usr/share/doc/ntp/quick.htm
OLD_FILES+=usr/share/doc/ntp/rdebug.htm
OLD_FILES+=usr/share/doc/ntp/refclock.htm
OLD_FILES+=usr/share/doc/ntp/release.htm
OLD_FILES+=usr/share/doc/ntp/tickadj.htm
OLD_FILES+=usr/share/doc/papers/nqnfs.ascii.gz
OLD_FILES+=usr/share/doc/papers/px.ascii.gz
OLD_FILES+=usr/share/man/man3/exp10.3.gz
OLD_FILES+=usr/share/man/man3/exp10f.3.gz
OLD_FILES+=usr/share/man/man3/fpsetsticky.3.gz
OLD_FILES+=usr/share/man/man3/gss_krb5_compat_des3_mic.3.gz
OLD_FILES+=usr/share/man/man3/gss_krb5_copy_ccache.3.gz
OLD_FILES+=usr/share/man/man3/mac_is_present_np.3.gz
OLD_FILES+=usr/share/man/man3/mbmb.3.gz
OLD_FILES+=usr/share/man/man3/setrunelocale.3.gz
OLD_FILES+=usr/share/man/man5/usbd.conf.5.gz
.if ${TARGET_ARCH} != "i386" && ${TARGET_ARCH} != "amd64"
OLD_FILES+=usr/share/man/man8/boot_i386.8.gz
.endif
.if ${TARGET_ARCH} != "aarch64" && ${TARGET} != "arm" && \
${TARGET_ARCH} != "powerpc" && ${TARGET_ARCH} != "powerpc64" && \
${TARGET_ARCH} != "sparc64" && ${TARGET} != "mips"
OLD_FILES+=usr/share/man/man8/ofwdump.8.gz
.endif
OLD_FILES+=usr/share/man/man8/mount_reiserfs.8.gz
OLD_FILES+=usr/share/man/man9/VFS_START.9.gz
OLD_FILES+=usr/share/man/man9/cpu_critical_exit.9.gz
OLD_FILES+=usr/share/man/man9/cpu_critical_enter.9.gz
OLD_FILES+=usr/share/info/annotate.info.gz
OLD_FILES+=usr/share/info/tar.info.gz
OLD_FILES+=usr/share/bsnmp/defs/tree.def
OLD_FILES+=usr/share/bsnmp/defs/mibII_tree.def
OLD_FILES+=usr/share/bsnmp/defs/netgraph_tree.def
OLD_FILES+=usr/share/bsnmp/mibs/FOKUS-MIB.txt
OLD_FILES+=usr/share/bsnmp/mibs/BEGEMOT-MIB.txt
OLD_FILES+=usr/share/bsnmp/mibs/BEGEMOT-SNMPD.txt
OLD_FILES+=usr/share/bsnmp/mibs/BEGEMOT-NETGRAPH.txt
OLD_FILES+=usr/libdata/msdosfs/iso22dos
OLD_FILES+=usr/libdata/msdosfs/iso72dos
OLD_FILES+=usr/libdata/msdosfs/koi2dos
OLD_FILES+=usr/libdata/msdosfs/koi8u2dos
# The following files are *not* obsolete, they just don't get touched at
# install, so don't add them:
# - boot/loader.rc
# - usr/share/tmac/man.local
# - usr/share/tmac/mm/locale
# - usr/share/tmac/mm/se_locale
# - var/yp/Makefile
# 20071120: shared library version bump
OLD_LIBS+=usr/lib/libasn1.so.8
OLD_LIBS+=usr/lib/libgssapi.so.8
OLD_LIBS+=usr/lib/libgssapi_krb5.so.8
OLD_LIBS+=usr/lib/libhdb.so.8
OLD_LIBS+=usr/lib/libkadm5clnt.so.8
OLD_LIBS+=usr/lib/libkadm5srv.so.8
OLD_LIBS+=usr/lib/libkafs5.so.8
OLD_LIBS+=usr/lib/libkrb5.so.8
OLD_LIBS+=usr/lib/libobjc.so.2
OLD_LIBS+=usr/lib32/libgssapi.so.8
OLD_LIBS+=usr/lib32/libobjc.so.2
# 20070519: GCC 4.2
OLD_LIBS+=usr/lib/libg2c.a
OLD_LIBS+=usr/lib/libg2c.so
OLD_LIBS+=usr/lib/libg2c.so.2
OLD_LIBS+=usr/lib/libg2c_p.a
OLD_LIBS+=usr/lib/libgcc_pic.a
OLD_LIBS+=usr/lib32/libg2c.a
OLD_LIBS+=usr/lib32/libg2c.so
OLD_LIBS+=usr/lib32/libg2c.so.2
OLD_LIBS+=usr/lib32/libg2c_p.a
OLD_LIBS+=usr/lib32/libgcc_pic.a
# 20060729: OpenSSL 0.9.7e -> 0.9.8b upgrade
OLD_LIBS+=lib/libcrypto.so.4
OLD_LIBS+=usr/lib/libssl.so.4
OLD_LIBS+=usr/lib32/libcrypto.so.4
OLD_LIBS+=usr/lib32/libssl.so.4
# 20060521: gethostbyaddr(3) ABI change
OLD_LIBS+=usr/lib/libroken.so.8
OLD_LIBS+=lib/libatm.so.3
OLD_LIBS+=lib/libc.so.6
OLD_LIBS+=lib/libutil.so.5
OLD_LIBS+=usr/lib32/libatm.so.3
OLD_LIBS+=usr/lib32/libc.so.6
OLD_LIBS+=usr/lib32/libutil.so.5
# 20060413: shared library moved to /usr/lib
OLD_LIBS+=lib/libgpib.so.1
# 20060413: libpcap.so.4 moved to /lib/
OLD_LIBS+=usr/lib/libpcap.so.4
# 20060412: libpthread.so.2 moved to /lib/
OLD_LIBS+=usr/lib/libpthread.so.2
# 20060127: revert libdisk to static-only
OLD_LIBS+=usr/lib/libdisk.so.3
# 20051027: libc_r discontinued (removed 20101113)
OLD_LIBS+=usr/lib/libc_r.a
OLD_LIBS+=usr/lib/libc_r.so
OLD_LIBS+=usr/lib/libc_r.so.7
OLD_LIBS+=usr/lib/libc_r_p.a
OLD_LIBS+=usr/lib32/libc_r.a
OLD_LIBS+=usr/lib32/libc_r.so
OLD_LIBS+=usr/lib32/libc_r.so.7
OLD_LIBS+=usr/lib32/libc_r_p.a
# 20050722: bump for 6.0-RELEASE
OLD_LIBS+=lib/libalias.so.4
OLD_LIBS+=lib/libatm.so.2
OLD_LIBS+=lib/libbegemot.so.1
OLD_LIBS+=lib/libbsdxml.so.1
OLD_LIBS+=lib/libbsnmp.so.2
OLD_LIBS+=lib/libc.so.5
OLD_LIBS+=lib/libcam.so.2
OLD_LIBS+=lib/libcrypt.so.2
OLD_LIBS+=lib/libcrypto.so.3
OLD_LIBS+=lib/libdevstat.so.4
OLD_LIBS+=lib/libedit.so.4
OLD_LIBS+=lib/libgeom.so.2
OLD_LIBS+=lib/libgpib.so.0
OLD_LIBS+=lib/libipsec.so.1
OLD_LIBS+=lib/libipx.so.2
OLD_LIBS+=lib/libkiconv.so.1
OLD_LIBS+=lib/libkvm.so.2
OLD_LIBS+=lib/libm.so.3
OLD_LIBS+=lib/libmd.so.2
OLD_LIBS+=lib/libncurses.so.5
OLD_LIBS+=lib/libreadline.so.5
OLD_LIBS+=lib/libsbuf.so.2
OLD_LIBS+=lib/libufs.so.2
OLD_LIBS+=lib/libutil.so.4
OLD_LIBS+=lib/libz.so.2
OLD_LIBS+=usr/lib/libarchive.so.1
OLD_LIBS+=usr/lib/libasn1.so.7
OLD_LIBS+=usr/lib/libbluetooth.so.1
OLD_LIBS+=usr/lib/libbz2.so.1
OLD_LIBS+=usr/lib/libc_r.so.5
OLD_LIBS+=usr/lib/libcalendar.so.2
OLD_LIBS+=usr/lib/libcom_err.so.2
OLD_LIBS+=usr/lib/libdevinfo.so.2
OLD_LIBS+=usr/lib/libdialog.so.4
OLD_LIBS+=usr/lib/libfetch.so.3
OLD_LIBS+=usr/lib/libform.so.2
OLD_LIBS+=usr/lib/libftpio.so.5
OLD_LIBS+=usr/lib/libg2c.so.1
OLD_LIBS+=usr/lib/libgnuregex.so.2
OLD_LIBS+=usr/lib/libgssapi.so.7
OLD_LIBS+=usr/lib/libhdb.so.7
OLD_LIBS+=usr/lib/libhistory.so.5
OLD_LIBS+=usr/lib/libkadm5clnt.so.7
OLD_LIBS+=usr/lib/libkadm5srv.so.7
OLD_LIBS+=usr/lib/libkafs5.so.7
OLD_LIBS+=usr/lib/libkrb5.so.7
OLD_LIBS+=usr/lib/libmagic.so.1
OLD_LIBS+=usr/lib/libmenu.so.2
OLD_LIBS+=usr/lib/libmilter.so.2
OLD_LIBS+=usr/lib/libmp.so.4
OLD_LIBS+=usr/lib/libncp.so.1
OLD_LIBS+=usr/lib/libnetgraph.so.1
OLD_LIBS+=usr/lib/libngatm.so.1
OLD_LIBS+=usr/lib/libobjc.so.1
OLD_LIBS+=usr/lib/libopie.so.3
OLD_LIBS+=usr/lib/libpam.so.2
OLD_LIBS+=usr/lib/libpanel.so.2
OLD_LIBS+=usr/lib/libpcap.so.3
OLD_LIBS+=usr/lib/libpmc.so.2
OLD_LIBS+=usr/lib/libpthread.so.1
OLD_LIBS+=usr/lib/libradius.so.1
OLD_LIBS+=usr/lib/libroken.so.7
OLD_LIBS+=usr/lib/librpcsvc.so.2
OLD_LIBS+=usr/lib/libsdp.so.1
OLD_LIBS+=usr/lib/libsmb.so.1
OLD_LIBS+=usr/lib/libssh.so.2
OLD_LIBS+=usr/lib/libssl.so.3
OLD_LIBS+=usr/lib/libstdc++.so.4
OLD_LIBS+=usr/lib/libtacplus.so.1
OLD_LIBS+=usr/lib/libthr.so.1
OLD_LIBS+=usr/lib/libthread_db.so.1
OLD_LIBS+=usr/lib/libugidfw.so.1
OLD_LIBS+=usr/lib/libusbhid.so.1
OLD_LIBS+=usr/lib/libvgl.so.3
OLD_LIBS+=usr/lib/libwrap.so.3
OLD_LIBS+=usr/lib/libypclnt.so.1
OLD_LIBS+=usr/lib/pam_chroot.so.2
OLD_LIBS+=usr/lib/pam_deny.so.2
OLD_LIBS+=usr/lib/pam_echo.so.2
OLD_LIBS+=usr/lib/pam_exec.so.2
OLD_LIBS+=usr/lib/pam_ftpusers.so.2
OLD_LIBS+=usr/lib/pam_group.so.2
OLD_LIBS+=usr/lib/pam_guest.so.2
OLD_LIBS+=usr/lib/pam_krb5.so.2
OLD_LIBS+=usr/lib/pam_ksu.so.2
OLD_LIBS+=usr/lib/pam_lastlog.so.2
OLD_LIBS+=usr/lib/pam_login_access.so.2
OLD_LIBS+=usr/lib/pam_nologin.so.2
OLD_LIBS+=usr/lib/pam_opie.so.2
OLD_LIBS+=usr/lib/pam_opieaccess.so.2
OLD_LIBS+=usr/lib/pam_passwdqc.so.2
OLD_LIBS+=usr/lib/pam_permit.so.2
OLD_LIBS+=usr/lib/pam_radius.so.2
OLD_LIBS+=usr/lib/pam_rhosts.so.2
OLD_LIBS+=usr/lib/pam_rootok.so.2
OLD_LIBS+=usr/lib/pam_securetty.so.2
OLD_LIBS+=usr/lib/pam_self.so.2
OLD_LIBS+=usr/lib/pam_ssh.so.2
OLD_LIBS+=usr/lib/pam_tacplus.so.2
OLD_LIBS+=usr/lib/pam_unix.so.2
OLD_LIBS+=usr/lib/snmp_atm.so.3
OLD_LIBS+=usr/lib/snmp_mibII.so.3
OLD_LIBS+=usr/lib/snmp_netgraph.so.3
OLD_LIBS+=usr/lib/snmp_pf.so.3
# 200505XX: ?
OLD_LIBS+=usr/lib/snmp_atm.so.2
OLD_LIBS+=usr/lib/snmp_mibII.so.2
OLD_LIBS+=usr/lib/snmp_netgraph.so.2
OLD_LIBS+=usr/lib/snmp_pf.so.2
# 2005XXXX: not ready for primetime yet
OLD_LIBS+=usr/lib/libautofs.so.1
# 200411XX: libxpg4 removal
OLD_LIBS+=usr/lib/libxpg4.so.3
# 200410XX: libm compatibility fix
OLD_LIBS+=lib/libm.so.2
# 20041001: version bump
OLD_LIBS+=lib/libreadline.so.4
OLD_LIBS+=usr/lib/libhistory.so.4
OLD_LIBS+=usr/lib/libopie.so.2
OLD_LIBS+=usr/lib/libpcap.so.2
# 20040925: bind9 import
OLD_LIBS+=usr/lib/libisc.so.1
# 200408XX
OLD_LIBS+=usr/lib/snmp_netgraph.so.1
# 200404XX
OLD_LIBS+=usr/lib/libsnmp.so.1
OLD_LIBS+=usr/lib/snmp_mibII.so.1
# 200309XX
OLD_LIBS+=usr/lib/libasn1.so.6
OLD_LIBS+=usr/lib/libhdb.so.6
OLD_LIBS+=usr/lib/libkadm5clnt.so.6
OLD_LIBS+=usr/lib/libkadm5srv.so.6
OLD_LIBS+=usr/lib/libkrb5.so.6
OLD_LIBS+=usr/lib/libroken.so.6
# 200304XX
OLD_LIBS+=usr/lib/libc.so.4
OLD_LIBS+=usr/lib/libc_r.so.4
OLD_LIBS+=usr/lib/libdevstat.so.2
OLD_LIBS+=usr/lib/libedit.so.3
OLD_LIBS+=usr/lib/libgmp.so.3
OLD_LIBS+=usr/lib/libmp.so.3
OLD_LIBS+=usr/lib/libpam.so.1
OLD_LIBS+=usr/lib/libposix1e.so.2
OLD_LIBS+=usr/lib/libskey.so.2
OLD_LIBS+=usr/lib/libusbhid.so.0
OLD_LIBS+=usr/lib/libvgl.so.2
# 20030218: OpenSSL 0.9.7 import
OLD_FILES+=usr/include/des.h
OLD_FILES+=usr/lib/libdes.a
OLD_FILES+=usr/lib/libdes.so
OLD_LIBS+=usr/lib/libdes.so.3
OLD_FILES+=usr/lib/libdes_p.a
# 200302XX
OLD_LIBS+=usr/lib/libacl.so.3
OLD_LIBS+=usr/lib/libasn1.so.5
OLD_LIBS+=usr/lib/libcrypto.so.2
OLD_LIBS+=usr/lib/libgssapi.so.5
OLD_LIBS+=usr/lib/libhdb.so.5
OLD_LIBS+=usr/lib/libkadm.so.3
OLD_LIBS+=usr/lib/libkadm5clnt.so.5
OLD_LIBS+=usr/lib/libkadm5srv.so.5
OLD_LIBS+=usr/lib/libkafs.so.3
OLD_LIBS+=usr/lib/libkafs5.so.5
OLD_LIBS+=usr/lib/libkdb.so.3
OLD_LIBS+=usr/lib/libkrb.so.3
OLD_LIBS+=usr/lib/libroken.so.
OLD_LIBS+=usr/lib/libssl.so.2
OLD_LIBS+=usr/lib/pam_kerberosIV.so
# 200208XX
OLD_LIBS+=usr/lib/libgssapi.so.4
# 200203XX
OLD_LIBS+=usr/lib/libss.so.3
OLD_LIBS+=usr/lib/libusb.so.0
# 200112XX
OLD_LIBS+=usr/lib/libfetch.so.2
# 200110XX
OLD_LIBS+=usr/lib/libgssapi.so.3
# 200104XX
OLD_LIBS+=usr/lib/libdescrypt.so.2
OLD_LIBS+=usr/lib/libscrypt.so.2
# 200102XX
OLD_LIBS+=usr/lib/libcrypto.so.1
OLD_LIBS+=usr/lib/libssl.so.1
# 200009XX
OLD_LIBS+=usr/lib/libRSAglue.so.1
OLD_LIBS+=usr/lib/librsaINTL.so.1
OLD_LIBS+=usr/lib/librsaUSA.so.1
# 200006XX
OLD_LIBS+=usr/lib/libalias.so.3
OLD_LIBS+=usr/lib/libfetch.so.1
OLD_LIBS+=usr/lib/libipsec.so.0
# 200005XX
OLD_LIBS+=usr/lib/libxpg4.so.2
# 200002XX
OLD_LIBS+=usr/lib/libc.so.3
OLD_LIBS+=usr/lib/libcurses.so.2
OLD_LIBS+=usr/lib/libdialog.so.3
OLD_LIBS+=usr/lib/libedit.so.2
OLD_LIBS+=usr/lib/libf2c.so.2
OLD_LIBS+=usr/lib/libftpio.so.4
OLD_LIBS+=usr/lib/libg++.so.4
OLD_LIBS+=usr/lib/libhistory.so.3
OLD_LIBS+=usr/lib/libmytinfo.so.2
OLD_LIBS+=usr/lib/libncurses.so.3
OLD_LIBS+=usr/lib/libreadline.so.3
OLD_LIBS+=usr/lib/libss.so.2
OLD_LIBS+=usr/lib/libtermcap.so.2
OLD_LIBS+=usr/lib/libutil.so.2
OLD_LIBS+=usr/lib/libvgl.so.1
OLD_LIBS+=usr/lib/libwrap.so.2
# 19991216
OLD_FILES+=usr/sbin/xntpdc
# 199909XX
OLD_LIBS+=usr/lib/libc_r.so.3
# ???
OLD_LIBS+=usr/lib/libarchive.so.2
OLD_LIBS+=usr/lib/libbsnmp.so.1
OLD_LIBS+=usr/lib/libc_r.so.6
OLD_LIBS+=usr/lib32/libarchive.so.2
OLD_LIBS+=usr/lib32/libc_r.so.6
OLD_LIBS+=usr/lib/libcipher.so.2
OLD_LIBS+=usr/lib/libgssapi.so.6
OLD_LIBS+=usr/lib/libkse.so.1
OLD_LIBS+=usr/lib/liblwres.so.3
OLD_LIBS+=usr/lib/pam_ftp.so.2
# 20131013: Removal of the ATF tools
OLD_DIRS+=etc/atf
OLD_DIRS+=usr/share/examples/atf
OLD_DIRS+=usr/share/xml/atf
OLD_DIRS+=usr/share/xml
OLD_DIRS+=usr/share/xsl/atf
OLD_DIRS+=usr/share/xsl
# 20040925: bind9 import
OLD_DIRS+=usr/share/doc/bind/html
OLD_DIRS+=usr/share/doc/bind/misc
OLD_DIRS+=usr/share/doc/bind/
# ???
OLD_DIRS+=usr/include/g++/std
OLD_DIRS+=usr/include/msdosfs
OLD_DIRS+=usr/include/ntfs
OLD_DIRS+=usr/include/nwfs
OLD_DIRS+=usr/include/ufs/mfs
# 20011001: UUCP migration to ports
OLD_DIRS+=usr/libexec/uucp
.include "tools/build/mk/OptionalObsoleteFiles.inc"
Index: stable/12/UPDATING
===================================================================
--- stable/12/UPDATING (revision 356464)
+++ stable/12/UPDATING (revision 356465)
@@ -1,1995 +1,1995 @@
Updating Information for FreeBSD stable/12 users.
This file is maintained and copyrighted by M. Warner Losh <imp@freebsd.org>.
See end of file for further details. For commonly done items, please see the
COMMON ITEMS: section later in the file. These instructions assume that you
basically know what you are doing. If not, then please consult the FreeBSD
handbook:
https://www.freebsd.org/doc/en_US.ISO8859-1/books/handbook/makeworld.html
Items affecting the ports and packages system can be found in
/usr/ports/UPDATING. Please read that file before running portupgrade.
NOTE: FreeBSD has switched from gcc to clang. If you have trouble bootstrapping
from older versions of FreeBSD, try WITHOUT_CLANG and WITH_GCC to bootstrap to
the tip of head, and then rebuild without this option. The bootstrap process
from older versions of current across the gcc/clang cutover is a bit fragile.
20200107:
Clang, llvm, lld, lldb, compiler-rt, libc++, libunwind and openmp have
- been upgraded to 9.0.0. Please see the 20141231 entry below for
+ been upgraded to 9.0.1. Please see the 20141231 entry below for
information about prerequisites and upgrading, if you are not already
using clang 3.5.0 or higher.
20191107:
The nctgpio and wbwd drivers have been moved to the superio bus.
If you have one of these drivers in a kernel configuration, then
you should add device superio to it. If you use one of these drivers
as a module and you compile a custom set of modules, then you should
add superio to the set.
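	For example, a custom kernel configuration using nctgpio would now
	contain (a sketch):
	device		superio
	device		nctgpio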
20191024:
The tap(4) driver has been folded into tun(4), and the module has been
renamed to tuntap. You should update any kld_list="if_tap" or
kld_list="if_tun" entries in /etc/rc.conf, if_tap_load="YES" or
if_tun_load="YES" entries in /boot/loader.conf to load the if_tuntap
module instead, and "device tap" or "device tun" entries in kernel
config files to select the tuntap device instead.
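	For example, an /etc/rc.conf entry such as
	kld_list="if_tap"
	should now read
	kld_list="if_tuntap"
	(a sketch; merge with any other modules already listed there).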
20190913:
ntpd no longer by default locks its pages in memory, allowing them
to be paged out by the kernel. Use rlimit memlock to restore
historic BSD behaviour. For example, add "rlimit memlock 32"
to ntp.conf to lock up to 32 MB of ntpd address space in memory.
20190914:
The vfs.fusefs.sync_unmount and vfs.fusefs.init_backgrounded sysctls
and the "-o sync_unmount" and "-o init_backgrounded" mount options have
been removed from mount_fusefs(8). You can safely remove them from
your scripts, because they had no effect.
The vfs.fusefs.fix_broken_io, vfs.fusefs.sync_resize,
vfs.fusefs.refresh_size, vfs.fusefs.mmap_enable,
vfs.fusefs.reclaim_revoked, and vfs.fusefs.data_cache_invalidate
sysctls have been removed. If you felt the need to set any of them to
a non-default value, please tell asomers@FreeBSD.org why.
20190906:
The fuse(4) module has been renamed to fusefs(4) for consistency with
other filesystems. You should update any kld_list="fuse" entries in
/etc/rc.conf, fuse_load="YES" entries in /boot/loader.conf, and
"options FUSE" entries in kernel config files.
20190811:
Default permissions on the /var/account/acct file (and copies of it
rotated by periodic daily scripts) are changed from 0644 to 0640
because the file contains sensitive information that should not be
world-readable. If the /var/account directory must be created by
rc.d/accounting, the mode used is now 0750. Admins who use the
accounting feature are encouraged to change the mode of an existing
/var/account directory to 0750 or 0700.
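	For example, to tighten an existing installation by hand (a sketch):
	chmod 0750 /var/account
	chmod 0640 /var/account/acct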
20190723:
Clang, llvm, lld, lldb, compiler-rt, libc++, libunwind and openmp have
been upgraded to 8.0.1. Please see the 20141231 entry below for
information about prerequisites and upgrading, if you are not already
using clang 3.5.0 or higher.
20190413:
Now that Clang 8 has been merged (see the 20190412 entry below), the ifunc
functionality needed for the RETPOLINE option should work properly
again. The RETPOLINE option has been removed from BROKEN_OPTIONS.
20190412:
Clang, llvm, lld, lldb, compiler-rt and libc++ have been upgraded to
8.0.0. Please see the 20141231 entry below for information about
prerequisites and upgrading, if you are not already using clang 3.5.0
or higher.
20190307:
The RETPOLINE option produces non-functional ifunc-using binaries
with Clang 7.0.1, and has been forced off (via BROKEN_OPTIONS).
Once Clang 8 is merged it may be enabled again.
20190216:
Clang, llvm, lld, lldb, compiler-rt and libc++ have been upgraded to
7.0.1. Please see the 20141231 entry below for information about
prerequisites and upgrading, if you are not already using clang 3.5.0
or higher.
20190226:
geom_uzip(4) depends on the new module xz. If geom_uzip is statically
compiled into your custom kernel, add a 'device xz' statement to the
kernel config.
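	For example, a custom kernel config containing
	options 	GEOM_UZIP
	would gain (a sketch):
	device		xz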
20190214:
Iflib is no longer unconditionally compiled into the kernel. Drivers
using iflib and statically compiled into the kernel now require
the 'device iflib' config option. For the same drivers loaded as
modules on kernels not having 'device iflib', the iflib.ko module
is loaded automatically.
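	For example, a custom kernel config that statically compiles an
	iflib-based driver such as em(4) would now read (illustrative):
	device		iflib
	device		em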
20181228:
r342561 modifies the NFSv4 server so that it obeys vfs.nfsd.nfs_privport
in the same way as it is applied to NFSv2 and 3. This implies that NFSv4
servers that have vfs.nfsd.nfs_privport set will only allow mounts
from clients using a reserved port#. Since both the FreeBSD and Linux
NFSv4 clients use reserved port#s by default, this should not affect
most NFSv4 mounts.
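	For example, to enable the reserved-port requirement (a sketch):
	sysctl vfs.nfsd.nfs_privport=1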
20181129:
On amd64, arm64 and armv7 (architectures that install LLVM's ld.lld
linker as /usr/bin/ld) GNU ld is no longer installed as ld.bfd, as
it produces broken binaries when ifuncs are in use. Users needing
GNU ld should install the binutils port or package.
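	For example:
	pkg install binutils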
20181115:
The set of CTM commands (ctm, ctm_smail, ctm_rmail, ctm_dequeue)
has been converted to a port (misc/ctm) and will be removed from
FreeBSD-13. A deprecation warning will be printed to stderr by
the ctm command, but the functionality will remain in base for all
FreeBSD-12 releases.
20181019:
The stable/12 branch has been created in subversion from head
revision r339432.
20181015:
Ports for the DRM modules have been simplified. Now, amd64 users should
just install the drm-kmod port. All others should install
drm-legacy-kmod.
Graphics hardware that's newer than about 2010 usually works with
drm-kmod. For hardware older than 2013, however, some users will need
to use drm-legacy-kmod if drm-kmod doesn't work for them. Hardware older
than 2008 usually only works in drm-legacy-kmod. The graphics team can
only commit to hardware made since 2013 due to the complexity of the
market and the difficulty of testing all the older cards effectively. If you
have hardware supported by drm-kmod, you are strongly encouraged to use
that as you will get better support.
Other than KPI chasing, drm-legacy-kmod will not be updated. As outlined
elsewhere, the drm and drm2 modules will be eliminated from the src base
soon (with a limited exception for arm). Please update to the package asap
and report any issues to x11@freebsd.org.
Generally, anybody using the drm*-kmod packages should add
WITHOUT_DRM_MODULE=t and WITHOUT_DRM2_MODULE=t to avoid nasty
cross-threading surprises, especially with automatic driver
loading from X11 startup. These will become the defaults in 13-current
shortly.
20181012:
The ixlv(4) driver has been renamed to iavf(4). As a consequence,
custom kernel and module loading configuration files must be updated
accordingly. Moreover, interfaces previously presented as ixlvN to the
system are now exposed as iavfN and network configuration files must
be adjusted as necessary.
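	For example, an /etc/rc.conf entry such as (illustrative):
	ifconfig_ixlv0="DHCP"
	becomes
	ifconfig_iavf0="DHCP"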
20181009:
OpenSSL has been updated to version 1.1.1. This update included
various additional API changes throughout the base system. It is
important to rebuild third-party software after upgrading. The value
of __FreeBSD_version has been bumped accordingly.
20181006:
The legacy DRM modules and drivers have now been added to the loader's
module blacklist, in favor of loading them with kld_list in rc.conf(5).
The module blacklist may be overridden with the loader.conf(5)
'module_blacklist' variable, but loading them via rc.conf(5) is strongly
encouraged.
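	For example, to load a legacy Intel driver via rc.conf(5) instead
	(module name illustrative):
	kld_list="i915kms"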
20181002:
The cam(4) based nda(4) driver will be used over nvd(4) by default on
powerpc64. You may set 'options NVME_USE_NVD=1' in your kernel conf or
loader tunable 'hw.nvme.use_nvd=1' if you wish to use the existing
driver. Make sure to edit /boot/etc/kboot.conf and fstab to use the
nda device name.
20180913:
Reproducible build mode is now on by default, in preparation for
FreeBSD 12.0. This eliminates build metadata such as the user,
host, and time from the kernel (and uname), unless the working tree
corresponds to a modified checkout from a version control system.
The previous behavior can be obtained by setting the /etc/src.conf
knob WITHOUT_REPRODUCIBLE_BUILD.
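	For example, in /etc/src.conf:
	WITHOUT_REPRODUCIBLE_BUILD=yes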
20180826:
The Yarrow CSPRNG has been removed from the kernel as it has not been
supported by its designers since at least 2003. Fortuna has been the
default since FreeBSD-11.
20180822:
devctl freeze/thaw have gone into the tree, the rc scripts have been
updated to use them and devmatch has been changed. You should update
kernel, userland and rc scripts all at the same time.
20180818:
The default interpreter has been switched from 4th to Lua.
LOADER_DEFAULT_INTERP, documented in build(7), will override the default
interpreter. If you have custom FORTH code you will need to set
LOADER_DEFAULT_INTERP=4th (valid values are 4th, lua or simp) in
src.conf for the build. This will create default hard links between
loader and loader_4th instead of loader and loader_lua, the new default.
If you are using UEFI it will create the proper hard link to loader.efi.
bhyve uses userboot.so. It remains 4th-only until some issues regarding
coexisting with multiple versions of FreeBSD are resolved.
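	For example, to keep the historic interpreter, set in /etc/src.conf
	(a sketch):
	LOADER_DEFAULT_INTERP=4th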
20180815:
ls(1) now respects the COLORTERM environment variable used in other
systems and software to indicate that a colored terminal is both
supported and desired. If ls(1) is suddenly emitting colors, they may
be disabled again by either removing the unwanted COLORTERM from your
environment, or using `ls --color=never`. The ls(1) specific CLICOLOR
may not be observed in a future release.
20180808:
The default pager for most commands has been changed to "less". To
restore the old behavior, set PAGER="more" and MANPAGER="more -s" in
your environment.
20180731:
The jedec_ts(4) driver has been removed. A superset of its functionality
is available in the jedec_dimm(4) driver, and the manpage for that
driver includes migration instructions. If you have "device jedec_ts"
in your kernel configuration file, it must be removed.
20180730:
amd64/GENERIC now has EFI runtime services, EFIRT, enabled by default.
This should have no effect if the kernel is booted via BIOS/legacy boot.
EFIRT may be disabled via a loader tunable, efi.rt.disabled, if a system
has a buggy firmware that prevents a successful boot due to use of
runtime services.
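	For example, in /boot/loader.conf (the value here is assumed):
	efi.rt.disabled="1"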
20180727:
Atmel AT91RM9200 and AT91SAM9, Cavium CNS 11xx and XScale
support has been removed from the tree. These ports were
obsolete and/or known to be broken for many years.
20180723:
loader.efi has been augmented to participate more fully in the
UEFI boot manager protocol. loader.efi will now look at the
BootXXXX environment variable to determine if a specific kernel
or root partition was specified. XXXX is derived from BootCurrent.
efibootmgr(8) manages these standard UEFI variables.
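	For example, to inspect BootCurrent and the BootXXXX variables:
	efibootmgr -v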
20180720:
zfsloader's functionality has now been folded into loader.
zfsloader is no longer necessary once you've updated your
boot blocks. For a transition period, we will install a
hardlink for zfsloader to loader to allow a smooth transition
until the boot blocks can be updated (hard link because old
zfs boot blocks don't understand symlinks).
20180719:
ARM64 now has efifb support. If you want a serial console
on your arm64 board when a screen is connected and the bootloader
sets up a framebuffer for us to use, just add:
boot_serial=YES
boot_multicons=YES
in /boot/loader.conf.
For Raspberry Pi 3 (RPI) users, this is needed even if you don't have
a screen connected, as the firmware will set up a framebuffer area that
u-boot will expose as an EFI framebuffer.
20180719:
New uid:gid added, ntpd:ntpd (123:123). Be sure to run mergemaster
or take steps to update /etc/passwd before doing installworld on
existing systems. Do not skip the "mergemaster -Fp" step before
installworld, as described in the update procedures near the bottom
of this document. Also, rc.d/ntpd now starts ntpd(8) as user ntpd
if the new mac_ntpd(4) policy is available, unless ntpd_flags or
the ntp config file contain options that change file/dir locations.
When such options (e.g., "statsdir" or "crypto") are used, ntpd can
still be run as non-root by setting ntpd_user=ntpd in rc.conf, after
taking steps to ensure that all required files/dirs are accessible
by the ntpd user.
20180717:
Big endian arm support has been removed.
20180711:
The static environment setup in kernel configs is no longer mutually
exclusive with the loader(8) environment by default. In order to
restore the previous default behavior of disabling the loader(8)
environment if a static environment is present, you must specify
loader_env.disabled=1 in the static environment.
20180705:
The ABI of syscalls used by management tools like sockstat and
netstat has been broken to allow 32-bit binaries to work on
64-bit kernels without modification. These programs will need
to match the kernel in order to function. External programs may
require minor modifications to accommodate a change of type in
structures from pointers to 64-bit virtual addresses.
20180702:
On i386 and amd64 atomics are now inlined. Out of tree modules using
atomics will need to be rebuilt.
20180701:
The '%I' format in the kern.corefile sysctl limits the number of
core files that a process can generate to the number stored in the
debug.ncores sysctl. The '%I' format is replaced by the single digit
index. Previously, if all indexes were taken, the kernel would overwrite
only the core file with the highest index in its filename.
Currently the system will create a new core file if there is a free
index or if all slots are taken it will overwrite the oldest one.
20180630:
Clang, llvm, lld, lldb, compiler-rt and libc++ have been upgraded to
6.0.1. Please see the 20141231 entry below for information about
prerequisites and upgrading, if you are not already using clang 3.5.0
or higher.
20180628:
r335753 introduced a new quoting method. However, etc/devd/devmatch.conf
needed to be changed to work with it. This change was made with r335763
and requires a mergemaster / etcupdate / etc to update the installed file.
20180612:
r334930 changed the interface between the NFS modules, so they all
need to be rebuilt. r335018 did a __FreeBSD_version bump for this.
20180530:
As of r334391 lld is the default amd64 system linker; it is installed
as /usr/bin/ld. Kernel build workarounds (see 20180510 entry) are no
longer necessary.
20180530:
The kernel / userland interface for devinfo changed, so you'll
need a new kernel and userland as a pair for it to work (rebuilding
lib/libdevinfo is all that's required). devinfo and devmatch will
not work, but everything else will when there's a mismatch.
20180523:
The on-disk format for hwpmc callchain records has changed to include
threadid corresponding to a given record. This changes the field offsets
and thus requires that libpmcstat be rebuilt before using a kernel
later than r334108.
20180517:
The vxge(4) driver has been removed. This driver was introduced into
HEAD one week before Exar left the Ethernet market and is not
known to be used. If you have device vxge in your kernel config file
it must be removed.
20180510:
The amd64 kernel now requires a ld that supports ifunc to produce a
working kernel, either lld or a newer binutils. lld is built by default
on amd64, and the 'buildkernel' target uses it automatically. However,
it is not the default linker, so building the kernel the traditional
way requires LD=ld.lld on the command line (or LD=/usr/local/bin/ld for
binutils port/package). lld will soon be default, and this requirement
will go away.
NOTE: As of r334391 lld is the default system linker on amd64, and no
workaround is necessary.
20180508:
The nxge(4) driver has been removed. This driver was for PCI-X 10g
cards made by s2io/Neterion. The company was acquired by Exar and
no longer sells or supports Ethernet products. If you have device
nxge in your kernel config file it must be removed.
20180504:
The tz database (tzdb) has been updated to 2018e. This version more
correctly models time stamps in time zones with negative DST such as
Europe/Dublin (from 1971 on), Europe/Prague (1946/7), and
Africa/Windhoek (1994/2017). This does not affect the UT offsets, only
time zone abbreviations and the tm_isdst flag.
20180502:
The ixgb(4) driver has been removed. This driver was for an early and
uncommon legacy PCI 10GbE for a single ASIC, Intel 82597EX. Intel
quickly shifted to the long lived ixgbe family. If you have device
ixgb in your kernel config file it must be removed.
20180501:
The lmc(4) driver has been removed. This was a WAN interface
card that was already reportedly rare in 2003, and had an ambiguous
license. If you have device lmc in your kernel config file it must
be removed.
20180413:
Support for Arcnet networks has been removed. If you have device
arcnet or device cm in your kernel config file they must be
removed.
20180411:
Support for FDDI networks has been removed. If you have device
fddi or device fpa in your kernel config file they must be
removed.
20180406:
In addition to supporting RFC 3164 formatted messages, the
syslogd(8) service is now capable of parsing RFC 5424 formatted
log messages. The main benefit of using RFC 5424 is that clients
may now send log messages with timestamps containing year numbers,
microseconds and time zone offsets.
Similarly, the syslog(3) C library function has been altered to
send RFC 5424 formatted messages to the local system logging
daemon. On systems using syslogd(8), this change should have no
negative impact, as long as syslogd(8) and the C library are
updated at the same time. On systems using a different system
logging daemon, it may be necessary to make configuration
adjustments, depending on the software used.
When using syslog-ng, add the 'syslog-protocol' flag to local
input sources to enable parsing of RFC 5424 formatted messages:
source src {
unix-dgram("/var/run/log" flags(syslog-protocol));
}
When using rsyslog, disable the 'SysSock.UseSpecialParser' option
of the 'imuxsock' module to let messages be processed by the
regular RFC 3164/5424 parsing pipeline:
module(load="imuxsock" SysSock.UseSpecialParser="off")
Do note that these changes only affect communication between local
applications and syslogd(8). The format that syslogd(8) uses to
store messages on disk or forward messages to other systems
remains unchanged. syslogd(8) still uses RFC 3164 for these
purposes. Options to customize this behaviour will be added in the
future. Utilities that process log files stored in /var/log are
thus expected to continue to function as before.
__FreeBSD_version has been incremented to 1200061 to denote this
change.
20180328:
Support for token ring networks has been removed. If you
have "device token" in your kernel config you should remove
it. No device drivers supported token ring.
20180323:
makefs was modified to be able to tag ISO9660 El Torito boot catalog
entries as EFI instead of overloading the i386 tag as done previously.
The amd64 mkisoimages.sh script used to build amd64 ISO images for
release was updated to use this. This may mean that makefs must be
updated before "make cdrom" can be run in the release directory. This
should be as simple as:
$ cd $SRCDIR/usr.sbin/makefs
$ make depend all install
20180212:
FreeBSD boot loader enhanced with Lua scripting. It's purely opt-in for
now by building WITH_LOADER_LUA and WITHOUT_FORTH in /etc/src.conf.
Co-existence for the transition period will come shortly. Booting is a
complex environment and test coverage for Lua-enabled loaders has been
thin, so it would be prudent to assume it might not work and make
provisions for backup boot methods.
20180211:
devmatch functionality has been turned on in devd. It will automatically
load drivers for unattached devices. This may cause unexpected drivers to
be loaded. Please report any problems to current@ and imp@freebsd.org.
20180114:
Clang, llvm, lld, lldb, compiler-rt and libc++ have been upgraded to
6.0.0. Please see the 20141231 entry below for information about
prerequisites and upgrading, if you are not already using clang 3.5.0
or higher.
20180110:
LLVM's lld linker is now used as the FreeBSD/amd64 bootstrap linker.
This means it is used to link the kernel and userland libraries and
executables, but is not yet installed as /usr/bin/ld by default.
To revert to ld.bfd as the bootstrap linker, in /etc/src.conf set
WITHOUT_LLD_BOOTSTRAP=yes
20180110:
On i386, pmtimer has been removed. Its functionality has been folded
into apm. It had been a no-op on ACPI in current for a while (but was still
needed on i386 in FreeBSD 11 and earlier). Users may need to remove it
from kernel config files.
20180104:
The use of RSS hash from the network card aka flowid has been
disabled by default for lagg(4) as it's currently incompatible with
the lacp and loadbalance protocols.
This can be re-enabled by setting the following in loader.conf:
net.link.lagg.default_use_flowid="1"
20180102:
The SW_WATCHDOG option is no longer necessary to enable the
hardclock-based software watchdog if no hardware watchdog is
configured. As before, SW_WATCHDOG will cause the software
watchdog to be enabled even if a hardware watchdog is configured.
20171215:
r326887 fixes the issue described in the 20171214 UPDATING entry.
r326888 flips the switch back to building GELI support always.
20171214:
r326593 broke ZFS + GELI support for reasons unknown. However,
it also broke ZFS support generally, so GELI has been turned off
by default as the lesser evil in r326857. If you boot off ZFS and/or
GELI, it might not be a good time to update.
20171125:
PowerPC users must update loader(8) by rebuilding world before
installing a new kernel, as the protocol connecting them has
changed. Without the update, loader metadata will not be passed
successfully to the kernel and users will have to enter their
root partition at the kernel mountroot prompt to continue booting.
Newer versions of loader can boot old kernels without issue.
20171110:
The LOADER_FIREWIRE_SUPPORT build variable has been renamed to
WITH/OUT_LOADER_FIREWIRE. LOADER_{NO_,}GELI_SUPPORT has been renamed
to WITH/OUT_LOADER_GELI.
20171106:
The naive and non-compliant support of posix_fallocate(2) in ZFS
has been removed as of r325320. The system call now returns EINVAL
when used on a ZFS file. Although the new behavior complies with the
standard, some consumers are not prepared to cope with it.
One known victim is lld prior to r325420.
20171102:
Building in a FreeBSD src checkout will automatically create object
directories now rather than store files in the current directory if
'make obj' was not run. Calling 'make obj' is no longer necessary.
This feature can be disabled by setting WITHOUT_AUTO_OBJ=yes in
/etc/src-env.conf (not /etc/src.conf), or passing the option in the
environment.
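	For example, in /etc/src-env.conf:
	WITHOUT_AUTO_OBJ=yes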
20171101:
The default MAKEOBJDIR has changed from /usr/obj/<srcdir> for native
builds, and /usr/obj/<arch>/<srcdir> for cross-builds, to a unified
/usr/obj/<srcdir>/<arch>. This behavior can be changed to the old
format by setting WITHOUT_UNIFIED_OBJDIR=yes in /etc/src-env.conf,
the environment, or with -DWITHOUT_UNIFIED_OBJDIR when building.
The UNIFIED_OBJDIR option is a transitional feature that will be
removed for 12.0 release; please migrate to the new format for any
tools by looking up the OBJDIR via 'make -V .OBJDIR' rather
than hardcoding paths.
20171028:
The native-xtools target no longer installs the files by default to the
OBJDIR. Use the native-xtools-install target with a DESTDIR to install
to ${DESTDIR}/${NXTP} where NXTP defaults to /nxb-bin.
20171021:
As part of the boot loader infrastructure cleanup, LOADER_*_SUPPORT
options are changing from controlling the build if defined / undefined
to controlling the build with explicit 'yes' or 'no' values. They will
shift to WITH/WITHOUT options to match other options in the system.
20171010:
libstand has turned into a private library for sys/boot use only.
It is no longer supported as a public interface outside of sys/boot.
20171005:
The arm port has split armv6 into armv6 and armv7. armv7 is now
a valid TARGET_ARCH/MACHINE_ARCH setting. If you have an armv7 system
and are running a kernel from before r324363, you will need to add
MACHINE_ARCH=armv7 to 'make buildworld' to do a native build.
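	For example:
	make MACHINE_ARCH=armv7 buildworld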
20171003:
When building multiple kernels using KERNCONF, non-existent KERNCONF
files will produce an error and buildkernel will fail. Previously
missing KERNCONF files silently failed giving no indication as to
why, only to subsequently discover during installkernel that the
desired kernel was never built in the first place.
20170912:
The default serial number format for CTL LUNs has changed. This will
affect users who use /dev/diskid/* device nodes, or whose FibreChannel
or iSCSI clients care about their LUNs' serial numbers. Users who
require serial number stability should hardcode serial numbers in
/etc/ctl.conf .
20170912:
For 32-bit arm compiled for hard-float support, soft-floating point
binaries now always get their shared libraries from
LD_SOFT_LIBRARY_PATH (in the past, this was only used if
/usr/libsoft also existed). Only users with a hard-float ld.so but an
otherwise soft-float userland should be affected.
20170826:
The geli password typed at boot is now hidden. To restore the previous
behavior, see geli(8) for configuration options.
20170825:
Move PMTUD blackhole counters to TCPSTATS and remove them from bare
sysctl values. Minor nit, but requires a rebuild of both world/kernel
to complete.
20170814:
"make check" behavior (made in ^/head@r295380) has been changed to
execute from a limited sandbox, as opposed to executing from
${TESTSDIR}.
Behavioral changes:
- The "beforecheck" and "aftercheck" targets are now specified.
- ${CHECKDIR} (added in commit noted above) has been removed.
- Legacy behavior can be enabled by setting
WITHOUT_MAKE_CHECK_USE_SANDBOX in src.conf(5) or the environment.
If the limited sandbox mode is enabled, "make check" will execute
"make distribution", then install, execute the tests, and clean up the
sandbox if successful.
The "make distribution" and "make install" targets are typically run as
root to set appropriate permissions and ownership at installation time.
The end-user should set "WITH_INSTALL_AS_USER" in src.conf(5) or the
environment if executing "make check" with limited sandbox mode using
an unprivileged user.
20170808:
Since the switch to GPT disk labels, fsck for UFS/FFS has been
unable to automatically find alternate superblocks. As of r322297,
the information needed to find alternate superblocks has been
moved to the end of the area reserved for the boot block.
Filesystems created with a newfs of this vintage or later
will create the recovery information. If you have a filesystem
created prior to this change and wish to have a recovery block
created for your filesystem, you can do so by running fsck in
foreground mode (i.e., do not use the -p or -y options). As it
starts, fsck will ask ``SAVE DATA TO FIND ALTERNATE SUPERBLOCKS''
to which you should answer yes.
20170728:
As of r321665, an NFSv4 server configuration that services
Kerberos mounts or clients that do not support the uid/gid in
owner/owner_group string capability, must explicitly enable
the nfsuserd daemon by adding nfsuserd_enable="YES" to the
machine's /etc/rc.conf file.
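One way to enable and start it (sysrc(8) edits /etc/rc.conf for you):
sysrc nfsuserd_enable=YES
service nfsuserd start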
20170722:
Clang, llvm, lldb, compiler-rt and libc++ have been upgraded to 5.0.0.
Please see the 20141231 entry below for information about prerequisites
and upgrading, if you are not already using clang 3.5.0 or higher.
20170701:
WITHOUT_RCMDS is now the default. Set WITH_RCMDS if you need the
r-commands (rlogin, rsh, etc.) to be built with the base system.
20170625:
The FreeBSD/powerpc platform now uses a 64-bit type for time_t. This is
a very major ABI incompatible change, so users of FreeBSD/powerpc must
be careful when performing source upgrades. It is best to run
'make installworld' from an alternate root system, either a live
CD/memory stick, or a temporary root partition. Additionally, all ports
must be recompiled. powerpc64 is largely unaffected, except in the case
of 32-bit compatibility. All 32-bit binaries will be affected.
20170623:
Forward compatibility for the "ino64" project has been committed. This
will allow most new binaries to run on older kernels in a limited
fashion. This prevents many of the common foot-shooting actions during
the upgrade, and provides a limited ability to roll back the kernel
across the ino64 upgrade. Complicated use cases may not work properly,
though enough simpler ones work to allow recovery in most situations.
20170620:
Switch back to the BSDL dtc (Device Tree Compiler). Set WITH_GPL_DTC
if you require the GPL compiler.
20170618:
The internal ABI used for communication between the NFS kernel modules
was changed by r320085, so __FreeBSD_version was bumped to
ensure all the NFS related modules are updated together.
20170617:
The ABI of struct kevent was changed by extending the data
member to 64 bits and adding ext fields. For upgrades, the same
precautions as for the 20170523 "ino64" entry must be
followed.
20170531:
The GNU roff toolchain has been removed from base. To render manpages
which are not supported by mandoc(1), man(1) can fall back on GNU roff
from ports (and will recommend installing it).
To render roff(7) documents, consider using GNU roff or the heirloom
doctools roff toolchain, available from ports via 'pkg install groff'
or 'pkg install heirloom-doctools' respectively.
20170524:
The ath(4) and ath_hal(4) modules now build piecemeal to allow for
smaller runtime footprint builds. This is useful for embedded systems
which require support for only one chipset.
If you load it as a module, make sure this is in /boot/loader.conf:
if_ath_load="YES"
This will load the HAL, all chip/RF backends and if_ath_pci.
If you have if_ath_pci in /boot/loader.conf, ensure it is after
if_ath or it will not load any HAL chipset support.
If you want to selectively load things (e.g. on ye cheape ARM/MIPS
platforms where RAM is at a premium) you should (see the sketch after
this list):
* load ath_hal
* load the chip modules in question
* load ath_rate, ath_dfs
* load ath_main
* load if_ath_pci and/or if_ath_ahb depending upon your particular
bus bind type - this is where probe/attach is done.
For further comments/feedback, poke adrian@ .
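A loader.conf sketch of such a selective setup; the chip module name
below (ath_ar9300) is hypothetical, substitute the backend(s) matching
your hardware:
ath_hal_load="YES"
ath_ar9300_load="YES"
ath_rate_load="YES"
ath_dfs_load="YES"
ath_main_load="YES"
if_ath_pci_load="YES"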
20170523:
The "ino64" 64-bit inode project has been committed, which extends
a number of types to 64 bits. Upgrading in place requires care and
adherence to the documented upgrade procedure.
If using a custom kernel configuration ensure that the
COMPAT_FREEBSD11 option is included (as during the upgrade the
system will be running the ino64 kernel with the existing world).
For the safest in-place upgrade begin by removing previous build
artifacts via "rm -rf /usr/obj/*". Then, carefully follow the
full procedure documented below under the heading "To rebuild
everything and install it on the current system." Specifically,
a reboot is required after installing the new kernel before
installing world.
20170424:
The NATM framework including the en(4), fatm(4), hatm(4), and
patm(4) devices has been removed. Consumers should plan a
migration before the end-of-life date for FreeBSD 11.
20170420:
GNU diff has been replaced by a BSD-licensed diff. Some features of GNU
diff have not been implemented; if those are needed, a newer version of
GNU diff is available via the diffutils package under the gdiff name.
20170413:
As of r316810 for ipfilter, keep frags is no longer assumed when
keep state is specified in a rule. r316810 aligns ipfilter with
documentation in man pages separating keep frags from keep state.
This allows keep state to be specified without forcing keep frags
and allows keep frags to be specified independently of keep state.
To maintain previous behaviour, also specify keep frags with
keep state (as documented in ipf.conf.5).
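For example, a rule that relied on the old implicit behaviour would
now spell out both options (interface and port are illustrative):
pass in quick on em0 proto tcp from any to any port = 22 keep state keep frags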
20170407:
arm64 builds now use the base system LLD 4.0.0 linker by default,
instead of requiring that the aarch64-binutils port or package be
installed. To continue using aarch64-binutils, set
CROSS_BINUTILS_PREFIX=/usr/local/aarch64-freebsd/bin .
20170405:
The UDP optimization in entry 20160818 that added the sysctl
net.inet.udp.require_l2_bcast has been reverted. L2 broadcast
packets will no longer be treated as L3 broadcast packets.
20170331:
Binds and sends to the loopback addresses, IPv6 and IPv4, will now
use any explicitly assigned loopback address available in the jail
instead of using the first assigned address of the jail.
20170329:
The ctl.ko module no longer implements the iSCSI target frontend:
cfiscsi.ko does instead.
If building cfiscsi.ko as a kernel module, the module can be loaded
via one of the following methods:
- `cfiscsi_load="YES"` in loader.conf(5).
- Add `cfiscsi` to `$kld_list` in rc.conf(5).
- ctladm(8)/ctld(8), when compiled with iSCSI support
(`WITH_ISCSI=yes` in src.conf(5))
Please see cfiscsi(4) for more details.
20170316:
The mmcsd.ko module now additionally depends on geom_flashmap.ko.
Also, mmc.ko and mmcsd.ko need to be a matching pair built from the
same source (previously, the dependency of mmcsd.ko on mmc.ko was
missing, but mmcsd.ko now will refuse to load if it is incompatible
with mmc.ko).
20170315:
The syntax of ipfw(8) named states was changed to avoid ambiguity.
If you have used named states in your firewall rules, you need to
modify them after installworld and before rebooting. Named states must
now be prefixed with a colon.
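A sketch of the new syntax (rule numbers and the state name are
illustrative):
ipfw add 100 check-state :EXAMPLE
ipfw add 200 allow tcp from me to any setup keep-state :EXAMPLE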
20170311:
The old drm (sys/dev/drm/) drivers for i915 and radeon have been
removed as the userland we provide cannot use them. The KMS version
(sys/dev/drm2) supports the same hardware.
20170302:
Clang, llvm, lldb, compiler-rt and libc++ have been upgraded to 4.0.0.
Please see the 20141231 entry below for information about prerequisites
and upgrading, if you are not already using clang 3.5.0 or higher.
20170221:
The code that provides support for ZFS .zfs/ directory functionality
has been reimplemented. It is no longer possible to create a snapshot
by mkdir under .zfs/snapshot/. That should be the only user-visible
change.
20170216:
EISA bus support has been removed. The WITH_EISA option is no longer
valid.
20170215:
MCA bus support has been removed.
20170127:
The WITH_LLD_AS_LD / WITHOUT_LLD_AS_LD build knobs have been renamed
WITH_LLD_IS_LD / WITHOUT_LLD_IS_LD, for consistency with CLANG_IS_CC.
20170112:
The EM_MULTIQUEUE kernel configuration option is deprecated now that
the em(4) driver conforms to iflib specifications.
20170109:
The igb(4), em(4) and lem(4) ethernet drivers are now implemented via
IFLIB. If you have a custom kernel configuration that excludes em(4)
but you use igb(4), you need to re-add em(4) to your custom configuration.
20161217:
Clang, llvm, lldb, compiler-rt and libc++ have been upgraded to 3.9.1.
Please see the 20141231 entry below for information about prerequisites
and upgrading, if you are not already using clang 3.5.0 or higher.
20161124:
Clang, llvm, lldb, compiler-rt and libc++ have been upgraded to 3.9.0.
Please see the 20141231 entry below for information about prerequisites
and upgrading, if you are not already using clang 3.5.0 or higher.
20161119:
The layout of the pmap structure has changed for powerpc to put the pmap
statistics at the front for all CPU variations. libkvm(3) and all tools
that link against it need to be recompiled.
20161030:
isl(4) and cyapa(4) drivers now require a new driver,
chromebook_platform(4), to work properly on Chromebook-class hardware.
On other types of hardware the drivers may need to be configured using
device hints. Please see the corresponding manual pages for details.
20161017:
The urtwn(4) driver was merged into rtwn(4) and now consists of
rtwn(4) main module + rtwn_usb(4) and rtwn_pci(4) bus-specific
parts.
Also, firmware for RTL8188CE was renamed due to possible name
conflict (rtwnrtl8192cU(B) -> rtwnrtl8192cE(B)).
20161015:
GNU rcs has been removed from base. It is available as packages:
- rcs: Latest GPLv3 GNU rcs version.
- rcs57: Copy of the latest version of GNU rcs (GPLv2) before it was
removed from base.
20161008:
Use of the cc_cdg, cc_chd, cc_hd, or cc_vegas congestion control
modules now requires that the kernel configuration contain the
TCP_HHOOK option. (This option is included in the GENERIC kernel.)
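A sketch for a custom kernel, using cc_chd as the example module: add
to the kernel config
options TCP_HHOOK
and then select the algorithm at runtime with
kldload cc_chd
sysctl net.inet.tcp.cc.algorithm=chd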
20161003:
The WITHOUT_ELFCOPY_AS_OBJCOPY src.conf(5) knob has been retired.
ELF Tool Chain's elfcopy is always installed as /usr/bin/objcopy.
20160924:
Relocatable object files with the extension of .So have been renamed
to use an extension of .pico instead. The purpose of this change is
to avoid a name clash with shared libraries on case-insensitive file
systems. On those file systems, foo.So is the same file as foo.so.
20160918:
GNU rcs has been turned off by default. It can (temporarily) be built
again by adding the WITH_RCS knob to src.conf.
Otherwise, GNU rcs is available from packages:
- rcs: Latest GPLv3 GNU rcs version.
- rcs57: Copy of the latest version of GNU rcs (GPLv2) from base.
20160918:
The backup_uses_rcs functionality has been removed from rc.subr.
20160908:
The queue(3) debugging macro, QUEUE_MACRO_DEBUG, has been split into
two separate components, QUEUE_MACRO_DEBUG_TRACE and
QUEUE_MACRO_DEBUG_TRASH. Define both for the original
QUEUE_MACRO_DEBUG behavior.
20160824:
r304787 changed some ioctl interfaces between the iSCSI userspace
programs and the kernel. ctladm, ctld, iscsictl, and iscsid must be
rebuilt to work with new kernels. __FreeBSD_version has been bumped
to 1200005.
20160818:
The UDP receive code has been updated to only treat incoming UDP
packets that were addressed to an L2 broadcast address as L3
broadcast packets. It is not expected that this will affect any
standards-conforming UDP application. The new behaviour can be
disabled by setting the sysctl net.inet.udp.require_l2_bcast to
0.
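To disable it persistently as well, the same sysctl can go into
/etc/sysctl.conf:
net.inet.udp.require_l2_bcast=0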
20160818:
Remove the openbsd_poll system call.
__FreeBSD_version has been bumped because of this.
20160708:
The stable/11 branch has been created from head@r302406.
20160622:
The libc stub for the pipe(2) system call has been replaced with
a wrapper that calls the pipe2(2) system call and the pipe(2)
system call is now only implemented by the kernels that include
"options COMPAT_FREEBSD10" in their config file (this is the
default). Users should ensure that this option is enabled in
their kernel or upgrade userspace to r302092 before upgrading their
kernel.
20160527:
CAM will now strip leading spaces from SCSI disks' serial numbers.
This will affect users who create UFS filesystems on SCSI disks using
those disks' diskid device nodes. For example, if /etc/fstab
previously contained a line like
"/dev/diskid/DISK-%20%20%20%20%20%20%20ABCDEFG0123456", you should
change it to "/dev/diskid/DISK-ABCDEFG0123456". Users of geom
transforms like gmirror may also be affected. ZFS users should
generally be fine.
20160523:
The bitstring(3) API has been updated with new functionality and
improved performance. But it is binary-incompatible with the old API.
Objects built with the new headers may not be linked against objects
built with the old headers.
20160520:
The brk and sbrk functions have been removed from libc on arm64.
Binutils from ports has been updated to not link to these
functions and should be updated to the latest version before
installing a new libc.
20160517:
The armv6 port now defaults to hard float ABI. Limited support
for running both hardfloat and soft float on the same system
is available using the libraries installed with -DWITH_LIBSOFT.
This has only been tested as an upgrade path for installworld
and packages may fail or need manual intervention to run. New
packages will be needed.
To update an existing self-hosted armv6hf system, you must add
TARGET_ARCH=armv6 on the make command line for both the build
and the install steps.
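Concretely:
make TARGET_ARCH=armv6 buildworld
make TARGET_ARCH=armv6 installworld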
20160510:
Kernel modules compiled outside of a kernel build now default to
installing to /boot/modules instead of /boot/kernel. Many kernel
modules built this way (such as those in ports) already overrode
KMODDIR explicitly to install into /boot/modules. However,
manually building and installing a module from /sys/modules will
now install to /boot/modules instead of /boot/kernel.
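To keep the old destination for a manual module build, KMODDIR can be
overridden explicitly (the module name is illustrative):
make -C /sys/modules/cpuctl install KMODDIR=/boot/kernel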
20160414:
The CAM I/O scheduler has been committed to the kernel. There should be
no user visible impact. This does enable NCQ Trim on ada SSDs. While
the list of known rogues that claim support for this but actually
corrupt data is believed to be complete, be on the lookout for data
corruption. The known rogue list:
o Crucial MX100, M550 drives with MU01 firmware.
o Micron M510 and M550 drives with MU01 firmware.
o Micron M500 prior to MU07 firmware.
o Samsung 830, 840, and 850, all firmwares.
o FCCT M500, all firmwares.
Crucial has firmware with working NCQ TRIM at
http://www.crucial.com/usa/en/support-ssd-firmware; for Micron branded
drives, see your sales rep for updated firmware. Blacklisted drives
will still work, since they behave correctly as long as no NCQ TRIMs
are sent to them. Given that this list matches the one found in Linux,
it is believed there are no other rogues in the marketplace. All other
models from the above vendors work.
To be safe, if you are at all concerned, you can quirk each of your
drives to prevent NCQ from being sent by setting:
kern.cam.ada.X.quirks="0x2"
in loader.conf. If the drive requires the 4k sector quirk, set the
quirks entry to 0x3.
20160330:
The FAST_DEPEND build option has been removed and its functionality is
now the one true way. The old mkdep(1) style of 'make depend' has
been removed. See 20160311 for further details.
20160317:
Resource range types have grown from unsigned long to uintmax_t. All
drivers, and anything using libdevinfo, need to be recompiled.
20160311:
WITH_FAST_DEPEND is now enabled by default for in-tree and out-of-tree
builds. It no longer runs mkdep(1) during 'make depend', and the
'make depend' stage can now safely be skipped, as it is run
automatically when building 'make all' and will generate all SRCS and
DPSRCS before building anything else. Dependencies are gathered at
compile time with
-MF flags kept in separate .depend files per object file. Users should
run 'make cleandepend' once if using -DNO_CLEAN to clean out older
stale .depend files.
20160306:
On amd64, clang 3.8.0 can now insert sections of type AMD64_UNWIND into
kernel modules. Therefore, if you load any kernel modules at boot time,
please install the boot loaders after you install the kernel, but before
rebooting, e.g.:
make buildworld
make buildkernel KERNCONF=YOUR_KERNEL_HERE
make installkernel KERNCONF=YOUR_KERNEL_HERE
make -C sys/boot install
<reboot in single user>
Then follow the usual steps, described in the General Notes section,
below.
20160305:
Clang, llvm, lldb and compiler-rt have been upgraded to 3.8.0. Please
see the 20141231 entry below for information about prerequisites and
upgrading, if you are not already using clang 3.5.0 or higher.
20160301:
The AIO subsystem is now a standard part of the kernel. The
VFS_AIO kernel option and aio.ko kernel module have been removed.
Due to stability concerns, asynchronous I/O requests are only
permitted on sockets and raw disks by default. To enable
asynchronous I/O requests on all file types, set the
vfs.aio.enable_unsafe sysctl to a non-zero value.
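For example, at runtime and persistently (the sysctl.conf line is
shown for illustration):
sysctl vfs.aio.enable_unsafe=1
echo 'vfs.aio.enable_unsafe=1' >> /etc/sysctl.conf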
20160226:
The ELF object manipulation tool objcopy is now provided by the
ELF Tool Chain project rather than by GNU binutils. It should be a
drop-in replacement, with the addition of arm64 support. The
(temporary) src.conf(5) knob WITHOUT_ELFCOPY_AS_OBJCOPY may be set
to obtain the GNU version if necessary.
20160129:
Building ZFS pools on top of zvols is prohibited by default. That
feature has never worked safely; it's always been prone to deadlocks.
Using a zvol as the backing store for a VM guest's virtual disk will
still work, even if the guest is using ZFS. Legacy behavior can be
restored by setting vfs.zfs.vol.recursive=1.
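A sketch of restoring the legacy behaviour, assuming the tunable is
set from /etc/sysctl.conf:
echo 'vfs.zfs.vol.recursive=1' >> /etc/sysctl.conf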
20160119:
The NONE and HPN patches have been removed from OpenSSH. They are
still available in the security/openssh-portable port.
20160113:
With the addition of ypldap(8), a new _ypldap user is now required
during installworld. "mergemaster -p" can be used to add the user
prior to installworld, as documented in the handbook.
20151216:
The tftp loader (pxeboot) now uses the option root-path directive. As a
consequence it no longer looks for a pxeboot.4th file on the tftp
server. Instead it uses the regular /boot infrastructure as with the
other loaders.
20151211:
The code to start recording plug and play data into the modules has
been committed. While the old tools will properly build a new kernel,
a number of warnings about "unknown metadata record 4" will be produced
for an older kldxref. To avoid such warnings, make sure to rebuild
the kernel toolchain (or world), and make sure that your tools are at
r292078 or later before trying to build r292077 or later.
20151207:
Debug data files are now built by default with 'make buildworld' and
installed with 'make installworld'. This facilitates debugging but
requires more disk space both during the build and for the installed
world. Debug files may be disabled by setting WITHOUT_DEBUG_FILES=yes
in src.conf(5).
20151130:
r291527 changed the internal interface between the nfsd.ko and
nfscommon.ko modules. As such, they must both be upgraded together.
__FreeBSD_version has been bumped because of this.
20151108:
Added support for Unicode collation strings changes the order of files
listed by ls(1), for example. To get back to the old behaviour, set
the LC_COLLATE environment variable to "C".
Database administrators will need to reindex their databases, since
collation results will be different.
Due to a bug in install(1) it is recommended to remove the ancient
locales before running make installworld.
rm -rf /usr/share/locale/*
20151030:
OpenSSL has been upgraded to 1.0.2d. Any binaries requiring
libcrypto.so.7 or libssl.so.7 must be recompiled.
20151020:
Qlogic 24xx/25xx firmware images were updated from 5.5.0 to 7.3.0.
Kernel modules isp_2400_multi and isp_2500_multi were removed and
should be replaced with isp_2400 and isp_2500 modules respectively.
20151017:
The build previously allowed using 'make -n' to not recurse into
sub-directories while showing what commands would be executed, and
'make -n -n' to recursively show commands. Now 'make -n' will recurse
and 'make -N' will not.
20151012:
If you specify SENDMAIL_MC or SENDMAIL_CF in make.conf, mergemaster
and etcupdate will now use this file. A custom sendmail.cf is now
updated via this mechanism rather than via installworld. If you had
excluded sendmail.cf in mergemaster.rc or etcupdate.conf, you may
want to remove the exclusion or change it to "always install".
/etc/mail/sendmail.cf is now managed the same way regardless of
whether SENDMAIL_MC/SENDMAIL_CF is used. If you are not using
SENDMAIL_MC/SENDMAIL_CF there should be no change in behavior.
20151011:
Compatibility shims for legacy ATA device names have been removed.
This includes the ATA_STATIC_ID kernel option, the
kern.cam.ada.legacy_aliases and kern.geom.raid.legacy_aliases loader
tunables, the kern.devalias.* environment variables, and the /dev/ad*
and /dev/ar* symbolic links.
20151006:
Clang, llvm, lldb, compiler-rt and libc++ have been upgraded to 3.7.0.
Please see the 20141231 entry below for information about prerequisites
and upgrading, if you are not already using clang 3.5.0 or higher.
20150924:
Kernel debug files have been moved to /usr/lib/debug/boot/kernel/,
and renamed from .symbols to .debug. This reduces the size requirements
on the boot partition or file system and provides consistency with
userland debug files.
When using the supported kernel installation method the
/usr/lib/debug/boot/kernel directory will be renamed (to kernel.old)
as is done with /boot/kernel.
Developers wishing to maintain the historical behavior of installing
debug files in /boot/kernel/ can set KERN_DEBUGDIR="" in src.conf(5).
20150827:
The wireless drivers have undergone changes that remove the 'parent
interface' from the ifconfig -l output. The rc.d network scripts
used to check for the presence of a parent interface in the list, so
old scripts would fail to start wireless networking. Thus, an
etcupdate(8) or mergemaster(8) run is required after a kernel update,
to update your rc.d scripts in /etc.
20150827:
pf no longer supports 'scrub fragment crop' or 'scrub fragment
drop-ovl'. These configurations are now automatically interpreted as
'scrub fragment reassemble'.
20150817:
Kernel-loadable modules for the random(4) device are back. To use
them, the kernel must have
device random
options RANDOM_LOADABLE
kldload(8) can then be used to load random_fortuna.ko
or random_yarrow.ko. Please note that due to the indirect
function calls that the loadable modules need to provide,
the built-in variants will be slightly more efficient.
The random(4) kernel option RANDOM_DUMMY has been retired due to
unpopularity. It was not all that useful anyway.
20150813:
The WITHOUT_ELFTOOLCHAIN_TOOLS src.conf(5) knob has been retired.
Control over building the ELF Tool Chain tools is now provided by
the WITHOUT_TOOLCHAIN knob.
20150810:
The polarity of Pulse Per Second (PPS) capture events with the
uart(4) driver has been corrected. Prior to this change the PPS
"assert" event corresponded to the trailing edge of a positive PPS
pulse and the "clear" event was the leading edge of the next pulse.
As the width of a PPS pulse in a typical GPS receiver is on the
order of 1 millisecond, most users will not notice any significant
difference with this change.
Anyone who has compensated for the historical polarity reversal by
configuring a negative offset equal to the pulse width will need to
remove that workaround.
20150809:
The default group assigned to /dev/dri entries has been changed
from 'wheel' to 'video' with the id of '44'. If you want to have
access to the dri devices please add yourself to the video group
with:
# pw groupmod video -m $USER
20150806:
The menu.rc and loader.rc files will now be replaced during
upgrades. Please migrate local changes to menu.rc.local and
loader.rc.local instead.
20150805:
GNU Binutils versions of addr2line, c++filt, nm, readelf, size,
strings and strip have been removed. The src.conf(5) knob
WITHOUT_ELFTOOLCHAIN_TOOLS no longer provides the binutils tools.
20150728:
As ZFS requires more kernel stack pages than is the default on some
architectures e.g. i386, it now warns if KSTACK_PAGES is less than
ZFS_MIN_KSTACK_PAGES (which is 4 at the time of writing).
Please consider using 'options KSTACK_PAGES=X' where X is greater
than or equal to ZFS_MIN_KSTACK_PAGES i.e. 4 in such configurations.
20150706:
sendmail has been updated to 8.15.2. Starting with FreeBSD 11.0
and sendmail 8.15, sendmail uses uncompressed IPv6 addresses by
default, i.e., they will not contain "::". For example, instead
of ::1, it will be 0:0:0:0:0:0:0:1. This permits a zero subnet
to have a more specific match, such as different map entries for
IPv6:0:0 vs IPv6:0. This change requires that configuration
data (including maps, files, classes, custom ruleset, etc.) must
use the same format, so make certain such configuration data is
updated accordingly. As a very simple check, search for patterns like
'IPv6:[0-9a-fA-F:]*::' and 'IPv6::'. To return to the old
behavior, set the m4 option confUSE_COMPRESSED_IPV6_ADDRESSES or
the cf option UseCompressedIPv6Addresses.
20150630:
The default kernel entropy-processing algorithm is now
Fortuna, replacing Yarrow.
Assuming you have 'device random' in your kernel config
file, the configurations allow a kernel option to override
this default. You may choose *ONE* of:
options RANDOM_YARROW # Legacy /dev/random algorithm.
options RANDOM_DUMMY # Blocking-only driver.
If you have neither, you get Fortuna. For most people,
read no further, Fortuna will give a /dev/random that works
like it always used to, and the difference will be irrelevant.
If you remove 'device random', you get *NO* kernel-processed
entropy at all. This may be acceptable to folks building
embedded systems, but has complications. Carry on reading,
and it is assumed you know what you need.
*PLEASE* read random(4) and random(9) if you are in the
habit of tweaking kernel configs, and/or if you are a member
of the embedded community, wanting specific and not-usual
behaviour from your security subsystems.
NOTE!! If you use RANDOM_DUMMY and/or have no 'device
random', you will NOT have a functioning /dev/random, and
many cryptographic features will not work, including SSH.
You may also find strange behaviour from the random(3) set
of library functions, in particular sranddev(3), srandomdev(3)
and arc4random(3). The reason for this is that the KERN_ARND
sysctl only returns entropy if it thinks it has some to
share, and with RANDOM_DUMMY or no 'device random' this
will never happen.
20150623:
An additional fix for the issue described in the 20150614 sendmail
entry below has been committed in revision 284717.
20150616:
FreeBSD's old make (fmake) has been removed from the system. It is
available as the devel/fmake port or via pkg install fmake.
20150615:
The fix for the issue described in the 20150614 sendmail entry
below has been committed in revision 284436. The work
around described in that entry is no longer needed unless the
default setting is overridden by a confDH_PARAMETERS configuration
setting of '5' or pointing to a 512 bit DH parameter file.
20150614:
ALLOW_DEPRECATED_ATF_TOOLS/ATFFILE support has been removed from
atf.test.mk (included from bsd.test.mk). Please upgrade devel/atf
and devel/kyua to version 0.20+ and adjust any calling code to work
with Kyuafile and kyua.
20150614:
The import of openssl to address the FreeBSD-SA-15:10.openssl
security advisory includes a change which rejects handshakes
with DH parameters below 768 bits. sendmail releases prior
to 8.15.2 (not yet released), defaulted to a 512 bit
DH parameter setting for client connections. To work around
this interoperability, sendmail can be configured to use a
2048 bit DH parameter by:
1. Edit /etc/mail/`hostname`.mc
2. If a setting for confDH_PARAMETERS does not exist or
exists and is set to a string beginning with '5',
replace it with '2'.
3. If a setting for confDH_PARAMETERS exists and is set to
a file path, create a new file with:
openssl dhparam -out /path/to/file 2048
4. Rebuild the .cf file:
cd /etc/mail/; make; make install
5. Restart sendmail:
cd /etc/mail/; make restart
A sendmail patch is coming, at which time this file will be
updated.
20150604:
Generation of legacy formatted entries has been disabled by default
in pwd_mkdb(8), as all base system consumers of the legacy formatted
entries were converted to use the new, machine-independent format,
which has been available and supported since FreeBSD 5.x.
Please see the pwd_mkdb(8) manual page for further details.
20150525:
Clang and llvm have been upgraded to 3.6.1 release. Please see the
20141231 entry below for information about prerequisites and upgrading,
if you are not already using 3.5.0 or higher.
20150521:
TI platform code switched to using vendor DTS files and this update
may break existing systems running on Beaglebone, Beaglebone Black,
and Pandaboard:
- dtb files should be regenerated/reinstalled. Filenames are the
same but content is different now
- GPIO addressing was changed, now each GPIO bank (32 pins per bank)
has its own /dev/gpiocX device, e.g. pin 121 on /dev/gpioc0 in old
addressing scheme is now pin 25 on /dev/gpioc3.
- Pandaboard: /etc/ttys should be updated, serial console device is
now /dev/ttyu2, not /dev/ttyu0
20150501:
soelim(1) from gnu/usr.bin/groff has been replaced by usr.bin/soelim.
If you need the GNU extension from groff soelim(1), install groff
from package: pkg install groff, or via ports: textproc/groff.
20150423:
chmod, chflags, chown and chgrp now affect symlinks in -R mode as
defined in symlink(7); previously symlinks were silently ignored.
20150415:
The const qualifier has been removed from iconv(3) to comply with
POSIX. The ports tree is aware of this from r384038 onwards.
20150416:
Libraries specified by LIBADD in Makefiles must have a corresponding
DPADD_<lib> variable to ensure correct dependencies. This is now
enforced in src.libnames.mk.
20150324:
Support for SATA controllers handled by the more functional ahci(4),
siis(4) and mvs(4) drivers has been removed from the legacy ata(4)
driver. The ataahci and ataadaptec kernel modules were removed
completely, replaced by the ahci and mvs modules respectively.
20150315:
Clang, llvm and lldb have been upgraded to 3.6.0 release. Please see
the 20141231 entry below for information about prerequisites and
upgrading, if you are not already using 3.5.0 or higher.
20150307:
The 32-bit PowerPC kernel has been changed to a position-independent
executable. This can only be booted with a version of loader(8)
newer than January 31, 2015, so make sure to update both world and
kernel before rebooting.
20150217:
If you are running a -CURRENT kernel since r273872 (Oct 30th, 2014),
but before r278950, the RNG was not seeded properly. Immediately
upgrade the kernel to r278950 or later and regenerate any keys (e.g.
ssh keys or openssl keys) that were generated with a kernel from that
range. This does not affect programs that directly used /dev/random
or /dev/urandom. All userland uses of arc4random(3) are affected.
20150210:
The autofs(4) ABI was changed in order to restore binary compatibility
with 10.1-RELEASE. The automountd(8) daemon needs to be rebuilt to work
with the new kernel.
20150131:
The powerpc64 kernel has been changed to a position-independent
executable. This can only be booted with a new version of loader(8),
so make sure to update both world and kernel before rebooting.
20150118:
Clang and llvm have been upgraded to 3.5.1 release. This is a bugfix
only release, no new features have been added. Please see the 20141231
entry below for information about prerequisites and upgrading, if you
are not already using 3.5.0.
20150107:
ELF tools addr2line, elfcopy (strip), nm, size, and strings are now
taken from the ELF Tool Chain project rather than GNU binutils. They
should be drop-in replacements, with the addition of arm64 support.
The WITHOUT_ELFTOOLCHAIN_TOOLS= knob may be used to obtain the
binutils tools, if necessary. See 20150805 for updated information.
20150105:
The default Unbound configuration now enables remote control
using a local socket. Users who have already enabled the
local_unbound service should regenerate their configuration
by running "service local_unbound setup" as root.
20150102:
The GNU texinfo and GNU info pages have been removed.
To be able to view GNU info pages please install texinfo from ports.
20141231:
Clang, llvm and lldb have been upgraded to 3.5.0 release.
As of this release, a prerequisite for building clang, llvm and lldb is
a C++11 capable compiler and C++11 standard library. This means that to
be able to successfully build the cross-tools stage of buildworld, with
clang as the bootstrap compiler, your system compiler or cross compiler
should either be clang 3.3 or later, or gcc 4.8 or later, and your
system C++ library should be libc++, or libstdc++ from gcc 4.8 or
later.
On any standard FreeBSD 10.x or 11.x installation, where clang and
libc++ are on by default (that is, on x86 or arm), this should work out
of the box.
On 9.x installations where clang is enabled by default, e.g. on x86 and
powerpc, libc++ will not be enabled by default, so libc++ should be
built (with clang) and installed first. If both clang and libc++ are
missing, build clang first, then use it to build libc++.
On 8.x and earlier installations, upgrade to 9.x first, and then follow
the instructions for 9.x above.
Sparc64 and mips users are unaffected, as they still use gcc 4.2.1 by
default, and do not build clang.
Many embedded systems are resource constrained, and will not be able to
build clang in a reasonable time, or in some cases at all. In those
cases, cross building bootable systems on amd64 is a workaround.
This new version of clang introduces a number of new warnings, of which
the following are most likely to appear:
-Wabsolute-value
This warns in two cases, for both C and C++:
* When the code is trying to take the absolute value of an unsigned
quantity, which is effectively a no-op, and almost never what was
intended. The code should be fixed, if at all possible. If you are
sure that the unsigned quantity can be safely cast to signed, without
loss of information or undefined behavior, you can add an explicit
cast, or disable the warning.
* When the code is trying to take an absolute value, but the called
abs() variant is for the wrong type, which can lead to truncation.
If you want to disable the warning instead of fixing the code, please
make sure that truncation will not occur, or it might lead to unwanted
side-effects.
-Wtautological-undefined-compare and
-Wundefined-bool-conversion
These warn when C++ code is trying to compare 'this' against NULL, while
'this' should never be NULL in well-defined C++ code. However, there is
some legacy (pre C++11) code out there, which actively abuses this
feature, which was less strictly defined in previous C++ versions.
Squid and openjdk do this, for example. The warning can be turned off
for C++98 and earlier, but compiling the code in C++11 mode might result
in unexpected behavior; for example, the parts of the program that are
unreachable could be optimized away.
20141222:
The old NFS client and server (kernel options NFSCLIENT, NFSSERVER)
kernel sources have been removed. The .h files remain, since some
utilities include them. This will need to be fixed later.
If "mount -t oldnfs ..." is attempted, it will fail.
If the "-o" option on mountd(8), nfsd(8) or nfsstat(1) is used,
the utilities will report errors.
20141121:
The handling of LOCAL_LIB_DIRS has been altered to skip addition of
directories to top level SUBDIR variable when their parent
directory is included in LOCAL_DIRS. Users with build systems with
such hierarchies and without SUBDIR entries in the parent
directory Makefiles should add them or add the directories to
LOCAL_DIRS.
20141109:
faith(4) and faithd(8) have been removed from the base system. Faith
has been obsolete for a very long time.
20141104:
vt(4), the new console driver, is enabled by default. It brings
support for Unicode and double-width characters, as well as
support for UEFI and integration with the KMS kernel video
drivers.
You may need to update your console settings in /etc/rc.conf,
most probably the keymap. During boot, /etc/rc.d/syscons will
indicate what you need to do.
vt(4) still has issues and lacks some features compared to
syscons(4). See the wiki for up-to-date information:
https://wiki.freebsd.org/Newcons
If you want to keep using syscons(4), you can do so by adding
the following line to /boot/loader.conf:
kern.vty=sc
20141102:
pjdfstest has been integrated into kyua as an opt-in test suite.
Please see share/doc/pjdfstest/README for more details on how to
execute it.
20141009:
gperf has been removed from the base system for architectures
that use clang. Ports that require gperf will obtain it from the
devel/gperf port.
20140923:
pjdfstest has been moved from tools/regression/pjdfstest to
contrib/pjdfstest .
20140922:
At svn r271982, the default linux compat kernel ABI has been adjusted
to 2.6.18 in support of the linux-c6 compat ports infrastructure
update. If you wish to continue using the linux-f10 compat ports,
add compat.linux.osrelease=2.6.16 to your local sysctl.conf. Users are
encouraged to update their linux-compat packages to linux-c6 during
their next update cycle.
20140729:
The ofwfb driver, used to provide a graphics console on PowerPC when
using vt(4), no longer allows mmap() of all physical memory. This
will prevent Xorg on PowerPC with some ATI graphics cards from
initializing properly unless x11-servers/xorg-server is updated to
1.12.4_8 or newer.
20140723:
The xdev targets have been converted to using TARGET and
TARGET_ARCH instead of XDEV and XDEV_ARCH.
20140719:
The default unbound configuration has been modified to address
issues with reverse lookups on networks that use private
address ranges. If you use the local_unbound service, run
"service local_unbound setup" as root to regenerate your
configuration, then "service local_unbound reload" to load the
new configuration.
20140709:
The GNU texinfo and GNU info pages are no longer built and installed
by default. The WITH_INFO knob has been added to allow building and
installing them again.
UPDATE: see 20150102 entry on texinfo's removal
20140708:
The GNU readline library is now an INTERNALLIB - that is, it is
statically linked into consumers (GDB and variants) in the base
system, and the shared library is no longer installed. The
devel/readline port is available for third party software that
requires readline.
20140702:
The Itanium architecture (ia64) has been removed from the list of
known architectures. This is the first step in the removal of the
architecture.
20140701:
Commit r268115 has added NFSv4.1 server support, merged from
projects/nfsv4.1-server. Since this includes changes to the
internal interfaces between the NFS related modules, a full
build of the kernel and modules will be necessary.
__FreeBSD_version has been bumped.
20140629:
The WITHOUT_VT_SUPPORT kernel config knob has been renamed
WITHOUT_VT. (The other _SUPPORT knobs have a consistent meaning
which differs from the behaviour controlled by this knob.)
20140619:
The maximum length of the serial number in CTL was increased from 16
to 64 chars, which breaks the ABI. All CTL-related tools, such as
ctladm and ctld, need to be rebuilt to work with a new kernel.
20140606:
The libatf-c and libatf-c++ major versions were downgraded to 0 and
1 respectively to match the upstream numbers. They were out of
sync because, when they were originally added to FreeBSD, the
upstream versions were not respected. These libraries are private
and not yet built by default, so renumbering them should be a
non-issue. However, unclean source trees will yield broken test
programs once the operator executes "make delete-old-libs" after a
"make installworld".
Additionally, the atf-sh binary was made private by moving it into
/usr/libexec/. Already-built shell test programs will keep the
path to the old binary so they will break after "make delete-old"
is run.
If you are using WITH_TESTS=yes (not the default), wipe the object
tree and rebuild from scratch to prevent spurious test failures.
This is only needed once: the misnumbered libraries and misplaced
binaries have been added to OptionalObsoleteFiles.inc so they will
be removed during a clean upgrade.
20140512:
Clang and llvm have been upgraded to 3.4.1 release.
20140508:
We bogusly installed src.opts.mk in /usr/share/mk. This file should
be removed to avoid issues in the future (and has been added to
ObsoleteFiles.inc).
20140505:
/etc/src.conf now affects only builds of the FreeBSD src tree. In the
past, it affected all builds that used the bsd.*.mk files. The old
behavior was a bug, but people may have relied upon it. To get this
behavior back, you can .include /etc/src.conf from /etc/make.conf
(which is still global and isn't changed). This also changes the
behavior of incremental builds inside the tree of individual
directories; to keep those working, set MAKESYSPATH to ".../share/mk".
Although this has survived make universe and some upgrade scenarios,
other upgrade scenarios may have broken. At least one form of
temporary breakage was fixed with MAKESYSPATH settings for buildworld
as well... In cases where MAKESYSPATH isn't working with this
setting, you'll need to set it to the full path to your tree.
One side effect of all this cleaning up is that bsd.compiler.mk
is no longer implicitly included by bsd.own.mk. If you wish to
use COMPILER_TYPE, you must now explicitly include bsd.compiler.mk
as well.
20140430:
The lindev device has been removed since /dev/full has been made a
standard device. __FreeBSD_version has been bumped.
20140424:
The knob WITHOUT_VI was added to the base system, which controls
building ex(1), vi(1), etc. Older releases of FreeBSD required ex(1)
in order to reorder files share/termcap and didn't build ex(1) as a
build tool, so building/installing with WITH_VI is highly advised for
build hosts for older releases.
This issue has been fixed in stable/9 and stable/10 in r277022 and
r276991, respectively.
20140418:
The YES_HESIOD knob has been removed. It has been obsolete for
a decade. Please move to using WITH_HESIOD instead or your builds
will silently lack HESIOD.
20140405:
The uart(4) driver has been changed with respect to its handling
of the low-level console. Previously the uart(4) driver prevented
any process from changing the baudrate or the CLOCAL and HUPCL
control flags. By removing the restrictions, operators can make
changes to the serial console port without having to reboot.
However, when getty(8) is started on the serial device that is
associated with the low-level console, a misconfigured terminal
line in /etc/ttys will now have a real impact.
Before upgrading the kernel, make sure that /etc/ttys has the
serial console device configured as 3wire without baudrate to
preserve the previous behaviour. E.g:
ttyu0 "/usr/libexec/getty 3wire" vt100 on secure
20140306:
Support for libwrap (TCP wrappers) in rpcbind was disabled by default
to improve performance. To re-enable it, if needed, run rpcbind
with command line option -W.
20140226:
Switched back to the GPL dtc compiler due to updates in the upstream
dts files not being supported by the BSDL dtc compiler. You will need
to rebuild your kernel toolchain to pick up the new compiler. Core dumps
may result while building dtb files during a kernel build if you fail
to do so. Set WITHOUT_GPL_DTC if you require the BSDL compiler.
20140216:
Clang and llvm have been upgraded to 3.4 release.
20140216:
The nve(4) driver has been removed. Please use the nfe(4) driver
for NVIDIA nForce MCP Ethernet adapters instead.
20140212:
An ABI incompatibility crept into the libc++ 3.4 import in r261283.
This could cause certain C++ applications using shared libraries built
against the previous version of libc++ to crash. The incompatibility
has now been fixed, but any C++ applications or shared libraries built
between r261283 and r261801 should be recompiled.
20140204:
OpenSSH will now ignore errors caused by the kernel lacking Capsicum
capability mode support. Please note that enabling the feature in
the kernel is still highly recommended.
20140131:
OpenSSH is now built with sandbox support, and will use sandbox as
the default privilege separation method. This requires Capsicum
capability mode support in kernel.
20140128:
The libelf and libdwarf libraries have been updated to newer
versions from upstream. Shared library version numbers for
these two libraries were bumped. Any ports or binaries
requiring these two libraries should be recompiled.
__FreeBSD_version is bumped to 1100006.
20140110:
If a Makefile in a tests/ directory was auto-generating a Kyuafile
instead of providing an explicit one, this would prevent such
Makefile from providing its own Kyuafile in the future during
NO_CLEAN builds. This has been fixed in the Makefiles but manual
intervention is needed to clean an objdir if you use NO_CLEAN:
# find /usr/obj -name Kyuafile | xargs rm -f
20131213:
The behavior of gss_pseudo_random() for the krb5 mechanism
has changed, for applications requesting a longer random string
than produced by the underlying enctype's pseudo-random() function.
In particular, the random string produced from a session key of
enctype aes128-cts-hmac-sha1-96 or aes256-cts-hmac-sha1-96 will
be different at the 17th octet and later, after this change.
The counter used in the PRF+ construction is now encoded as a
big-endian integer in accordance with RFC 4402.
__FreeBSD_version is bumped to 1100004.
20131108:
The WITHOUT_ATF build knob has been removed and its functionality
has been subsumed into the more generic WITHOUT_TESTS. If you were
using the former to disable the build of the ATF libraries, you
should change your settings to use the latter.
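That is, a src.conf(5) line such as:
WITHOUT_ATF=yes
should now read:
WITHOUT_TESTS=yes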
20131025:
The default version of mtree is nmtree which is obtained from
NetBSD. The output is generally the same, but may vary
slightly. If you find you need identical output, adding
"-F freebsd9" to the command line should do the trick. For the
time being, the old mtree is available as fmtree.
20131014:
libbsdyml has been renamed to libyaml and moved to /usr/lib/private.
This will break ports-mgmt/pkg. Rebuild the port, or upgrade to pkg
1.1.4_8 and verify bsdyml not linked in, before running "make
delete-old-libs":
# make -C /usr/ports/ports-mgmt/pkg build deinstall install clean
or
# pkg install pkg; ldd /usr/local/sbin/pkg | grep bsdyml
20131010:
The stable/10 branch has been created in subversion from head
revision r256279.
COMMON ITEMS:
General Notes
-------------
Avoid using make -j when upgrading. While generally safe, there are
sometimes problems using -j to upgrade. If your upgrade fails with
-j, please try again without -j. From time to time in the past there
have been problems using -j with buildworld and/or installworld. This
is especially true when upgrading between "distant" versions (e.g. ones
that cross a major release boundary or several minor releases, or when
several months have passed on the -current branch).
Sometimes, obscure build problems are the result of environment
poisoning. This can happen because the make utility reads its
environment when searching for values for global variables. To run
your build attempts in an "environmental clean room", prefix all make
commands with 'env -i '. See the env(1) manual page for more details.
When upgrading from one major version to another it is generally best to
upgrade to the latest code in the currently installed branch first, then
do an upgrade to the new branch. This is the best-tested upgrade path,
and has the highest probability of being successful. Please try this
approach if you encounter problems with a major version upgrade. Since
the stable 4.x branch point, one has generally been able to upgrade from
anywhere in the most recent stable branch to head / current (or even the
last couple of stable branches). See the top of this file when there's
an exception.
When upgrading a live system, having a root shell around before
installing anything can help undo problems. Not having a root shell
around can lead to problems if pam has changed too much from your
starting point to allow continued authentication after the upgrade.
This file should be read as a log of events. When a later event changes
information of a prior event, the prior event should not be deleted.
Instead, a pointer to the entry with the new information should be
placed in the old entry. Readers of this file should also sanity check
older entries before relying on them blindly. Authors of new entries
should write them with this in mind.
ZFS notes
---------
When upgrading the boot ZFS pool to a new version, always follow
these two steps:
1.) recompile and reinstall the ZFS boot loader and boot block
(this is part of "make buildworld" and "make installworld")
2.) update the ZFS boot block on your boot drive
The following example updates the ZFS boot block on the first
partition (freebsd-boot) of a GPT partitioned drive ada0:
"gpart bootcode -p /boot/gptzfsboot -i 1 ada0"
Non-boot pools do not need these updates.
To build a kernel
-----------------
If you are updating from a prior version of FreeBSD (even one just
a few days old), you should follow this procedure. It is the most
failsafe as it uses a /usr/obj tree with a fresh mini-buildworld:
make kernel-toolchain
make -DALWAYS_CHECK_MAKE buildkernel KERNCONF=YOUR_KERNEL_HERE
make -DALWAYS_CHECK_MAKE installkernel KERNCONF=YOUR_KERNEL_HERE
To test a kernel once
---------------------
If you just want to boot a kernel once (because you are not sure
if it works, or if you want to boot a known bad kernel to provide
debugging information) run
make installkernel KERNCONF=YOUR_KERNEL_HERE KODIR=/boot/testkernel
nextboot -k testkernel
To rebuild everything and install it on the current system.
-----------------------------------------------------------
# Note: sometimes if you are running current you gotta do more than
# is listed here if you are upgrading from a really old current.
<make sure you have good level 0 dumps>
make buildworld
make buildkernel KERNCONF=YOUR_KERNEL_HERE
make installkernel KERNCONF=YOUR_KERNEL_HERE
[1]
<reboot in single user> [3]
mergemaster -Fp [5]
make installworld
mergemaster -Fi [4]
make delete-old [6]
<reboot>
To cross-install current onto a separate partition
--------------------------------------------------
# In this approach we use a separate partition to hold
# current's root, 'usr', and 'var' directories. A partition
# holding "/", "/usr" and "/var" should be about 2GB in
# size.
<make sure you have good level 0 dumps>
<boot into -stable>
make buildworld
make buildkernel KERNCONF=YOUR_KERNEL_HERE
<maybe newfs current's root partition>
<mount current's root partition on directory ${CURRENT_ROOT}>
make installworld DESTDIR=${CURRENT_ROOT} -DDB_FROM_SRC
make distribution DESTDIR=${CURRENT_ROOT} # if newfs'd
make installkernel KERNCONF=YOUR_KERNEL_HERE DESTDIR=${CURRENT_ROOT}
cp /etc/fstab ${CURRENT_ROOT}/etc/fstab # if newfs'd
<edit ${CURRENT_ROOT}/etc/fstab to mount "/" from the correct partition>
<reboot into current>
<do a "native" rebuild/install as described in the previous section>
<maybe install compatibility libraries from ports/misc/compat*>
<reboot>
To upgrade in-place from stable to current
----------------------------------------------
<make sure you have good level 0 dumps>
make buildworld [9]
make buildkernel KERNCONF=YOUR_KERNEL_HERE [8]
make installkernel KERNCONF=YOUR_KERNEL_HERE
[1]
<reboot in single user> [3]
mergemaster -Fp [5]
make installworld
mergemaster -Fi [4]
make delete-old [6]
<reboot>
Make sure that you've read the UPDATING file to understand the
tweaks to various things you need. At this point in the life
cycle of current, things change often and you are on your own
to cope. The defaults can also change, so please read ALL of
the UPDATING entries.
Also, if you are tracking -current, you must be subscribed to
freebsd-current@freebsd.org. Make sure that before you update
your sources that you have read and understood all the recent
messages there. If in doubt, please track -stable which has
far fewer pitfalls.
[1] If you have third party modules, such as vmware, you
should disable them at this point so they don't crash your
system on reboot.
[3] From the bootblocks, boot -s, and then do
fsck -p
mount -u /
mount -a
cd src
adjkerntz -i # if CMOS is wall time
Also, when doing a major release upgrade, it is required that
you boot into single user mode to do the installworld.
[4] Note: This step is non-optional. Failure to do this step
can result in a significant reduction in the functionality of the
system. Attempting to do it by hand is not recommended and those
that pursue this avenue should read this file carefully, as well
as the archives of freebsd-current and freebsd-hackers mailing lists
for potential gotchas. The -U option is also useful to consider.
See mergemaster(8) for more information.
[5] Usually this step is a no-op. However, from time to time
you may need to do this if you get unknown user in the following
step. It never hurts to do it all the time. You may need to
install a new mergemaster (cd src/usr.sbin/mergemaster && make
install) after the buildworld before this step if you last updated
from current before 20130425 or from -stable before 20130430.
[6] This only deletes old files and directories. Old libraries
can be deleted by "make delete-old-libs", but you have to make
sure that no program is using those libraries anymore.
[8] The new kernel must be able to run existing binaries used by
an installworld. When upgrading across major versions, the new
kernel's configuration must include the correct COMPAT_FREEBSD<n>
option for existing binaries (e.g. COMPAT_FREEBSD11 to run 11.x
binaries). Failure to do so may leave you with a system that is
hard to boot to recover. A GENERIC kernel will include suitable
compatibility options to run binaries from older branches.
Make sure that you merge any new devices from GENERIC since the
last time you updated your kernel config file.
[9] If CPUTYPE is defined in your /etc/make.conf, make sure to use the
"?=" instead of the "=" assignment operator, so that buildworld can
override the CPUTYPE if it needs to.
MAKEOBJDIRPREFIX must be defined in an environment variable, and
not on the command line, or in /etc/make.conf. buildworld will
warn if it is improperly defined.
FORMAT:
This file contains a list, in reverse chronological order, of major
breakages in tracking -current. It is not guaranteed to be a complete
list of such breakages, and only contains entries since September 23, 2011.
If you need to see UPDATING entries from before that date, you will need
to fetch an UPDATING file from an older FreeBSD release.
Copyright information:
Copyright 1998-2009 M. Warner Losh. All Rights Reserved.
Redistribution, publication, translation and use, with or without
modification, in full or in part, in any form or format of this
document are permitted without further permission from the author.
THIS DOCUMENT IS PROVIDED BY WARNER LOSH ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL WARNER LOSH BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
Contact Warner Losh if you have any questions about your use of
this document.
$FreeBSD$
Index: stable/12/contrib/llvm-project/clang/include/clang/CodeGen/CGFunctionInfo.h
===================================================================
--- stable/12/contrib/llvm-project/clang/include/clang/CodeGen/CGFunctionInfo.h (revision 356464)
+++ stable/12/contrib/llvm-project/clang/include/clang/CodeGen/CGFunctionInfo.h (revision 356465)
@@ -1,716 +1,714 @@
//==-- CGFunctionInfo.h - Representation of function argument/return types -==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Defines CGFunctionInfo and associated types used in representing the
// LLVM source types and ABI-coerced types for function arguments and
// return values.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_CODEGEN_CGFUNCTIONINFO_H
#define LLVM_CLANG_CODEGEN_CGFUNCTIONINFO_H
#include "clang/AST/Attr.h"
#include "clang/AST/CanonicalType.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/Type.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/Support/TrailingObjects.h"
#include <cassert>
namespace clang {
namespace CodeGen {
/// ABIArgInfo - Helper class to encapsulate information about how a
/// specific C type should be passed to or returned from a function.
class ABIArgInfo {
public:
enum Kind : uint8_t {
/// Direct - Pass the argument directly using the normal converted LLVM
/// type, or by coercing to another specified type (stored in
/// 'CoerceToType'). If an offset is specified (in UIntData), then the
/// argument passed is offset by some number of bytes in the memory
/// representation. A dummy argument is emitted before the real argument
/// if the specified type stored in "PaddingType" is not null.
Direct,
/// Extend - Valid only for integer argument types. Same as 'direct'
/// but also emit a zero/sign extension attribute.
Extend,
/// Indirect - Pass the argument indirectly via a hidden pointer
/// with the specified alignment (0 indicates default alignment).
Indirect,
/// Ignore - Ignore the argument (treat as void). Useful for void and
/// empty structs.
Ignore,
/// Expand - Only valid for aggregate argument types. The structure should
/// be expanded into consecutive arguments for its constituent fields.
/// Currently expand is only allowed on structures whose fields
/// are all scalar types or are themselves expandable types.
Expand,
/// CoerceAndExpand - Only valid for aggregate argument types. The
/// structure should be expanded into consecutive arguments corresponding
/// to the non-array elements of the type stored in CoerceToType.
/// Array elements in the type are assumed to be padding and skipped.
CoerceAndExpand,
/// InAlloca - Pass the argument directly using the LLVM inalloca attribute.
/// This is similar to indirect with byval, except it only applies to
/// arguments stored in memory and forbids any implicit copies. When
/// applied to a return type, it means the value is returned indirectly via
/// an implicit sret parameter stored in the argument struct.
InAlloca,
KindFirst = Direct,
KindLast = InAlloca
};
private:
llvm::Type *TypeData; // canHaveCoerceToType()
union {
llvm::Type *PaddingType; // canHavePaddingType()
llvm::Type *UnpaddedCoerceAndExpandType; // isCoerceAndExpand()
};
union {
unsigned DirectOffset; // isDirect() || isExtend()
unsigned IndirectAlign; // isIndirect()
unsigned AllocaFieldIndex; // isInAlloca()
};
Kind TheKind;
bool PaddingInReg : 1;
bool InAllocaSRet : 1; // isInAlloca()
bool IndirectByVal : 1; // isIndirect()
bool IndirectRealign : 1; // isIndirect()
bool SRetAfterThis : 1; // isIndirect()
bool InReg : 1; // isDirect() || isExtend() || isIndirect()
bool CanBeFlattened: 1; // isDirect()
bool SignExt : 1; // isExtend()
bool canHavePaddingType() const {
return isDirect() || isExtend() || isIndirect() || isExpand();
}
void setPaddingType(llvm::Type *T) {
assert(canHavePaddingType());
PaddingType = T;
}
void setUnpaddedCoerceToType(llvm::Type *T) {
assert(isCoerceAndExpand());
UnpaddedCoerceAndExpandType = T;
}
- ABIArgInfo(Kind K)
- : TheKind(K), PaddingInReg(false), InReg(false) {
- }
-
public:
- ABIArgInfo()
+ ABIArgInfo(Kind K = Direct)
: TypeData(nullptr), PaddingType(nullptr), DirectOffset(0),
- TheKind(Direct), PaddingInReg(false), InReg(false) {}
+ TheKind(K), PaddingInReg(false), InAllocaSRet(false),
+ IndirectByVal(false), IndirectRealign(false), SRetAfterThis(false),
+ InReg(false), CanBeFlattened(false), SignExt(false) {}
static ABIArgInfo getDirect(llvm::Type *T = nullptr, unsigned Offset = 0,
llvm::Type *Padding = nullptr,
bool CanBeFlattened = true) {
auto AI = ABIArgInfo(Direct);
AI.setCoerceToType(T);
AI.setPaddingType(Padding);
AI.setDirectOffset(Offset);
AI.setCanBeFlattened(CanBeFlattened);
return AI;
}
static ABIArgInfo getDirectInReg(llvm::Type *T = nullptr) {
auto AI = getDirect(T);
AI.setInReg(true);
return AI;
}
static ABIArgInfo getSignExtend(QualType Ty, llvm::Type *T = nullptr) {
assert(Ty->isIntegralOrEnumerationType() && "Unexpected QualType");
auto AI = ABIArgInfo(Extend);
AI.setCoerceToType(T);
AI.setPaddingType(nullptr);
AI.setDirectOffset(0);
AI.setSignExt(true);
return AI;
}
static ABIArgInfo getZeroExtend(QualType Ty, llvm::Type *T = nullptr) {
assert(Ty->isIntegralOrEnumerationType() && "Unexpected QualType");
auto AI = ABIArgInfo(Extend);
AI.setCoerceToType(T);
AI.setPaddingType(nullptr);
AI.setDirectOffset(0);
AI.setSignExt(false);
return AI;
}
// ABIArgInfo will record the argument as being extended based on the sign
// of its type.
static ABIArgInfo getExtend(QualType Ty, llvm::Type *T = nullptr) {
assert(Ty->isIntegralOrEnumerationType() && "Unexpected QualType");
if (Ty->hasSignedIntegerRepresentation())
return getSignExtend(Ty, T);
return getZeroExtend(Ty, T);
}
static ABIArgInfo getExtendInReg(QualType Ty, llvm::Type *T = nullptr) {
auto AI = getExtend(Ty, T);
AI.setInReg(true);
return AI;
}
static ABIArgInfo getIgnore() {
return ABIArgInfo(Ignore);
}
static ABIArgInfo getIndirect(CharUnits Alignment, bool ByVal = true,
bool Realign = false,
llvm::Type *Padding = nullptr) {
auto AI = ABIArgInfo(Indirect);
AI.setIndirectAlign(Alignment);
AI.setIndirectByVal(ByVal);
AI.setIndirectRealign(Realign);
AI.setSRetAfterThis(false);
AI.setPaddingType(Padding);
return AI;
}
static ABIArgInfo getIndirectInReg(CharUnits Alignment, bool ByVal = true,
bool Realign = false) {
auto AI = getIndirect(Alignment, ByVal, Realign);
AI.setInReg(true);
return AI;
}
static ABIArgInfo getInAlloca(unsigned FieldIndex) {
auto AI = ABIArgInfo(InAlloca);
AI.setInAllocaFieldIndex(FieldIndex);
return AI;
}
static ABIArgInfo getExpand() {
auto AI = ABIArgInfo(Expand);
AI.setPaddingType(nullptr);
return AI;
}
static ABIArgInfo getExpandWithPadding(bool PaddingInReg,
llvm::Type *Padding) {
auto AI = getExpand();
AI.setPaddingInReg(PaddingInReg);
AI.setPaddingType(Padding);
return AI;
}
/// \param unpaddedCoerceToType The coerce-to type with padding elements
/// removed, canonicalized to a single element if it would otherwise
/// have exactly one element.
static ABIArgInfo getCoerceAndExpand(llvm::StructType *coerceToType,
llvm::Type *unpaddedCoerceToType) {
#ifndef NDEBUG
// Sanity checks on unpaddedCoerceToType.
// Assert that we only have a struct type if there are multiple elements.
auto unpaddedStruct = dyn_cast<llvm::StructType>(unpaddedCoerceToType);
assert(!unpaddedStruct || unpaddedStruct->getNumElements() != 1);
// Assert that all the non-padding elements have a corresponding element
// in the unpadded type.
unsigned unpaddedIndex = 0;
for (auto eltType : coerceToType->elements()) {
if (isPaddingForCoerceAndExpand(eltType)) continue;
if (unpaddedStruct) {
assert(unpaddedStruct->getElementType(unpaddedIndex) == eltType);
} else {
assert(unpaddedIndex == 0 && unpaddedCoerceToType == eltType);
}
unpaddedIndex++;
}
// Assert that there aren't extra elements in the unpadded type.
if (unpaddedStruct) {
assert(unpaddedStruct->getNumElements() == unpaddedIndex);
} else {
assert(unpaddedIndex == 1);
}
#endif
auto AI = ABIArgInfo(CoerceAndExpand);
AI.setCoerceToType(coerceToType);
AI.setUnpaddedCoerceToType(unpaddedCoerceToType);
return AI;
}
static bool isPaddingForCoerceAndExpand(llvm::Type *eltType) {
if (eltType->isArrayTy()) {
assert(eltType->getArrayElementType()->isIntegerTy(8));
return true;
} else {
return false;
}
}
Kind getKind() const { return TheKind; }
bool isDirect() const { return TheKind == Direct; }
bool isInAlloca() const { return TheKind == InAlloca; }
bool isExtend() const { return TheKind == Extend; }
bool isIgnore() const { return TheKind == Ignore; }
bool isIndirect() const { return TheKind == Indirect; }
bool isExpand() const { return TheKind == Expand; }
bool isCoerceAndExpand() const { return TheKind == CoerceAndExpand; }
bool canHaveCoerceToType() const {
return isDirect() || isExtend() || isCoerceAndExpand();
}
// Direct/Extend accessors
unsigned getDirectOffset() const {
assert((isDirect() || isExtend()) && "Not a direct or extend kind");
return DirectOffset;
}
void setDirectOffset(unsigned Offset) {
assert((isDirect() || isExtend()) && "Not a direct or extend kind");
DirectOffset = Offset;
}
bool isSignExt() const {
assert(isExtend() && "Invalid kind!");
return SignExt;
}
void setSignExt(bool SExt) {
assert(isExtend() && "Invalid kind!");
SignExt = SExt;
}
llvm::Type *getPaddingType() const {
return (canHavePaddingType() ? PaddingType : nullptr);
}
bool getPaddingInReg() const {
return PaddingInReg;
}
void setPaddingInReg(bool PIR) {
PaddingInReg = PIR;
}
llvm::Type *getCoerceToType() const {
assert(canHaveCoerceToType() && "Invalid kind!");
return TypeData;
}
void setCoerceToType(llvm::Type *T) {
assert(canHaveCoerceToType() && "Invalid kind!");
TypeData = T;
}
llvm::StructType *getCoerceAndExpandType() const {
assert(isCoerceAndExpand());
return cast<llvm::StructType>(TypeData);
}
llvm::Type *getUnpaddedCoerceAndExpandType() const {
assert(isCoerceAndExpand());
return UnpaddedCoerceAndExpandType;
}
ArrayRef<llvm::Type *> getCoerceAndExpandTypeSequence() const {
assert(isCoerceAndExpand());
if (auto structTy =
dyn_cast<llvm::StructType>(UnpaddedCoerceAndExpandType)) {
return structTy->elements();
} else {
return llvm::makeArrayRef(&UnpaddedCoerceAndExpandType, 1);
}
}
bool getInReg() const {
assert((isDirect() || isExtend() || isIndirect()) && "Invalid kind!");
return InReg;
}
void setInReg(bool IR) {
assert((isDirect() || isExtend() || isIndirect()) && "Invalid kind!");
InReg = IR;
}
// Indirect accessors
CharUnits getIndirectAlign() const {
assert(isIndirect() && "Invalid kind!");
return CharUnits::fromQuantity(IndirectAlign);
}
void setIndirectAlign(CharUnits IA) {
assert(isIndirect() && "Invalid kind!");
IndirectAlign = IA.getQuantity();
}
bool getIndirectByVal() const {
assert(isIndirect() && "Invalid kind!");
return IndirectByVal;
}
void setIndirectByVal(bool IBV) {
assert(isIndirect() && "Invalid kind!");
IndirectByVal = IBV;
}
bool getIndirectRealign() const {
assert(isIndirect() && "Invalid kind!");
return IndirectRealign;
}
void setIndirectRealign(bool IR) {
assert(isIndirect() && "Invalid kind!");
IndirectRealign = IR;
}
bool isSRetAfterThis() const {
assert(isIndirect() && "Invalid kind!");
return SRetAfterThis;
}
void setSRetAfterThis(bool AfterThis) {
assert(isIndirect() && "Invalid kind!");
SRetAfterThis = AfterThis;
}
unsigned getInAllocaFieldIndex() const {
assert(isInAlloca() && "Invalid kind!");
return AllocaFieldIndex;
}
void setInAllocaFieldIndex(unsigned FieldIndex) {
assert(isInAlloca() && "Invalid kind!");
AllocaFieldIndex = FieldIndex;
}
/// Return true if this field of an inalloca struct should be returned
/// to implement a struct return calling convention.
bool getInAllocaSRet() const {
assert(isInAlloca() && "Invalid kind!");
return InAllocaSRet;
}
void setInAllocaSRet(bool SRet) {
assert(isInAlloca() && "Invalid kind!");
InAllocaSRet = SRet;
}
bool getCanBeFlattened() const {
assert(isDirect() && "Invalid kind!");
return CanBeFlattened;
}
void setCanBeFlattened(bool Flatten) {
assert(isDirect() && "Invalid kind!");
CanBeFlattened = Flatten;
}
void dump() const;
};
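// Illustrative sketch (not part of this header): how a hypothetical ABI
// implementation might use the factory methods above. The function name and
// the decision rule here are assumptions for illustration only.
//
//   static ABIArgInfo classifyArgument(llvm::Type *Ty, CharUnits Align,
//                                      bool FitsInRegisters) {
//     if (FitsInRegisters)
//       return ABIArgInfo::getDirect(Ty);    // pass using the converted type
//     return ABIArgInfo::getIndirect(Align); // pass via a hidden pointer
//   }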
/// A class for recording the number of arguments that a function
/// signature requires.
class RequiredArgs {
/// The number of required arguments, or ~0 if the signature does
/// not permit optional arguments.
unsigned NumRequired;
public:
enum All_t { All };
RequiredArgs(All_t _) : NumRequired(~0U) {}
explicit RequiredArgs(unsigned n) : NumRequired(n) {
assert(n != ~0U);
}
/// Compute the arguments required by the given formal prototype,
/// given that there may be some additional, non-formal arguments
/// in play.
///
/// If FD is not null, this will consider pass_object_size params in FD.
static RequiredArgs forPrototypePlus(const FunctionProtoType *prototype,
unsigned additional) {
if (!prototype->isVariadic()) return All;
if (prototype->hasExtParameterInfos())
additional += llvm::count_if(
prototype->getExtParameterInfos(),
[](const FunctionProtoType::ExtParameterInfo &ExtInfo) {
return ExtInfo.hasPassObjectSize();
});
return RequiredArgs(prototype->getNumParams() + additional);
}
static RequiredArgs forPrototypePlus(CanQual<FunctionProtoType> prototype,
unsigned additional) {
return forPrototypePlus(prototype.getTypePtr(), additional);
}
static RequiredArgs forPrototype(const FunctionProtoType *prototype) {
return forPrototypePlus(prototype, 0);
}
static RequiredArgs forPrototype(CanQual<FunctionProtoType> prototype) {
return forPrototypePlus(prototype.getTypePtr(), 0);
}
bool allowsOptionalArgs() const { return NumRequired != ~0U; }
unsigned getNumRequiredArgs() const {
assert(allowsOptionalArgs());
return NumRequired;
}
unsigned getOpaqueData() const { return NumRequired; }
static RequiredArgs getFromOpaqueData(unsigned value) {
if (value == ~0U) return All;
return RequiredArgs(value);
}
};
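// Illustrative sketch (not part of this header): RequiredArgs packs its
// state into a single unsigned, reserving ~0U as the sentinel for "no
// optional arguments permitted", so it round-trips losslessly:
//
//   RequiredArgs RA(2);                    // variadic, two required args
//   unsigned Opaque = RA.getOpaqueData();  // == 2
//   RequiredArgs Back = RequiredArgs::getFromOpaqueData(Opaque);
//   assert(Back.allowsOptionalArgs() && Back.getNumRequiredArgs() == 2);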
// Implementation detail of CGFunctionInfo, factored out so it can be named
// in the TrailingObjects base class of CGFunctionInfo.
struct CGFunctionInfoArgInfo {
CanQualType type;
ABIArgInfo info;
};
/// CGFunctionInfo - Class to encapsulate the information about a
/// function definition.
class CGFunctionInfo final
: public llvm::FoldingSetNode,
private llvm::TrailingObjects<CGFunctionInfo, CGFunctionInfoArgInfo,
FunctionProtoType::ExtParameterInfo> {
typedef CGFunctionInfoArgInfo ArgInfo;
typedef FunctionProtoType::ExtParameterInfo ExtParameterInfo;
/// The LLVM::CallingConv to use for this function (as specified by the
/// user).
unsigned CallingConvention : 8;
/// The LLVM::CallingConv to actually use for this function, which may
/// depend on the ABI.
unsigned EffectiveCallingConvention : 8;
/// The clang::CallingConv that this was originally created with.
unsigned ASTCallingConvention : 6;
/// Whether this is an instance method.
unsigned InstanceMethod : 1;
/// Whether this is a chain call.
unsigned ChainCall : 1;
/// Whether this function is noreturn.
unsigned NoReturn : 1;
/// Whether this function is returns-retained.
unsigned ReturnsRetained : 1;
/// Whether this function carries the no_caller_saved_registers attribute.
unsigned NoCallerSavedRegs : 1;
/// How many arguments to pass inreg.
unsigned HasRegParm : 1;
unsigned RegParm : 3;
/// Whether this function has nocf_check attribute.
unsigned NoCfCheck : 1;
RequiredArgs Required;
/// The struct representing all arguments passed in memory. Only used when
/// passing non-trivial types with inalloca. Not part of the profile.
llvm::StructType *ArgStruct;
unsigned ArgStructAlign : 31;
unsigned HasExtParameterInfos : 1;
unsigned NumArgs;
ArgInfo *getArgsBuffer() {
return getTrailingObjects<ArgInfo>();
}
const ArgInfo *getArgsBuffer() const {
return getTrailingObjects<ArgInfo>();
}
ExtParameterInfo *getExtParameterInfosBuffer() {
return getTrailingObjects<ExtParameterInfo>();
}
const ExtParameterInfo *getExtParameterInfosBuffer() const {
return getTrailingObjects<ExtParameterInfo>();
}
CGFunctionInfo() : Required(RequiredArgs::All) {}
public:
static CGFunctionInfo *create(unsigned llvmCC,
bool instanceMethod,
bool chainCall,
const FunctionType::ExtInfo &extInfo,
ArrayRef<ExtParameterInfo> paramInfos,
CanQualType resultType,
ArrayRef<CanQualType> argTypes,
RequiredArgs required);
void operator delete(void *p) { ::operator delete(p); }
// Friending class TrailingObjects is apparently not good enough for MSVC,
// so these have to be public.
friend class TrailingObjects;
size_t numTrailingObjects(OverloadToken<ArgInfo>) const {
return NumArgs + 1;
}
size_t numTrailingObjects(OverloadToken<ExtParameterInfo>) const {
return (HasExtParameterInfos ? NumArgs : 0);
}
typedef const ArgInfo *const_arg_iterator;
typedef ArgInfo *arg_iterator;
typedef llvm::iterator_range<arg_iterator> arg_range;
typedef llvm::iterator_range<const_arg_iterator> const_arg_range;
arg_range arguments() { return arg_range(arg_begin(), arg_end()); }
const_arg_range arguments() const {
return const_arg_range(arg_begin(), arg_end());
}
const_arg_iterator arg_begin() const { return getArgsBuffer() + 1; }
const_arg_iterator arg_end() const { return getArgsBuffer() + 1 + NumArgs; }
arg_iterator arg_begin() { return getArgsBuffer() + 1; }
arg_iterator arg_end() { return getArgsBuffer() + 1 + NumArgs; }
unsigned arg_size() const { return NumArgs; }
bool isVariadic() const { return Required.allowsOptionalArgs(); }
RequiredArgs getRequiredArgs() const { return Required; }
unsigned getNumRequiredArgs() const {
return isVariadic() ? getRequiredArgs().getNumRequiredArgs() : arg_size();
}
bool isInstanceMethod() const { return InstanceMethod; }
bool isChainCall() const { return ChainCall; }
bool isNoReturn() const { return NoReturn; }
/// In ARC, whether this function retains its return value. This
/// is not always reliable for call sites.
bool isReturnsRetained() const { return ReturnsRetained; }
/// Whether this function does not save caller registers.
bool isNoCallerSavedRegs() const { return NoCallerSavedRegs; }
/// Whether this function has nocf_check attribute.
bool isNoCfCheck() const { return NoCfCheck; }
/// getASTCallingConvention() - Return the AST-specified calling
/// convention.
CallingConv getASTCallingConvention() const {
return CallingConv(ASTCallingConvention);
}
/// getCallingConvention - Return the user specified calling
/// convention, which has been translated into an LLVM CC.
unsigned getCallingConvention() const { return CallingConvention; }
/// getEffectiveCallingConvention - Return the actual calling convention to
/// use, which may depend on the ABI.
unsigned getEffectiveCallingConvention() const {
return EffectiveCallingConvention;
}
void setEffectiveCallingConvention(unsigned Value) {
EffectiveCallingConvention = Value;
}
bool getHasRegParm() const { return HasRegParm; }
unsigned getRegParm() const { return RegParm; }
FunctionType::ExtInfo getExtInfo() const {
return FunctionType::ExtInfo(isNoReturn(), getHasRegParm(), getRegParm(),
getASTCallingConvention(), isReturnsRetained(),
isNoCallerSavedRegs(), isNoCfCheck());
}
CanQualType getReturnType() const { return getArgsBuffer()[0].type; }
ABIArgInfo &getReturnInfo() { return getArgsBuffer()[0].info; }
const ABIArgInfo &getReturnInfo() const { return getArgsBuffer()[0].info; }
ArrayRef<ExtParameterInfo> getExtParameterInfos() const {
if (!HasExtParameterInfos) return {};
return llvm::makeArrayRef(getExtParameterInfosBuffer(), NumArgs);
}
ExtParameterInfo getExtParameterInfo(unsigned argIndex) const {
assert(argIndex <= NumArgs);
if (!HasExtParameterInfos) return ExtParameterInfo();
return getExtParameterInfos()[argIndex];
}
/// Return true if this function uses inalloca arguments.
bool usesInAlloca() const { return ArgStruct; }
/// Get the struct type used to represent all the arguments in memory.
llvm::StructType *getArgStruct() const { return ArgStruct; }
CharUnits getArgStructAlignment() const {
return CharUnits::fromQuantity(ArgStructAlign);
}
void setArgStruct(llvm::StructType *Ty, CharUnits Align) {
ArgStruct = Ty;
ArgStructAlign = Align.getQuantity();
}
void Profile(llvm::FoldingSetNodeID &ID) {
ID.AddInteger(getASTCallingConvention());
ID.AddBoolean(InstanceMethod);
ID.AddBoolean(ChainCall);
ID.AddBoolean(NoReturn);
ID.AddBoolean(ReturnsRetained);
ID.AddBoolean(NoCallerSavedRegs);
ID.AddBoolean(HasRegParm);
ID.AddInteger(RegParm);
ID.AddBoolean(NoCfCheck);
ID.AddInteger(Required.getOpaqueData());
ID.AddBoolean(HasExtParameterInfos);
if (HasExtParameterInfos) {
for (auto paramInfo : getExtParameterInfos())
ID.AddInteger(paramInfo.getOpaqueValue());
}
getReturnType().Profile(ID);
for (const auto &I : arguments())
I.type.Profile(ID);
}
static void Profile(llvm::FoldingSetNodeID &ID,
bool InstanceMethod,
bool ChainCall,
const FunctionType::ExtInfo &info,
ArrayRef<ExtParameterInfo> paramInfos,
RequiredArgs required,
CanQualType resultType,
ArrayRef<CanQualType> argTypes) {
ID.AddInteger(info.getCC());
ID.AddBoolean(InstanceMethod);
ID.AddBoolean(ChainCall);
ID.AddBoolean(info.getNoReturn());
ID.AddBoolean(info.getProducesResult());
ID.AddBoolean(info.getNoCallerSavedRegs());
ID.AddBoolean(info.getHasRegParm());
ID.AddInteger(info.getRegParm());
ID.AddBoolean(info.getNoCfCheck());
ID.AddInteger(required.getOpaqueData());
ID.AddBoolean(!paramInfos.empty());
if (!paramInfos.empty()) {
for (auto paramInfo : paramInfos)
ID.AddInteger(paramInfo.getOpaqueValue());
}
resultType.Profile(ID);
for (ArrayRef<CanQualType>::iterator
i = argTypes.begin(), e = argTypes.end(); i != e; ++i) {
i->Profile(ID);
}
}
};
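// Illustrative note (not part of this header): the trailing ArgInfo buffer
// holds NumArgs + 1 entries; slot 0 stores the return type and its
// ABIArgInfo, while slots 1..NumArgs store the formal arguments. That is why
// arg_begin() is getArgsBuffer() + 1 and getReturnType() reads
// getArgsBuffer()[0].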
} // end namespace CodeGen
} // end namespace clang
#endif
Index: stable/12/contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp
===================================================================
--- stable/12/contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp (revision 356464)
+++ stable/12/contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp (revision 356465)
@@ -1,1171 +1,1172 @@
//===--- ARM.cpp - Implement ARM target feature support -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements ARM TargetInfo objects.
//
//===----------------------------------------------------------------------===//
#include "ARM.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/TargetBuiltins.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
using namespace clang;
using namespace clang::targets;
void ARMTargetInfo::setABIAAPCS() {
IsAAPCS = true;
DoubleAlign = LongLongAlign = LongDoubleAlign = SuitableAlign = 64;
const llvm::Triple &T = getTriple();
bool IsNetBSD = T.isOSNetBSD();
bool IsOpenBSD = T.isOSOpenBSD();
if (!T.isOSWindows() && !IsNetBSD && !IsOpenBSD)
WCharType = UnsignedInt;
UseBitFieldTypeAlignment = true;
ZeroLengthBitfieldBoundary = 0;
// Thumb1 add sp, #imm requires the immediate value to be a multiple of 4,
// so set the preferred alignment for small types to 32.
if (T.isOSBinFormatMachO()) {
resetDataLayout(BigEndian
? "E-m:o-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
: "e-m:o-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64");
} else if (T.isOSWindows()) {
assert(!BigEndian && "Windows on ARM does not support big endian");
resetDataLayout("e"
"-m:w"
"-p:32:32"
"-Fi8"
"-i64:64"
"-v128:64:128"
"-a:0:32"
"-n32"
"-S64");
} else if (T.isOSNaCl()) {
assert(!BigEndian && "NaCl on ARM does not support big endian");
resetDataLayout("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S128");
} else {
resetDataLayout(BigEndian
? "E-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64");
}
// FIXME: Enumerated types are variable width in straight AAPCS.
}
void ARMTargetInfo::setABIAPCS(bool IsAAPCS16) {
const llvm::Triple &T = getTriple();
IsAAPCS = false;
if (IsAAPCS16)
DoubleAlign = LongLongAlign = LongDoubleAlign = SuitableAlign = 64;
else
DoubleAlign = LongLongAlign = LongDoubleAlign = SuitableAlign = 32;
WCharType = SignedInt;
// Do not respect the alignment of bit-field types when laying out
// structures. This corresponds to PCC_BITFIELD_TYPE_MATTERS in gcc.
UseBitFieldTypeAlignment = false;
// gcc forces the alignment to 4 bytes, regardless of the type of the
// zero-length bitfield. This corresponds to EMPTY_FIELD_BOUNDARY in
// gcc.
ZeroLengthBitfieldBoundary = 32;
if (T.isOSBinFormatMachO() && IsAAPCS16) {
assert(!BigEndian && "AAPCS16 does not support big-endian");
resetDataLayout("e-m:o-p:32:32-Fi8-i64:64-a:0:32-n32-S128");
} else if (T.isOSBinFormatMachO())
resetDataLayout(
BigEndian
? "E-m:o-p:32:32-Fi8-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32"
: "e-m:o-p:32:32-Fi8-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32");
else
resetDataLayout(
BigEndian
? "E-m:e-p:32:32-Fi8-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32"
: "e-m:e-p:32:32-Fi8-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32");
// FIXME: Override "preferred align" for double and long long.
}
void ARMTargetInfo::setArchInfo() {
StringRef ArchName = getTriple().getArchName();
ArchISA = llvm::ARM::parseArchISA(ArchName);
CPU = llvm::ARM::getDefaultCPU(ArchName);
llvm::ARM::ArchKind AK = llvm::ARM::parseArch(ArchName);
if (AK != llvm::ARM::ArchKind::INVALID)
ArchKind = AK;
setArchInfo(ArchKind);
}
void ARMTargetInfo::setArchInfo(llvm::ARM::ArchKind Kind) {
StringRef SubArch;
// cache TargetParser info
ArchKind = Kind;
SubArch = llvm::ARM::getSubArch(ArchKind);
ArchProfile = llvm::ARM::parseArchProfile(SubArch);
ArchVersion = llvm::ARM::parseArchVersion(SubArch);
// cache CPU related strings
CPUAttr = getCPUAttr();
CPUProfile = getCPUProfile();
}
void ARMTargetInfo::setAtomic() {
// When the triple does not specify a sub-arch, we do not use inline
// atomics.
bool ShouldUseInlineAtomic =
(ArchISA == llvm::ARM::ISAKind::ARM && ArchVersion >= 6) ||
(ArchISA == llvm::ARM::ISAKind::THUMB && ArchVersion >= 7);
// Cortex-M does not support 8-byte atomics, while general Thumb2 does.
if (ArchProfile == llvm::ARM::ProfileKind::M) {
MaxAtomicPromoteWidth = 32;
if (ShouldUseInlineAtomic)
MaxAtomicInlineWidth = 32;
} else {
MaxAtomicPromoteWidth = 64;
if (ShouldUseInlineAtomic)
MaxAtomicInlineWidth = 64;
}
}
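// Illustrative consequences of the rule above (derived only from the checks
// in setAtomic): an ARMv7-A core gets MaxAtomicPromoteWidth and
// MaxAtomicInlineWidth of 64, an ARMv7-M (Cortex-M) core is capped at 32 for
// both, and a pre-v6 ARM core enables no inline atomics at all.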
bool ARMTargetInfo::hasMVE() const {
return ArchKind == llvm::ARM::ArchKind::ARMV8_1MMainline && MVE != 0;
}
bool ARMTargetInfo::hasMVEFloat() const {
return hasMVE() && (MVE & MVE_FP);
}
bool ARMTargetInfo::isThumb() const {
return ArchISA == llvm::ARM::ISAKind::THUMB;
}
bool ARMTargetInfo::supportsThumb() const {
return CPUAttr.count('T') || ArchVersion >= 6;
}
bool ARMTargetInfo::supportsThumb2() const {
return CPUAttr.equals("6T2") ||
(ArchVersion >= 7 && !CPUAttr.equals("8M_BASE"));
}
StringRef ARMTargetInfo::getCPUAttr() const {
// For most sub-arches, the build attribute CPU name is enough.
// For Cortex variants, it's slightly different.
switch (ArchKind) {
default:
return llvm::ARM::getCPUAttr(ArchKind);
case llvm::ARM::ArchKind::ARMV6M:
return "6M";
case llvm::ARM::ArchKind::ARMV7S:
return "7S";
case llvm::ARM::ArchKind::ARMV7A:
return "7A";
case llvm::ARM::ArchKind::ARMV7R:
return "7R";
case llvm::ARM::ArchKind::ARMV7M:
return "7M";
case llvm::ARM::ArchKind::ARMV7EM:
return "7EM";
case llvm::ARM::ArchKind::ARMV7VE:
return "7VE";
case llvm::ARM::ArchKind::ARMV8A:
return "8A";
case llvm::ARM::ArchKind::ARMV8_1A:
return "8_1A";
case llvm::ARM::ArchKind::ARMV8_2A:
return "8_2A";
case llvm::ARM::ArchKind::ARMV8_3A:
return "8_3A";
case llvm::ARM::ArchKind::ARMV8_4A:
return "8_4A";
case llvm::ARM::ArchKind::ARMV8_5A:
return "8_5A";
case llvm::ARM::ArchKind::ARMV8MBaseline:
return "8M_BASE";
case llvm::ARM::ArchKind::ARMV8MMainline:
return "8M_MAIN";
case llvm::ARM::ArchKind::ARMV8R:
return "8R";
case llvm::ARM::ArchKind::ARMV8_1MMainline:
return "8_1M_MAIN";
}
}
StringRef ARMTargetInfo::getCPUProfile() const {
switch (ArchProfile) {
case llvm::ARM::ProfileKind::A:
return "A";
case llvm::ARM::ProfileKind::R:
return "R";
case llvm::ARM::ProfileKind::M:
return "M";
default:
return "";
}
}
ARMTargetInfo::ARMTargetInfo(const llvm::Triple &Triple,
const TargetOptions &Opts)
: TargetInfo(Triple), FPMath(FP_Default), IsAAPCS(true), LDREX(0),
HW_FP(0) {
bool IsOpenBSD = Triple.isOSOpenBSD();
bool IsNetBSD = Triple.isOSNetBSD();
// FIXME: the isOSBinFormatMachO check is a workaround for identifying a
// Darwin-like environment where size_t is `unsigned long` rather than
// `unsigned int`.
PtrDiffType = IntPtrType =
(Triple.isOSDarwin() || Triple.isOSBinFormatMachO() || IsOpenBSD ||
IsNetBSD)
? SignedLong
: SignedInt;
SizeType = (Triple.isOSDarwin() || Triple.isOSBinFormatMachO() || IsOpenBSD ||
IsNetBSD)
? UnsignedLong
: UnsignedInt;
// ptrdiff_t is inconsistent on Darwin
if ((Triple.isOSDarwin() || Triple.isOSBinFormatMachO()) &&
!Triple.isWatchABI())
PtrDiffType = SignedInt;
// Cache arch related info.
setArchInfo();
// {} in inline assembly are neon specifiers, not assembly variant
// specifiers.
NoAsmVariants = true;
// FIXME: This duplicates code from the driver that sets the -target-abi
// option - this code is used if -target-abi isn't passed and should
// be unified in some way.
if (Triple.isOSBinFormatMachO()) {
// The backend is hardwired to assume AAPCS for M-class processors, ensure
// the frontend matches that.
if (Triple.getEnvironment() == llvm::Triple::EABI ||
Triple.getOS() == llvm::Triple::UnknownOS ||
ArchProfile == llvm::ARM::ProfileKind::M) {
setABI("aapcs");
} else if (Triple.isWatchABI()) {
setABI("aapcs16");
} else {
setABI("apcs-gnu");
}
} else if (Triple.isOSWindows()) {
// FIXME: this is invalid for WindowsCE
setABI("aapcs");
} else {
// Select the default based on the platform.
switch (Triple.getEnvironment()) {
case llvm::Triple::Android:
case llvm::Triple::GNUEABI:
case llvm::Triple::GNUEABIHF:
case llvm::Triple::MuslEABI:
case llvm::Triple::MuslEABIHF:
setABI("aapcs-linux");
break;
case llvm::Triple::EABIHF:
case llvm::Triple::EABI:
setABI("aapcs");
break;
case llvm::Triple::GNU:
setABI("apcs-gnu");
break;
default:
if (IsNetBSD)
setABI("apcs-gnu");
else if (IsOpenBSD)
setABI("aapcs-linux");
else
setABI("aapcs");
break;
}
}
// ARM targets default to using the ARM C++ ABI.
TheCXXABI.set(TargetCXXABI::GenericARM);
// ARM has atomics up to 8 bytes
setAtomic();
// Maximum alignment for ARM NEON data types should be 64-bits (AAPCS)
if (IsAAPCS && (Triple.getEnvironment() != llvm::Triple::Android))
MaxVectorAlign = 64;
// Do force alignment of members that follow zero-length bitfields. If
// the alignment of the zero-length bitfield is greater than that of the
// member that follows it, the member will be aligned as if it had the
// type of the zero-length bitfield.
UseZeroLengthBitfieldAlignment = true;
if (Triple.getOS() == llvm::Triple::Linux ||
Triple.getOS() == llvm::Triple::UnknownOS)
this->MCountName = Opts.EABIVersion == llvm::EABI::GNU
? "\01__gnu_mcount_nc"
: "\01mcount";
SoftFloatABI = llvm::is_contained(Opts.FeaturesAsWritten, "+soft-float-abi");
}
StringRef ARMTargetInfo::getABI() const { return ABI; }
bool ARMTargetInfo::setABI(const std::string &Name) {
ABI = Name;
// The defaults (above) are for AAPCS, check if we need to change them.
//
// FIXME: We need support for -meabi... we could just mangle it into the
// name.
if (Name == "apcs-gnu" || Name == "aapcs16") {
setABIAPCS(Name == "aapcs16");
return true;
}
if (Name == "aapcs" || Name == "aapcs-vfp" || Name == "aapcs-linux") {
setABIAAPCS();
return true;
}
return false;
}
// FIXME: This should be based on Arch attributes, not CPU names.
bool ARMTargetInfo::initFeatureMap(
llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
const std::vector<std::string> &FeaturesVec) const {
std::string ArchFeature;
std::vector<StringRef> TargetFeatures;
llvm::ARM::ArchKind Arch = llvm::ARM::parseArch(getTriple().getArchName());
// Map the base architecture to an appropriate target feature, so we don't
// rely on the target triple.
llvm::ARM::ArchKind CPUArch = llvm::ARM::parseCPUArch(CPU);
if (CPUArch == llvm::ARM::ArchKind::INVALID)
CPUArch = Arch;
if (CPUArch != llvm::ARM::ArchKind::INVALID) {
ArchFeature = ("+" + llvm::ARM::getArchName(CPUArch)).str();
TargetFeatures.push_back(ArchFeature);
}
// get default FPU features
unsigned FPUKind = llvm::ARM::getDefaultFPU(CPU, Arch);
llvm::ARM::getFPUFeatures(FPUKind, TargetFeatures);
// get default Extension features
unsigned Extensions = llvm::ARM::getDefaultExtensions(CPU, Arch);
llvm::ARM::getExtensionFeatures(Extensions, TargetFeatures);
for (auto Feature : TargetFeatures)
if (Feature[0] == '+')
Features[Feature.drop_front(1)] = true;
// Enable or disable thumb-mode explicitly per function to enable mixed
// ARM and Thumb code generation.
if (isThumb())
Features["thumb-mode"] = true;
else
Features["thumb-mode"] = false;
// Convert user-provided arm and thumb GNU target attributes to
// [-|+]thumb-mode target features respectively.
std::vector<std::string> UpdatedFeaturesVec;
for (const auto &Feature : FeaturesVec) {
// Skip soft-float-abi; it's something we only use to initialize a bit of
// class state, and is otherwise unrecognized.
if (Feature == "+soft-float-abi")
continue;
StringRef FixedFeature;
if (Feature == "+arm")
FixedFeature = "-thumb-mode";
else if (Feature == "+thumb")
FixedFeature = "+thumb-mode";
else
FixedFeature = Feature;
UpdatedFeaturesVec.push_back(FixedFeature.str());
}
return TargetInfo::initFeatureMap(Features, Diags, CPU, UpdatedFeaturesVec);
}
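// Illustrative example of the conversion above: a user-supplied feature list
// such as {"+thumb", "+crc", "+soft-float-abi"} is rewritten to
// {"+thumb-mode", "+crc"} before being handed to TargetInfo::initFeatureMap;
// "+soft-float-abi" only seeds class state and is dropped here.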
bool ARMTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
DiagnosticsEngine &Diags) {
FPU = 0;
MVE = 0;
CRC = 0;
Crypto = 0;
DSP = 0;
Unaligned = 1;
SoftFloat = false;
// Note that SoftFloatABI is initialized in our constructor.
HWDiv = 0;
DotProd = 0;
HasFloat16 = true;
// This does not diagnose illegal cases like having both
// "+vfpv2" and "+vfpv3" or having "+neon" and "-fp64".
for (const auto &Feature : Features) {
if (Feature == "+soft-float") {
SoftFloat = true;
- } else if (Feature == "+vfp2sp" || Feature == "+vfp2") {
+ } else if (Feature == "+vfp2sp" || Feature == "+vfp2d16sp" ||
+ Feature == "+vfp2" || Feature == "+vfp2d16") {
FPU |= VFP2FPU;
HW_FP |= HW_FP_SP;
- if (Feature == "+vfp2")
+ if (Feature == "+vfp2" || Feature == "+vfp2d16")
HW_FP |= HW_FP_DP;
} else if (Feature == "+vfp3sp" || Feature == "+vfp3d16sp" ||
Feature == "+vfp3" || Feature == "+vfp3d16") {
FPU |= VFP3FPU;
HW_FP |= HW_FP_SP;
if (Feature == "+vfp3" || Feature == "+vfp3d16")
HW_FP |= HW_FP_DP;
} else if (Feature == "+vfp4sp" || Feature == "+vfp4d16sp" ||
Feature == "+vfp4" || Feature == "+vfp4d16") {
FPU |= VFP4FPU;
HW_FP |= HW_FP_SP | HW_FP_HP;
if (Feature == "+vfp4" || Feature == "+vfp4d16")
HW_FP |= HW_FP_DP;
} else if (Feature == "+fp-armv8sp" || Feature == "+fp-armv8d16sp" ||
Feature == "+fp-armv8" || Feature == "+fp-armv8d16") {
FPU |= FPARMV8;
HW_FP |= HW_FP_SP | HW_FP_HP;
if (Feature == "+fp-armv8" || Feature == "+fp-armv8d16")
HW_FP |= HW_FP_DP;
} else if (Feature == "+neon") {
FPU |= NeonFPU;
HW_FP |= HW_FP_SP;
} else if (Feature == "+hwdiv") {
HWDiv |= HWDivThumb;
} else if (Feature == "+hwdiv-arm") {
HWDiv |= HWDivARM;
} else if (Feature == "+crc") {
CRC = 1;
} else if (Feature == "+crypto") {
Crypto = 1;
} else if (Feature == "+dsp") {
DSP = 1;
} else if (Feature == "+fp64") {
HW_FP |= HW_FP_DP;
} else if (Feature == "+8msecext") {
if (CPUProfile != "M" || ArchVersion != 8) {
Diags.Report(diag::err_target_unsupported_mcmse) << CPU;
return false;
}
} else if (Feature == "+strict-align") {
Unaligned = 0;
} else if (Feature == "+fp16") {
HW_FP |= HW_FP_HP;
} else if (Feature == "+fullfp16") {
HasLegalHalfType = true;
} else if (Feature == "+dotprod") {
DotProd = true;
} else if (Feature == "+mve") {
DSP = 1;
MVE |= MVE_INT;
} else if (Feature == "+mve.fp") {
DSP = 1;
HasLegalHalfType = true;
FPU |= FPARMV8;
MVE |= MVE_INT | MVE_FP;
HW_FP |= HW_FP_SP | HW_FP_HP;
}
}
switch (ArchVersion) {
case 6:
if (ArchProfile == llvm::ARM::ProfileKind::M)
LDREX = 0;
else if (ArchKind == llvm::ARM::ArchKind::ARMV6K)
LDREX = LDREX_D | LDREX_W | LDREX_H | LDREX_B;
else
LDREX = LDREX_W;
break;
case 7:
if (ArchProfile == llvm::ARM::ProfileKind::M)
LDREX = LDREX_W | LDREX_H | LDREX_B;
else
LDREX = LDREX_D | LDREX_W | LDREX_H | LDREX_B;
break;
case 8:
LDREX = LDREX_D | LDREX_W | LDREX_H | LDREX_B;
}
if (!(FPU & NeonFPU) && FPMath == FP_Neon) {
Diags.Report(diag::err_target_unsupported_fpmath) << "neon";
return false;
}
if (FPMath == FP_Neon)
Features.push_back("+neonfp");
else if (FPMath == FP_VFP)
Features.push_back("-neonfp");
return true;
}
bool ARMTargetInfo::hasFeature(StringRef Feature) const {
return llvm::StringSwitch<bool>(Feature)
.Case("arm", true)
.Case("aarch32", true)
.Case("softfloat", SoftFloat)
.Case("thumb", isThumb())
.Case("neon", (FPU & NeonFPU) && !SoftFloat)
.Case("vfp", FPU && !SoftFloat)
.Case("hwdiv", HWDiv & HWDivThumb)
.Case("hwdiv-arm", HWDiv & HWDivARM)
.Case("mve", hasMVE())
.Default(false);
}
bool ARMTargetInfo::isValidCPUName(StringRef Name) const {
return Name == "generic" ||
llvm::ARM::parseCPUArch(Name) != llvm::ARM::ArchKind::INVALID;
}
void ARMTargetInfo::fillValidCPUList(SmallVectorImpl<StringRef> &Values) const {
llvm::ARM::fillValidCPUArchList(Values);
}
bool ARMTargetInfo::setCPU(const std::string &Name) {
if (Name != "generic")
setArchInfo(llvm::ARM::parseCPUArch(Name));
if (ArchKind == llvm::ARM::ArchKind::INVALID)
return false;
setAtomic();
CPU = Name;
return true;
}
bool ARMTargetInfo::setFPMath(StringRef Name) {
if (Name == "neon") {
FPMath = FP_Neon;
return true;
} else if (Name == "vfp" || Name == "vfp2" || Name == "vfp3" ||
Name == "vfp4") {
FPMath = FP_VFP;
return true;
}
return false;
}
void ARMTargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
MacroBuilder &Builder) const {
Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
}
void ARMTargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
MacroBuilder &Builder) const {
// Also include the ARMv8.1-A defines
getTargetDefinesARMV81A(Opts, Builder);
}
void ARMTargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
// Target identification.
Builder.defineMacro("__arm");
Builder.defineMacro("__arm__");
// For bare-metal none-eabi.
if (getTriple().getOS() == llvm::Triple::UnknownOS &&
(getTriple().getEnvironment() == llvm::Triple::EABI ||
getTriple().getEnvironment() == llvm::Triple::EABIHF))
Builder.defineMacro("__ELF__");
// Target properties.
Builder.defineMacro("__REGISTER_PREFIX__", "");
// Unfortunately, __ARM_ARCH_7K__ is now more of an ABI descriptor. The CPU
// happens to be Cortex-A7 though, so it should still get __ARM_ARCH_7A__.
if (getTriple().isWatchABI())
Builder.defineMacro("__ARM_ARCH_7K__", "2");
if (!CPUAttr.empty())
Builder.defineMacro("__ARM_ARCH_" + CPUAttr + "__");
// ACLE 6.4.1 ARM/Thumb instruction set architecture
// __ARM_ARCH is defined as an integer value indicating the current ARM ISA
Builder.defineMacro("__ARM_ARCH", Twine(ArchVersion));
if (ArchVersion >= 8) {
// ACLE 6.5.7 Crypto Extension
if (Crypto)
Builder.defineMacro("__ARM_FEATURE_CRYPTO", "1");
// ACLE 6.5.8 CRC32 Extension
if (CRC)
Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
// ACLE 6.5.10 Numeric Maximum and Minimum
Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN", "1");
// ACLE 6.5.9 Directed Rounding
Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING", "1");
}
// __ARM_ARCH_ISA_ARM is defined to 1 if the core supports the ARM ISA. It
// is not defined for the M-profile.
// NOTE that the default profile is assumed to be 'A'
if (CPUProfile.empty() || ArchProfile != llvm::ARM::ProfileKind::M)
Builder.defineMacro("__ARM_ARCH_ISA_ARM", "1");
// __ARM_ARCH_ISA_THUMB is defined to 1 if the core supports the original
// Thumb ISA (including v6-M and v8-M Baseline). It is set to 2 if the
// core supports the Thumb-2 ISA as found in the v6T2 architecture and all
// v7 and v8 architectures excluding v8-M Baseline.
if (supportsThumb2())
Builder.defineMacro("__ARM_ARCH_ISA_THUMB", "2");
else if (supportsThumb())
Builder.defineMacro("__ARM_ARCH_ISA_THUMB", "1");
// __ARM_32BIT_STATE is defined to 1 if code is being generated for a 32-bit
// instruction set such as ARM or Thumb.
Builder.defineMacro("__ARM_32BIT_STATE", "1");
// ACLE 6.4.2 Architectural Profile (A, R, M or pre-Cortex)
// __ARM_ARCH_PROFILE is defined as 'A', 'R', 'M' or 'S', or unset.
if (!CPUProfile.empty())
Builder.defineMacro("__ARM_ARCH_PROFILE", "'" + CPUProfile + "'");
// ACLE 6.4.3 Unaligned access supported in hardware
if (Unaligned)
Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");
// ACLE 6.4.4 LDREX/STREX
if (LDREX)
Builder.defineMacro("__ARM_FEATURE_LDREX", "0x" + Twine::utohexstr(LDREX));
// ACLE 6.4.5 CLZ
if (ArchVersion == 5 || (ArchVersion == 6 && CPUProfile != "M") ||
ArchVersion > 6)
Builder.defineMacro("__ARM_FEATURE_CLZ", "1");
// ACLE 6.5.1 Hardware Floating Point
if (HW_FP)
Builder.defineMacro("__ARM_FP", "0x" + Twine::utohexstr(HW_FP));
// ACLE predefines.
Builder.defineMacro("__ARM_ACLE", "200");
// FP16 support (we currently only support IEEE format).
Builder.defineMacro("__ARM_FP16_FORMAT_IEEE", "1");
Builder.defineMacro("__ARM_FP16_ARGS", "1");
// ACLE 6.5.3 Fused multiply-accumulate (FMA)
if (ArchVersion >= 7 && (FPU & VFP4FPU))
Builder.defineMacro("__ARM_FEATURE_FMA", "1");
// Subtarget options.
// FIXME: It's more complicated than this and we don't really support
// interworking.
// Windows on ARM does not "support" interworking
if (5 <= ArchVersion && ArchVersion <= 8 && !getTriple().isOSWindows())
Builder.defineMacro("__THUMB_INTERWORK__");
if (ABI == "aapcs" || ABI == "aapcs-linux" || ABI == "aapcs-vfp") {
// Embedded targets on Darwin follow AAPCS, but not EABI.
// Windows on ARM follows AAPCS VFP, but does not conform to EABI.
if (!getTriple().isOSBinFormatMachO() && !getTriple().isOSWindows())
Builder.defineMacro("__ARM_EABI__");
Builder.defineMacro("__ARM_PCS", "1");
}
if ((!SoftFloat && !SoftFloatABI) || ABI == "aapcs-vfp" || ABI == "aapcs16")
Builder.defineMacro("__ARM_PCS_VFP", "1");
if (SoftFloat)
Builder.defineMacro("__SOFTFP__");
// ACLE position independent code macros.
if (Opts.ROPI)
Builder.defineMacro("__ARM_ROPI", "1");
if (Opts.RWPI)
Builder.defineMacro("__ARM_RWPI", "1");
if (ArchKind == llvm::ARM::ArchKind::XSCALE)
Builder.defineMacro("__XSCALE__");
if (isThumb()) {
Builder.defineMacro("__THUMBEL__");
Builder.defineMacro("__thumb__");
if (supportsThumb2())
Builder.defineMacro("__thumb2__");
}
// ACLE 6.4.9 32-bit SIMD instructions
if ((CPUProfile != "M" && ArchVersion >= 6) || (CPUProfile == "M" && DSP))
Builder.defineMacro("__ARM_FEATURE_SIMD32", "1");
// ACLE 6.4.10 Hardware Integer Divide
if (((HWDiv & HWDivThumb) && isThumb()) ||
((HWDiv & HWDivARM) && !isThumb())) {
Builder.defineMacro("__ARM_FEATURE_IDIV", "1");
Builder.defineMacro("__ARM_ARCH_EXT_IDIV__", "1");
}
// Note, this is always on in gcc, even though it doesn't make sense.
Builder.defineMacro("__APCS_32__");
if (FPUModeIsVFP((FPUMode)FPU)) {
Builder.defineMacro("__VFP_FP__");
if (FPU & VFP2FPU)
Builder.defineMacro("__ARM_VFPV2__");
if (FPU & VFP3FPU)
Builder.defineMacro("__ARM_VFPV3__");
if (FPU & VFP4FPU)
Builder.defineMacro("__ARM_VFPV4__");
if (FPU & FPARMV8)
Builder.defineMacro("__ARM_FPV5__");
}
// This only gets set when Neon instructions are actually available, unlike
// the VFP define, hence the soft-float and arch check. This is subtly
// different from gcc; we follow the intent, which was that it should be set
// only when Neon instructions are actually available.
if ((FPU & NeonFPU) && !SoftFloat && ArchVersion >= 7) {
Builder.defineMacro("__ARM_NEON", "1");
Builder.defineMacro("__ARM_NEON__");
// Current AArch32 NEON implementations do not support double-precision
// floating-point even when it is present in VFP.
Builder.defineMacro("__ARM_NEON_FP",
"0x" + Twine::utohexstr(HW_FP & ~HW_FP_DP));
}
if (hasMVE()) {
Builder.defineMacro("__ARM_FEATURE_MVE", hasMVEFloat() ? "3" : "1");
}
Builder.defineMacro("__ARM_SIZEOF_WCHAR_T",
Twine(Opts.WCharSize ? Opts.WCharSize : 4));
Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM", Opts.ShortEnums ? "1" : "4");
// CMSE
if (ArchVersion == 8 && ArchProfile == llvm::ARM::ProfileKind::M)
Builder.defineMacro("__ARM_FEATURE_CMSE", Opts.Cmse ? "3" : "1");
if (ArchVersion >= 6 && CPUAttr != "6M" && CPUAttr != "8M_BASE") {
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
}
// ACLE 6.4.7 DSP instructions
if (DSP) {
Builder.defineMacro("__ARM_FEATURE_DSP", "1");
}
// ACLE 6.4.8 Saturation instructions
bool SAT = false;
if ((ArchVersion == 6 && CPUProfile != "M") || ArchVersion > 6) {
Builder.defineMacro("__ARM_FEATURE_SAT", "1");
SAT = true;
}
// ACLE 6.4.6 Q (saturation) flag
if (DSP || SAT)
Builder.defineMacro("__ARM_FEATURE_QBIT", "1");
if (Opts.UnsafeFPMath)
Builder.defineMacro("__ARM_FP_FAST", "1");
// Armv8.2-A FP16 vector intrinsic
if ((FPU & NeonFPU) && HasLegalHalfType)
Builder.defineMacro("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", "1");
// Armv8.2-A FP16 scalar intrinsics
if (HasLegalHalfType)
Builder.defineMacro("__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", "1");
// Armv8.2-A dot product intrinsics
if (DotProd)
Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");
switch (ArchKind) {
default:
break;
case llvm::ARM::ArchKind::ARMV8_1A:
getTargetDefinesARMV81A(Opts, Builder);
break;
case llvm::ARM::ArchKind::ARMV8_2A:
getTargetDefinesARMV82A(Opts, Builder);
break;
}
}
const Builtin::Info ARMTargetInfo::BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS) \
{#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) \
{#ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr},
#include "clang/Basic/BuiltinsNEON.def"
#define BUILTIN(ID, TYPE, ATTRS) \
{#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#define LANGBUILTIN(ID, TYPE, ATTRS, LANG) \
{#ID, TYPE, ATTRS, nullptr, LANG, nullptr},
#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) \
{#ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr},
#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE) \
{#ID, TYPE, ATTRS, HEADER, LANGS, FEATURE},
#include "clang/Basic/BuiltinsARM.def"
};
ArrayRef<Builtin::Info> ARMTargetInfo::getTargetBuiltins() const {
return llvm::makeArrayRef(BuiltinInfo, clang::ARM::LastTSBuiltin -
Builtin::FirstTSBuiltin);
}
bool ARMTargetInfo::isCLZForZeroUndef() const { return false; }
TargetInfo::BuiltinVaListKind ARMTargetInfo::getBuiltinVaListKind() const {
return IsAAPCS
? AAPCSABIBuiltinVaList
: (getTriple().isWatchABI() ? TargetInfo::CharPtrBuiltinVaList
: TargetInfo::VoidPtrBuiltinVaList);
}
const char *const ARMTargetInfo::GCCRegNames[] = {
// Integer registers
"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11",
"r12", "sp", "lr", "pc",
// Float registers
"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
"s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22",
"s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
// Double registers
"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11",
"d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
"d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
// Quad registers
"q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11",
"q12", "q13", "q14", "q15"};
ArrayRef<const char *> ARMTargetInfo::getGCCRegNames() const {
return llvm::makeArrayRef(GCCRegNames);
}
const TargetInfo::GCCRegAlias ARMTargetInfo::GCCRegAliases[] = {
{{"a1"}, "r0"}, {{"a2"}, "r1"}, {{"a3"}, "r2"}, {{"a4"}, "r3"},
{{"v1"}, "r4"}, {{"v2"}, "r5"}, {{"v3"}, "r6"}, {{"v4"}, "r7"},
{{"v5"}, "r8"}, {{"v6", "rfp"}, "r9"}, {{"sl"}, "r10"}, {{"fp"}, "r11"},
{{"ip"}, "r12"}, {{"r13"}, "sp"}, {{"r14"}, "lr"}, {{"r15"}, "pc"},
// The S, D and Q registers overlap, but aren't really aliases; we
// don't want to substitute one of these for a different-sized one.
};
ArrayRef<TargetInfo::GCCRegAlias> ARMTargetInfo::getGCCRegAliases() const {
return llvm::makeArrayRef(GCCRegAliases);
}
bool ARMTargetInfo::validateAsmConstraint(
const char *&Name, TargetInfo::ConstraintInfo &Info) const {
switch (*Name) {
default:
break;
case 'l': // r0-r7
case 'h': // r8-r15
case 't': // VFP Floating point register single precision
case 'w': // VFP Floating point register double precision
Info.setAllowsRegister();
return true;
case 'I':
case 'J':
case 'K':
case 'L':
case 'M':
// FIXME
return true;
case 'Q': // A memory address that is a single base register.
Info.setAllowsMemory();
return true;
case 'T':
switch (Name[1]) {
default:
break;
case 'e': // Even general-purpose register
case 'o': // Odd general-purpose register
Info.setAllowsRegister();
Name++;
return true;
}
break;
case 'U': // a memory reference...
switch (Name[1]) {
case 'q': // ...ARMV4 ldrsb
case 'v': // ...VFP load/store (reg+constant offset)
case 'y': // ...iWMMXt load/store
case 't': // address valid for load/store opaque types wider
// than 128-bits
case 'n': // valid address for Neon doubleword vector load/store
case 'm': // valid address for Neon element and structure load/store
case 's': // valid address for non-offset loads/stores of quad-word
// values in four ARM registers
Info.setAllowsMemory();
Name++;
return true;
}
break;
}
return false;
}
std::string ARMTargetInfo::convertConstraint(const char *&Constraint) const {
std::string R;
switch (*Constraint) {
case 'U': // Two-character constraint; add "^" hint for later parsing.
case 'T':
R = std::string("^") + std::string(Constraint, 2);
Constraint++;
break;
case 'p': // 'p' should be translated to 'r' by default.
R = std::string("r");
break;
default:
return std::string(1, *Constraint);
}
return R;
}
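// Illustrative example of the rewrites above: the two-character constraint
// "Uv" becomes "^Uv" so later parsing can distinguish it from single-letter
// constraints, while "p" is rewritten to a plain "r".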
bool ARMTargetInfo::validateConstraintModifier(
StringRef Constraint, char Modifier, unsigned Size,
std::string &SuggestedModifier) const {
bool isOutput = (Constraint[0] == '=');
bool isInOut = (Constraint[0] == '+');
// Strip off constraint modifiers.
while (Constraint[0] == '=' || Constraint[0] == '+' || Constraint[0] == '&')
Constraint = Constraint.substr(1);
switch (Constraint[0]) {
default:
break;
case 'r': {
switch (Modifier) {
default:
return (isInOut || isOutput || Size <= 64);
case 'q':
// A register of size 32 cannot fit a vector type.
return false;
}
}
}
return true;
}
const char *ARMTargetInfo::getClobbers() const {
// FIXME: Is this really right?
return "";
}
TargetInfo::CallingConvCheckResult
ARMTargetInfo::checkCallingConvention(CallingConv CC) const {
switch (CC) {
case CC_AAPCS:
case CC_AAPCS_VFP:
case CC_Swift:
case CC_OpenCLKernel:
return CCCR_OK;
default:
return CCCR_Warning;
}
}
int ARMTargetInfo::getEHDataRegisterNumber(unsigned RegNo) const {
if (RegNo == 0)
return 0;
if (RegNo == 1)
return 1;
return -1;
}
bool ARMTargetInfo::hasSjLjLowering() const { return true; }
ARMleTargetInfo::ARMleTargetInfo(const llvm::Triple &Triple,
const TargetOptions &Opts)
: ARMTargetInfo(Triple, Opts) {}
void ARMleTargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
Builder.defineMacro("__ARMEL__");
ARMTargetInfo::getTargetDefines(Opts, Builder);
}
ARMbeTargetInfo::ARMbeTargetInfo(const llvm::Triple &Triple,
const TargetOptions &Opts)
: ARMTargetInfo(Triple, Opts) {}
void ARMbeTargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
Builder.defineMacro("__ARMEB__");
Builder.defineMacro("__ARM_BIG_ENDIAN");
ARMTargetInfo::getTargetDefines(Opts, Builder);
}
WindowsARMTargetInfo::WindowsARMTargetInfo(const llvm::Triple &Triple,
const TargetOptions &Opts)
: WindowsTargetInfo<ARMleTargetInfo>(Triple, Opts), Triple(Triple) {
}
void WindowsARMTargetInfo::getVisualStudioDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
// FIXME: this is invalid for WindowsCE
Builder.defineMacro("_M_ARM_NT", "1");
Builder.defineMacro("_M_ARMT", "_M_ARM");
Builder.defineMacro("_M_THUMB", "_M_ARM");
assert((Triple.getArch() == llvm::Triple::arm ||
Triple.getArch() == llvm::Triple::thumb) &&
"invalid architecture for Windows ARM target info");
unsigned Offset = Triple.getArch() == llvm::Triple::arm ? 4 : 6;
Builder.defineMacro("_M_ARM", Triple.getArchName().substr(Offset));
// TODO map the complete set of values
// 31: VFPv3 40: VFPv4
Builder.defineMacro("_M_ARM_FP", "31");
}
TargetInfo::BuiltinVaListKind
WindowsARMTargetInfo::getBuiltinVaListKind() const {
return TargetInfo::CharPtrBuiltinVaList;
}
TargetInfo::CallingConvCheckResult
WindowsARMTargetInfo::checkCallingConvention(CallingConv CC) const {
switch (CC) {
case CC_X86StdCall:
case CC_X86ThisCall:
case CC_X86FastCall:
case CC_X86VectorCall:
return CCCR_Ignore;
case CC_C:
case CC_OpenCLKernel:
case CC_PreserveMost:
case CC_PreserveAll:
case CC_Swift:
return CCCR_OK;
default:
return CCCR_Warning;
}
}
// Windows ARM + Itanium C++ ABI Target
ItaniumWindowsARMleTargetInfo::ItaniumWindowsARMleTargetInfo(
const llvm::Triple &Triple, const TargetOptions &Opts)
: WindowsARMTargetInfo(Triple, Opts) {
TheCXXABI.set(TargetCXXABI::GenericARM);
}
void ItaniumWindowsARMleTargetInfo::getTargetDefines(
const LangOptions &Opts, MacroBuilder &Builder) const {
WindowsARMTargetInfo::getTargetDefines(Opts, Builder);
if (Opts.MSVCCompat)
WindowsARMTargetInfo::getVisualStudioDefines(Opts, Builder);
}
// Windows ARM, MS (C++) ABI
MicrosoftARMleTargetInfo::MicrosoftARMleTargetInfo(const llvm::Triple &Triple,
const TargetOptions &Opts)
: WindowsARMTargetInfo(Triple, Opts) {
TheCXXABI.set(TargetCXXABI::Microsoft);
}
void MicrosoftARMleTargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
WindowsARMTargetInfo::getTargetDefines(Opts, Builder);
WindowsARMTargetInfo::getVisualStudioDefines(Opts, Builder);
}
MinGWARMTargetInfo::MinGWARMTargetInfo(const llvm::Triple &Triple,
const TargetOptions &Opts)
: WindowsARMTargetInfo(Triple, Opts) {
TheCXXABI.set(TargetCXXABI::GenericARM);
}
void MinGWARMTargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
WindowsARMTargetInfo::getTargetDefines(Opts, Builder);
Builder.defineMacro("_ARM_");
}
CygwinARMTargetInfo::CygwinARMTargetInfo(const llvm::Triple &Triple,
const TargetOptions &Opts)
: ARMleTargetInfo(Triple, Opts) {
this->WCharType = TargetInfo::UnsignedShort;
TLSSupported = false;
DoubleAlign = LongLongAlign = 64;
resetDataLayout("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64");
}
void CygwinARMTargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
ARMleTargetInfo::getTargetDefines(Opts, Builder);
Builder.defineMacro("_ARM_");
Builder.defineMacro("__CYGWIN__");
Builder.defineMacro("__CYGWIN32__");
DefineStd(Builder, "unix", Opts);
if (Opts.CPlusPlus)
Builder.defineMacro("_GNU_SOURCE");
}
DarwinARMTargetInfo::DarwinARMTargetInfo(const llvm::Triple &Triple,
const TargetOptions &Opts)
: DarwinTargetInfo<ARMleTargetInfo>(Triple, Opts) {
HasAlignMac68kSupport = true;
// iOS always has 64-bit atomic instructions.
// FIXME: This should be based off of the target features in
// ARMleTargetInfo.
MaxAtomicInlineWidth = 64;
if (Triple.isWatchABI()) {
// Darwin on iOS uses a variant of the ARM C++ ABI.
TheCXXABI.set(TargetCXXABI::WatchOS);
// BOOL should be a real boolean on the new ABI
UseSignedCharForObjCBool = false;
} else
TheCXXABI.set(TargetCXXABI::iOS);
}
void DarwinARMTargetInfo::getOSDefines(const LangOptions &Opts,
const llvm::Triple &Triple,
MacroBuilder &Builder) const {
getDarwinDefines(Builder, Opts, Triple, PlatformName, PlatformMinVersion);
}
RenderScript32TargetInfo::RenderScript32TargetInfo(const llvm::Triple &Triple,
const TargetOptions &Opts)
: ARMleTargetInfo(llvm::Triple("armv7", Triple.getVendorName(),
Triple.getOSName(),
Triple.getEnvironmentName()),
Opts) {
IsRenderScriptTarget = true;
LongWidth = LongAlign = 64;
}
void RenderScript32TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
Builder.defineMacro("__RENDERSCRIPT__");
ARMleTargetInfo::getTargetDefines(Opts, Builder);
}
Index: stable/12/contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp
===================================================================
--- stable/12/contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp (revision 356464)
+++ stable/12/contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp (revision 356465)
@@ -1,5070 +1,5075 @@
//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/NSAPI.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Path.h"
#include "llvm/Transforms/Utils/SanitizerStats.h"
#include <string>
using namespace clang;
using namespace CodeGen;
//===--------------------------------------------------------------------===//
// Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//
llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
unsigned addressSpace =
cast<llvm::PointerType>(value->getType())->getAddressSpace();
llvm::PointerType *destType = Int8PtrTy;
if (addressSpace)
destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);
if (value->getType() == destType) return value;
return Builder.CreateBitCast(value, destType);
}
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
Address CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty,
CharUnits Align,
const Twine &Name,
llvm::Value *ArraySize) {
auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
Alloca->setAlignment(Align.getQuantity());
return Address(Alloca, Align);
}
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block. The alloca is casted to default address space if necessary.
Address CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
const Twine &Name,
llvm::Value *ArraySize,
Address *AllocaAddr) {
auto Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
if (AllocaAddr)
*AllocaAddr = Alloca;
llvm::Value *V = Alloca.getPointer();
// Alloca always returns a pointer in alloca address space, which may
// be different from the type defined by the language. For example,
// in C++ the auto variables are in the default address space. Therefore
// cast alloca to the default address space when necessary.
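// For instance, on a target such as AMDGPU the alloca lives in the private
// address space while C++ locals use the default (generic) address space,
// so the cast below yields IR along these lines (an illustrative sketch,
// not output of this exact build):
//   %tmp = alloca i32, addrspace(5)
//   %tmp.cast = addrspacecast i32 addrspace(5)* %tmp to i32*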
if (getASTAllocaAddressSpace() != LangAS::Default) {
auto DestAddrSpace = getContext().getTargetAddressSpace(LangAS::Default);
llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
// When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
// otherwise alloca is inserted at the current insertion point of the
// builder.
if (!ArraySize)
Builder.SetInsertPoint(AllocaInsertPt);
V = getTargetHooks().performAddrSpaceCast(
*this, V, getASTAllocaAddressSpace(), LangAS::Default,
Ty->getPointerTo(DestAddrSpace), /*non-null*/ true);
}
return Address(V, Align);
}
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block if \p ArraySize is nullptr, otherwise inserts it at the current
/// insertion point of the builder.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
const Twine &Name,
llvm::Value *ArraySize) {
if (ArraySize)
return Builder.CreateAlloca(Ty, ArraySize, Name);
return new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
ArraySize, Name, AllocaInsertPt);
}
/// CreateDefaultAlignTempAlloca - This creates an alloca with the
/// default alignment of the corresponding LLVM type, which is *not*
/// guaranteed to be related in any way to the expected alignment of
/// an AST type that might have been lowered to Ty.
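/// For example, a variable declared with _Alignas(64) still converts to an
/// LLVM type whose ABI alignment is that of the underlying type, so this
/// helper may under-align it; use CreateMemTemp when the AST alignment is
/// required.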
Address CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
const Twine &Name) {
CharUnits Align =
CharUnits::fromQuantity(CGM.getDataLayout().getABITypeAlignment(Ty));
return CreateTempAlloca(Ty, Align, Name);
}
void CodeGenFunction::InitTempAlloca(Address Var, llvm::Value *Init) {
assert(isa<llvm::AllocaInst>(Var.getPointer()));
auto *Store = new llvm::StoreInst(Init, Var.getPointer());
Store->setAlignment(Var.getAlignment().getQuantity());
llvm::BasicBlock *Block = AllocaInsertPt->getParent();
Block->getInstList().insertAfter(AllocaInsertPt->getIterator(), Store);
}
Address CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) {
CharUnits Align = getContext().getTypeAlignInChars(Ty);
return CreateTempAlloca(ConvertType(Ty), Align, Name);
}
Address CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name,
Address *Alloca) {
// FIXME: Should we prefer the preferred type alignment here?
return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
}
Address CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
const Twine &Name, Address *Alloca) {
return CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name,
/*ArraySize=*/nullptr, Alloca);
}
Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty, CharUnits Align,
const Twine &Name) {
return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
}
Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
const Twine &Name) {
return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
Name);
}
/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
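/// For example, for a pointer 'p' this amounts to 'p != nullptr', and for a
/// float 'f' to 'f != 0.0' (illustrative cases; member pointers and complex
/// values take the dedicated paths below).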
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
PGO.setCurrentStmt(E);
if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
llvm::Value *MemPtr = EmitScalarExpr(E);
return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
}
QualType BoolTy = getContext().BoolTy;
SourceLocation Loc = E->getExprLoc();
if (!E->getType()->isAnyComplexType())
return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);
return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(), BoolTy,
Loc);
}
/// EmitIgnoredExpr - Emit code to compute the specified expression,
/// ignoring the result.
void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
if (E->isRValue())
return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);
// Just emit it as an l-value and drop the result.
EmitLValue(E);
}
/// EmitAnyExpr - Emit code to compute the specified expression which
/// can have any type. The result is returned as an RValue struct.
/// If this is an aggregate expression, AggSlot indicates where the
/// result should be returned.
RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
AggValueSlot aggSlot,
bool ignoreResult) {
switch (getEvaluationKind(E->getType())) {
case TEK_Scalar:
return RValue::get(EmitScalarExpr(E, ignoreResult));
case TEK_Complex:
return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
case TEK_Aggregate:
if (!ignoreResult && aggSlot.isIgnored())
aggSlot = CreateAggTemp(E->getType(), "agg-temp");
EmitAggExpr(E, aggSlot);
return aggSlot.asRValue();
}
llvm_unreachable("bad evaluation kind");
}
/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
/// always be accessible even if no aggregate location is provided.
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
AggValueSlot AggSlot = AggValueSlot::ignored();
if (hasAggregateEvaluationKind(E->getType()))
AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
return EmitAnyExpr(E, AggSlot);
}
/// EmitAnyExprToMem - Evaluate an expression into a given memory
/// location.
void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
Address Location,
Qualifiers Quals,
bool IsInit) {
// FIXME: This function should take an LValue as an argument.
switch (getEvaluationKind(E->getType())) {
case TEK_Complex:
EmitComplexExprIntoLValue(E, MakeAddrLValue(Location, E->getType()),
/*isInit*/ false);
return;
case TEK_Aggregate: {
EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
AggValueSlot::IsDestructed_t(IsInit),
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsAliased_t(!IsInit),
AggValueSlot::MayOverlap));
return;
}
case TEK_Scalar: {
RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
LValue LV = MakeAddrLValue(Location, E->getType());
EmitStoreThroughLValue(RV, LV);
return;
}
}
llvm_unreachable("bad evaluation kind");
}
static void
pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
const Expr *E, Address ReferenceTemporary) {
// Objective-C++ ARC:
// If we are binding a reference to a temporary that has ownership, we
// need to perform retain/release operations on the temporary.
//
// FIXME: This should be looking at E, not M.
if (auto Lifetime = M->getType().getObjCLifetime()) {
switch (Lifetime) {
case Qualifiers::OCL_None:
case Qualifiers::OCL_ExplicitNone:
// Carry on to normal cleanup handling.
break;
case Qualifiers::OCL_Autoreleasing:
// Nothing to do; cleaned up by an autorelease pool.
return;
case Qualifiers::OCL_Strong:
case Qualifiers::OCL_Weak:
switch (StorageDuration Duration = M->getStorageDuration()) {
case SD_Static:
// Note: we intentionally do not register a cleanup to release
// the object on program termination.
return;
case SD_Thread:
// FIXME: We should probably register a cleanup in this case.
return;
case SD_Automatic:
case SD_FullExpression:
CodeGenFunction::Destroyer *Destroy;
CleanupKind CleanupKind;
if (Lifetime == Qualifiers::OCL_Strong) {
const ValueDecl *VD = M->getExtendingDecl();
bool Precise =
VD && isa<VarDecl>(VD) && VD->hasAttr<ObjCPreciseLifetimeAttr>();
CleanupKind = CGF.getARCCleanupKind();
Destroy = Precise ? &CodeGenFunction::destroyARCStrongPrecise
: &CodeGenFunction::destroyARCStrongImprecise;
} else {
// __weak objects always get EH cleanups; otherwise, exceptions
// could cause really nasty crashes instead of mere leaks.
CleanupKind = NormalAndEHCleanup;
Destroy = &CodeGenFunction::destroyARCWeak;
}
if (Duration == SD_FullExpression)
CGF.pushDestroy(CleanupKind, ReferenceTemporary,
M->getType(), *Destroy,
CleanupKind & EHCleanup);
else
CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
M->getType(),
*Destroy, CleanupKind & EHCleanup);
return;
case SD_Dynamic:
llvm_unreachable("temporary cannot have dynamic storage duration");
}
llvm_unreachable("unknown storage duration");
}
}
CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
if (const RecordType *RT =
E->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
// Get the destructor for the reference temporary.
auto *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
if (!ClassDecl->hasTrivialDestructor())
ReferenceTemporaryDtor = ClassDecl->getDestructor();
}
if (!ReferenceTemporaryDtor)
return;
// Call the destructor for the temporary.
switch (M->getStorageDuration()) {
case SD_Static:
case SD_Thread: {
llvm::FunctionCallee CleanupFn;
llvm::Constant *CleanupArg;
if (E->getType()->isArrayType()) {
CleanupFn = CodeGenFunction(CGF.CGM).generateDestroyHelper(
ReferenceTemporary, E->getType(),
CodeGenFunction::destroyCXXObject, CGF.getLangOpts().Exceptions,
dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
} else {
CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(
GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));
CleanupArg = cast<llvm::Constant>(ReferenceTemporary.getPointer());
}
CGF.CGM.getCXXABI().registerGlobalDtor(
CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
break;
}
case SD_FullExpression:
CGF.pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(),
CodeGenFunction::destroyCXXObject,
CGF.getLangOpts().Exceptions);
break;
case SD_Automatic:
CGF.pushLifetimeExtendedDestroy(NormalAndEHCleanup,
ReferenceTemporary, E->getType(),
CodeGenFunction::destroyCXXObject,
CGF.getLangOpts().Exceptions);
break;
case SD_Dynamic:
llvm_unreachable("temporary cannot have dynamic storage duration");
}
}
static Address createReferenceTemporary(CodeGenFunction &CGF,
const MaterializeTemporaryExpr *M,
const Expr *Inner,
Address *Alloca = nullptr) {
auto &TCG = CGF.getTargetHooks();
switch (M->getStorageDuration()) {
case SD_FullExpression:
case SD_Automatic: {
// If we have a constant temporary array or record, try to promote it into a
// constant global under the same rules a normal constant would've been
// promoted. This is easier on the optimizer and generally emits fewer
// instructions.
QualType Ty = Inner->getType();
if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
(Ty->isArrayType() || Ty->isRecordType()) &&
CGF.CGM.isTypeConstant(Ty, true))
if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {
if (auto AddrSpace = CGF.getTarget().getConstantAddressSpace()) {
auto AS = AddrSpace.getValue();
auto *GV = new llvm::GlobalVariable(
CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,
llvm::GlobalValue::NotThreadLocal,
CGF.getContext().getTargetAddressSpace(AS));
CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
GV->setAlignment(alignment.getQuantity());
llvm::Constant *C = GV;
if (AS != LangAS::Default)
C = TCG.performAddrSpaceCast(
CGF.CGM, GV, AS, LangAS::Default,
GV->getValueType()->getPointerTo(
CGF.getContext().getTargetAddressSpace(LangAS::Default)));
// FIXME: Should we put the new global into a COMDAT?
return Address(C, alignment);
}
}
return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
}
case SD_Thread:
case SD_Static:
return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);
case SD_Dynamic:
llvm_unreachable("temporary can't have dynamic storage duration");
}
llvm_unreachable("unknown storage duration");
}
LValue CodeGenFunction::
EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
const Expr *E = M->GetTemporaryExpr();
assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||
!cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&
"Reference should never be pseudo-strong!");
// FIXME: ideally this would use EmitAnyExprToMem; however, we cannot do so
// as that will cause the lifetime adjustment to be lost for ARC.
auto ownership = M->getType().getObjCLifetime();
if (ownership != Qualifiers::OCL_None &&
ownership != Qualifiers::OCL_ExplicitNone) {
Address Object = createReferenceTemporary(*this, M, E);
if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
Object = Address(llvm::ConstantExpr::getBitCast(Var,
ConvertTypeForMem(E->getType())
->getPointerTo(Object.getAddressSpace())),
Object.getAlignment());
// createReferenceTemporary will promote the temporary to a global with a
// constant initializer if it can. It can only do this to a value of
// ARC-manageable type if the value is global and therefore "immune" to
// ref-counting operations. Therefore we have no need to emit either a
// dynamic initialization or a cleanup and we can just return the address
// of the temporary.
if (Var->hasInitializer())
return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
Var->setInitializer(CGM.EmitNullConstant(E->getType()));
}
LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
AlignmentSource::Decl);
switch (getEvaluationKind(E->getType())) {
default: llvm_unreachable("expected scalar or aggregate expression");
case TEK_Scalar:
EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
break;
case TEK_Aggregate: {
EmitAggExpr(E, AggValueSlot::forAddr(Object,
E->getType().getQualifiers(),
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased,
AggValueSlot::DoesNotOverlap));
break;
}
}
pushTemporaryCleanup(*this, M, E, Object);
return RefTempDst;
}
SmallVector<const Expr *, 2> CommaLHSs;
SmallVector<SubobjectAdjustment, 2> Adjustments;
E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
for (const auto &Ignored : CommaLHSs)
EmitIgnoredExpr(Ignored);
if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
if (opaque->getType()->isRecordType()) {
assert(Adjustments.empty());
return EmitOpaqueValueLValue(opaque);
}
}
// Create and initialize the reference temporary.
Address Alloca = Address::invalid();
Address Object = createReferenceTemporary(*this, M, E, &Alloca);
if (auto *Var = dyn_cast<llvm::GlobalVariable>(
Object.getPointer()->stripPointerCasts())) {
Object = Address(llvm::ConstantExpr::getBitCast(
cast<llvm::Constant>(Object.getPointer()),
ConvertTypeForMem(E->getType())->getPointerTo()),
Object.getAlignment());
// If the temporary is a global and has a constant initializer or is a
// constant temporary that we promoted to a global, we may have already
// initialized it.
if (!Var->hasInitializer()) {
Var->setInitializer(CGM.EmitNullConstant(E->getType()));
EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
}
} else {
switch (M->getStorageDuration()) {
case SD_Automatic:
if (auto *Size = EmitLifetimeStart(
CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
Alloca.getPointer())) {
pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker,
Alloca, Size);
}
break;
case SD_FullExpression: {
if (!ShouldEmitLifetimeMarkers)
break;
// Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
// marker. Instead, start the lifetime of a conditional temporary earlier
// so that it's unconditional. Don't do this in ASan's use-after-scope
// mode so that it gets the more precise lifetime marks. If the type has
// a non-trivial destructor, we'll have a cleanup block for it anyway,
// so this typically doesn't help; skip it in that case.
ConditionalEvaluation *OldConditional = nullptr;
CGBuilderTy::InsertPoint OldIP;
if (isInConditionalBranch() && !E->getType().isDestructedType() &&
!CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) {
OldConditional = OutermostConditional;
OutermostConditional = nullptr;
OldIP = Builder.saveIP();
llvm::BasicBlock *Block = OldConditional->getStartingBlock();
Builder.restoreIP(CGBuilderTy::InsertPoint(
Block, llvm::BasicBlock::iterator(Block->back())));
}
if (auto *Size = EmitLifetimeStart(
CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
Alloca.getPointer())) {
pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca,
Size);
}
if (OldConditional) {
OutermostConditional = OldConditional;
Builder.restoreIP(OldIP);
}
break;
}
default:
break;
}
EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
}
pushTemporaryCleanup(*this, M, E, Object);
// Perform derived-to-base casts and/or field accesses, to get from the
// temporary object we created (and, potentially, for which we extended
// the lifetime) to the subobject we're binding the reference to.
for (unsigned I = Adjustments.size(); I != 0; --I) {
SubobjectAdjustment &Adjustment = Adjustments[I-1];
switch (Adjustment.Kind) {
case SubobjectAdjustment::DerivedToBaseAdjustment:
Object =
GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
Adjustment.DerivedToBase.BasePath->path_begin(),
Adjustment.DerivedToBase.BasePath->path_end(),
/*NullCheckValue=*/ false, E->getExprLoc());
break;
case SubobjectAdjustment::FieldAdjustment: {
LValue LV = MakeAddrLValue(Object, E->getType(), AlignmentSource::Decl);
LV = EmitLValueForField(LV, Adjustment.Field);
assert(LV.isSimple() &&
"materialized temporary field is not a simple lvalue");
Object = LV.getAddress();
break;
}
case SubobjectAdjustment::MemberPointerAdjustment: {
llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
Object = EmitCXXMemberDataPointerAddress(E, Object, Ptr,
Adjustment.Ptr.MPT);
break;
}
}
}
return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
}
RValue
CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E) {
// Emit the expression as an lvalue.
LValue LV = EmitLValue(E);
assert(LV.isSimple());
llvm::Value *Value = LV.getPointer();
if (sanitizePerformTypeCheck() && !E->getType()->isFunctionType()) {
// C++11 [dcl.ref]p5 (as amended by core issue 453):
// If a glvalue to which a reference is directly bound designates neither
// an existing object or function of an appropriate type nor a region of
// storage of suitable size and alignment to contain an object of the
// reference's type, the behavior is undefined.
QualType Ty = E->getType();
EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty);
}
return RValue::get(Value);
}
/// getAccessedFieldNo - Given an encoded value and a result number, return the
/// input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
const llvm::Constant *Elts) {
return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
->getZExtValue();
}
/// Emit the hash_16_bytes function from include/llvm/ADT/Hashing.h.
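/// A C-style sketch of the computation emitted below:
///   a = (low ^ high) * kMul;  a ^= (a >> 47);
///   b = (high ^ a) * kMul;    b ^= (b >> 47);
///   return b * kMul;          // kMul == 0x9ddfea08eb382d69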
static llvm::Value *emitHash16Bytes(CGBuilderTy &Builder, llvm::Value *Low,
llvm::Value *High) {
llvm::Value *KMul = Builder.getInt64(0x9ddfea08eb382d69ULL);
llvm::Value *K47 = Builder.getInt64(47);
llvm::Value *A0 = Builder.CreateMul(Builder.CreateXor(Low, High), KMul);
llvm::Value *A1 = Builder.CreateXor(Builder.CreateLShr(A0, K47), A0);
llvm::Value *B0 = Builder.CreateMul(Builder.CreateXor(High, A1), KMul);
llvm::Value *B1 = Builder.CreateXor(Builder.CreateLShr(B0, K47), B0);
return Builder.CreateMul(B1, KMul);
}
bool CodeGenFunction::isNullPointerAllowed(TypeCheckKind TCK) {
return TCK == TCK_DowncastPointer || TCK == TCK_Upcast ||
TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation;
}
bool CodeGenFunction::isVptrCheckRequired(TypeCheckKind TCK, QualType Ty) {
CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
return (RD && RD->hasDefinition() && RD->isDynamicClass()) &&
(TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
TCK == TCK_DowncastPointer || TCK == TCK_DowncastReference ||
TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation);
}
bool CodeGenFunction::sanitizePerformTypeCheck() const {
return SanOpts.has(SanitizerKind::Null) |
SanOpts.has(SanitizerKind::Alignment) |
SanOpts.has(SanitizerKind::ObjectSize) |
SanOpts.has(SanitizerKind::Vptr);
}
void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
llvm::Value *Ptr, QualType Ty,
CharUnits Alignment,
SanitizerSet SkippedChecks,
llvm::Value *ArraySize) {
if (!sanitizePerformTypeCheck())
return;
// Don't check pointers outside the default address space. The null check
// isn't correct, the object-size check isn't supported by LLVM, and we can't
// communicate the addresses to the runtime handler for the vptr check.
if (Ptr->getType()->getPointerAddressSpace())
return;
// Don't check pointers to volatile data. The behavior here is implementation-
// defined.
if (Ty.isVolatileQualified())
return;
SanitizerScope SanScope(this);
SmallVector<std::pair<llvm::Value *, SanitizerMask>, 3> Checks;
llvm::BasicBlock *Done = nullptr;
// Quickly determine whether we have a pointer to an alloca. It's possible
// to skip null checks, and some alignment checks, for these pointers. This
// can reduce compile-time significantly.
auto PtrToAlloca =
dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCastsNoFollowAliases());
llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext());
llvm::Value *IsNonNull = nullptr;
bool IsGuaranteedNonNull =
SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca;
bool AllowNullPointers = isNullPointerAllowed(TCK);
if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
!IsGuaranteedNonNull) {
// The glvalue must not be an empty glvalue.
IsNonNull = Builder.CreateIsNotNull(Ptr);
// The IR builder can constant-fold the null check if the pointer points to
// a constant.
IsGuaranteedNonNull = IsNonNull == True;
// Skip the null check if the pointer is known to be non-null.
if (!IsGuaranteedNonNull) {
if (AllowNullPointers) {
// When performing pointer casts, it's OK if the value is null.
// Skip the remaining checks in that case.
Done = createBasicBlock("null");
llvm::BasicBlock *Rest = createBasicBlock("not.null");
Builder.CreateCondBr(IsNonNull, Rest, Done);
EmitBlock(Rest);
} else {
Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::Null));
}
}
}
if (SanOpts.has(SanitizerKind::ObjectSize) &&
!SkippedChecks.has(SanitizerKind::ObjectSize) &&
!Ty->isIncompleteType()) {
uint64_t TySize = getContext().getTypeSizeInChars(Ty).getQuantity();
llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize);
if (ArraySize)
Size = Builder.CreateMul(Size, ArraySize);
// Degenerate case: new X[0] does not need an objectsize check.
llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Size);
if (!ConstantSize || !ConstantSize->isNullValue()) {
// The glvalue must refer to a large enough storage region.
// FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
// to check this.
// FIXME: Get object address space
llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy };
llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
llvm::Value *Min = Builder.getFalse();
llvm::Value *NullIsUnknown = Builder.getFalse();
llvm::Value *Dynamic = Builder.getFalse();
llvm::Value *CastAddr = Builder.CreateBitCast(Ptr, Int8PtrTy);
llvm::Value *LargeEnough = Builder.CreateICmpUGE(
Builder.CreateCall(F, {CastAddr, Min, NullIsUnknown, Dynamic}), Size);
Checks.push_back(std::make_pair(LargeEnough, SanitizerKind::ObjectSize));
}
}
uint64_t AlignVal = 0;
llvm::Value *PtrAsInt = nullptr;
if (SanOpts.has(SanitizerKind::Alignment) &&
!SkippedChecks.has(SanitizerKind::Alignment)) {
AlignVal = Alignment.getQuantity();
if (!Ty->isIncompleteType() && !AlignVal)
AlignVal = getContext().getTypeAlignInChars(Ty).getQuantity();
// The glvalue must be suitably aligned.
if (AlignVal > 1 &&
(!PtrToAlloca || PtrToAlloca->getAlignment() < AlignVal)) {
PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy);
llvm::Value *Align = Builder.CreateAnd(
PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal - 1));
llvm::Value *Aligned =
Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
if (Aligned != True)
Checks.push_back(std::make_pair(Aligned, SanitizerKind::Alignment));
}
}
if (Checks.size() > 0) {
// Make sure we're not losing information. Alignment needs to be a power of
// 2.
assert(!AlignVal || (uint64_t)1 << llvm::Log2_64(AlignVal) == AlignVal);
llvm::Constant *StaticData[] = {
EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(Ty),
llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2_64(AlignVal) : 1),
llvm::ConstantInt::get(Int8Ty, TCK)};
EmitCheck(Checks, SanitizerHandler::TypeMismatch, StaticData,
PtrAsInt ? PtrAsInt : Ptr);
}
// If possible, check that the vptr indicates that there is a subobject of
// type Ty at offset zero within this object.
//
// C++11 [basic.life]p5,6:
// [For storage which does not refer to an object within its lifetime]
// The program has undefined behavior if:
// -- the [pointer or glvalue] is used to access a non-static data member
// or call a non-static member function
if (SanOpts.has(SanitizerKind::Vptr) &&
!SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) {
// Ensure that the pointer is non-null before loading it. If there is no
// compile-time guarantee, reuse the run-time null check or emit a new one.
if (!IsGuaranteedNonNull) {
if (!IsNonNull)
IsNonNull = Builder.CreateIsNotNull(Ptr);
if (!Done)
Done = createBasicBlock("vptr.null");
llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null");
Builder.CreateCondBr(IsNonNull, VptrNotNull, Done);
EmitBlock(VptrNotNull);
}
// Compute a hash of the mangled name of the type.
//
// FIXME: This is not guaranteed to be deterministic! Move to a
// fingerprinting mechanism once LLVM provides one. For the time
// being the implementation happens to be deterministic.
SmallString<64> MangledName;
llvm::raw_svector_ostream Out(MangledName);
CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
Out);
// Blacklist based on the mangled type.
if (!CGM.getContext().getSanitizerBlacklist().isBlacklistedType(
SanitizerKind::Vptr, Out.str())) {
llvm::hash_code TypeHash = hash_value(Out.str());
// Load the vptr, and compute hash_16_bytes(TypeHash, vptr).
llvm::Value *Low = llvm::ConstantInt::get(Int64Ty, TypeHash);
llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
Address VPtrAddr(Builder.CreateBitCast(Ptr, VPtrTy), getPointerAlign());
llvm::Value *VPtrVal = Builder.CreateLoad(VPtrAddr);
llvm::Value *High = Builder.CreateZExt(VPtrVal, Int64Ty);
llvm::Value *Hash = emitHash16Bytes(Builder, Low, High);
Hash = Builder.CreateTrunc(Hash, IntPtrTy);
// Look the hash up in our cache.
const int CacheSize = 128;
llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
"__ubsan_vptr_type_cache");
llvm::Value *Slot = Builder.CreateAnd(Hash,
llvm::ConstantInt::get(IntPtrTy,
CacheSize-1));
llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
llvm::Value *CacheVal =
Builder.CreateAlignedLoad(Builder.CreateInBoundsGEP(Cache, Indices),
getPointerAlign());
// If the hash isn't in the cache, call a runtime handler to perform the
// hard work of checking whether the vptr is for an object of the right
// type. This will either fill in the cache and return, or produce a
// diagnostic.
llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
llvm::Constant *StaticData[] = {
EmitCheckSourceLocation(Loc),
EmitCheckTypeDescriptor(Ty),
CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
llvm::ConstantInt::get(Int8Ty, TCK)
};
llvm::Value *DynamicData[] = { Ptr, Hash };
EmitCheck(std::make_pair(EqualHash, SanitizerKind::Vptr),
SanitizerHandler::DynamicTypeCacheMiss, StaticData,
DynamicData);
}
}
if (Done) {
Builder.CreateBr(Done);
EmitBlock(Done);
}
}
/// Determine whether this expression refers to a flexible array member in a
/// struct. We disable array bounds checks for such members.
static bool isFlexibleArrayMemberExpr(const Expr *E) {
// For compatibility with existing code, we treat arrays of length 0 or
// 1 as flexible array members.
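// For example, both of these are treated as flexible array members here:
//   struct S1 { int n; char data[]; };   // C99 flexible array member
//   struct S2 { int n; char data[1]; };  // pre-C99 idiom, length 1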
const ArrayType *AT = E->getType()->castAsArrayTypeUnsafe();
if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
if (CAT->getSize().ugt(1))
return false;
} else if (!isa<IncompleteArrayType>(AT))
return false;
E = E->IgnoreParens();
// A flexible array member must be the last member in the class.
if (const auto *ME = dyn_cast<MemberExpr>(E)) {
// FIXME: If the base type of the member expr is not FD->getParent(),
// this should not be treated as a flexible array member access.
if (const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
RecordDecl::field_iterator FI(
DeclContext::decl_iterator(const_cast<FieldDecl *>(FD)));
return ++FI == FD->getParent()->field_end();
}
} else if (const auto *IRE = dyn_cast<ObjCIvarRefExpr>(E)) {
return IRE->getDecl()->getNextIvar() == nullptr;
}
return false;
}
llvm::Value *CodeGenFunction::LoadPassedObjectSize(const Expr *E,
QualType EltTy) {
ASTContext &C = getContext();
uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity();
if (!EltSize)
return nullptr;
auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
if (!ArrayDeclRef)
return nullptr;
auto *ParamDecl = dyn_cast<ParmVarDecl>(ArrayDeclRef->getDecl());
if (!ParamDecl)
return nullptr;
auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>();
if (!POSAttr)
return nullptr;
// Don't load the size if it's a lower bound.
int POSType = POSAttr->getType();
if (POSType != 0 && POSType != 1)
return nullptr;
// Find the implicit size parameter.
auto PassedSizeIt = SizeArguments.find(ParamDecl);
if (PassedSizeIt == SizeArguments.end())
return nullptr;
const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second;
assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable");
Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second;
llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false,
C.getSizeType(), E->getExprLoc());
llvm::Value *SizeOfElement =
llvm::ConstantInt::get(SizeInBytes->getType(), EltSize);
return Builder.CreateUDiv(SizeInBytes, SizeOfElement);
}
/// If Base is known to point to the start of an array, return the length of
/// that array. Return nullptr if the length cannot be determined.
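/// For example, for 'int a[10]' after array-to-pointer decay the bound is
/// the constant 10; for a VLA parameter 'int a[n]' it is the emitted VLA
/// size; for a plain 'int *p' a bound is only known if pass_object_size
/// supplies one.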
static llvm::Value *getArrayIndexingBound(
CodeGenFunction &CGF, const Expr *Base, QualType &IndexedType) {
// For the vector indexing extension, the bound is the number of elements.
if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
IndexedType = Base->getType();
return CGF.Builder.getInt32(VT->getNumElements());
}
Base = Base->IgnoreParens();
if (const auto *CE = dyn_cast<CastExpr>(Base)) {
if (CE->getCastKind() == CK_ArrayToPointerDecay &&
!isFlexibleArrayMemberExpr(CE->getSubExpr())) {
IndexedType = CE->getSubExpr()->getType();
const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
return CGF.Builder.getInt(CAT->getSize());
else if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
return CGF.getVLASize(VAT).NumElts;
// Ignore pass_object_size here. It's not applicable on decayed pointers.
}
}
QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) {
IndexedType = Base->getType();
return POS;
}
return nullptr;
}
void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
llvm::Value *Index, QualType IndexType,
bool Accessed) {
assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
"should not be called unless adding bounds checks");
SanitizerScope SanScope(this);
QualType IndexedType;
llvm::Value *Bound = getArrayIndexingBound(*this, Base, IndexedType);
if (!Bound)
return;
bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false);
llvm::Constant *StaticData[] = {
EmitCheckSourceLocation(E->getExprLoc()),
EmitCheckTypeDescriptor(IndexedType),
EmitCheckTypeDescriptor(IndexType)
};
llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
: Builder.CreateICmpULE(IndexVal, BoundVal);
EmitCheck(std::make_pair(Check, SanitizerKind::ArrayBounds),
SanitizerHandler::OutOfBounds, StaticData, Index);
}
CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
bool isInc, bool isPre) {
ComplexPairTy InVal = EmitLoadOfComplex(LV, E->getExprLoc());
llvm::Value *NextVal;
if (isa<llvm::IntegerType>(InVal.first->getType())) {
uint64_t AmountVal = isInc ? 1 : -1;
NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
// Add the inc/dec to the real part.
NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
} else {
QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
if (!isInc)
FVal.changeSign();
NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);
// Add the inc/dec to the real part.
NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
}
ComplexPairTy IncVal(NextVal, InVal.second);
// Store the updated result through the lvalue.
EmitStoreOfComplex(IncVal, LV, /*init*/ false);
// If this is a postinc, return the value read from memory, otherwise use the
// updated value.
return isPre ? IncVal : InVal;
}
void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr *E,
CodeGenFunction *CGF) {
// Bind VLAs in the cast type.
if (CGF && E->getType()->isVariablyModifiedType())
CGF->EmitVariablyModifiedType(E->getType());
if (CGDebugInfo *DI = getModuleDebugInfo())
DI->EmitExplicitCastType(E->getType());
}
//===----------------------------------------------------------------------===//
// LValue Expression Emission
//===----------------------------------------------------------------------===//
/// EmitPointerWithAlignment - Given an expression of pointer type, try to
/// derive a more accurate bound on the alignment of the pointer.
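/// For example, for '(char *)&s.f' the alignment of the field l-value 's.f'
/// is used rather than the 1-byte natural alignment of 'char' (an
/// illustrative case; see the cast and unary-& handling below).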
Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
LValueBaseInfo *BaseInfo,
TBAAAccessInfo *TBAAInfo) {
// We allow this with ObjC object pointers because of fragile ABIs.
assert(E->getType()->isPointerType() ||
E->getType()->isObjCObjectPointerType());
E = E->IgnoreParens();
// Casts:
if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
CGM.EmitExplicitCastExprType(ECE, this);
switch (CE->getCastKind()) {
// Non-converting casts (but not C's implicit conversion from void*).
case CK_BitCast:
case CK_NoOp:
case CK_AddressSpaceConversion:
if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {
if (PtrTy->getPointeeType()->isVoidType())
break;
LValueBaseInfo InnerBaseInfo;
TBAAAccessInfo InnerTBAAInfo;
Address Addr = EmitPointerWithAlignment(CE->getSubExpr(),
&InnerBaseInfo,
&InnerTBAAInfo);
if (BaseInfo) *BaseInfo = InnerBaseInfo;
if (TBAAInfo) *TBAAInfo = InnerTBAAInfo;
if (isa<ExplicitCastExpr>(CE)) {
LValueBaseInfo TargetTypeBaseInfo;
TBAAAccessInfo TargetTypeTBAAInfo;
CharUnits Align = getNaturalPointeeTypeAlignment(E->getType(),
&TargetTypeBaseInfo,
&TargetTypeTBAAInfo);
if (TBAAInfo)
*TBAAInfo = CGM.mergeTBAAInfoForCast(*TBAAInfo,
TargetTypeTBAAInfo);
// If the source l-value is opaque, honor the alignment of the
// casted-to type.
if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
if (BaseInfo)
BaseInfo->mergeForCast(TargetTypeBaseInfo);
Addr = Address(Addr.getPointer(), Align);
}
}
if (SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
CE->getCastKind() == CK_BitCast) {
if (auto PT = E->getType()->getAs<PointerType>())
EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr.getPointer(),
/*MayBeNull=*/true,
CodeGenFunction::CFITCK_UnrelatedCast,
CE->getBeginLoc());
}
return CE->getCastKind() != CK_AddressSpaceConversion
? Builder.CreateBitCast(Addr, ConvertType(E->getType()))
: Builder.CreateAddrSpaceCast(Addr,
ConvertType(E->getType()));
}
break;
// Array-to-pointer decay.
case CK_ArrayToPointerDecay:
return EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);
// Derived-to-base conversions.
case CK_UncheckedDerivedToBase:
case CK_DerivedToBase: {
// TODO: Support accesses to members of base classes in TBAA. For now, we
// conservatively pretend that the complete object is of the base class
// type.
if (TBAAInfo)
*TBAAInfo = CGM.getTBAAAccessInfo(E->getType());
Address Addr = EmitPointerWithAlignment(CE->getSubExpr(), BaseInfo);
auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
return GetAddressOfBaseClass(Addr, Derived,
CE->path_begin(), CE->path_end(),
ShouldNullCheckClassCastValue(CE),
CE->getExprLoc());
}
// TODO: Is there any reason to treat base-to-derived conversions
// specially?
default:
break;
}
}
// Unary &.
if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
if (UO->getOpcode() == UO_AddrOf) {
LValue LV = EmitLValue(UO->getSubExpr());
if (BaseInfo) *BaseInfo = LV.getBaseInfo();
if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
return LV.getAddress();
}
}
// TODO: conditional operators, comma.
// Otherwise, use the alignment of the type.
CharUnits Align = getNaturalPointeeTypeAlignment(E->getType(), BaseInfo,
TBAAInfo);
return Address(EmitScalarExpr(E), Align);
}
RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
if (Ty->isVoidType())
return RValue::get(nullptr);
switch (getEvaluationKind(Ty)) {
case TEK_Complex: {
llvm::Type *EltTy =
ConvertType(Ty->castAs<ComplexType>()->getElementType());
llvm::Value *U = llvm::UndefValue::get(EltTy);
return RValue::getComplex(std::make_pair(U, U));
}
// If this is a use of an undefined aggregate type, the aggregate must have an
// identifiable address. Just because the contents of the value are undefined
// doesn't mean that the address can't be taken and compared.
case TEK_Aggregate: {
Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
return RValue::getAggregate(DestPtr);
}
case TEK_Scalar:
return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
}
llvm_unreachable("bad evaluation kind");
}
RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
const char *Name) {
ErrorUnsupported(E, Name);
return GetUndefRValue(E->getType());
}
LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
const char *Name) {
ErrorUnsupported(E, Name);
llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
return MakeAddrLValue(Address(llvm::UndefValue::get(Ty), CharUnits::One()),
E->getType());
}
bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) {
const Expr *Base = Obj;
while (!isa<CXXThisExpr>(Base)) {
// The result of a dynamic_cast can be null.
if (isa<CXXDynamicCastExpr>(Base))
return false;
if (const auto *CE = dyn_cast<CastExpr>(Base)) {
Base = CE->getSubExpr();
} else if (const auto *PE = dyn_cast<ParenExpr>(Base)) {
Base = PE->getSubExpr();
} else if (const auto *UO = dyn_cast<UnaryOperator>(Base)) {
if (UO->getOpcode() == UO_Extension)
Base = UO->getSubExpr();
else
return false;
} else {
return false;
}
}
return true;
}
LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
LValue LV;
if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
else
LV = EmitLValue(E);
if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple()) {
SanitizerSet SkippedChecks;
if (const auto *ME = dyn_cast<MemberExpr>(E)) {
bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase());
if (IsBaseCXXThis)
SkippedChecks.set(SanitizerKind::Alignment, true);
if (IsBaseCXXThis || isa<DeclRefExpr>(ME->getBase()))
SkippedChecks.set(SanitizerKind::Null, true);
}
EmitTypeCheck(TCK, E->getExprLoc(), LV.getPointer(),
E->getType(), LV.getAlignment(), SkippedChecks);
}
return LV;
}
/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
/// This can return one of two things: a simple address or a bitfield reference.
/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
/// an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of the
/// LLVM value is known: For example, it may not be a pointer to an integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed size,
/// this method guarantees that the returned pointer type will point to an LLVM
/// type of the same size of the lvalue's type. If the lvalue has a variable
/// length type, this is not possible.
///
LValue CodeGenFunction::EmitLValue(const Expr *E) {
ApplyDebugLocation DL(*this, E);
switch (E->getStmtClass()) {
default: return EmitUnsupportedLValue(E, "l-value expression");
case Expr::ObjCPropertyRefExprClass:
llvm_unreachable("cannot emit a property reference directly");
case Expr::ObjCSelectorExprClass:
return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
case Expr::ObjCIsaExprClass:
return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
case Expr::BinaryOperatorClass:
return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
case Expr::CompoundAssignOperatorClass: {
QualType Ty = E->getType();
if (const AtomicType *AT = Ty->getAs<AtomicType>())
Ty = AT->getValueType();
if (!Ty->isAnyComplexType())
return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
}
case Expr::CallExprClass:
case Expr::CXXMemberCallExprClass:
case Expr::CXXOperatorCallExprClass:
case Expr::UserDefinedLiteralClass:
return EmitCallExprLValue(cast<CallExpr>(E));
case Expr::VAArgExprClass:
return EmitVAArgExprLValue(cast<VAArgExpr>(E));
case Expr::DeclRefExprClass:
return EmitDeclRefLValue(cast<DeclRefExpr>(E));
case Expr::ConstantExprClass:
return EmitLValue(cast<ConstantExpr>(E)->getSubExpr());
case Expr::ParenExprClass:
return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
case Expr::GenericSelectionExprClass:
return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
case Expr::PredefinedExprClass:
return EmitPredefinedLValue(cast<PredefinedExpr>(E));
case Expr::StringLiteralClass:
return EmitStringLiteralLValue(cast<StringLiteral>(E));
case Expr::ObjCEncodeExprClass:
return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
case Expr::PseudoObjectExprClass:
return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
case Expr::InitListExprClass:
return EmitInitListLValue(cast<InitListExpr>(E));
case Expr::CXXTemporaryObjectExprClass:
case Expr::CXXConstructExprClass:
return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
case Expr::CXXBindTemporaryExprClass:
return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
case Expr::CXXUuidofExprClass:
return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
case Expr::LambdaExprClass:
return EmitAggExprToLValue(E);
case Expr::ExprWithCleanupsClass: {
const auto *cleanups = cast<ExprWithCleanups>(E);
enterFullExpression(cleanups);
RunCleanupsScope Scope(*this);
LValue LV = EmitLValue(cleanups->getSubExpr());
if (LV.isSimple()) {
// Defend against branches out of gnu statement expressions surrounded by
// cleanups.
llvm::Value *V = LV.getPointer();
Scope.ForceCleanup({&V});
return LValue::MakeAddr(Address(V, LV.getAlignment()), LV.getType(),
getContext(), LV.getBaseInfo(), LV.getTBAAInfo());
}
// FIXME: Is it possible to create an ExprWithCleanups that produces a
// bitfield lvalue or some other non-simple lvalue?
return LV;
}
case Expr::CXXDefaultArgExprClass: {
auto *DAE = cast<CXXDefaultArgExpr>(E);
CXXDefaultArgExprScope Scope(*this, DAE);
return EmitLValue(DAE->getExpr());
}
case Expr::CXXDefaultInitExprClass: {
auto *DIE = cast<CXXDefaultInitExpr>(E);
CXXDefaultInitExprScope Scope(*this, DIE);
return EmitLValue(DIE->getExpr());
}
case Expr::CXXTypeidExprClass:
return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));
case Expr::ObjCMessageExprClass:
return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
case Expr::ObjCIvarRefExprClass:
return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
case Expr::StmtExprClass:
return EmitStmtExprLValue(cast<StmtExpr>(E));
case Expr::UnaryOperatorClass:
return EmitUnaryOpLValue(cast<UnaryOperator>(E));
case Expr::ArraySubscriptExprClass:
return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
case Expr::OMPArraySectionExprClass:
return EmitOMPArraySectionExpr(cast<OMPArraySectionExpr>(E));
case Expr::ExtVectorElementExprClass:
return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
case Expr::MemberExprClass:
return EmitMemberExpr(cast<MemberExpr>(E));
case Expr::CompoundLiteralExprClass:
return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
case Expr::ConditionalOperatorClass:
return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
case Expr::BinaryConditionalOperatorClass:
return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
case Expr::ChooseExprClass:
return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr());
case Expr::OpaqueValueExprClass:
return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
case Expr::SubstNonTypeTemplateParmExprClass:
return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
case Expr::ImplicitCastExprClass:
case Expr::CStyleCastExprClass:
case Expr::CXXFunctionalCastExprClass:
case Expr::CXXStaticCastExprClass:
case Expr::CXXDynamicCastExprClass:
case Expr::CXXReinterpretCastExprClass:
case Expr::CXXConstCastExprClass:
case Expr::ObjCBridgedCastExprClass:
return EmitCastLValue(cast<CastExpr>(E));
case Expr::MaterializeTemporaryExprClass:
return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
case Expr::CoawaitExprClass:
return EmitCoawaitLValue(cast<CoawaitExpr>(E));
case Expr::CoyieldExprClass:
return EmitCoyieldLValue(cast<CoyieldExpr>(E));
}
}
/// Given an object of the given canonical type, can we safely copy a
/// value out of it based on its initializer?
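/// For example, a 'const int' object qualifies, while 'volatile const int'
/// or a C++ class with mutable fields or a non-trivial copy/destructor does
/// not.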
static bool isConstantEmittableObjectType(QualType type) {
assert(type.isCanonical());
assert(!type->isReferenceType());
// Must be const-qualified but non-volatile.
Qualifiers qs = type.getLocalQualifiers();
if (!qs.hasConst() || qs.hasVolatile()) return false;
// Otherwise, all object types satisfy this except C++ classes with
// mutable subobjects or non-trivial copy/destroy behavior.
if (const auto *RT = dyn_cast<RecordType>(type))
if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
if (RD->hasMutableFields() || !RD->isTrivial())
return false;
return true;
}
/// Can we constant-emit a load of a reference to a variable of the
/// given type? This is different from predicates like
/// Decl::mightBeUsableInConstantExpressions because we do want it to apply
/// in situations that don't necessarily satisfy the language's rules
/// for this (e.g. C++'s ODR-use rules). For example, we want to be able
/// to do this with const float variables even if those variables
/// aren't marked 'constexpr'.
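/// For example, 'const int' maps to CEK_AsValueOnly, 'const int &' to
/// CEK_AsValueOrReference, and 'int &' to CEK_AsReferenceOnly.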
enum ConstantEmissionKind {
CEK_None,
CEK_AsReferenceOnly,
CEK_AsValueOrReference,
CEK_AsValueOnly
};
static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
type = type.getCanonicalType();
if (const auto *ref = dyn_cast<ReferenceType>(type)) {
if (isConstantEmittableObjectType(ref->getPointeeType()))
return CEK_AsValueOrReference;
return CEK_AsReferenceOnly;
}
if (isConstantEmittableObjectType(type))
return CEK_AsValueOnly;
return CEK_None;
}
/// Try to emit a reference to the given value without producing it as
/// an l-value. This is just an optimization, but it avoids us needing
/// to emit global copies of variables if they're named without triggering
/// a formal use in a context where we can't emit a direct reference to them,
/// for instance if a block or lambda or a member of a local class uses a
/// const int variable or constexpr variable from an enclosing function.
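/// A minimal illustration of the case described above:
///   void f() {
///     const int n = 4;
///     auto g = [] { return n; };  // n is emitted here as the constant 4
///   }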
CodeGenFunction::ConstantEmission
CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
ValueDecl *value = refExpr->getDecl();
// The value needs to be an enum constant or a constant variable.
ConstantEmissionKind CEK;
if (isa<ParmVarDecl>(value)) {
CEK = CEK_None;
} else if (auto *var = dyn_cast<VarDecl>(value)) {
CEK = checkVarTypeForConstantEmission(var->getType());
} else if (isa<EnumConstantDecl>(value)) {
CEK = CEK_AsValueOnly;
} else {
CEK = CEK_None;
}
if (CEK == CEK_None) return ConstantEmission();
Expr::EvalResult result;
bool resultIsReference;
QualType resultType;
// It's best to evaluate all the way as an r-value if that's permitted.
if (CEK != CEK_AsReferenceOnly &&
refExpr->EvaluateAsRValue(result, getContext())) {
resultIsReference = false;
resultType = refExpr->getType();
// Otherwise, try to evaluate as an l-value.
} else if (CEK != CEK_AsValueOnly &&
refExpr->EvaluateAsLValue(result, getContext())) {
resultIsReference = true;
resultType = value->getType();
// Failure.
} else {
return ConstantEmission();
}
// In any case, if the initializer has side-effects, abandon ship.
if (result.HasSideEffects)
return ConstantEmission();
// Emit as a constant.
auto C = ConstantEmitter(*this).emitAbstract(refExpr->getLocation(),
result.Val, resultType);
// Make sure we emit a debug reference to the global variable.
// This should probably fire even for
if (isa<VarDecl>(value)) {
if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
EmitDeclRefExprDbgValue(refExpr, result.Val);
} else {
assert(isa<EnumConstantDecl>(value));
EmitDeclRefExprDbgValue(refExpr, result.Val);
}
// If we emitted a reference constant, we need to dereference that.
if (resultIsReference)
return ConstantEmission::forReference(C);
return ConstantEmission::forValue(C);
}
static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CodeGenFunction &CGF,
const MemberExpr *ME) {
if (auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
// Try to emit static variable member expressions as DREs.
return DeclRefExpr::Create(
CGF.getContext(), NestedNameSpecifierLoc(), SourceLocation(), VD,
/*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(),
ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse());
}
return nullptr;
}
CodeGenFunction::ConstantEmission
CodeGenFunction::tryEmitAsConstant(const MemberExpr *ME) {
if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, ME))
return tryEmitAsConstant(DRE);
return ConstantEmission();
}
llvm::Value *CodeGenFunction::emitScalarConstant(
const CodeGenFunction::ConstantEmission &Constant, Expr *E) {
assert(Constant && "not a constant");
if (Constant.isReference())
return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E),
E->getExprLoc())
.getScalarVal();
return Constant.getValue();
}
llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
SourceLocation Loc) {
return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
lvalue.getType(), Loc, lvalue.getBaseInfo(),
lvalue.getTBAAInfo(), lvalue.isNontemporal());
}
static bool hasBooleanRepresentation(QualType Ty) {
if (Ty->isBooleanType())
return true;
if (const EnumType *ET = Ty->getAs<EnumType>())
return ET->getDecl()->getIntegerType()->isBooleanType();
if (const AtomicType *AT = Ty->getAs<AtomicType>())
return hasBooleanRepresentation(AT->getValueType());
return false;
}
static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
llvm::APInt &Min, llvm::APInt &End,
bool StrictEnums, bool IsBool) {
const EnumType *ET = Ty->getAs<EnumType>();
bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
ET && !ET->getDecl()->isFixed();
if (!IsBool && !IsRegularCPlusPlusEnum)
return false;
if (IsBool) {
Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
} else {
const EnumDecl *ED = ET->getDecl();
llvm::Type *LTy = CGF.ConvertTypeForMem(ED->getIntegerType());
unsigned Bitwidth = LTy->getScalarSizeInBits();
unsigned NumNegativeBits = ED->getNumNegativeBits();
unsigned NumPositiveBits = ED->getNumPositiveBits();
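// For example, 'enum E { A = -4, B = 3 }' needs 3 negative bits and 2
// positive bits, so NumBits = max(3, 2 + 1) = 3 and the range is [-4, 4).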
if (NumNegativeBits) {
unsigned NumBits = std::max(NumNegativeBits, NumPositiveBits + 1);
assert(NumBits <= Bitwidth);
End = llvm::APInt(Bitwidth, 1) << (NumBits - 1);
Min = -End;
} else {
assert(NumPositiveBits <= Bitwidth);
End = llvm::APInt(Bitwidth, 1) << NumPositiveBits;
Min = llvm::APInt(Bitwidth, 0);
}
}
return true;
}
llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
llvm::APInt Min, End;
if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums,
hasBooleanRepresentation(Ty)))
return nullptr;
llvm::MDBuilder MDHelper(getLLVMContext());
return MDHelper.createRange(Min, End);
}
bool CodeGenFunction::EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
SourceLocation Loc) {
bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool);
bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum);
if (!HasBoolCheck && !HasEnumCheck)
return false;
bool IsBool = hasBooleanRepresentation(Ty) ||
NSAPI(CGM.getContext()).isObjCBOOLType(Ty);
bool NeedsBoolCheck = HasBoolCheck && IsBool;
bool NeedsEnumCheck = HasEnumCheck && Ty->getAs<EnumType>();
if (!NeedsBoolCheck && !NeedsEnumCheck)
return false;
// Single-bit booleans don't need to be checked. Special-case this to avoid
// a bit width mismatch when handling bitfield values. This is handled by
// EmitFromMemory for the non-bitfield case.
if (IsBool &&
cast<llvm::IntegerType>(Value->getType())->getBitWidth() == 1)
return false;
llvm::APInt Min, End;
if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool))
return true;
auto &Ctx = getLLVMContext();
SanitizerScope SanScope(this);
llvm::Value *Check;
--End;
if (!Min) {
Check = Builder.CreateICmpULE(Value, llvm::ConstantInt::get(Ctx, End));
} else {
llvm::Value *Upper =
Builder.CreateICmpSLE(Value, llvm::ConstantInt::get(Ctx, End));
llvm::Value *Lower =
Builder.CreateICmpSGE(Value, llvm::ConstantInt::get(Ctx, Min));
Check = Builder.CreateAnd(Upper, Lower);
}
llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc),
EmitCheckTypeDescriptor(Ty)};
SanitizerMask Kind =
NeedsEnumCheck ? SanitizerKind::Enum : SanitizerKind::Bool;
EmitCheck(std::make_pair(Check, Kind), SanitizerHandler::LoadInvalidValue,
StaticArgs, EmitCheckValue(Value));
return true;
}
llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
QualType Ty,
SourceLocation Loc,
LValueBaseInfo BaseInfo,
TBAAAccessInfo TBAAInfo,
bool isNontemporal) {
if (!CGM.getCodeGenOpts().PreserveVec3Type) {
// For better performance, handle vector loads differently.
if (Ty->isVectorType()) {
const llvm::Type *EltTy = Addr.getElementType();
const auto *VTy = cast<llvm::VectorType>(EltTy);
// Handle vectors of size 3 like size 4 for better performance.
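// An illustrative sketch of the IR this path produces for a <3 x float>
// load:
//   %cast = bitcast <3 x float>* %p to <4 x float>*
//   %v4   = load <4 x float>, <4 x float>* %cast
//   %v3   = shufflevector <4 x float> %v4, <4 x float> undef,
//                         <3 x i32> <i32 0, i32 1, i32 2>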
if (VTy->getNumElements() == 3) {
// Bitcast to vec4 type.
llvm::VectorType *vec4Ty =
llvm::VectorType::get(VTy->getElementType(), 4);
Address Cast = Builder.CreateElementBitCast(Addr, vec4Ty, "castToVec4");
// Now load value.
llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4");
// Shuffle vector to get vec3.
V = Builder.CreateShuffleVector(V, llvm::UndefValue::get(vec4Ty),
{0, 1, 2}, "extractVec");
return EmitFromMemory(V, Ty);
}
}
}
// Atomic operations have to be done on integral types.
LValue AtomicLValue =
LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) {
return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal();
}
llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
if (isNontemporal) {
llvm::MDNode *Node = llvm::MDNode::get(
Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
Load->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
}
CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);
if (EmitScalarRangeCheck(Load, Ty, Loc)) {
// In order to prevent the optimizer from throwing away the check, don't
// attach range metadata to the load.
} else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty))
Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
return EmitFromMemory(Load, Ty);
}
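/// Convert a scalar from its register representation to its in-memory
/// representation; currently this only widens an i1 boolean to the declared
/// memory width of the type.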
llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
// Bool has a different representation in memory than in registers.
if (hasBooleanRepresentation(Ty)) {
// This should really always be an i1, but sometimes it's already
// an i8, and it's awkward to track those cases down.
if (Value->getType()->isIntegerTy(1))
return Builder.CreateZExt(Value, ConvertTypeForMem(Ty), "frombool");
assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
"wrong value rep of bool");
}
return Value;
}
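/// Convert a scalar from its in-memory representation back to its register
/// representation; currently this only truncates a widened boolean back to
/// i1.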
llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
// Bool has a different representation in memory than in registers.
if (hasBooleanRepresentation(Ty)) {
assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
"wrong value rep of bool");
return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
}
return Value;
}
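/// Store a scalar to memory, applying the same vec3-as-vec4 widening as
/// EmitLoadOfScalar, routing atomic types through EmitAtomicStore, and
/// attaching nontemporal and TBAA metadata as requested.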
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
bool Volatile, QualType Ty,
LValueBaseInfo BaseInfo,
TBAAAccessInfo TBAAInfo,
bool isInit, bool isNontemporal) {
if (!CGM.getCodeGenOpts().PreserveVec3Type) {
// Handle vectors differently to get better performance.
if (Ty->isVectorType()) {
llvm::Type *SrcTy = Value->getType();
auto *VecTy = dyn_cast<llvm::VectorType>(SrcTy);
// Handle vec3 specially.
if (VecTy && VecTy->getNumElements() == 3) {
// Our source is a vec3, do a shuffle vector to make it a vec4.
llvm::Constant *Mask[] = {Builder.getInt32(0), Builder.getInt32(1),
Builder.getInt32(2),
llvm::UndefValue::get(Builder.getInt32Ty())};
llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
Value = Builder.CreateShuffleVector(Value, llvm::UndefValue::get(VecTy),
MaskV, "extractVec");
SrcTy = llvm::VectorType::get(VecTy->getElementType(), 4);
}
if (Addr.getElementType() != SrcTy) {
Addr = Builder.CreateElementBitCast(Addr, SrcTy, "storetmp");
}
}
}
Value = EmitToMemory(Value, Ty);
LValue AtomicLValue =
LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
if (Ty->isAtomicType() ||
(!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) {
EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit);
return;
}
llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
if (isNontemporal) {
llvm::MDNode *Node =
llvm::MDNode::get(Store->getContext(),
llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
Store->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
}
CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
}
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
bool isInit) {
EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
lvalue.getType(), lvalue.getBaseInfo(),
lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal());
}
/// EmitLoadOfLValue - Given an lvalue that represents a value, load that
/// value and return it as an rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
if (LV.isObjCWeak()) {
// load of a __weak object.
Address AddrWeakObj = LV.getAddress();
return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
AddrWeakObj));
}
if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
// In MRC mode, we do a load+autorelease.
if (!getLangOpts().ObjCAutoRefCount) {
return RValue::get(EmitARCLoadWeak(LV.getAddress()));
}
// In ARC mode, we load retained and then consume the value.
llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress());
Object = EmitObjCConsumeObject(LV.getType(), Object);
return RValue::get(Object);
}
if (LV.isSimple()) {
assert(!LV.getType()->isFunctionType());
// Everything needs a load.
return RValue::get(EmitLoadOfScalar(LV, Loc));
}
if (LV.isVectorElt()) {
llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(),
LV.isVolatileQualified());
return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
"vecext"));
}
// If this is a reference to a subset of the elements of a vector, either
// shuffle the input or extract/insert them as appropriate.
if (LV.isExtVectorElt())
return EmitLoadOfExtVectorElementLValue(LV);
// Global register variables are always accessed via intrinsics.
if (LV.isGlobalReg())
return EmitLoadOfGlobalRegLValue(LV);
assert(LV.isBitField() && "Unknown LValue type!");
return EmitLoadOfBitfieldLValue(LV, Loc);
}
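/// Load a bit-field: the containing storage unit is loaded as a whole and
/// the field is extracted with a shl/ashr pair (signed) or an lshr/and pair
/// (unsigned) before being cast to the field's declared type.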
RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
SourceLocation Loc) {
const CGBitFieldInfo &Info = LV.getBitFieldInfo();
// Get the output type.
llvm::Type *ResLTy = ConvertType(LV.getType());
Address Ptr = LV.getBitFieldAddress();
llvm::Value *Val = Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");
if (Info.IsSigned) {
assert(static_cast<unsigned>(Info.Offset + Info.Size) <= Info.StorageSize);
unsigned HighBits = Info.StorageSize - Info.Offset - Info.Size;
if (HighBits)
Val = Builder.CreateShl(Val, HighBits, "bf.shl");
if (Info.Offset + HighBits)
Val = Builder.CreateAShr(Val, Info.Offset + HighBits, "bf.ashr");
} else {
if (Info.Offset)
Val = Builder.CreateLShr(Val, Info.Offset, "bf.lshr");
if (static_cast<unsigned>(Info.Offset) + Info.Size < Info.StorageSize)
Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(Info.StorageSize,
Info.Size),
"bf.clear");
}
Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
EmitScalarRangeCheck(Val, LV.getType(), Loc);
return RValue::get(Val);
}
// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(),
LV.isVolatileQualified());
const llvm::Constant *Elts = LV.getExtVectorElts();
// If the result of the expression is a non-vector type, we must be extracting
// a single element. Just codegen as an extractelement.
const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
if (!ExprVT) {
unsigned InIdx = getAccessedFieldNo(0, Elts);
llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
return RValue::get(Builder.CreateExtractElement(Vec, Elt));
}
// Always use a shuffle vector to try to retain the original program structure.
unsigned NumResultElts = ExprVT->getNumElements();
SmallVector<llvm::Constant*, 4> Mask;
for (unsigned i = 0; i != NumResultElts; ++i)
Mask.push_back(Builder.getInt32(getAccessedFieldNo(i, Elts)));
llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
MaskV);
return RValue::get(Vec);
}
/// Generates lvalue for partial ext_vector access.
Address CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {
Address VectorAddress = LV.getExtVectorAddress();
const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
QualType EQT = ExprVT->getElementType();
llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);
Address CastToPointerElement =
Builder.CreateElementBitCast(VectorAddress, VectorElementTy,
"conv.ptr.element");
const llvm::Constant *Elts = LV.getExtVectorElts();
unsigned ix = getAccessedFieldNo(0, Elts);
Address VectorBasePtrPlusIx =
Builder.CreateConstInBoundsGEP(CastToPointerElement, ix,
"vector.elt");
return VectorBasePtrPlusIx;
}
/// Loads of global named registers are always calls to intrinsics.
RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) {
assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
"Bad type for register variable");
llvm::MDNode *RegName = cast<llvm::MDNode>(
cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());
// We accept integer and pointer types only
llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
llvm::Type *Ty = OrigTy;
if (OrigTy->isPointerTy())
Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
llvm::Type *Types[] = { Ty };
llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
llvm::Value *Call = Builder.CreateCall(
F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
if (OrigTy->isPointerTy())
Call = Builder.CreateIntToPtr(Call, OrigTy);
return RValue::get(Call);
}
/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
/// is 'Ty'.
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
bool isInit) {
if (!Dst.isSimple()) {
if (Dst.isVectorElt()) {
// Read/modify/write the vector, inserting the new element.
llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
Dst.isVolatileQualified());
Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
Dst.getVectorIdx(), "vecins");
Builder.CreateStore(Vec, Dst.getVectorAddress(),
Dst.isVolatileQualified());
return;
}
// If this is an update of extended vector elements, insert them as
// appropriate.
if (Dst.isExtVectorElt())
return EmitStoreThroughExtVectorComponentLValue(Src, Dst);
if (Dst.isGlobalReg())
return EmitStoreThroughGlobalRegLValue(Src, Dst);
assert(Dst.isBitField() && "Unknown LValue type");
return EmitStoreThroughBitfieldLValue(Src, Dst);
}
// There's special magic for assigning into an ARC-qualified l-value.
if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
switch (Lifetime) {
case Qualifiers::OCL_None:
llvm_unreachable("present but none");
case Qualifiers::OCL_ExplicitNone:
// nothing special
break;
case Qualifiers::OCL_Strong:
if (isInit) {
Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal()));
break;
}
EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
return;
case Qualifiers::OCL_Weak:
if (isInit)
// Initialize and then skip the primitive store.
EmitARCInitWeak(Dst.getAddress(), Src.getScalarVal());
else
EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), /*ignore*/ true);
return;
case Qualifiers::OCL_Autoreleasing:
Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
Src.getScalarVal()));
// fall into the normal path
break;
}
}
if (Dst.isObjCWeak() && !Dst.isNonGC()) {
// load of a __weak object.
Address LvalueDst = Dst.getAddress();
llvm::Value *src = Src.getScalarVal();
CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
return;
}
if (Dst.isObjCStrong() && !Dst.isNonGC()) {
// load of a __strong object.
Address LvalueDst = Dst.getAddress();
llvm::Value *src = Src.getScalarVal();
if (Dst.isObjCIvar()) {
assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
llvm::Type *ResultType = IntPtrTy;
Address dst = EmitPointerWithAlignment(Dst.getBaseIvarExp());
llvm::Value *RHS = dst.getPointer();
RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
llvm::Value *LHS =
Builder.CreatePtrToInt(LvalueDst.getPointer(), ResultType,
"sub.ptr.lhs.cast");
llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
BytesBetween);
} else if (Dst.isGlobalObjCRef()) {
CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
Dst.isThreadLocalRef());
}
else
CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
return;
}
assert(Src.isScalar() && "Can't emit an agg store with this method");
EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
}
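/// Store into a bit-field. If the field does not fill its storage unit, the
/// surrounding bits are loaded, the source is masked and shifted into
/// position, and the combined value is written back. If \p Result is
/// non-null, it receives the new value of the field, sign-extended if the
/// field is signed.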
void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
llvm::Value **Result) {
const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
Address Ptr = Dst.getBitFieldAddress();
// Get the source value, truncated to the width of the bit-field.
llvm::Value *SrcVal = Src.getScalarVal();
// Cast the source to the storage type and shift it into place.
SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
/*isSigned=*/false);
llvm::Value *MaskedVal = SrcVal;
// See if there are other bits in the bitfield's storage we'll need to load
// and mask together with the source before storing.
if (Info.StorageSize != Info.Size) {
assert(Info.StorageSize > Info.Size && "Invalid bitfield size.");
llvm::Value *Val =
Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");
// Mask the source value as needed.
if (!hasBooleanRepresentation(Dst.getType()))
SrcVal = Builder.CreateAnd(SrcVal,
llvm::APInt::getLowBitsSet(Info.StorageSize,
Info.Size),
"bf.value");
MaskedVal = SrcVal;
if (Info.Offset)
SrcVal = Builder.CreateShl(SrcVal, Info.Offset, "bf.shl");
// Mask out the original value.
Val = Builder.CreateAnd(Val,
~llvm::APInt::getBitsSet(Info.StorageSize,
Info.Offset,
Info.Offset + Info.Size),
"bf.clear");
// Or together the unchanged values and the source value.
SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
} else {
assert(Info.Offset == 0);
}
// Write the new value back out.
Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified());
// Return the new value of the bit-field, if requested.
if (Result) {
llvm::Value *ResultVal = MaskedVal;
// Sign extend the value if needed.
if (Info.IsSigned) {
assert(Info.Size <= Info.StorageSize);
unsigned HighBits = Info.StorageSize - Info.Size;
if (HighBits) {
ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
}
}
ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
"bf.result.cast");
*Result = EmitFromMemory(ResultVal, Dst.getType());
}
}
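/// Store through an ext_vector component lvalue such as 'v.xy': the whole
/// vector is loaded, the new elements are shuffled or inserted into place,
/// and the result is stored back.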
void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
LValue Dst) {
// This access turns into a read/modify/write of the vector. Load the input
// value now.
llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddress(),
Dst.isVolatileQualified());
const llvm::Constant *Elts = Dst.getExtVectorElts();
llvm::Value *SrcVal = Src.getScalarVal();
if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
unsigned NumSrcElts = VTy->getNumElements();
unsigned NumDstElts = Vec->getType()->getVectorNumElements();
if (NumDstElts == NumSrcElts) {
// Use a shuffle vector if the source and destination have the same number
// of elements, and restore the vector mask since it is on the side where
// it will be stored.
SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
for (unsigned i = 0; i != NumSrcElts; ++i)
Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i);
llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
Vec = Builder.CreateShuffleVector(SrcVal,
llvm::UndefValue::get(Vec->getType()),
MaskV);
} else if (NumDstElts > NumSrcElts) {
// Extend the source vector to the destination's length, then shuffle it
// into the destination.
// FIXME: since we're shuffling with undef, can we just use the indices
// into that? This could be simpler.
SmallVector<llvm::Constant*, 4> ExtMask;
for (unsigned i = 0; i != NumSrcElts; ++i)
ExtMask.push_back(Builder.getInt32(i));
ExtMask.resize(NumDstElts, llvm::UndefValue::get(Int32Ty));
llvm::Value *ExtMaskV = llvm::ConstantVector::get(ExtMask);
llvm::Value *ExtSrcVal =
Builder.CreateShuffleVector(SrcVal,
llvm::UndefValue::get(SrcVal->getType()),
ExtMaskV);
// Build an identity mask over the destination elements.
SmallVector<llvm::Constant*, 4> Mask;
for (unsigned i = 0; i != NumDstElts; ++i)
Mask.push_back(Builder.getInt32(i));
// When the vector size is odd and .odd or .hi is used, the last element
// of the Elts constant array will be one past the size of the vector.
// Ignore the last element here, if it is greater than the mask size.
if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size())
NumSrcElts--;
// Modify the mask entries for the accessed fields to take elements from
// the extended source vector.
for (unsigned i = 0; i != NumSrcElts; ++i)
Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i+NumDstElts);
llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV);
} else {
// We should never shorten the vector
llvm_unreachable("unexpected shorten vector length");
}
} else {
// If the Src is a scalar (not a vector) it must be updating one element.
unsigned InIdx = getAccessedFieldNo(0, Elts);
llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
}
Builder.CreateStore(Vec, Dst.getExtVectorAddress(),
Dst.isVolatileQualified());
}
/// Store of global named registers are always calls to intrinsics.
void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) {
assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
"Bad type for register variable");
llvm::MDNode *RegName = cast<llvm::MDNode>(
cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata());
assert(RegName && "Register LValue is not metadata");
// We accept integer and pointer types only
llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType());
llvm::Type *Ty = OrigTy;
if (OrigTy->isPointerTy())
Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
llvm::Type *Types[] = { Ty };
llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
llvm::Value *Value = Src.getScalarVal();
if (OrigTy->isPointerTy())
Value = Builder.CreatePtrToInt(Value, Ty);
Builder.CreateCall(
F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value});
}
// setObjCGCLValueClass - sets the class of the lvalue for the purpose of
// generating the write-barrier API. It is currently a global, an ivar,
// or neither.
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
LValue &LV,
bool IsMemberAccess=false) {
if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
return;
if (isa<ObjCIvarRefExpr>(E)) {
QualType ExpTy = E->getType();
if (IsMemberAccess && ExpTy->isPointerType()) {
// If the ivar is a structure pointer, assigning to a field of
// this struct follows gcc's behavior and conservatively makes
// it a non-ivar write-barrier.
ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
if (ExpTy->isRecordType()) {
LV.setObjCIvar(false);
return;
}
}
LV.setObjCIvar(true);
auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E));
LV.setBaseIvarExp(Exp->getBase());
LV.setObjCArray(E->getType()->isArrayType());
return;
}
if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) {
if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
if (VD->hasGlobalStorage()) {
LV.setGlobalObjCRef(true);
LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
}
}
LV.setObjCArray(E->getType()->isArrayType());
return;
}
if (const auto *Exp = dyn_cast<UnaryOperator>(E)) {
setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
return;
}
if (const auto *Exp = dyn_cast<ParenExpr>(E)) {
setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
if (LV.isObjCIvar()) {
// If cast is to a structure pointer, follow gcc's behavior and make it
// a non-ivar write-barrier.
QualType ExpTy = E->getType();
if (ExpTy->isPointerType())
ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
if (ExpTy->isRecordType())
LV.setObjCIvar(false);
}
return;
}
if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) {
setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
return;
}
if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) {
setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
return;
}
if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) {
setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
return;
}
if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
return;
}
if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
if (LV.isObjCIvar() && !LV.isObjCArray())
// Using array syntax to assign to what an ivar points to is not the
// same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
LV.setObjCIvar(false);
else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
// Using array syntax to assign to what a global points to is not the
// same as assigning to the global itself. {id *G;} G[i] = 0;
LV.setGlobalObjCRef(false);
return;
}
if (const auto *Exp = dyn_cast<MemberExpr>(E)) {
setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
// We don't know if the member is an 'ivar', but this flag is looked at
// only in the context of LV.isObjCIvar().
LV.setObjCArray(E->getType()->isArrayType());
return;
}
}
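/// Bitcast the pointer \p V to a pointer to \p IRType, preserving the
/// address space of the original pointer.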
static llvm::Value *
EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
llvm::Value *V, llvm::Type *IRType,
StringRef Name = StringRef()) {
unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name);
}
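/// Emit an lvalue for an OpenMP threadprivate variable, asking the OpenMP
/// runtime for the address of the current thread's copy.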
static LValue EmitThreadPrivateVarDeclLValue(
CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
llvm::Type *RealVarTy, SourceLocation Loc) {
Addr = CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);
Addr = CGF.Builder.CreateElementBitCast(Addr, RealVarTy);
return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
}
static Address emitDeclTargetVarDeclLValue(CodeGenFunction &CGF,
const VarDecl *VD, QualType T) {
llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
// Return an invalid address if the variable is MT_To and unified
// memory is not enabled. For all other cases (MT_Link, and MT_To
// with unified memory), return a valid address.
if (!Res || (*Res == OMPDeclareTargetDeclAttr::MT_To &&
!CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory()))
return Address::invalid();
assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
(*Res == OMPDeclareTargetDeclAttr::MT_To &&
CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) &&
"Expected link clause OR to clause with unified memory enabled.");
QualType PtrTy = CGF.getContext().getPointerType(VD->getType());
Address Addr = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>());
}
Address
CodeGenFunction::EmitLoadOfReference(LValue RefLVal,
LValueBaseInfo *PointeeBaseInfo,
TBAAAccessInfo *PointeeTBAAInfo) {
llvm::LoadInst *Load = Builder.CreateLoad(RefLVal.getAddress(),
RefLVal.isVolatile());
CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo());
CharUnits Align = getNaturalTypeAlignment(RefLVal.getType()->getPointeeType(),
PointeeBaseInfo, PointeeTBAAInfo,
/* forPointeeType= */ true);
return Address(Load, Align);
}
LValue CodeGenFunction::EmitLoadOfReferenceLValue(LValue RefLVal) {
LValueBaseInfo PointeeBaseInfo;
TBAAAccessInfo PointeeTBAAInfo;
Address PointeeAddr = EmitLoadOfReference(RefLVal, &PointeeBaseInfo,
&PointeeTBAAInfo);
return MakeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(),
PointeeBaseInfo, PointeeTBAAInfo);
}
Address CodeGenFunction::EmitLoadOfPointer(Address Ptr,
const PointerType *PtrTy,
LValueBaseInfo *BaseInfo,
TBAAAccessInfo *TBAAInfo) {
llvm::Value *Addr = Builder.CreateLoad(Ptr);
return Address(Addr, getNaturalTypeAlignment(PtrTy->getPointeeType(),
BaseInfo, TBAAInfo,
/*forPointeeType=*/true));
}
LValue CodeGenFunction::EmitLoadOfPointerLValue(Address PtrAddr,
const PointerType *PtrTy) {
LValueBaseInfo BaseInfo;
TBAAAccessInfo TBAAInfo;
Address Addr = EmitLoadOfPointer(PtrAddr, PtrTy, &BaseInfo, &TBAAInfo);
return MakeAddrLValue(Addr, PtrTy->getPointeeType(), BaseInfo, TBAAInfo);
}
static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
const Expr *E, const VarDecl *VD) {
QualType T = E->getType();
// If it's thread_local, emit a call to its wrapper function instead.
if (VD->getTLSKind() == VarDecl::TLS_Dynamic &&
CGF.CGM.getCXXABI().usesThreadWrapperFunction())
return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);
// Check if the variable is marked as declare target with link clause in
// device codegen.
if (CGF.getLangOpts().OpenMPIsDevice) {
Address Addr = emitDeclTargetVarDeclLValue(CGF, VD, T);
if (Addr.isValid())
return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
}
llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
Address Addr(V, Alignment);
// Emit reference to the private copy of the variable if it is an OpenMP
// threadprivate variable.
if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd &&
VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy,
E->getExprLoc());
}
LValue LV = VD->getType()->isReferenceType() ?
CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(),
AlignmentSource::Decl) :
CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
setObjCGCLValueClass(CGF.getContext(), E, LV);
return LV;
}
static llvm::Constant *EmitFunctionDeclPointer(CodeGenModule &CGM,
const FunctionDecl *FD) {
if (FD->hasAttr<WeakRefAttr>()) {
ConstantAddress aliasee = CGM.GetWeakRefReference(FD);
return aliasee.getPointer();
}
llvm::Constant *V = CGM.GetAddrOfFunction(FD);
if (!FD->hasPrototype()) {
if (const FunctionProtoType *Proto =
FD->getType()->getAs<FunctionProtoType>()) {
// Ugly case: for a K&R-style definition, the type of the definition
// isn't the same as the type of a use. Correct for this with a
// bitcast.
QualType NoProtoType =
CGM.getContext().getFunctionNoProtoType(Proto->getReturnType());
NoProtoType = CGM.getContext().getPointerType(NoProtoType);
V = llvm::ConstantExpr::getBitCast(V,
CGM.getTypes().ConvertType(NoProtoType));
}
}
return V;
}
static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
const Expr *E, const FunctionDecl *FD) {
llvm::Value *V = EmitFunctionDeclPointer(CGF.CGM, FD);
CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
return CGF.MakeAddrLValue(V, E->getType(), Alignment,
AlignmentSource::Decl);
}
static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD,
llvm::Value *ThisValue) {
QualType TagType = CGF.getContext().getTagDeclType(FD->getParent());
LValue LV = CGF.MakeNaturalAlignAddrLValue(ThisValue, TagType);
return CGF.EmitLValueForField(LV, FD);
}
/// Named Registers are named metadata pointing to the register name
/// which will be read from/written to as an argument to the intrinsic
/// @llvm.read/write_register.
/// So far, only the name is being passed down, but other options such as
/// register type, allocation type or even optimization options could be
/// passed down via the metadata node.
static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) {
SmallString<64> Name("llvm.named.register.");
AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>();
assert(Asm->getLabel().size() < 64-Name.size() &&
"Register name too big");
Name.append(Asm->getLabel());
llvm::NamedMDNode *M =
CGM.getModule().getOrInsertNamedMetadata(Name);
if (M->getNumOperands() == 0) {
llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(),
Asm->getLabel());
llvm::Metadata *Ops[] = {Str};
M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
}
CharUnits Alignment = CGM.getContext().getDeclAlign(VD);
llvm::Value *Ptr =
llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0));
return LValue::MakeGlobalReg(Address(Ptr, Alignment), VD->getType());
}
/// Determine whether we can emit a reference to \p VD from the current
/// context, despite not necessarily having seen an odr-use of the variable in
/// this context.
static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF,
const DeclRefExpr *E,
const VarDecl *VD,
bool IsConstant) {
// For a variable declared in an enclosing scope, do not emit a spurious
// reference even if we have a capture, as that will emit an unwarranted
// reference to our capture state, and will likely generate worse code than
// emitting a local copy.
if (E->refersToEnclosingVariableOrCapture())
return false;
// For a local declaration declared in this function, we can always reference
// it even if we don't have an odr-use.
if (VD->hasLocalStorage()) {
return VD->getDeclContext() ==
dyn_cast_or_null<DeclContext>(CGF.CurCodeDecl);
}
// For a global declaration, we can emit a reference to it if we know
// for sure that we are able to emit a definition of it.
VD = VD->getDefinition(CGF.getContext());
if (!VD)
return false;
// Don't emit a spurious reference if it might be to a variable that only
// exists on a different device / target.
// FIXME: This is unnecessarily broad. Check whether this would actually be a
// cross-target reference.
if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA ||
CGF.getLangOpts().OpenCL) {
return false;
}
// We can emit a spurious reference only if the linkage implies that we'll
// be emitting a non-interposable symbol that will be retained until link
// time.
switch (CGF.CGM.getLLVMLinkageVarDefinition(VD, IsConstant)) {
case llvm::GlobalValue::ExternalLinkage:
case llvm::GlobalValue::LinkOnceODRLinkage:
case llvm::GlobalValue::WeakODRLinkage:
case llvm::GlobalValue::InternalLinkage:
case llvm::GlobalValue::PrivateLinkage:
return true;
default:
return false;
}
}
LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
const NamedDecl *ND = E->getDecl();
QualType T = E->getType();
assert(E->isNonOdrUse() != NOUR_Unevaluated &&
"should not emit an unevaluated operand");
if (const auto *VD = dyn_cast<VarDecl>(ND)) {
// Global named registers are accessed via intrinsics only.
if (VD->getStorageClass() == SC_Register &&
VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())
return EmitGlobalNamedRegister(VD, CGM);
// If this DeclRefExpr does not constitute an odr-use of the variable,
// we're not permitted to emit a reference to it in general, and it might
// not be captured if capture would be necessary for a use. Emit the
// constant value directly instead.
if (E->isNonOdrUse() == NOUR_Constant &&
(VD->getType()->isReferenceType() ||
!canEmitSpuriousReferenceToVariable(*this, E, VD, true))) {
VD->getAnyInitializer(VD);
llvm::Constant *Val = ConstantEmitter(*this).emitAbstract(
E->getLocation(), *VD->evaluateValue(), VD->getType());
assert(Val && "failed to emit constant expression");
Address Addr = Address::invalid();
if (!VD->getType()->isReferenceType()) {
// Spill the constant value to a global.
Addr = CGM.createUnnamedGlobalFrom(*VD, Val,
getContext().getDeclAlign(VD));
+ llvm::Type *VarTy = getTypes().ConvertTypeForMem(VD->getType());
+ auto *PTy = llvm::PointerType::get(
+ VarTy, getContext().getTargetAddressSpace(VD->getType()));
+ if (PTy != Addr.getType())
+ Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, PTy);
} else {
// Should we be using the alignment of the constant pointer we emitted?
CharUnits Alignment =
getNaturalTypeAlignment(E->getType(),
/* BaseInfo= */ nullptr,
/* TBAAInfo= */ nullptr,
/* forPointeeType= */ true);
Addr = Address(Val, Alignment);
}
return MakeAddrLValue(Addr, T, AlignmentSource::Decl);
}
// FIXME: Handle other kinds of non-odr-use DeclRefExprs.
// Check for captured variables.
if (E->refersToEnclosingVariableOrCapture()) {
VD = VD->getCanonicalDecl();
if (auto *FD = LambdaCaptureFields.lookup(VD))
return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
else if (CapturedStmtInfo) {
auto I = LocalDeclMap.find(VD);
if (I != LocalDeclMap.end()) {
if (VD->getType()->isReferenceType())
return EmitLoadOfReferenceLValue(I->second, VD->getType(),
AlignmentSource::Decl);
return MakeAddrLValue(I->second, T);
}
LValue CapLVal =
EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD),
CapturedStmtInfo->getContextValue());
return MakeAddrLValue(
Address(CapLVal.getPointer(), getContext().getDeclAlign(VD)),
CapLVal.getType(), LValueBaseInfo(AlignmentSource::Decl),
CapLVal.getTBAAInfo());
}
assert(isa<BlockDecl>(CurCodeDecl));
Address addr = GetAddrOfBlockDecl(VD);
return MakeAddrLValue(addr, T, AlignmentSource::Decl);
}
}
// FIXME: We should be able to assert this for FunctionDecls as well!
// FIXME: We should be able to assert this for all DeclRefExprs, not just
// those with a valid source location.
assert((ND->isUsed(false) || !isa<VarDecl>(ND) || E->isNonOdrUse() ||
!E->getLocation().isValid()) &&
"Should not use decl without marking it used!");
if (ND->hasAttr<WeakRefAttr>()) {
const auto *VD = cast<ValueDecl>(ND);
ConstantAddress Aliasee = CGM.GetWeakRefReference(VD);
return MakeAddrLValue(Aliasee, T, AlignmentSource::Decl);
}
if (const auto *VD = dyn_cast<VarDecl>(ND)) {
// Check if this is a global variable.
if (VD->hasLinkage() || VD->isStaticDataMember())
return EmitGlobalVarDeclLValue(*this, E, VD);
Address addr = Address::invalid();
// The variable should generally be present in the local decl map.
auto iter = LocalDeclMap.find(VD);
if (iter != LocalDeclMap.end()) {
addr = iter->second;
// Otherwise, it might be a static local we haven't emitted yet for
// some reason; most likely, because it's in an outer function.
} else if (VD->isStaticLocal()) {
addr = Address(CGM.getOrCreateStaticVarDecl(
*VD, CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false)),
getContext().getDeclAlign(VD));
// No other cases for now.
} else {
llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?");
}
// Check for OpenMP threadprivate variables.
if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
return EmitThreadPrivateVarDeclLValue(
*this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()),
E->getExprLoc());
}
// Drill into block byref variables.
bool isBlockByref = VD->isEscapingByref();
if (isBlockByref) {
addr = emitBlockByrefAddress(addr, VD);
}
// Drill into reference types.
LValue LV = VD->getType()->isReferenceType() ?
EmitLoadOfReferenceLValue(addr, VD->getType(), AlignmentSource::Decl) :
MakeAddrLValue(addr, T, AlignmentSource::Decl);
bool isLocalStorage = VD->hasLocalStorage();
bool NonGCable = isLocalStorage &&
!VD->getType()->isReferenceType() &&
!isBlockByref;
if (NonGCable) {
LV.getQuals().removeObjCGCAttr();
LV.setNonGC(true);
}
bool isImpreciseLifetime =
(isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>());
if (isImpreciseLifetime)
LV.setARCPreciseLifetime(ARCImpreciseLifetime);
setObjCGCLValueClass(getContext(), E, LV);
return LV;
}
if (const auto *FD = dyn_cast<FunctionDecl>(ND))
return EmitFunctionDeclLValue(*this, E, FD);
// FIXME: While we're emitting a binding from an enclosing scope, all other
// DeclRefExprs we see should be implicitly treated as if they also refer to
// an enclosing scope.
if (const auto *BD = dyn_cast<BindingDecl>(ND))
return EmitLValue(BD->getBinding());
llvm_unreachable("Unhandled DeclRefExpr");
}
LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
// __extension__ doesn't affect lvalue-ness.
if (E->getOpcode() == UO_Extension)
return EmitLValue(E->getSubExpr());
QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
switch (E->getOpcode()) {
default: llvm_unreachable("Unknown unary operator lvalue!");
case UO_Deref: {
QualType T = E->getSubExpr()->getType()->getPointeeType();
assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
LValueBaseInfo BaseInfo;
TBAAAccessInfo TBAAInfo;
Address Addr = EmitPointerWithAlignment(E->getSubExpr(), &BaseInfo,
&TBAAInfo);
LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());
// We should not generate a __weak write barrier on an indirect reference
// of a pointer to object; as in void foo (__weak id *param); *param = 0;
// But we continue to generate a __strong write barrier on an indirect
// write into a pointer to object.
if (getLangOpts().ObjC &&
getLangOpts().getGC() != LangOptions::NonGC &&
LV.isObjCWeak())
LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
return LV;
}
case UO_Real:
case UO_Imag: {
LValue LV = EmitLValue(E->getSubExpr());
assert(LV.isSimple() && "real/imag on non-ordinary l-value");
// __real is valid on scalars. This is a faster way of testing that.
// __imag can only produce an rvalue on scalars.
if (E->getOpcode() == UO_Real &&
!LV.getAddress().getElementType()->isStructTy()) {
assert(E->getSubExpr()->getType()->isArithmeticType());
return LV;
}
QualType T = ExprTy->castAs<ComplexType>()->getElementType();
Address Component =
(E->getOpcode() == UO_Real
? emitAddrOfRealComponent(LV.getAddress(), LV.getType())
: emitAddrOfImagComponent(LV.getAddress(), LV.getType()));
LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(),
CGM.getTBAAInfoForSubobject(LV, T));
ElemLV.getQuals().addQualifiers(LV.getQuals());
return ElemLV;
}
case UO_PreInc:
case UO_PreDec: {
LValue LV = EmitLValue(E->getSubExpr());
bool isInc = E->getOpcode() == UO_PreInc;
if (E->getType()->isAnyComplexType())
EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
else
EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
return LV;
}
}
}
LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
E->getType(), AlignmentSource::Decl);
}
LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
E->getType(), AlignmentSource::Decl);
}
LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
auto SL = E->getFunctionName();
assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
StringRef FnName = CurFn->getName();
if (FnName.startswith("\01"))
FnName = FnName.substr(1);
StringRef NameItems[] = {
PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName};
std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) {
std::string Name = SL->getString();
if (!Name.empty()) {
unsigned Discriminator =
CGM.getCXXABI().getMangleContext().getBlockId(BD, true);
if (Discriminator)
Name += "_" + Twine(Discriminator + 1).str();
auto C = CGM.GetAddrOfConstantCString(Name, GVName.c_str());
return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
} else {
auto C = CGM.GetAddrOfConstantCString(FnName, GVName.c_str());
return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
}
}
auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName);
return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
}
/// Emit a type description suitable for use by a runtime sanitizer library. The
/// format of a type descriptor is
///
/// \code
/// { i16 TypeKind, i16 TypeInfo }
/// \endcode
///
/// followed by an array of i8 containing the type name. TypeKind is 0 for an
/// integer, 1 for a floating point value, and -1 for anything else.
llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
// Only emit each type's descriptor once.
if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T))
return C;
uint16_t TypeKind = -1;
uint16_t TypeInfo = 0;
if (T->isIntegerType()) {
TypeKind = 0;
TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) |
(T->isSignedIntegerType() ? 1 : 0);
} else if (T->isFloatingType()) {
TypeKind = 1;
TypeInfo = getContext().getTypeSize(T);
}
// Format the type name as if for a diagnostic, including quotes and
// optionally an 'aka'.
SmallString<32> Buffer;
CGM.getDiags().ConvertArgToString(DiagnosticsEngine::ak_qualtype,
(intptr_t)T.getAsOpaquePtr(),
StringRef(), StringRef(), None, Buffer,
None);
llvm::Constant *Components[] = {
Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
llvm::ConstantDataArray::getString(getLLVMContext(), Buffer)
};
llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components);
auto *GV = new llvm::GlobalVariable(
CGM.getModule(), Descriptor->getType(),
/*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor);
GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV);
// Remember the descriptor for this type.
CGM.setTypeDescriptorInMap(T, GV);
return GV;
}
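/// Lower a value to the intptr_t-sized representation expected by sanitizer
/// handlers: small enough floats are bitcast to integers, integers are
/// zero-extended, pointers are passed directly, and anything else is
/// spilled to a stack slot and passed by address.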
llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
llvm::Type *TargetTy = IntPtrTy;
if (V->getType() == TargetTy)
return V;
// Floating-point types which fit into intptr_t are bitcast to integers
// and then passed directly (after zero-extension, if necessary).
if (V->getType()->isFloatingPointTy()) {
unsigned Bits = V->getType()->getPrimitiveSizeInBits();
if (Bits <= TargetTy->getIntegerBitWidth())
V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(),
Bits));
}
// Integers which fit in intptr_t are zero-extended and passed directly.
if (V->getType()->isIntegerTy() &&
V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
return Builder.CreateZExt(V, TargetTy);
// Pointers are passed directly, everything else is passed by address.
if (!V->getType()->isPointerTy()) {
Address Ptr = CreateDefaultAlignTempAlloca(V->getType());
Builder.CreateStore(V, Ptr);
V = Ptr.getPointer();
}
return Builder.CreatePtrToInt(V, TargetTy);
}
/// Emit a representation of a SourceLocation for passing to a handler
/// in a sanitizer runtime library. The format for this data is:
/// \code
/// struct SourceLocation {
/// const char *Filename;
/// int32_t Line, Column;
/// };
/// \endcode
/// For an invalid SourceLocation, the Filename pointer is null.
llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
llvm::Constant *Filename;
int Line, Column;
PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc);
if (PLoc.isValid()) {
StringRef FilenameString = PLoc.getFilename();
int PathComponentsToStrip =
CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip;
if (PathComponentsToStrip < 0) {
assert(PathComponentsToStrip != INT_MIN);
int PathComponentsToKeep = -PathComponentsToStrip;
auto I = llvm::sys::path::rbegin(FilenameString);
auto E = llvm::sys::path::rend(FilenameString);
while (I != E && --PathComponentsToKeep)
++I;
FilenameString = FilenameString.substr(I - E);
} else if (PathComponentsToStrip > 0) {
auto I = llvm::sys::path::begin(FilenameString);
auto E = llvm::sys::path::end(FilenameString);
while (I != E && PathComponentsToStrip--)
++I;
if (I != E)
FilenameString =
FilenameString.substr(I - llvm::sys::path::begin(FilenameString));
else
FilenameString = llvm::sys::path::filename(FilenameString);
}
auto FilenameGV = CGM.GetAddrOfConstantCString(FilenameString, ".src");
CGM.getSanitizerMetadata()->disableSanitizerForGlobal(
cast<llvm::GlobalVariable>(FilenameGV.getPointer()));
Filename = FilenameGV.getPointer();
Line = PLoc.getLine();
Column = PLoc.getColumn();
} else {
Filename = llvm::Constant::getNullValue(Int8PtrTy);
Line = Column = 0;
}
llvm::Constant *Data[] = {Filename, Builder.getInt32(Line),
Builder.getInt32(Column)};
return llvm::ConstantStruct::getAnon(Data);
}
namespace {
/// Specify under what conditions this check can be recovered
enum class CheckRecoverableKind {
/// Always terminate program execution if this check fails.
Unrecoverable,
/// Check supports recovering, runtime has both fatal (noreturn) and
/// non-fatal handlers for this check.
Recoverable,
/// Runtime conditionally aborts, so recovery must always be supported.
AlwaysRecoverable
};
}
static CheckRecoverableKind getRecoverableKind(SanitizerMask Kind) {
assert(Kind.countPopulation() == 1);
if (Kind == SanitizerKind::Function || Kind == SanitizerKind::Vptr)
return CheckRecoverableKind::AlwaysRecoverable;
else if (Kind == SanitizerKind::Return || Kind == SanitizerKind::Unreachable)
return CheckRecoverableKind::Unrecoverable;
else
return CheckRecoverableKind::Recoverable;
}
namespace {
struct SanitizerHandlerInfo {
char const *const Name;
unsigned Version;
};
}
const SanitizerHandlerInfo SanitizerHandlers[] = {
#define SANITIZER_CHECK(Enum, Name, Version) {#Name, Version},
LIST_SANITIZER_CHECKS
#undef SANITIZER_CHECK
};
static void emitCheckHandlerCall(CodeGenFunction &CGF,
llvm::FunctionType *FnType,
ArrayRef<llvm::Value *> FnArgs,
SanitizerHandler CheckHandler,
CheckRecoverableKind RecoverKind, bool IsFatal,
llvm::BasicBlock *ContBB) {
assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
Optional<ApplyDebugLocation> DL;
if (!CGF.Builder.getCurrentDebugLocation()) {
// Ensure that the call has at least an artificial debug location.
DL.emplace(CGF, SourceLocation());
}
bool NeedsAbortSuffix =
IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime;
const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler];
const StringRef CheckName = CheckInfo.Name;
std::string FnName = "__ubsan_handle_" + CheckName.str();
if (CheckInfo.Version && !MinimalRuntime)
FnName += "_v" + llvm::utostr(CheckInfo.Version);
if (MinimalRuntime)
FnName += "_minimal";
if (NeedsAbortSuffix)
FnName += "_abort";
bool MayReturn =
!IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;
llvm::AttrBuilder B;
if (!MayReturn) {
B.addAttribute(llvm::Attribute::NoReturn)
.addAttribute(llvm::Attribute::NoUnwind);
}
B.addAttribute(llvm::Attribute::UWTable);
llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction(
FnType, FnName,
llvm::AttributeList::get(CGF.getLLVMContext(),
llvm::AttributeList::FunctionIndex, B),
/*Local=*/true);
llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs);
if (!MayReturn) {
HandlerCall->setDoesNotReturn();
CGF.Builder.CreateUnreachable();
} else {
CGF.Builder.CreateBr(ContBB);
}
}
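/// Emit one or more sanitizer checks. The failing conditions are split into
/// trap, recoverable, and fatal groups according to -fsanitize-trap= and
/// -fsanitize-recover=, each group is AND'ed into a single condition, and
/// the handler block makes up to two runtime calls (fatal and non-fatal).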
void CodeGenFunction::EmitCheck(
ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked,
SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs,
ArrayRef<llvm::Value *> DynamicArgs) {
assert(IsSanitizerScope);
assert(Checked.size() > 0);
assert(CheckHandler >= 0 &&
size_t(CheckHandler) < llvm::array_lengthof(SanitizerHandlers));
const StringRef CheckName = SanitizerHandlers[CheckHandler].Name;
llvm::Value *FatalCond = nullptr;
llvm::Value *RecoverableCond = nullptr;
llvm::Value *TrapCond = nullptr;
for (int i = 0, n = Checked.size(); i < n; ++i) {
llvm::Value *Check = Checked[i].first;
// -fsanitize-trap= overrides -fsanitize-recover=.
llvm::Value *&Cond =
CGM.getCodeGenOpts().SanitizeTrap.has(Checked[i].second)
? TrapCond
: CGM.getCodeGenOpts().SanitizeRecover.has(Checked[i].second)
? RecoverableCond
: FatalCond;
Cond = Cond ? Builder.CreateAnd(Cond, Check) : Check;
}
if (TrapCond)
EmitTrapCheck(TrapCond);
if (!FatalCond && !RecoverableCond)
return;
llvm::Value *JointCond;
if (FatalCond && RecoverableCond)
JointCond = Builder.CreateAnd(FatalCond, RecoverableCond);
else
JointCond = FatalCond ? FatalCond : RecoverableCond;
assert(JointCond);
CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second);
assert(SanOpts.has(Checked[0].second));
#ifndef NDEBUG
for (int i = 1, n = Checked.size(); i < n; ++i) {
assert(RecoverKind == getRecoverableKind(Checked[i].second) &&
"All recoverable kinds in a single check must be same!");
assert(SanOpts.has(Checked[i].second));
}
#endif
llvm::BasicBlock *Cont = createBasicBlock("cont");
llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName);
llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers);
// Give a hint that we very much don't expect to execute the handler.
// Value chosen to match UR_NONTAKEN_WEIGHT, see BranchProbabilityInfo.cpp.
llvm::MDBuilder MDHelper(getLLVMContext());
llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1);
Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);
EmitBlock(Handlers);
// Handler functions take an i8* pointing to the (handler-specific) static
// information block, followed by a sequence of intptr_t arguments
// representing operand values.
SmallVector<llvm::Value *, 4> Args;
SmallVector<llvm::Type *, 4> ArgTypes;
if (!CGM.getCodeGenOpts().SanitizeMinimalRuntime) {
Args.reserve(DynamicArgs.size() + 1);
ArgTypes.reserve(DynamicArgs.size() + 1);
// Emit handler arguments and create handler function type.
if (!StaticArgs.empty()) {
llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
auto *InfoPtr =
new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
llvm::GlobalVariable::PrivateLinkage, Info);
InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
Args.push_back(Builder.CreateBitCast(InfoPtr, Int8PtrTy));
ArgTypes.push_back(Int8PtrTy);
}
for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) {
Args.push_back(EmitCheckValue(DynamicArgs[i]));
ArgTypes.push_back(IntPtrTy);
}
}
llvm::FunctionType *FnType =
llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);
if (!FatalCond || !RecoverableCond) {
// Simple case: we need to generate a single handler call, either
// fatal, or non-fatal.
emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind,
(FatalCond != nullptr), Cont);
} else {
// Emit two handler calls: first one for set of unrecoverable checks,
// another one for recoverable.
llvm::BasicBlock *NonFatalHandlerBB =
createBasicBlock("non_fatal." + CheckName);
llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName);
Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB);
EmitBlock(FatalHandlerBB);
emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, true,
NonFatalHandlerBB);
EmitBlock(NonFatalHandlerBB);
emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, false,
Cont);
}
EmitBlock(Cont);
}
void CodeGenFunction::EmitCfiSlowPathCheck(
SanitizerMask Kind, llvm::Value *Cond, llvm::ConstantInt *TypeId,
llvm::Value *Ptr, ArrayRef<llvm::Constant *> StaticArgs) {
llvm::BasicBlock *Cont = createBasicBlock("cfi.cont");
llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath");
llvm::BranchInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB);
llvm::MDBuilder MDHelper(getLLVMContext());
llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1);
BI->setMetadata(llvm::LLVMContext::MD_prof, Node);
EmitBlock(CheckBB);
bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Kind);
llvm::CallInst *CheckCall;
llvm::FunctionCallee SlowPathFn;
if (WithDiag) {
llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
auto *InfoPtr =
new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
llvm::GlobalVariable::PrivateLinkage, Info);
InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
SlowPathFn = CGM.getModule().getOrInsertFunction(
"__cfi_slowpath_diag",
llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy},
false));
CheckCall = Builder.CreateCall(
SlowPathFn, {TypeId, Ptr, Builder.CreateBitCast(InfoPtr, Int8PtrTy)});
} else {
SlowPathFn = CGM.getModule().getOrInsertFunction(
"__cfi_slowpath",
llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy}, false));
CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr});
}
CGM.setDSOLocal(
cast<llvm::GlobalValue>(SlowPathFn.getCallee()->stripPointerCasts()));
CheckCall->setDoesNotThrow();
EmitBlock(Cont);
}
// Emit a stub for the __cfi_check function so that the linker knows about
// this symbol in LTO mode.
void CodeGenFunction::EmitCfiCheckStub() {
llvm::Module *M = &CGM.getModule();
auto &Ctx = M->getContext();
llvm::Function *F = llvm::Function::Create(
llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy}, false),
llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M);
CGM.setDSOLocal(F);
llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F);
// FIXME: consider emitting an intrinsic call like
// call void @llvm.cfi_check(i64 %0, i8* %1, i8* %2)
// which can be lowered in CrossDSOCFI pass to the actual contents of
// __cfi_check. This would allow inlining of __cfi_check calls.
llvm::CallInst::Create(
llvm::Intrinsic::getDeclaration(M, llvm::Intrinsic::trap), "", BB);
llvm::ReturnInst::Create(Ctx, nullptr, BB);
}
// This function is basically a switch over the CFI failure kind, which is
// extracted from CFICheckFailData (1st function argument). Each case is either
// llvm.trap or a call to one of the two runtime handlers, based on
// -fsanitize-trap and -fsanitize-recover settings. Default case (invalid
// failure kind) traps, but this should really never happen. CFICheckFailData
// can be nullptr if the calling module has -fsanitize-trap behavior for this
// check kind; in this case __cfi_check_fail traps as well.
void CodeGenFunction::EmitCfiCheckFail() {
SanitizerScope SanScope(this);
FunctionArgList Args;
ImplicitParamDecl ArgData(getContext(), getContext().VoidPtrTy,
ImplicitParamDecl::Other);
ImplicitParamDecl ArgAddr(getContext(), getContext().VoidPtrTy,
ImplicitParamDecl::Other);
Args.push_back(&ArgData);
Args.push_back(&ArgAddr);
const CGFunctionInfo &FI =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, Args);
llvm::Function *F = llvm::Function::Create(
llvm::FunctionType::get(VoidTy, {VoidPtrTy, VoidPtrTy}, false),
llvm::GlobalValue::WeakODRLinkage, "__cfi_check_fail", &CGM.getModule());
F->setVisibility(llvm::GlobalValue::HiddenVisibility);
StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args,
SourceLocation());
// This function should not be affected by the blacklist. This function does
// not have a source location, but "src:*" would still apply. Revert any
// changes to SanOpts made in StartFunction.
SanOpts = CGM.getLangOpts().Sanitize;
llvm::Value *Data =
EmitLoadOfScalar(GetAddrOfLocalVar(&ArgData), /*Volatile=*/false,
CGM.getContext().VoidPtrTy, ArgData.getLocation());
llvm::Value *Addr =
EmitLoadOfScalar(GetAddrOfLocalVar(&ArgAddr), /*Volatile=*/false,
CGM.getContext().VoidPtrTy, ArgAddr.getLocation());
// Data == nullptr means the calling module has trap behavior for this check.
llvm::Value *DataIsNotNullPtr =
Builder.CreateICmpNE(Data, llvm::ConstantPointerNull::get(Int8PtrTy));
EmitTrapCheck(DataIsNotNullPtr);
llvm::StructType *SourceLocationTy =
llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty);
llvm::StructType *CfiCheckFailDataTy =
llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy);
llvm::Value *V = Builder.CreateConstGEP2_32(
CfiCheckFailDataTy,
Builder.CreatePointerCast(Data, CfiCheckFailDataTy->getPointerTo(0)), 0,
0);
Address CheckKindAddr(V, getIntAlign());
llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);
llvm::Value *AllVtables = llvm::MetadataAsValue::get(
CGM.getLLVMContext(),
llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
llvm::Value *ValidVtable = Builder.CreateZExt(
Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
{Addr, AllVtables}),
IntPtrTy);
const std::pair<int, SanitizerMask> CheckKinds[] = {
{CFITCK_VCall, SanitizerKind::CFIVCall},
{CFITCK_NVCall, SanitizerKind::CFINVCall},
{CFITCK_DerivedCast, SanitizerKind::CFIDerivedCast},
{CFITCK_UnrelatedCast, SanitizerKind::CFIUnrelatedCast},
{CFITCK_ICall, SanitizerKind::CFIICall}};
SmallVector<std::pair<llvm::Value *, SanitizerMask>, 5> Checks;
for (auto CheckKindMaskPair : CheckKinds) {
int Kind = CheckKindMaskPair.first;
SanitizerMask Mask = CheckKindMaskPair.second;
llvm::Value *Cond =
Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind));
if (CGM.getLangOpts().Sanitize.has(Mask))
EmitCheck(std::make_pair(Cond, Mask), SanitizerHandler::CFICheckFail, {},
{Data, Addr, ValidVtable});
else
EmitTrapCheck(Cond);
}
FinishFunction();
// The only reference to this function will be created during LTO link.
// Make sure it survives until then.
CGM.addUsedGlobal(F);
}
void CodeGenFunction::EmitUnreachable(SourceLocation Loc) {
if (SanOpts.has(SanitizerKind::Unreachable)) {
SanitizerScope SanScope(this);
EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
SanitizerKind::Unreachable),
SanitizerHandler::BuiltinUnreachable,
EmitCheckSourceLocation(Loc), None);
}
Builder.CreateUnreachable();
}
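/// Branch to a trap block if \p Checked is false. When optimizing, a single
/// trap block is reused across the function to save on code size.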
void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked) {
llvm::BasicBlock *Cont = createBasicBlock("cont");
// If we're optimizing, collapse all calls to trap down to just one per
// function to save on code size.
if (!CGM.getCodeGenOpts().OptimizationLevel || !TrapBB) {
TrapBB = createBasicBlock("trap");
Builder.CreateCondBr(Checked, Cont, TrapBB);
EmitBlock(TrapBB);
llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
TrapCall->setDoesNotReturn();
TrapCall->setDoesNotThrow();
Builder.CreateUnreachable();
} else {
Builder.CreateCondBr(Checked, Cont, TrapBB);
}
EmitBlock(Cont);
}
llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
llvm::CallInst *TrapCall = Builder.CreateCall(CGM.getIntrinsic(IntrID));
if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
CGM.getCodeGenOpts().TrapFuncName);
TrapCall->addAttribute(llvm::AttributeList::FunctionIndex, A);
}
return TrapCall;
}
Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E,
LValueBaseInfo *BaseInfo,
TBAAAccessInfo *TBAAInfo) {
assert(E->getType()->isArrayType() &&
"Array to pointer decay must have array source type!");
// Expressions of array type can't be bitfields or vector elements.
LValue LV = EmitLValue(E);
Address Addr = LV.getAddress();
// If the array type was an incomplete type, we need to make sure
// the decay ends up being the right type.
llvm::Type *NewTy = ConvertType(E->getType());
Addr = Builder.CreateElementBitCast(Addr, NewTy);
// Note that VLA pointers are always decayed, so we don't need to do
// anything here.
if (!E->getType()->isVariableArrayType()) {
assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
"Expected pointer to array");
Addr = Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
}
// The result of this decay conversion points to an array element within the
// base lvalue. However, since TBAA currently does not support representing
// accesses to elements of member arrays, we conservatively represent accesses
// to the pointee object as if it had no base lvalue specified.
// TODO: Support TBAA for member arrays.
QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType();
if (BaseInfo) *BaseInfo = LV.getBaseInfo();
if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType);
return Builder.CreateElementBitCast(Addr, ConvertTypeForMem(EltType));
}
/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
/// array to pointer, return the array subexpression.
static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
// If this isn't just an array->pointer decay, bail out.
const auto *CE = dyn_cast<CastExpr>(E);
if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay)
return nullptr;
// If this is a decay from a variable-length array, bail out.
const Expr *SubExpr = CE->getSubExpr();
if (SubExpr->getType()->isVariableArrayType())
return nullptr;
return SubExpr;
}
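// Illustrative note (not from the original source): for "int A[10]; ... A[i]"
// the base of the subscript is an ImplicitCastExpr with CK_ArrayToPointerDecay
// wrapped around the DeclRefExpr for A, so this returns that DeclRefExpr; for
// a VLA base it returns nullptr and callers fall back to the generic pointer
// arithmetic path.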
static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF,
llvm::Value *ptr,
ArrayRef<llvm::Value*> indices,
bool inbounds,
bool signedIndices,
SourceLocation loc,
const llvm::Twine &name = "arrayidx") {
if (inbounds) {
return CGF.EmitCheckedInBoundsGEP(ptr, indices, signedIndices,
CodeGenFunction::NotSubtraction, loc,
name);
} else {
return CGF.Builder.CreateGEP(ptr, indices, name);
}
}
static CharUnits getArrayElementAlign(CharUnits arrayAlign,
llvm::Value *idx,
CharUnits eltSize) {
// If we have a constant index, we can use the exact offset of the
// element we're accessing.
if (auto constantIdx = dyn_cast<llvm::ConstantInt>(idx)) {
CharUnits offset = constantIdx->getZExtValue() * eltSize;
return arrayAlign.alignmentAtOffset(offset);
// Otherwise, use the worst-case alignment for any element.
} else {
return arrayAlign.alignmentOfArrayElement(eltSize);
}
}
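// Illustrative note (not from the original source): for a 16-byte-aligned
// array of 4-byte elements, a constant index of 3 gives offset 12, and
// alignmentAtOffset(12) yields 4-byte alignment, whereas index 4 (offset 16)
// would yield the full 16 bytes; a non-constant index conservatively gets
// alignmentOfArrayElement(4) == 4.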
static QualType getFixedSizeElementType(const ASTContext &ctx,
const VariableArrayType *vla) {
QualType eltType;
do {
eltType = vla->getElementType();
} while ((vla = ctx.getAsVariableArrayType(eltType)));
return eltType;
}
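// Illustrative note (not from the original source): for a variably modified
// element type such as "int[m][4]" (with m a runtime value), the loop peels
// the variable dimensions and returns the innermost fixed-size type "int[4]",
// whose size then serves as the unit for the index arithmetic below.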
static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
ArrayRef<llvm::Value *> indices,
QualType eltType, bool inbounds,
bool signedIndices, SourceLocation loc,
const llvm::Twine &name = "arrayidx") {
// All the indices except the last must be zero.
#ifndef NDEBUG
for (auto idx : indices.drop_back())
assert(isa<llvm::ConstantInt>(idx) &&
cast<llvm::ConstantInt>(idx)->isZero());
#endif
// Determine the element size of the statically-sized base. This is
// the thing that the indices are expressed in terms of.
if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) {
eltType = getFixedSizeElementType(CGF.getContext(), vla);
}
// We can use that to compute the best alignment of the element.
CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
CharUnits eltAlign =
getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);
llvm::Value *eltPtr;
auto LastIndex = dyn_cast<llvm::ConstantInt>(indices.back());
if (!CGF.IsInPreservedAIRegion || !LastIndex) {
eltPtr = emitArraySubscriptGEP(
CGF, addr.getPointer(), indices, inbounds, signedIndices,
loc, name);
} else {
// Remember the original array subscript for the BPF target.
unsigned idx = LastIndex->getZExtValue();
eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(addr.getPointer(),
indices.size() - 1,
idx);
}
return Address(eltPtr, eltAlign);
}
LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
bool Accessed) {
// The index must always be an integer, which is not an aggregate. Emit it
// in lexical order (this complexity is, sadly, required by C++17).
llvm::Value *IdxPre =
(E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr;
bool SignedIndices = false;
auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * {
auto *Idx = IdxPre;
if (E->getLHS() != E->getIdx()) {
assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS");
Idx = EmitScalarExpr(E->getIdx());
}
QualType IdxTy = E->getIdx()->getType();
bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
SignedIndices |= IdxSigned;
if (SanOpts.has(SanitizerKind::ArrayBounds))
EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);
// Extend or truncate the index type to 32 or 64 bits.
if (Promote && Idx->getType() != IntPtrTy)
Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
return Idx;
};
IdxPre = nullptr;
// If the base is a vector type, then we are forming a vector element lvalue
// with this subscript.
if (E->getBase()->getType()->isVectorType() &&
!isa<ExtVectorElementExpr>(E->getBase())) {
// Emit the vector as an lvalue to get its address.
LValue LHS = EmitLValue(E->getBase());
auto *Idx = EmitIdxAfterBase(/*Promote*/false);
assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
return LValue::MakeVectorElt(LHS.getAddress(), Idx, E->getBase()->getType(),
LHS.getBaseInfo(), TBAAAccessInfo());
}
// All the other cases basically behave like simple offsetting.
// Handle the extvector case we ignored above.
if (isa<ExtVectorElementExpr>(E->getBase())) {
LValue LV = EmitLValue(E->getBase());
auto *Idx = EmitIdxAfterBase(/*Promote*/true);
Address Addr = EmitExtVectorElementLValue(LV);
QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true,
SignedIndices, E->getExprLoc());
return MakeAddrLValue(Addr, EltType, LV.getBaseInfo(),
CGM.getTBAAInfoForSubobject(LV, EltType));
}
LValueBaseInfo EltBaseInfo;
TBAAAccessInfo EltTBAAInfo;
Address Addr = Address::invalid();
if (const VariableArrayType *vla =
getContext().getAsVariableArrayType(E->getType())) {
// The base must be a pointer, which is not an aggregate. Emit
// it. It needs to be emitted first in case it's what captures
// the VLA bounds.
Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
auto *Idx = EmitIdxAfterBase(/*Promote*/true);
// The element count here is the total number of non-VLA elements.
llvm::Value *numElements = getVLASize(vla).NumElts;
// Effectively, the multiply by the VLA size is part of the GEP.
// GEP indexes are signed, and scaling an index isn't permitted to
// overflow in a signed sense, so we use the same semantics for our
// explicit multiply. We suppress this if overflow is not undefined behavior.
if (getLangOpts().isSignedOverflowDefined()) {
Idx = Builder.CreateMul(Idx, numElements);
} else {
Idx = Builder.CreateNSWMul(Idx, numElements);
}
Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(),
!getLangOpts().isSignedOverflowDefined(),
SignedIndices, E->getExprLoc());
} else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()) {
// Indexing over an interface, as in "NSString *P; P[4];"
// Emit the base pointer.
Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
auto *Idx = EmitIdxAfterBase(/*Promote*/true);
CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
llvm::Value *InterfaceSizeVal =
llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());
llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);
// We don't necessarily build correct LLVM struct types for ObjC
// interfaces, so we can't rely on GEP to do this scaling
// correctly; instead we need to cast to i8*. FIXME: is this actually
// true? A lot of other things in the fragile ABI would break...
llvm::Type *OrigBaseTy = Addr.getType();
Addr = Builder.CreateElementBitCast(Addr, Int8Ty);
// Do the GEP.
CharUnits EltAlign =
getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
llvm::Value *EltPtr =
emitArraySubscriptGEP(*this, Addr.getPointer(), ScaledIdx, false,
SignedIndices, E->getExprLoc());
Addr = Address(EltPtr, EltAlign);
// Cast back.
Addr = Builder.CreateBitCast(Addr, OrigBaseTy);
} else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
// If this is A[i] where A is an array, the frontend will have decayed the
// base to be an ArrayToPointerDecay implicit cast. While correct, it is
// inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
// "gep x, i" here. Emit one "gep A, 0, i".
assert(Array->getType()->isArrayType() &&
"Array to pointer decay must have array source type!");
LValue ArrayLV;
// For simple multidimensional array indexing, set the 'accessed' flag for
// better bounds-checking of the base expression.
if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
else
ArrayLV = EmitLValue(Array);
auto *Idx = EmitIdxAfterBase(/*Promote*/true);
// Propagate the alignment from the array itself to the result.
Addr = emitArraySubscriptGEP(
*this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices,
E->getExprLoc());
EltBaseInfo = ArrayLV.getBaseInfo();
EltTBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, E->getType());
} else {
// The base must be a pointer; emit it with an estimate of its alignment.
Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
auto *Idx = EmitIdxAfterBase(/*Promote*/true);
Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(),
!getLangOpts().isSignedOverflowDefined(),
SignedIndices, E->getExprLoc());
}
LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo);
if (getLangOpts().ObjC &&
getLangOpts().getGC() != LangOptions::NonGC) {
LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
setObjCGCLValueClass(getContext(), E, LV);
}
return LV;
}
static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
LValueBaseInfo &BaseInfo,
TBAAAccessInfo &TBAAInfo,
QualType BaseTy, QualType ElTy,
bool IsLowerBound) {
LValue BaseLVal;
if (auto *ASE = dyn_cast<OMPArraySectionExpr>(Base->IgnoreParenImpCasts())) {
BaseLVal = CGF.EmitOMPArraySectionExpr(ASE, IsLowerBound);
if (BaseTy->isArrayType()) {
Address Addr = BaseLVal.getAddress();
BaseInfo = BaseLVal.getBaseInfo();
// If the array type was an incomplete type, we need to make sure
// the decay ends up being the right type.
llvm::Type *NewTy = CGF.ConvertType(BaseTy);
Addr = CGF.Builder.CreateElementBitCast(Addr, NewTy);
// Note that VLA pointers are always decayed, so we don't need to do
// anything here.
if (!BaseTy->isVariableArrayType()) {
assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
"Expected pointer to array");
Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
}
return CGF.Builder.CreateElementBitCast(Addr,
CGF.ConvertTypeForMem(ElTy));
}
LValueBaseInfo TypeBaseInfo;
TBAAAccessInfo TypeTBAAInfo;
CharUnits Align = CGF.getNaturalTypeAlignment(ElTy, &TypeBaseInfo,
&TypeTBAAInfo);
BaseInfo.mergeForCast(TypeBaseInfo);
TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress()), Align);
}
return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
}
LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
bool IsLowerBound) {
QualType BaseTy = OMPArraySectionExpr::getBaseOriginalType(E->getBase());
QualType ResultExprTy;
if (auto *AT = getContext().getAsArrayType(BaseTy))
ResultExprTy = AT->getElementType();
else
ResultExprTy = BaseTy->getPointeeType();
llvm::Value *Idx = nullptr;
if (IsLowerBound || E->getColonLoc().isInvalid()) {
// A lower or upper bound was requested, but no length was provided and
// there is no ':' for the default length, so the length is 1.
// Idx = LowerBound ?: 0;
if (auto *LowerBound = E->getLowerBound()) {
Idx = Builder.CreateIntCast(
EmitScalarExpr(LowerBound), IntPtrTy,
LowerBound->getType()->hasSignedIntegerRepresentation());
} else
Idx = llvm::ConstantInt::getNullValue(IntPtrTy);
} else {
// Try to emit the length or lower bound as a constant. If this is
// possible, 1 is subtracted from the constant length or lower bound.
// Otherwise, emit the LLVM IR (LB + Len) - 1.
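// Illustrative note (not from the original source): for a section like
// a[2:4] with both bounds constant, the last index folds to 2 + 4 - 1 = 5;
// if only one operand is constant, that constant is pre-decremented and a
// single add against the emitted value of the other operand is produced.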
auto &C = CGM.getContext();
auto *Length = E->getLength();
llvm::APSInt ConstLength;
if (Length) {
// Idx = LowerBound + Length - 1;
if (Length->isIntegerConstantExpr(ConstLength, C)) {
ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
Length = nullptr;
}
auto *LowerBound = E->getLowerBound();
llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false);
if (LowerBound && LowerBound->isIntegerConstantExpr(ConstLowerBound, C)) {
ConstLowerBound = ConstLowerBound.zextOrTrunc(PointerWidthInBits);
LowerBound = nullptr;
}
if (!Length)
--ConstLength;
else if (!LowerBound)
--ConstLowerBound;
if (Length || LowerBound) {
auto *LowerBoundVal =
LowerBound
? Builder.CreateIntCast(
EmitScalarExpr(LowerBound), IntPtrTy,
LowerBound->getType()->hasSignedIntegerRepresentation())
: llvm::ConstantInt::get(IntPtrTy, ConstLowerBound);
auto *LengthVal =
Length
? Builder.CreateIntCast(
EmitScalarExpr(Length), IntPtrTy,
Length->getType()->hasSignedIntegerRepresentation())
: llvm::ConstantInt::get(IntPtrTy, ConstLength);
Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len",
/*HasNUW=*/false,
!getLangOpts().isSignedOverflowDefined());
if (Length && LowerBound) {
Idx = Builder.CreateSub(
Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1",
/*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
}
} else
Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound);
} else {
// Idx = ArraySize - 1;
QualType ArrayTy = BaseTy->isPointerType()
? E->getBase()->IgnoreParenImpCasts()->getType()
: BaseTy;
if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) {
Length = VAT->getSizeExpr();
if (Length->isIntegerConstantExpr(ConstLength, C))
Length = nullptr;
} else {
auto *CAT = C.getAsConstantArrayType(ArrayTy);
ConstLength = CAT->getSize();
}
if (Length) {
auto *LengthVal = Builder.CreateIntCast(
EmitScalarExpr(Length), IntPtrTy,
Length->getType()->hasSignedIntegerRepresentation());
Idx = Builder.CreateSub(
LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1",
/*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
} else {
ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
--ConstLength;
Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength);
}
}
}
assert(Idx);
Address EltPtr = Address::invalid();
LValueBaseInfo BaseInfo;
TBAAAccessInfo TBAAInfo;
if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) {
// The base must be a pointer, which is not an aggregate. Emit
// it. It needs to be emitted first in case it's what captures
// the VLA bounds.
Address Base =
emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo,
BaseTy, VLA->getElementType(), IsLowerBound);
// The element count here is the total number of non-VLA elements.
llvm::Value *NumElements = getVLASize(VLA).NumElts;
// Effectively, the multiply by the VLA size is part of the GEP.
// GEP indexes are signed, and scaling an index isn't permitted to
// overflow in a signed sense, so we use the same semantics for our
// explicit multiply. We suppress this if overflow is not undefined behavior.
if (getLangOpts().isSignedOverflowDefined())
Idx = Builder.CreateMul(Idx, NumElements);
else
Idx = Builder.CreateNSWMul(Idx, NumElements);
EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(),
!getLangOpts().isSignedOverflowDefined(),
/*signedIndices=*/false, E->getExprLoc());
} else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
// If this is A[i] where A is an array, the frontend will have decayed the
// base to be an ArrayToPointerDecay implicit cast. While correct, it is
// inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
// "gep x, i" here. Emit one "gep A, 0, i".
assert(Array->getType()->isArrayType() &&
"Array to pointer decay must have array source type!");
LValue ArrayLV;
// For simple multidimensional array indexing, set the 'accessed' flag for
// better bounds-checking of the base expression.
if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
else
ArrayLV = EmitLValue(Array);
// Propagate the alignment from the array itself to the result.
EltPtr = emitArraySubscriptGEP(
*this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
ResultExprTy, !getLangOpts().isSignedOverflowDefined(),
/*signedIndices=*/false, E->getExprLoc());
BaseInfo = ArrayLV.getBaseInfo();
TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy);
} else {
Address Base = emitOMPArraySectionBase(*this, E->getBase(), BaseInfo,
TBAAInfo, BaseTy, ResultExprTy,
IsLowerBound);
EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy,
!getLangOpts().isSignedOverflowDefined(),
/*signedIndices=*/false, E->getExprLoc());
}
return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo, TBAAInfo);
}
LValue CodeGenFunction::
EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
// Emit the base vector as an l-value.
LValue Base;
// ExtVectorElementExpr's base can either be a vector or pointer to vector.
if (E->isArrow()) {
// If it is a pointer to a vector, emit the address and form an lvalue with
// it.
LValueBaseInfo BaseInfo;
TBAAAccessInfo TBAAInfo;
Address Ptr = EmitPointerWithAlignment(E->getBase(), &BaseInfo, &TBAAInfo);
const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
Base = MakeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo, TBAAInfo);
Base.getQuals().removeObjCGCAttr();
} else if (E->getBase()->isGLValue()) {
// Otherwise, if the base is an lvalue (as in the case of foo.x.x),
// emit the base as an lvalue.
assert(E->getBase()->getType()->isVectorType());
Base = EmitLValue(E->getBase());
} else {
// Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
assert(E->getBase()->getType()->isVectorType() &&
"Result must be a vector");
llvm::Value *Vec = EmitScalarExpr(E->getBase());
// Store the vector to memory (because LValue wants an address).
Address VecMem = CreateMemTemp(E->getBase()->getType());
Builder.CreateStore(Vec, VecMem);
Base = MakeAddrLValue(VecMem, E->getBase()->getType(),
AlignmentSource::Decl);
}
QualType type =
E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
// Encode the element access list into a vector of unsigned indices.
SmallVector<uint32_t, 4> Indices;
E->getEncodedElementAccess(Indices);
if (Base.isSimple()) {
llvm::Constant *CV =
llvm::ConstantDataVector::get(getLLVMContext(), Indices);
return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
Base.getBaseInfo(), TBAAAccessInfo());
}
assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
llvm::Constant *BaseElts = Base.getExtVectorElts();
SmallVector<llvm::Constant *, 4> CElts;
for (unsigned i = 0, e = Indices.size(); i != e; ++i)
CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
llvm::Constant *CV = llvm::ConstantVector::get(CElts);
return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type,
Base.getBaseInfo(), TBAAAccessInfo());
}
LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, E)) {
EmitIgnoredExpr(E->getBase());
return EmitDeclRefLValue(DRE);
}
Expr *BaseExpr = E->getBase();
// If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
LValue BaseLV;
if (E->isArrow()) {
LValueBaseInfo BaseInfo;
TBAAAccessInfo TBAAInfo;
Address Addr = EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo);
QualType PtrTy = BaseExpr->getType()->getPointeeType();
SanitizerSet SkippedChecks;
bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr);
if (IsBaseCXXThis)
SkippedChecks.set(SanitizerKind::Alignment, true);
if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr))
SkippedChecks.set(SanitizerKind::Null, true);
EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr.getPointer(), PtrTy,
/*Alignment=*/CharUnits::Zero(), SkippedChecks);
BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo);
} else
BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);
NamedDecl *ND = E->getMemberDecl();
if (auto *Field = dyn_cast<FieldDecl>(ND)) {
LValue LV = EmitLValueForField(BaseLV, Field);
setObjCGCLValueClass(getContext(), E, LV);
return LV;
}
if (const auto *FD = dyn_cast<FunctionDecl>(ND))
return EmitFunctionDeclLValue(*this, E, FD);
llvm_unreachable("Unhandled member declaration!");
}
/// Given that we are currently emitting a lambda, emit an l-value for
/// one of its members.
LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) {
assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent()->isLambda());
assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent() == Field->getParent());
QualType LambdaTagType =
getContext().getTagDeclType(Field->getParent());
LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue, LambdaTagType);
return EmitLValueForField(LambdaLV, Field);
}
/// Get the field index in the debug info. The debug info structure/union
/// ignores unnamed bit-fields.
unsigned CodeGenFunction::getDebugInfoFIndex(const RecordDecl *Rec,
unsigned FieldIndex) {
unsigned I = 0, Skipped = 0;
for (auto F : Rec->getDefinition()->fields()) {
if (I == FieldIndex)
break;
if (F->isUnnamedBitfield())
Skipped++;
I++;
}
return FieldIndex - Skipped;
}
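// Illustrative note (not from the original source): for
//   struct S { int a; int : 0; int b; };
// field b has FieldIndex 2 in the AST, but the unnamed bit-field is omitted
// from the debug info layout, so this returns 2 - 1 = 1.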
/// Get the address of a zero-sized field within a record. The resulting
/// address doesn't necessarily have the right type.
static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base,
const FieldDecl *Field) {
CharUnits Offset = CGF.getContext().toCharUnitsFromBits(
CGF.getContext().getFieldOffset(Field));
if (Offset.isZero())
return Base;
Base = CGF.Builder.CreateElementBitCast(Base, CGF.Int8Ty);
return CGF.Builder.CreateConstInBoundsByteGEP(Base, Offset);
}
/// Drill down to the storage of a field without walking into
/// reference types.
///
/// The resulting address doesn't necessarily have the right type.
static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base,
const FieldDecl *field) {
if (field->isZeroSize(CGF.getContext()))
return emitAddrOfZeroSizeField(CGF, base, field);
const RecordDecl *rec = field->getParent();
unsigned idx =
CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
return CGF.Builder.CreateStructGEP(base, idx, field->getName());
}
static Address emitPreserveStructAccess(CodeGenFunction &CGF, Address base,
const FieldDecl *field) {
const RecordDecl *rec = field->getParent();
llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateRecordType(
CGF.getContext().getRecordType(rec), rec->getLocation());
unsigned idx =
CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
return CGF.Builder.CreatePreserveStructAccessIndex(
base, idx, CGF.getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo);
}
static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl();
if (!RD)
return false;
if (RD->isDynamicClass())
return true;
for (const auto &Base : RD->bases())
if (hasAnyVptr(Base.getType(), Context))
return true;
for (const FieldDecl *Field : RD->fields())
if (hasAnyVptr(Field->getType(), Context))
return true;
return false;
}
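// Illustrative note (not from the original source): hasAnyVptr is true for
// "struct D { virtual ~D(); };" itself, for any class that inherits from D,
// and for "struct U { D d; };", since a vptr can be reached through either a
// base subobject or a field.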
LValue CodeGenFunction::EmitLValueForField(LValue base,
const FieldDecl *field) {
LValueBaseInfo BaseInfo = base.getBaseInfo();
if (field->isBitField()) {
const CGRecordLayout &RL =
CGM.getTypes().getCGRecordLayout(field->getParent());
const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
Address Addr = base.getAddress();
unsigned Idx = RL.getLLVMFieldNo(field);
if (Idx != 0)
// For structs, we GEP to the field that the record layout suggests.
Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
// Get the access type.
llvm::Type *FieldIntTy =
llvm::Type::getIntNTy(getLLVMContext(), Info.StorageSize);
if (Addr.getElementType() != FieldIntTy)
Addr = Builder.CreateElementBitCast(Addr, FieldIntTy);
QualType fieldType =
field->getType().withCVRQualifiers(base.getVRQualifiers());
// TODO: Support TBAA for bit fields.
LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo,
TBAAAccessInfo());
}
// Fields of may-alias structures are may-alias themselves.
// FIXME: this should get propagated down through anonymous structs
// and unions.
QualType FieldType = field->getType();
const RecordDecl *rec = field->getParent();
AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource();
LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource));
TBAAAccessInfo FieldTBAAInfo;
if (base.getTBAAInfo().isMayAlias() ||
rec->hasAttr<MayAliasAttr>() || FieldType->isVectorType()) {
FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
} else if (rec->isUnion()) {
// TODO: Support TBAA for unions.
FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
} else {
// If no base type has been assigned for the base access, then try to
// generate one for this base lvalue.
FieldTBAAInfo = base.getTBAAInfo();
if (!FieldTBAAInfo.BaseType) {
FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(base.getType());
assert(!FieldTBAAInfo.Offset &&
"Nonzero offset for an access with no base type!");
}
// Adjust offset to be relative to the base type.
const ASTRecordLayout &Layout =
getContext().getASTRecordLayout(field->getParent());
unsigned CharWidth = getContext().getCharWidth();
if (FieldTBAAInfo.BaseType)
FieldTBAAInfo.Offset +=
Layout.getFieldOffset(field->getFieldIndex()) / CharWidth;
// Update the final access type and size.
FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(FieldType);
FieldTBAAInfo.Size =
getContext().getTypeSizeInChars(FieldType).getQuantity();
}
Address addr = base.getAddress();
if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) {
if (CGM.getCodeGenOpts().StrictVTablePointers &&
ClassDef->isDynamicClass()) {
// Getting to any field of a dynamic object requires stripping the dynamic
// information provided by invariant.group. This is because accessing
// fields may leak the real address of the dynamic object, which could
// result in miscompilation when the leaked pointer is compared.
auto *stripped = Builder.CreateStripInvariantGroup(addr.getPointer());
addr = Address(stripped, addr.getAlignment());
}
}
unsigned RecordCVR = base.getVRQualifiers();
if (rec->isUnion()) {
// For unions, there is no pointer adjustment.
assert(!FieldType->isReferenceType() && "union has reference member");
if (CGM.getCodeGenOpts().StrictVTablePointers &&
hasAnyVptr(FieldType, getContext()))
// Because unions can easily skip invariant.barriers, we need to add
// a barrier every time a CXXRecord field with a vptr is referenced.
addr = Address(Builder.CreateLaunderInvariantGroup(addr.getPointer()),
addr.getAlignment());
if (IsInPreservedAIRegion) {
// Remember the original union field index
llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
getContext().getRecordType(rec), rec->getLocation());
addr = Address(
Builder.CreatePreserveUnionAccessIndex(
addr.getPointer(), getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
addr.getAlignment());
}
} else {
if (!IsInPreservedAIRegion)
// For structs, we GEP to the field that the record layout suggests.
addr = emitAddrOfFieldStorage(*this, addr, field);
else
// Remember the original struct field index
addr = emitPreserveStructAccess(*this, addr, field);
// If this is a reference field, load the reference right now.
if (FieldType->isReferenceType()) {
LValue RefLVal = MakeAddrLValue(addr, FieldType, FieldBaseInfo,
FieldTBAAInfo);
if (RecordCVR & Qualifiers::Volatile)
RefLVal.getQuals().addVolatile();
addr = EmitLoadOfReference(RefLVal, &FieldBaseInfo, &FieldTBAAInfo);
// Qualifiers on the struct don't apply to the referencee.
RecordCVR = 0;
FieldType = FieldType->getPointeeType();
}
}
// Make sure that the address is pointing to the right type. This is critical
// for both unions and structs. A union needs a bitcast, and a struct element
// needs a bitcast if the laid-out LLVM type doesn't match the desired
// type.
addr = Builder.CreateElementBitCast(
addr, CGM.getTypes().ConvertTypeForMem(FieldType), field->getName());
if (field->hasAttr<AnnotateAttr>())
addr = EmitFieldAnnotations(field, addr);
LValue LV = MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
LV.getQuals().addCVRQualifiers(RecordCVR);
// __weak attribute on a field is ignored.
if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
LV.getQuals().removeObjCGCAttr();
return LV;
}
LValue
CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
const FieldDecl *Field) {
QualType FieldType = Field->getType();
if (!FieldType->isReferenceType())
return EmitLValueForField(Base, Field);
Address V = emitAddrOfFieldStorage(*this, Base.getAddress(), Field);
// Make sure that the address is pointing to the right type.
llvm::Type *llvmType = ConvertTypeForMem(FieldType);
V = Builder.CreateElementBitCast(V, llvmType, Field->getName());
// TODO: Generate TBAA information that describes this access as a structure
// member access and not just an access to an object of the field's type. This
// should be similar to what we do in EmitLValueForField().
LValueBaseInfo BaseInfo = Base.getBaseInfo();
AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource();
LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource));
return MakeAddrLValue(V, FieldType, FieldBaseInfo,
CGM.getTBAAInfoForSubobject(Base, FieldType));
}
LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
if (E->isFileScope()) {
ConstantAddress GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl);
}
if (E->getType()->isVariablyModifiedType())
// Make sure to emit the VLA size.
EmitVariablyModifiedType(E->getType());
Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
const Expr *InitExpr = E->getInitializer();
LValue Result = MakeAddrLValue(DeclPtr, E->getType(), AlignmentSource::Decl);
EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
/*Init*/ true);
return Result;
}
LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) {
if (!E->isGLValue())
// Initializing an aggregate temporary in C++11: T{...}.
return EmitAggExprToLValue(E);
// An lvalue initializer list must be initializing a reference.
assert(E->isTransparent() && "non-transparent glvalue init list");
return EmitLValue(E->getInit(0));
}
/// Emit the operand of a glvalue conditional operator. This is either a glvalue
/// or a (possibly-parenthesized) throw-expression. If this is a throw, no
/// LValue is returned and the current block has been terminated.
static Optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF,
const Expr *Operand) {
if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) {
CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/false);
return None;
}
return CGF.EmitLValue(Operand);
}
LValue CodeGenFunction::
EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
if (!expr->isGLValue()) {
// The ?: here must be of aggregate type.
assert(hasAggregateEvaluationKind(expr->getType()) &&
"Unexpected conditional operator!");
return EmitAggExprToLValue(expr);
}
OpaqueValueMapping binding(*this, expr);
const Expr *condExpr = expr->getCond();
bool CondExprBool;
if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr();
if (!CondExprBool) std::swap(live, dead);
if (!ContainsLabel(dead)) {
// If the true case is live, we need to track its region.
if (CondExprBool)
incrementProfileCounter(expr);
return EmitLValue(live);
}
}
llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true");
llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false");
llvm::BasicBlock *contBlock = createBasicBlock("cond.end");
ConditionalEvaluation eval(*this);
EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock, getProfileCount(expr));
// Any temporaries created here are conditional.
EmitBlock(lhsBlock);
incrementProfileCounter(expr);
eval.begin(*this);
Optional<LValue> lhs =
EmitLValueOrThrowExpression(*this, expr->getTrueExpr());
eval.end(*this);
if (lhs && !lhs->isSimple())
return EmitUnsupportedLValue(expr, "conditional operator");
lhsBlock = Builder.GetInsertBlock();
if (lhs)
Builder.CreateBr(contBlock);
// Any temporaries created here are conditional.
EmitBlock(rhsBlock);
eval.begin(*this);
Optional<LValue> rhs =
EmitLValueOrThrowExpression(*this, expr->getFalseExpr());
eval.end(*this);
if (rhs && !rhs->isSimple())
return EmitUnsupportedLValue(expr, "conditional operator");
rhsBlock = Builder.GetInsertBlock();
EmitBlock(contBlock);
if (lhs && rhs) {
llvm::PHINode *phi = Builder.CreatePHI(lhs->getPointer()->getType(),
2, "cond-lvalue");
phi->addIncoming(lhs->getPointer(), lhsBlock);
phi->addIncoming(rhs->getPointer(), rhsBlock);
Address result(phi, std::min(lhs->getAlignment(), rhs->getAlignment()));
AlignmentSource alignSource =
std::max(lhs->getBaseInfo().getAlignmentSource(),
rhs->getBaseInfo().getAlignmentSource());
TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForConditionalOperator(
lhs->getTBAAInfo(), rhs->getTBAAInfo());
return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource),
TBAAInfo);
} else {
assert((lhs || rhs) &&
"both operands of glvalue conditional are throw-expressions?");
return lhs ? *lhs : *rhs;
}
}
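// Illustrative note (not from the original source): for "(cond ? x : y) = 0;"
// each arm's address is computed in its own block and the two pointers are
// merged through the "cond-lvalue" PHI above; when one arm is a
// throw-expression, only the surviving arm's lvalue is returned.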
/// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
/// type. If the cast is to a reference, we can have the usual lvalue result,
/// otherwise if a cast is needed by the code generator in an lvalue context,
/// then it must mean that we need the address of an aggregate in order to
/// access one of its members. This can happen for all the reasons that casts
/// are permitted with aggregate result, including noop aggregate casts, and
/// cast from scalar to union.
LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
switch (E->getCastKind()) {
case CK_ToVoid:
case CK_BitCast:
case CK_LValueToRValueBitCast:
case CK_ArrayToPointerDecay:
case CK_FunctionToPointerDecay:
case CK_NullToMemberPointer:
case CK_NullToPointer:
case CK_IntegralToPointer:
case CK_PointerToIntegral:
case CK_PointerToBoolean:
case CK_VectorSplat:
case CK_IntegralCast:
case CK_BooleanToSignedIntegral:
case CK_IntegralToBoolean:
case CK_IntegralToFloating:
case CK_FloatingToIntegral:
case CK_FloatingToBoolean:
case CK_FloatingCast:
case CK_FloatingRealToComplex:
case CK_FloatingComplexToReal:
case CK_FloatingComplexToBoolean:
case CK_FloatingComplexCast:
case CK_FloatingComplexToIntegralComplex:
case CK_IntegralRealToComplex:
case CK_IntegralComplexToReal:
case CK_IntegralComplexToBoolean:
case CK_IntegralComplexCast:
case CK_IntegralComplexToFloatingComplex:
case CK_DerivedToBaseMemberPointer:
case CK_BaseToDerivedMemberPointer:
case CK_MemberPointerToBoolean:
case CK_ReinterpretMemberPointer:
case CK_AnyPointerToBlockPointerCast:
case CK_ARCProduceObject:
case CK_ARCConsumeObject:
case CK_ARCReclaimReturnedObject:
case CK_ARCExtendBlockObject:
case CK_CopyAndAutoreleaseBlockObject:
case CK_IntToOCLSampler:
case CK_FixedPointCast:
case CK_FixedPointToBoolean:
case CK_FixedPointToIntegral:
case CK_IntegralToFixedPoint:
return EmitUnsupportedLValue(E, "unexpected cast lvalue");
case CK_Dependent:
llvm_unreachable("dependent cast kind in IR gen!");
case CK_BuiltinFnToFnPtr:
llvm_unreachable("builtin functions are handled elsewhere");
// These are never l-values; just use the aggregate emission code.
case CK_NonAtomicToAtomic:
case CK_AtomicToNonAtomic:
return EmitAggExprToLValue(E);
case CK_Dynamic: {
LValue LV = EmitLValue(E->getSubExpr());
Address V = LV.getAddress();
const auto *DCE = cast<CXXDynamicCastExpr>(E);
return MakeNaturalAlignAddrLValue(EmitDynamicCast(V, DCE), E->getType());
}
case CK_ConstructorConversion:
case CK_UserDefinedConversion:
case CK_CPointerToObjCPointerCast:
case CK_BlockPointerToObjCPointerCast:
case CK_NoOp:
case CK_LValueToRValue:
return EmitLValue(E->getSubExpr());
case CK_UncheckedDerivedToBase:
case CK_DerivedToBase: {
const RecordType *DerivedClassTy =
E->getSubExpr()->getType()->getAs<RecordType>();
auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
LValue LV = EmitLValue(E->getSubExpr());
Address This = LV.getAddress();
// Perform the derived-to-base conversion
Address Base = GetAddressOfBaseClass(
This, DerivedClassDecl, E->path_begin(), E->path_end(),
/*NullCheckValue=*/false, E->getExprLoc());
// TODO: Support accesses to members of base classes in TBAA. For now, we
// conservatively pretend that the complete object is of the base class
// type.
return MakeAddrLValue(Base, E->getType(), LV.getBaseInfo(),
CGM.getTBAAInfoForSubobject(LV, E->getType()));
}
case CK_ToUnion:
return EmitAggExprToLValue(E);
case CK_BaseToDerived: {
const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>();
auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
LValue LV = EmitLValue(E->getSubExpr());
// Perform the base-to-derived conversion
Address Derived =
GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl,
E->path_begin(), E->path_end(),
/*NullCheckValue=*/false);
// C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
// performed and the object is not of the derived type.
if (sanitizePerformTypeCheck())
EmitTypeCheck(TCK_DowncastReference, E->getExprLoc(),
Derived.getPointer(), E->getType());
if (SanOpts.has(SanitizerKind::CFIDerivedCast))
EmitVTablePtrCheckForCast(E->getType(), Derived.getPointer(),
/*MayBeNull=*/false, CFITCK_DerivedCast,
E->getBeginLoc());
return MakeAddrLValue(Derived, E->getType(), LV.getBaseInfo(),
CGM.getTBAAInfoForSubobject(LV, E->getType()));
}
case CK_LValueBitCast: {
// This must be a reinterpret_cast (or C-style equivalent).
const auto *CE = cast<ExplicitCastExpr>(E);
CGM.EmitExplicitCastExprType(CE, this);
LValue LV = EmitLValue(E->getSubExpr());
Address V = Builder.CreateBitCast(LV.getAddress(),
ConvertType(CE->getTypeAsWritten()));
if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
EmitVTablePtrCheckForCast(E->getType(), V.getPointer(),
/*MayBeNull=*/false, CFITCK_UnrelatedCast,
E->getBeginLoc());
return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
CGM.getTBAAInfoForSubobject(LV, E->getType()));
}
case CK_AddressSpaceConversion: {
LValue LV = EmitLValue(E->getSubExpr());
QualType DestTy = getContext().getPointerType(E->getType());
llvm::Value *V = getTargetHooks().performAddrSpaceCast(
*this, LV.getPointer(), E->getSubExpr()->getType().getAddressSpace(),
E->getType().getAddressSpace(), ConvertType(DestTy));
return MakeAddrLValue(Address(V, LV.getAddress().getAlignment()),
E->getType(), LV.getBaseInfo(), LV.getTBAAInfo());
}
case CK_ObjCObjectLValueCast: {
LValue LV = EmitLValue(E->getSubExpr());
Address V = Builder.CreateElementBitCast(LV.getAddress(),
ConvertType(E->getType()));
return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
CGM.getTBAAInfoForSubobject(LV, E->getType()));
}
case CK_ZeroToOCLOpaqueType:
llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
}
llvm_unreachable("Unhandled lvalue cast kind?");
}
LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
assert(OpaqueValueMappingData::shouldBindAsLValue(e));
return getOrCreateOpaqueLValueMapping(e);
}
LValue
CodeGenFunction::getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e) {
assert(OpaqueValueMapping::shouldBindAsLValue(e));
llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
it = OpaqueLValues.find(e);
if (it != OpaqueLValues.end())
return it->second;
assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
return EmitLValue(e->getSourceExpr());
}
RValue
CodeGenFunction::getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e) {
assert(!OpaqueValueMapping::shouldBindAsLValue(e));
llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
it = OpaqueRValues.find(e);
if (it != OpaqueRValues.end())
return it->second;
assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
return EmitAnyExpr(e->getSourceExpr());
}
RValue CodeGenFunction::EmitRValueForField(LValue LV,
const FieldDecl *FD,
SourceLocation Loc) {
QualType FT = FD->getType();
LValue FieldLV = EmitLValueForField(LV, FD);
switch (getEvaluationKind(FT)) {
case TEK_Complex:
return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc));
case TEK_Aggregate:
return FieldLV.asAggregateRValue();
case TEK_Scalar:
// This routine is used to load fields one-by-one to perform a copy, so
// don't load reference fields.
if (FD->getType()->isReferenceType())
return RValue::get(FieldLV.getPointer());
return EmitLoadOfLValue(FieldLV, Loc);
}
llvm_unreachable("bad evaluation kind");
}
//===--------------------------------------------------------------------===//
// Expression Emission
//===--------------------------------------------------------------------===//
RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
ReturnValueSlot ReturnValue) {
// Builtins never have block type.
if (E->getCallee()->getType()->isBlockPointerType())
return EmitBlockCallExpr(E, ReturnValue);
if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E))
return EmitCXXMemberCallExpr(CE, ReturnValue);
if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E))
return EmitCUDAKernelCallExpr(CE, ReturnValue);
if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
if (const CXXMethodDecl *MD =
dyn_cast_or_null<CXXMethodDecl>(CE->getCalleeDecl()))
return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);
CGCallee callee = EmitCallee(E->getCallee());
if (callee.isBuiltin()) {
return EmitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(),
E, ReturnValue);
}
if (callee.isPseudoDestructor()) {
return EmitCXXPseudoDestructorExpr(callee.getPseudoDestructorExpr());
}
return EmitCall(E->getCallee()->getType(), callee, E, ReturnValue);
}
/// Emit a CallExpr without considering whether it might be a subclass.
RValue CodeGenFunction::EmitSimpleCallExpr(const CallExpr *E,
ReturnValueSlot ReturnValue) {
CGCallee Callee = EmitCallee(E->getCallee());
return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue);
}
static CGCallee EmitDirectCallee(CodeGenFunction &CGF, const FunctionDecl *FD) {
if (auto builtinID = FD->getBuiltinID()) {
return CGCallee::forBuiltin(builtinID, FD);
}
llvm::Constant *calleePtr = EmitFunctionDeclPointer(CGF.CGM, FD);
return CGCallee::forDirect(calleePtr, GlobalDecl(FD));
}
CGCallee CodeGenFunction::EmitCallee(const Expr *E) {
E = E->IgnoreParens();
// Look through function-to-pointer decay.
if (auto ICE = dyn_cast<ImplicitCastExpr>(E)) {
if (ICE->getCastKind() == CK_FunctionToPointerDecay ||
ICE->getCastKind() == CK_BuiltinFnToFnPtr) {
return EmitCallee(ICE->getSubExpr());
}
// Resolve direct calls.
} else if (auto DRE = dyn_cast<DeclRefExpr>(E)) {
if (auto FD = dyn_cast<FunctionDecl>(DRE->getDecl())) {
return EmitDirectCallee(*this, FD);
}
} else if (auto ME = dyn_cast<MemberExpr>(E)) {
if (auto FD = dyn_cast<FunctionDecl>(ME->getMemberDecl())) {
EmitIgnoredExpr(ME->getBase());
return EmitDirectCallee(*this, FD);
}
// Look through template substitutions.
} else if (auto NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
return EmitCallee(NTTP->getReplacement());
// Treat pseudo-destructor calls differently.
} else if (auto PDE = dyn_cast<CXXPseudoDestructorExpr>(E)) {
return CGCallee::forPseudoDestructor(PDE);
}
// Otherwise, we have an indirect reference.
llvm::Value *calleePtr;
QualType functionType;
if (auto ptrType = E->getType()->getAs<PointerType>()) {
calleePtr = EmitScalarExpr(E);
functionType = ptrType->getPointeeType();
} else {
functionType = E->getType();
calleePtr = EmitLValue(E).getPointer();
}
assert(functionType->isFunctionType());
GlobalDecl GD;
if (const auto *VD =
dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee()))
GD = GlobalDecl(VD);
CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD);
CGCallee callee(calleeInfo, calleePtr);
return callee;
}
LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
// Comma expressions just emit their LHS then their RHS as an l-value.
if (E->getOpcode() == BO_Comma) {
EmitIgnoredExpr(E->getLHS());
EnsureInsertPoint();
return EmitLValue(E->getRHS());
}
if (E->getOpcode() == BO_PtrMemD ||
E->getOpcode() == BO_PtrMemI)
return EmitPointerToDataMemberBinaryExpr(E);
assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
// Note that in all of these cases, __block variables need the RHS
// evaluated first just in case the variable gets moved by the RHS.
switch (getEvaluationKind(E->getType())) {
case TEK_Scalar: {
switch (E->getLHS()->getType().getObjCLifetime()) {
case Qualifiers::OCL_Strong:
return EmitARCStoreStrong(E, /*ignored*/ false).first;
case Qualifiers::OCL_Autoreleasing:
return EmitARCStoreAutoreleasing(E).first;
// No reason to do any of these differently.
case Qualifiers::OCL_None:
case Qualifiers::OCL_ExplicitNone:
case Qualifiers::OCL_Weak:
break;
}
RValue RV = EmitAnyExpr(E->getRHS());
LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store);
if (RV.isScalar())
EmitNullabilityCheck(LV, RV.getScalarVal(), E->getExprLoc());
EmitStoreThroughLValue(RV, LV);
return LV;
}
case TEK_Complex:
return EmitComplexAssignmentLValue(E);
case TEK_Aggregate:
return EmitAggExprToLValue(E);
}
llvm_unreachable("bad evaluation kind");
}
LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
RValue RV = EmitCallExpr(E);
if (!RV.isScalar())
return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
AlignmentSource::Decl);
assert(E->getCallReturnType(getContext())->isReferenceType() &&
"Can't have a scalar return unless the return type is a "
"reference type!");
return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
}
LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
// FIXME: This shouldn't require another copy.
return EmitAggExprToLValue(E);
}
LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor()
&& "binding l-value to type which needs a temporary");
AggValueSlot Slot = CreateAggTemp(E->getType());
EmitCXXConstructExpr(E, Slot);
return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl);
}
LValue
CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
return MakeNaturalAlignAddrLValue(EmitCXXTypeidExpr(E), E->getType());
}
Address CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
return Builder.CreateElementBitCast(CGM.GetAddrOfUuidDescriptor(E),
ConvertType(E->getType()));
}
LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) {
return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType(),
AlignmentSource::Decl);
}
LValue
CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
Slot.setExternallyDestructed();
EmitAggExpr(E->getSubExpr(), Slot);
EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddress());
return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl);
}
LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
RValue RV = EmitObjCMessageExpr(E);
if (!RV.isScalar())
return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
AlignmentSource::Decl);
assert(E->getMethodDecl()->getReturnType()->isReferenceType() &&
"Can't have a scalar return unless the return type is a "
"reference type!");
return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
}
LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
Address V =
CGM.getObjCRuntime().GetAddrOfSelector(*this, E->getSelector());
return MakeAddrLValue(V, E->getType(), AlignmentSource::Decl);
}
llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar) {
return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
}
LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
llvm::Value *BaseValue,
const ObjCIvarDecl *Ivar,
unsigned CVRQualifiers) {
return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
Ivar, CVRQualifiers);
}
LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
// FIXME: A lot of the code below could be shared with EmitMemberExpr.
llvm::Value *BaseValue = nullptr;
const Expr *BaseExpr = E->getBase();
Qualifiers BaseQuals;
QualType ObjectTy;
if (E->isArrow()) {
BaseValue = EmitScalarExpr(BaseExpr);
ObjectTy = BaseExpr->getType()->getPointeeType();
BaseQuals = ObjectTy.getQualifiers();
} else {
LValue BaseLV = EmitLValue(BaseExpr);
BaseValue = BaseLV.getPointer();
ObjectTy = BaseExpr->getType();
BaseQuals = ObjectTy.getQualifiers();
}
LValue LV =
EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
BaseQuals.getCVRQualifiers());
setObjCGCLValueClass(getContext(), E, LV);
return LV;
}
LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
// We can only get an l-value for a statement expression that returns an
// aggregate type.
RValue RV = EmitAnyExprToTemp(E);
return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
AlignmentSource::Decl);
}
RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee,
const CallExpr *E, ReturnValueSlot ReturnValue,
llvm::Value *Chain) {
// Get the actual function type. The callee type will always be a pointer to
// function type or a block pointer type.
assert(CalleeType->isFunctionPointerType() &&
"Call must have function pointer type!");
const Decl *TargetDecl =
OrigCallee.getAbstractInfo().getCalleeDecl().getDecl();
CalleeType = getContext().getCanonicalType(CalleeType);
auto PointeeType = cast<PointerType>(CalleeType)->getPointeeType();
CGCallee Callee = OrigCallee;
if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function) &&
(!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
if (llvm::Constant *PrefixSig =
CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
SanitizerScope SanScope(this);
// Remove any (C++17) exception specifications, to allow calling e.g. a
// noexcept function through a non-noexcept pointer.
auto ProtoTy =
getContext().getFunctionTypeWithExceptionSpec(PointeeType, EST_None);
llvm::Constant *FTRTTIConst =
CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
llvm::Type *PrefixStructTyElems[] = {PrefixSig->getType(), Int32Ty};
llvm::StructType *PrefixStructTy = llvm::StructType::get(
CGM.getLLVMContext(), PrefixStructTyElems, /*isPacked=*/true);
llvm::Value *CalleePtr = Callee.getFunctionPointer();
llvm::Value *CalleePrefixStruct = Builder.CreateBitCast(
CalleePtr, llvm::PointerType::getUnqual(PrefixStructTy));
llvm::Value *CalleeSigPtr =
Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, 0, 0);
llvm::Value *CalleeSig =
Builder.CreateAlignedLoad(CalleeSigPtr, getIntAlign());
llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);
llvm::BasicBlock *Cont = createBasicBlock("cont");
llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck");
Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont);
EmitBlock(TypeCheck);
llvm::Value *CalleeRTTIPtr =
Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, 0, 1);
llvm::Value *CalleeRTTIEncoded =
Builder.CreateAlignedLoad(CalleeRTTIPtr, getPointerAlign());
llvm::Value *CalleeRTTI =
DecodeAddrUsedInPrologue(CalleePtr, CalleeRTTIEncoded);
llvm::Value *CalleeRTTIMatch =
Builder.CreateICmpEQ(CalleeRTTI, FTRTTIConst);
llvm::Constant *StaticData[] = {EmitCheckSourceLocation(E->getBeginLoc()),
EmitCheckTypeDescriptor(CalleeType)};
EmitCheck(std::make_pair(CalleeRTTIMatch, SanitizerKind::Function),
SanitizerHandler::FunctionTypeMismatch, StaticData,
{CalleePtr, CalleeRTTI, FTRTTIConst});
Builder.CreateBr(Cont);
EmitBlock(Cont);
}
}
const auto *FnType = cast<FunctionType>(PointeeType);
// If we are checking indirect calls and this call is indirect, check that the
// function pointer is a member of the bit set for the function type.
if (SanOpts.has(SanitizerKind::CFIICall) &&
(!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
SanitizerScope SanScope(this);
EmitSanitizerStatReport(llvm::SanStat_CFI_ICall);
llvm::Metadata *MD;
if (CGM.getCodeGenOpts().SanitizeCfiICallGeneralizePointers)
MD = CGM.CreateMetadataIdentifierGeneralized(QualType(FnType, 0));
else
MD = CGM.CreateMetadataIdentifierForType(QualType(FnType, 0));
llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);
llvm::Value *CalleePtr = Callee.getFunctionPointer();
llvm::Value *CastedCallee = Builder.CreateBitCast(CalleePtr, Int8PtrTy);
llvm::Value *TypeTest = Builder.CreateCall(
CGM.getIntrinsic(llvm::Intrinsic::type_test), {CastedCallee, TypeId});
auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
llvm::Constant *StaticData[] = {
llvm::ConstantInt::get(Int8Ty, CFITCK_ICall),
EmitCheckSourceLocation(E->getBeginLoc()),
EmitCheckTypeDescriptor(QualType(FnType, 0)),
};
if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) {
EmitCfiSlowPathCheck(SanitizerKind::CFIICall, TypeTest, CrossDsoTypeId,
CastedCallee, StaticData);
} else {
EmitCheck(std::make_pair(TypeTest, SanitizerKind::CFIICall),
SanitizerHandler::CFICheckFail, StaticData,
{CastedCallee, llvm::UndefValue::get(IntPtrTy)});
}
}
CallArgList Args;
if (Chain)
Args.add(RValue::get(Builder.CreateBitCast(Chain, CGM.VoidPtrTy)),
CGM.getContext().VoidPtrTy);
// C++17 requires that we evaluate arguments to a call using assignment syntax
// right-to-left, and that we evaluate arguments to certain other operators
// left-to-right. Note that we allow this to override the order dictated by
// the calling convention on the MS ABI, which means that parameter
// destruction order is not necessarily reverse construction order.
// FIXME: Revisit this based on C++ committee response to unimplementability.
EvaluationOrder Order = EvaluationOrder::Default;
if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) {
if (OCE->isAssignmentOp())
Order = EvaluationOrder::ForceRightToLeft;
else {
switch (OCE->getOperator()) {
case OO_LessLess:
case OO_GreaterGreater:
case OO_AmpAmp:
case OO_PipePipe:
case OO_Comma:
case OO_ArrowStar:
Order = EvaluationOrder::ForceLeftToRight;
break;
default:
break;
}
}
}
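// Illustrative note (not from the original source): an overloaded "a << b"
// hits the OO_LessLess case and is forced left-to-right, mirroring the C++17
// sequencing of the built-in operator, while an overloaded "a = b" is forced
// right-to-left by the isAssignmentOp() branch above.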
EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), E->arguments(),
E->getDirectCallee(), /*ParamsToSkip*/ 0, Order);
const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
Args, FnType, /*ChainCall=*/Chain);
// C99 6.5.2.2p6:
// If the expression that denotes the called function has a type
// that does not include a prototype, [the default argument
// promotions are performed]. If the number of arguments does not
// equal the number of parameters, the behavior is undefined. If
// the function is defined with a type that includes a prototype,
// and either the prototype ends with an ellipsis (, ...) or the
// types of the arguments after promotion are not compatible with
// the types of the parameters, the behavior is undefined. If the
// function is defined with a type that does not include a
// prototype, and the types of the arguments after promotion are
// not compatible with those of the parameters after promotion,
// the behavior is undefined [except in some trivial cases].
// That is, in the general case, we should assume that a call
// through an unprototyped function type works like a *non-variadic*
// call. The way we make this work is to cast to the exact type
// of the promoted arguments.
//
// Chain calls use this same code path to add the invisible chain parameter
// to the function type.
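// Illustrative note (not from the original source): in C, for
//   int f(); f(1, 2.0);
// the promoted argument types are (int, double), so the callee is bitcast to
// "i32 (i32, double)*" and the call is emitted as if f were a non-variadic
// function with exactly those parameters.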
if (isa<FunctionNoProtoType>(FnType) || Chain) {
llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
CalleeTy = CalleeTy->getPointerTo();
llvm::Value *CalleePtr = Callee.getFunctionPointer();
CalleePtr = Builder.CreateBitCast(CalleePtr, CalleeTy, "callee.knr.cast");
Callee.setFunctionPointer(CalleePtr);
}
llvm::CallBase *CallOrInvoke = nullptr;
RValue Call = EmitCall(FnInfo, Callee, ReturnValue, Args, &CallOrInvoke,
E->getExprLoc());
// Generate a function declaration DISubprogram so that it can be used
// in debug info about call sites.
if (CGDebugInfo *DI = getDebugInfo()) {
if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl))
DI->EmitFuncDeclForCallSite(CallOrInvoke, QualType(FnType, 0),
CalleeDecl);
}
return Call;
}
LValue CodeGenFunction::
EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
Address BaseAddr = Address::invalid();
if (E->getOpcode() == BO_PtrMemI) {
BaseAddr = EmitPointerWithAlignment(E->getLHS());
} else {
BaseAddr = EmitLValue(E->getLHS()).getAddress();
}
llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
const MemberPointerType *MPT
= E->getRHS()->getType()->getAs<MemberPointerType>();
LValueBaseInfo BaseInfo;
TBAAAccessInfo TBAAInfo;
Address MemberAddr =
EmitCXXMemberDataPointerAddress(E, BaseAddr, OffsetV, MPT, &BaseInfo,
&TBAAInfo);
return MakeAddrLValue(MemberAddr, MPT->getPointeeType(), BaseInfo, TBAAInfo);
}
/// Given the address of a temporary variable, produce an r-value of
/// its type.
RValue CodeGenFunction::convertTempToRValue(Address addr,
QualType type,
SourceLocation loc) {
LValue lvalue = MakeAddrLValue(addr, type, AlignmentSource::Decl);
switch (getEvaluationKind(type)) {
case TEK_Complex:
return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
case TEK_Aggregate:
return lvalue.asAggregateRValue();
case TEK_Scalar:
return RValue::get(EmitLoadOfScalar(lvalue, loc));
}
llvm_unreachable("bad evaluation kind");
}
void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
assert(Val->getType()->isFPOrFPVectorTy());
if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
return;
llvm::MDBuilder MDHelper(getLLVMContext());
llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);
cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
}
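// The metadata produced here has the form '!fpmath !{float 2.500000e+00}';
// for example, OpenCL uses this to attach its 2.5 ulp accuracy bound to
// single-precision fdiv instructions.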
namespace {
struct LValueOrRValue {
LValue LV;
RValue RV;
};
}
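// A PseudoObjectExpr pairs the syntactic form of an expression with the
// semantic expressions that implement it. For example, an ObjC property
// compound assignment like 'obj.count += 1' is lowered to opaque values
// binding the base, a getter call, and a setter call, which the loop
// below evaluates in sequence.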
static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
const PseudoObjectExpr *E,
bool forLValue,
AggValueSlot slot) {
SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;
// Find the result expression, if any.
const Expr *resultExpr = E->getResultExpr();
LValueOrRValue result;
for (PseudoObjectExpr::const_semantics_iterator
i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
const Expr *semantic = *i;
// If this semantic expression is an opaque value, bind it
// to the result of its source expression.
if (const auto *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
// Skip unique OVEs.
if (ov->isUnique()) {
assert(ov != resultExpr &&
"A unique OVE cannot be used as the result expression");
continue;
}
// If this is the result expression, we may need to evaluate
// directly into the slot.
typedef CodeGenFunction::OpaqueValueMappingData OVMA;
OVMA opaqueData;
if (ov == resultExpr && ov->isRValue() && !forLValue &&
CodeGenFunction::hasAggregateEvaluationKind(ov->getType())) {
CGF.EmitAggExpr(ov->getSourceExpr(), slot);
LValue LV = CGF.MakeAddrLValue(slot.getAddress(), ov->getType(),
AlignmentSource::Decl);
opaqueData = OVMA::bind(CGF, ov, LV);
result.RV = slot.asRValue();
// Otherwise, emit as normal.
} else {
opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
// If this is the result, also evaluate the result now.
if (ov == resultExpr) {
if (forLValue)
result.LV = CGF.EmitLValue(ov);
else
result.RV = CGF.EmitAnyExpr(ov, slot);
}
}
opaques.push_back(opaqueData);
// Otherwise, if the expression is the result, evaluate it
// and remember the result.
} else if (semantic == resultExpr) {
if (forLValue)
result.LV = CGF.EmitLValue(semantic);
else
result.RV = CGF.EmitAnyExpr(semantic, slot);
// Otherwise, evaluate the expression in an ignored context.
} else {
CGF.EmitIgnoredExpr(semantic);
}
}
// Unbind all the opaques now.
for (unsigned i = 0, e = opaques.size(); i != e; ++i)
opaques[i].unbind(CGF);
return result;
}
RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
AggValueSlot slot) {
return emitPseudoObjectExpr(*this, E, false, slot).RV;
}
LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
}
Index: stable/12/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp
===================================================================
--- stable/12/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp (revision 356464)
+++ stable/12/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp (revision 356465)
@@ -1,4297 +1,4313 @@
//===--- MicrosoftCXXABI.cpp - Emit LLVM Code from ASTs for a Module ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides C++ code generation targeting the Microsoft Visual C++ ABI.
// The class in this file generates structures that follow the Microsoft
// Visual C++ ABI, which is actually not very well documented at all outside
// of Microsoft.
//
//===----------------------------------------------------------------------===//
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGVTables.h"
#include "CodeGenModule.h"
#include "CodeGenTypes.h"
#include "TargetInfo.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/VTableBuilder.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/IR/Intrinsics.h"
using namespace clang;
using namespace CodeGen;
namespace {
/// Holds all the vbtable globals for a given class.
struct VBTableGlobals {
const VPtrInfoVector *VBTables;
SmallVector<llvm::GlobalVariable *, 2> Globals;
};
class MicrosoftCXXABI : public CGCXXABI {
public:
MicrosoftCXXABI(CodeGenModule &CGM)
: CGCXXABI(CGM), BaseClassDescriptorType(nullptr),
ClassHierarchyDescriptorType(nullptr),
CompleteObjectLocatorType(nullptr), CatchableTypeType(nullptr),
ThrowInfoType(nullptr) {}
bool HasThisReturn(GlobalDecl GD) const override;
bool hasMostDerivedReturn(GlobalDecl GD) const override;
bool classifyReturnType(CGFunctionInfo &FI) const override;
RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override;
bool isSRetParameterAfterThis() const override { return true; }
bool isThisCompleteObject(GlobalDecl GD) const override {
// The Microsoft ABI doesn't use separate complete-object vs.
// base-object variants of constructors, but it does for destructors.
if (isa<CXXDestructorDecl>(GD.getDecl())) {
switch (GD.getDtorType()) {
case Dtor_Complete:
case Dtor_Deleting:
return true;
case Dtor_Base:
return false;
case Dtor_Comdat: llvm_unreachable("emitting dtor comdat as function?");
}
llvm_unreachable("bad dtor kind");
}
// No other kinds.
return false;
}
size_t getSrcArgforCopyCtor(const CXXConstructorDecl *CD,
FunctionArgList &Args) const override {
assert(Args.size() >= 2 &&
"expected the arglist to have at least two args!");
// The 'most_derived' parameter goes second if the ctor is variadic and
// has v-bases.
if (CD->getParent()->getNumVBases() > 0 &&
CD->getType()->castAs<FunctionProtoType>()->isVariadic())
return 2;
return 1;
}
std::vector<CharUnits> getVBPtrOffsets(const CXXRecordDecl *RD) override {
std::vector<CharUnits> VBPtrOffsets;
const ASTContext &Context = getContext();
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
const VBTableGlobals &VBGlobals = enumerateVBTables(RD);
for (const std::unique_ptr<VPtrInfo> &VBT : *VBGlobals.VBTables) {
const ASTRecordLayout &SubobjectLayout =
Context.getASTRecordLayout(VBT->IntroducingObject);
CharUnits Offs = VBT->NonVirtualOffset;
Offs += SubobjectLayout.getVBPtrOffset();
if (VBT->getVBaseWithVPtr())
Offs += Layout.getVBaseClassOffset(VBT->getVBaseWithVPtr());
VBPtrOffsets.push_back(Offs);
}
llvm::array_pod_sort(VBPtrOffsets.begin(), VBPtrOffsets.end());
return VBPtrOffsets;
}
StringRef GetPureVirtualCallName() override { return "_purecall"; }
StringRef GetDeletedVirtualCallName() override { return "_purecall"; }
void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
Address Ptr, QualType ElementType,
const CXXDestructorDecl *Dtor) override;
void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
llvm::GlobalVariable *getMSCompleteObjectLocator(const CXXRecordDecl *RD,
const VPtrInfo &Info);
llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
CatchTypeInfo
getAddrOfCXXCatchHandlerType(QualType Ty, QualType CatchHandlerType) override;
/// MSVC needs an extra flag to indicate a catchall.
CatchTypeInfo getCatchAllTypeInfo() override {
return CatchTypeInfo{nullptr, 0x40};
}
bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
void EmitBadTypeidCall(CodeGenFunction &CGF) override;
llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
Address ThisPtr,
llvm::Type *StdTypeInfoPtrTy) override;
bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
QualType SrcRecordTy) override;
llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
QualType SrcRecordTy, QualType DestTy,
QualType DestRecordTy,
llvm::BasicBlock *CastEnd) override;
llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
QualType SrcRecordTy,
QualType DestTy) override;
bool EmitBadCastCall(CodeGenFunction &CGF) override;
bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override {
return false;
}
llvm::Value *
GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
const CXXRecordDecl *ClassDecl,
const CXXRecordDecl *BaseClassDecl) override;
llvm::BasicBlock *
EmitCtorCompleteObjectHandler(CodeGenFunction &CGF,
const CXXRecordDecl *RD) override;
llvm::BasicBlock *
EmitDtorCompleteObjectHandler(CodeGenFunction &CGF);
void initializeHiddenVirtualInheritanceMembers(CodeGenFunction &CGF,
const CXXRecordDecl *RD) override;
void EmitCXXConstructors(const CXXConstructorDecl *D) override;
// Background on MSVC destructors
// ==============================
//
// Both Itanium and MSVC ABIs have destructor variants. The variant names
// roughly correspond in the following way:
//   Itanium      Microsoft
//   Base      -> no name, just ~Class
//   Complete  -> vbase destructor
//   Deleting  -> scalar deleting destructor
//                vector deleting destructor
//
// The base and complete destructors are the same as in Itanium, although the
// complete destructor does not accept a VTT parameter when there are virtual
// bases. A separate mechanism involving vtordisps is used to ensure that
// virtual methods of destroyed subobjects are not called.
//
// The deleting destructors accept an i32 bitfield as a second parameter. Bit
// 1 indicates if the memory should be deleted. Bit 2 indicates if the this
// pointer points to an array. The scalar deleting destructor assumes that
// bit 2 is zero, and therefore does not contain a loop.
//
// For virtual destructors, only one entry is reserved in the vftable, and it
// always points to the vector deleting destructor. The vector deleting
// destructor is the most general, so it can be used to destroy objects in
// place, delete single heap objects, or delete arrays.
//
// A TU defining a non-inline destructor is only guaranteed to emit a base
// destructor, and all of the other variants are emitted on an as-needed basis
// in COMDATs. Because a non-base destructor can be emitted in a TU that
// lacks a definition for the destructor, non-base destructors must always
// delegate to or alias the base destructor.
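// For example, 'delete p' on a polymorphic object invokes the deleting
// destructor through the vftable with the implicit flag argument set to 1
// (free the memory, not an array), while 'delete[] a' conceptually passes
// 3 so the vector deleting destructor loops over the elements first.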
AddedStructorArgs
buildStructorSignature(GlobalDecl GD,
SmallVectorImpl<CanQualType> &ArgTys) override;
/// Non-base dtors should be emitted as delegating thunks in this ABI.
bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
CXXDtorType DT) const override {
return DT != Dtor_Base;
}
void setCXXDestructorDLLStorage(llvm::GlobalValue *GV,
const CXXDestructorDecl *Dtor,
CXXDtorType DT) const override;
llvm::GlobalValue::LinkageTypes
getCXXDestructorLinkage(GVALinkage Linkage, const CXXDestructorDecl *Dtor,
CXXDtorType DT) const override;
void EmitCXXDestructors(const CXXDestructorDecl *D) override;
const CXXRecordDecl *
getThisArgumentTypeForMethod(const CXXMethodDecl *MD) override {
if (MD->isVirtual() && !isa<CXXDestructorDecl>(MD)) {
MethodVFTableLocation ML =
CGM.getMicrosoftVTableContext().getMethodVFTableLocation(MD);
// The vbases might be ordered differently in the final overrider object
// and the complete object, so the "this" argument may sometimes point to
// memory that has no particular type (e.g. past the complete object).
// In this case, we just use a generic pointer type.
// FIXME: might want to have a more precise type in the non-virtual
// multiple inheritance case.
if (ML.VBase || !ML.VFPtrOffset.isZero())
return nullptr;
}
return MD->getParent();
}
Address
adjustThisArgumentForVirtualFunctionCall(CodeGenFunction &CGF, GlobalDecl GD,
Address This,
bool VirtualCall) override;
void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
FunctionArgList &Params) override;
void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
AddedStructorArgs
addImplicitConstructorArgs(CodeGenFunction &CGF, const CXXConstructorDecl *D,
CXXCtorType Type, bool ForVirtualBase,
bool Delegating, CallArgList &Args) override;
void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
CXXDtorType Type, bool ForVirtualBase,
bool Delegating, Address This,
QualType ThisTy) override;
void emitVTableTypeMetadata(const VPtrInfo &Info, const CXXRecordDecl *RD,
llvm::GlobalVariable *VTable);
void emitVTableDefinitions(CodeGenVTables &CGVT,
const CXXRecordDecl *RD) override;
bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
CodeGenFunction::VPtr Vptr) override;
/// Don't initialize vptrs if the dynamic class
/// is marked with the 'novtable' attribute.
bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
return !VTableClass->hasAttr<MSNoVTableAttr>();
}
llvm::Constant *
getVTableAddressPoint(BaseSubobject Base,
const CXXRecordDecl *VTableClass) override;
llvm::Value *getVTableAddressPointInStructor(
CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
llvm::Constant *
getVTableAddressPointForConstExpr(BaseSubobject Base,
const CXXRecordDecl *VTableClass) override;
llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
CharUnits VPtrOffset) override;
CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
Address This, llvm::Type *Ty,
SourceLocation Loc) override;
llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
const CXXDestructorDecl *Dtor,
CXXDtorType DtorType, Address This,
DeleteOrMemberCallExpr E) override;
void adjustCallArgsForDestructorThunk(CodeGenFunction &CGF, GlobalDecl GD,
CallArgList &CallArgs) override {
assert(GD.getDtorType() == Dtor_Deleting &&
"Only deleting destructor thunks are available in this ABI");
CallArgs.add(RValue::get(getStructorImplicitParamValue(CGF)),
getContext().IntTy);
}
void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
llvm::GlobalVariable *
getAddrOfVBTable(const VPtrInfo &VBT, const CXXRecordDecl *RD,
llvm::GlobalVariable::LinkageTypes Linkage);
llvm::GlobalVariable *
getAddrOfVirtualDisplacementMap(const CXXRecordDecl *SrcRD,
const CXXRecordDecl *DstRD) {
SmallString<256> OutName;
llvm::raw_svector_ostream Out(OutName);
getMangleContext().mangleCXXVirtualDisplacementMap(SrcRD, DstRD, Out);
StringRef MangledName = OutName.str();
if (auto *VDispMap = CGM.getModule().getNamedGlobal(MangledName))
return VDispMap;
MicrosoftVTableContext &VTContext = CGM.getMicrosoftVTableContext();
unsigned NumEntries = 1 + SrcRD->getNumVBases();
SmallVector<llvm::Constant *, 4> Map(NumEntries,
llvm::UndefValue::get(CGM.IntTy));
Map[0] = llvm::ConstantInt::get(CGM.IntTy, 0);
bool AnyDifferent = false;
for (const auto &I : SrcRD->vbases()) {
const CXXRecordDecl *VBase = I.getType()->getAsCXXRecordDecl();
if (!DstRD->isVirtuallyDerivedFrom(VBase))
continue;
unsigned SrcVBIndex = VTContext.getVBTableIndex(SrcRD, VBase);
unsigned DstVBIndex = VTContext.getVBTableIndex(DstRD, VBase);
Map[SrcVBIndex] = llvm::ConstantInt::get(CGM.IntTy, DstVBIndex * 4);
AnyDifferent |= SrcVBIndex != DstVBIndex;
}
// If no index differs, the map would be useless; don't emit it.
if (!AnyDifferent)
return nullptr;
llvm::ArrayType *VDispMapTy = llvm::ArrayType::get(CGM.IntTy, Map.size());
llvm::Constant *Init = llvm::ConstantArray::get(VDispMapTy, Map);
llvm::GlobalValue::LinkageTypes Linkage =
SrcRD->isExternallyVisible() && DstRD->isExternallyVisible()
? llvm::GlobalValue::LinkOnceODRLinkage
: llvm::GlobalValue::InternalLinkage;
auto *VDispMap = new llvm::GlobalVariable(
CGM.getModule(), VDispMapTy, /*isConstant=*/true, Linkage,
/*Initializer=*/Init, MangledName);
return VDispMap;
}
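// Each map entry translates a vbtable index of SrcRD into the byte offset
// of the same virtual base within DstRD's vbtable; the map is consulted
// when converting member pointers between classes whose vbtables order
// their virtual bases differently.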
void emitVBTableDefinition(const VPtrInfo &VBT, const CXXRecordDecl *RD,
llvm::GlobalVariable *GV) const;
void setThunkLinkage(llvm::Function *Thunk, bool ForVTable,
GlobalDecl GD, bool ReturnAdjustment) override {
GVALinkage Linkage =
getContext().GetGVALinkageForFunction(cast<FunctionDecl>(GD.getDecl()));
if (Linkage == GVA_Internal)
Thunk->setLinkage(llvm::GlobalValue::InternalLinkage);
else if (ReturnAdjustment)
Thunk->setLinkage(llvm::GlobalValue::WeakODRLinkage);
else
Thunk->setLinkage(llvm::GlobalValue::LinkOnceODRLinkage);
}
bool exportThunk() override { return false; }
llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
const ThisAdjustment &TA) override;
llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
const ReturnAdjustment &RA) override;
void EmitThreadLocalInitFuncs(
CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
ArrayRef<llvm::Function *> CXXThreadLocalInits,
ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
bool usesThreadWrapperFunction() const override { return false; }
LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
QualType LValType) override;
void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
llvm::GlobalVariable *DeclPtr,
bool PerformInit) override;
void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
llvm::FunctionCallee Dtor,
llvm::Constant *Addr) override;
// ==== Notes on array cookies =========
//
// MSVC seems to only use cookies when the class has a destructor; a
// two-argument usual array deallocation function isn't sufficient.
//
// For example, this code prints "100" and "1":
// struct A {
// char x;
// void *operator new[](size_t sz) {
// printf("%u\n", sz);
// return malloc(sz);
// }
// void operator delete[](void *p, size_t sz) {
// printf("%u\n", sz);
// free(p);
// }
// };
// int main() {
// A *p = new A[100];
// delete[] p;
// }
// Whereas it prints "104" and "104" if you give A a destructor.
bool requiresArrayCookie(const CXXDeleteExpr *expr,
QualType elementType) override;
bool requiresArrayCookie(const CXXNewExpr *expr) override;
CharUnits getArrayCookieSizeImpl(QualType type) override;
Address InitializeArrayCookie(CodeGenFunction &CGF,
Address NewPtr,
llvm::Value *NumElements,
const CXXNewExpr *expr,
QualType ElementType) override;
llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
Address allocPtr,
CharUnits cookieSize) override;
friend struct MSRTTIBuilder;
bool isImageRelative() const {
return CGM.getTarget().getPointerWidth(/*AddrSpace=*/0) == 64;
}
// Five routines for constructing the LLVM types for the MS RTTI structs.
llvm::StructType *getTypeDescriptorType(StringRef TypeInfoString) {
llvm::SmallString<32> TDTypeName("rtti.TypeDescriptor");
TDTypeName += llvm::utostr(TypeInfoString.size());
llvm::StructType *&TypeDescriptorType =
TypeDescriptorTypeMap[TypeInfoString.size()];
if (TypeDescriptorType)
return TypeDescriptorType;
llvm::Type *FieldTypes[] = {
CGM.Int8PtrPtrTy,
CGM.Int8PtrTy,
llvm::ArrayType::get(CGM.Int8Ty, TypeInfoString.size() + 1)};
TypeDescriptorType =
llvm::StructType::create(CGM.getLLVMContext(), FieldTypes, TDTypeName);
return TypeDescriptorType;
}
llvm::Type *getImageRelativeType(llvm::Type *PtrType) {
if (!isImageRelative())
return PtrType;
return CGM.IntTy;
}
llvm::StructType *getBaseClassDescriptorType() {
if (BaseClassDescriptorType)
return BaseClassDescriptorType;
llvm::Type *FieldTypes[] = {
getImageRelativeType(CGM.Int8PtrTy),
CGM.IntTy,
CGM.IntTy,
CGM.IntTy,
CGM.IntTy,
CGM.IntTy,
getImageRelativeType(getClassHierarchyDescriptorType()->getPointerTo()),
};
BaseClassDescriptorType = llvm::StructType::create(
CGM.getLLVMContext(), FieldTypes, "rtti.BaseClassDescriptor");
return BaseClassDescriptorType;
}
llvm::StructType *getClassHierarchyDescriptorType() {
if (ClassHierarchyDescriptorType)
return ClassHierarchyDescriptorType;
// Forward-declare RTTIClassHierarchyDescriptor to break a cycle.
ClassHierarchyDescriptorType = llvm::StructType::create(
CGM.getLLVMContext(), "rtti.ClassHierarchyDescriptor");
llvm::Type *FieldTypes[] = {
CGM.IntTy,
CGM.IntTy,
CGM.IntTy,
getImageRelativeType(
getBaseClassDescriptorType()->getPointerTo()->getPointerTo()),
};
ClassHierarchyDescriptorType->setBody(FieldTypes);
return ClassHierarchyDescriptorType;
}
llvm::StructType *getCompleteObjectLocatorType() {
if (CompleteObjectLocatorType)
return CompleteObjectLocatorType;
CompleteObjectLocatorType = llvm::StructType::create(
CGM.getLLVMContext(), "rtti.CompleteObjectLocator");
llvm::Type *FieldTypes[] = {
CGM.IntTy,
CGM.IntTy,
CGM.IntTy,
getImageRelativeType(CGM.Int8PtrTy),
getImageRelativeType(getClassHierarchyDescriptorType()->getPointerTo()),
getImageRelativeType(CompleteObjectLocatorType),
};
llvm::ArrayRef<llvm::Type *> FieldTypesRef(FieldTypes);
if (!isImageRelative())
FieldTypesRef = FieldTypesRef.drop_back();
CompleteObjectLocatorType->setBody(FieldTypesRef);
return CompleteObjectLocatorType;
}
llvm::GlobalVariable *getImageBase() {
StringRef Name = "__ImageBase";
if (llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name))
return GV;
auto *GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8Ty,
/*isConstant=*/true,
llvm::GlobalValue::ExternalLinkage,
/*Initializer=*/nullptr, Name);
CGM.setDSOLocal(GV);
return GV;
}
llvm::Constant *getImageRelativeConstant(llvm::Constant *PtrVal) {
if (!isImageRelative())
return PtrVal;
if (PtrVal->isNullValue())
return llvm::Constant::getNullValue(CGM.IntTy);
llvm::Constant *ImageBaseAsInt =
llvm::ConstantExpr::getPtrToInt(getImageBase(), CGM.IntPtrTy);
llvm::Constant *PtrValAsInt =
llvm::ConstantExpr::getPtrToInt(PtrVal, CGM.IntPtrTy);
llvm::Constant *Diff =
llvm::ConstantExpr::getSub(PtrValAsInt, ImageBaseAsInt,
/*HasNUW=*/true, /*HasNSW=*/true);
return llvm::ConstantExpr::getTrunc(Diff, CGM.IntTy);
}
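// In other words, on 64-bit targets a pointer field P is stored as the
// 32-bit truncation of 'ptrtoint(P) - ptrtoint(__ImageBase)', so the RTTI
// data stays valid wherever the image is loaded.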
private:
MicrosoftMangleContext &getMangleContext() {
return cast<MicrosoftMangleContext>(CodeGen::CGCXXABI::getMangleContext());
}
llvm::Constant *getZeroInt() {
return llvm::ConstantInt::get(CGM.IntTy, 0);
}
llvm::Constant *getAllOnesInt() {
return llvm::Constant::getAllOnesValue(CGM.IntTy);
}
CharUnits getVirtualFunctionPrologueThisAdjustment(GlobalDecl GD) override;
void
GetNullMemberPointerFields(const MemberPointerType *MPT,
llvm::SmallVectorImpl<llvm::Constant *> &fields);
/// Shared code for virtual base adjustment. Returns the offset from
/// the vbptr to the virtual base. Optionally returns the address of the
/// vbptr itself.
llvm::Value *GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF,
Address Base,
llvm::Value *VBPtrOffset,
llvm::Value *VBTableOffset,
llvm::Value **VBPtr = nullptr);
llvm::Value *GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF,
Address Base,
int32_t VBPtrOffset,
int32_t VBTableOffset,
llvm::Value **VBPtr = nullptr) {
assert(VBTableOffset % 4 == 0 && "should be byte offset into table of i32s");
llvm::Value *VBPOffset = llvm::ConstantInt::get(CGM.IntTy, VBPtrOffset),
*VBTOffset = llvm::ConstantInt::get(CGM.IntTy, VBTableOffset);
return GetVBaseOffsetFromVBPtr(CGF, Base, VBPOffset, VBTOffset, VBPtr);
}
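// The computation is roughly:
//   vbptr  = *(char **)((char *)Base + VBPtrOffset);
//   offset = *(int *)(vbptr + VBTableOffset);
// i.e. the vbtable entry holds the displacement from the vbptr to the
// virtual base.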
std::tuple<Address, llvm::Value *, const CXXRecordDecl *>
performBaseAdjustment(CodeGenFunction &CGF, Address Value,
QualType SrcRecordTy);
/// Performs a full virtual base adjustment. Used to dereference
/// pointers to members of virtual bases.
llvm::Value *AdjustVirtualBase(CodeGenFunction &CGF, const Expr *E,
const CXXRecordDecl *RD, Address Base,
llvm::Value *VirtualBaseAdjustmentOffset,
llvm::Value *VBPtrOffset /* optional */);
/// Emits a full member pointer with the fields common to data and
/// function member pointers.
llvm::Constant *EmitFullMemberPointer(llvm::Constant *FirstField,
bool IsMemberFunction,
const CXXRecordDecl *RD,
CharUnits NonVirtualBaseAdjustment,
unsigned VBTableIndex);
bool MemberPointerConstantIsNull(const MemberPointerType *MPT,
llvm::Constant *MP);
/// Initialize all vbptrs of 'this' with RD as the complete type.
void EmitVBPtrStores(CodeGenFunction &CGF, const CXXRecordDecl *RD);
/// Caching wrapper around VBTableBuilder::enumerateVBTables().
const VBTableGlobals &enumerateVBTables(const CXXRecordDecl *RD);
/// Generate a thunk for calling a virtual member function MD.
llvm::Function *EmitVirtualMemPtrThunk(const CXXMethodDecl *MD,
const MethodVFTableLocation &ML);
+ llvm::Constant *EmitMemberDataPointer(const CXXRecordDecl *RD,
+ CharUnits offset);
+
public:
llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
bool isZeroInitializable(const MemberPointerType *MPT) override;
bool isMemberPointerConvertible(const MemberPointerType *MPT) const override {
const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
return RD->hasAttr<MSInheritanceAttr>();
}
llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
CharUnits offset) override;
llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
llvm::Value *L,
llvm::Value *R,
const MemberPointerType *MPT,
bool Inequality) override;
llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
llvm::Value *MemPtr,
const MemberPointerType *MPT) override;
llvm::Value *
EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
Address Base, llvm::Value *MemPtr,
const MemberPointerType *MPT) override;
llvm::Value *EmitNonNullMemberPointerConversion(
const MemberPointerType *SrcTy, const MemberPointerType *DstTy,
CastKind CK, CastExpr::path_const_iterator PathBegin,
CastExpr::path_const_iterator PathEnd, llvm::Value *Src,
CGBuilderTy &Builder);
llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
const CastExpr *E,
llvm::Value *Src) override;
llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
llvm::Constant *Src) override;
llvm::Constant *EmitMemberPointerConversion(
const MemberPointerType *SrcTy, const MemberPointerType *DstTy,
CastKind CK, CastExpr::path_const_iterator PathBegin,
CastExpr::path_const_iterator PathEnd, llvm::Constant *Src);
CGCallee
EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF, const Expr *E,
Address This, llvm::Value *&ThisPtrForCall,
llvm::Value *MemPtr,
const MemberPointerType *MPT) override;
void emitCXXStructor(GlobalDecl GD) override;
llvm::StructType *getCatchableTypeType() {
if (CatchableTypeType)
return CatchableTypeType;
llvm::Type *FieldTypes[] = {
CGM.IntTy, // Flags
getImageRelativeType(CGM.Int8PtrTy), // TypeDescriptor
CGM.IntTy, // NonVirtualAdjustment
CGM.IntTy, // OffsetToVBPtr
CGM.IntTy, // VBTableIndex
CGM.IntTy, // Size
getImageRelativeType(CGM.Int8PtrTy) // CopyCtor
};
CatchableTypeType = llvm::StructType::create(
CGM.getLLVMContext(), FieldTypes, "eh.CatchableType");
return CatchableTypeType;
}
llvm::StructType *getCatchableTypeArrayType(uint32_t NumEntries) {
llvm::StructType *&CatchableTypeArrayType =
CatchableTypeArrayTypeMap[NumEntries];
if (CatchableTypeArrayType)
return CatchableTypeArrayType;
llvm::SmallString<23> CTATypeName("eh.CatchableTypeArray.");
CTATypeName += llvm::utostr(NumEntries);
llvm::Type *CTType =
getImageRelativeType(getCatchableTypeType()->getPointerTo());
llvm::Type *FieldTypes[] = {
CGM.IntTy, // NumEntries
llvm::ArrayType::get(CTType, NumEntries) // CatchableTypes
};
CatchableTypeArrayType =
llvm::StructType::create(CGM.getLLVMContext(), FieldTypes, CTATypeName);
return CatchableTypeArrayType;
}
llvm::StructType *getThrowInfoType() {
if (ThrowInfoType)
return ThrowInfoType;
llvm::Type *FieldTypes[] = {
CGM.IntTy, // Flags
getImageRelativeType(CGM.Int8PtrTy), // CleanupFn
getImageRelativeType(CGM.Int8PtrTy), // ForwardCompat
getImageRelativeType(CGM.Int8PtrTy) // CatchableTypeArray
};
ThrowInfoType = llvm::StructType::create(CGM.getLLVMContext(), FieldTypes,
"eh.ThrowInfo");
return ThrowInfoType;
}
llvm::FunctionCallee getThrowFn() {
// _CxxThrowException is passed an exception object and a ThrowInfo object
// which describes the exception.
llvm::Type *Args[] = {CGM.Int8PtrTy, getThrowInfoType()->getPointerTo()};
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);
llvm::FunctionCallee Throw =
CGM.CreateRuntimeFunction(FTy, "_CxxThrowException");
// _CxxThrowException is stdcall on 32-bit x86 platforms.
if (CGM.getTarget().getTriple().getArch() == llvm::Triple::x86) {
if (auto *Fn = dyn_cast<llvm::Function>(Throw.getCallee()))
Fn->setCallingConv(llvm::CallingConv::X86_StdCall);
}
return Throw;
}
llvm::Function *getAddrOfCXXCtorClosure(const CXXConstructorDecl *CD,
CXXCtorType CT);
llvm::Constant *getCatchableType(QualType T,
uint32_t NVOffset = 0,
int32_t VBPtrOffset = -1,
uint32_t VBIndex = 0);
llvm::GlobalVariable *getCatchableTypeArray(QualType T);
llvm::GlobalVariable *getThrowInfo(QualType T) override;
std::pair<llvm::Value *, const CXXRecordDecl *>
LoadVTablePtr(CodeGenFunction &CGF, Address This,
const CXXRecordDecl *RD) override;
private:
typedef std::pair<const CXXRecordDecl *, CharUnits> VFTableIdTy;
typedef llvm::DenseMap<VFTableIdTy, llvm::GlobalVariable *> VTablesMapTy;
typedef llvm::DenseMap<VFTableIdTy, llvm::GlobalValue *> VFTablesMapTy;
/// All the vftables that have been referenced.
VFTablesMapTy VFTablesMap;
VTablesMapTy VTablesMap;
/// This set holds the record decls we've deferred vtable emission for.
llvm::SmallPtrSet<const CXXRecordDecl *, 4> DeferredVFTables;
/// All the vbtables which have been referenced.
llvm::DenseMap<const CXXRecordDecl *, VBTableGlobals> VBTablesMap;
/// Info on the global variable used to guard initialization of static locals.
/// The BitIndex field is only used for externally invisible declarations.
struct GuardInfo {
GuardInfo() : Guard(nullptr), BitIndex(0) {}
llvm::GlobalVariable *Guard;
unsigned BitIndex;
};
/// Map from DeclContext to the current guard variable. We assume that the
/// AST is visited in source code order.
llvm::DenseMap<const DeclContext *, GuardInfo> GuardVariableMap;
llvm::DenseMap<const DeclContext *, GuardInfo> ThreadLocalGuardVariableMap;
llvm::DenseMap<const DeclContext *, unsigned> ThreadSafeGuardNumMap;
llvm::DenseMap<size_t, llvm::StructType *> TypeDescriptorTypeMap;
llvm::StructType *BaseClassDescriptorType;
llvm::StructType *ClassHierarchyDescriptorType;
llvm::StructType *CompleteObjectLocatorType;
llvm::DenseMap<QualType, llvm::GlobalVariable *> CatchableTypeArrays;
llvm::StructType *CatchableTypeType;
llvm::DenseMap<uint32_t, llvm::StructType *> CatchableTypeArrayTypeMap;
llvm::StructType *ThrowInfoType;
};
}
CGCXXABI::RecordArgABI
MicrosoftCXXABI::getRecordArgABI(const CXXRecordDecl *RD) const {
switch (CGM.getTarget().getTriple().getArch()) {
default:
// FIXME: Implement for other architectures.
return RAA_Default;
case llvm::Triple::thumb:
// Use the simple Itanium rules for now.
// FIXME: This is incompatible with MSVC for arguments with a dtor and no
// copy ctor.
return !RD->canPassInRegisters() ? RAA_Indirect : RAA_Default;
case llvm::Triple::x86:
// All record arguments are passed in memory on x86. Decide whether to
// construct the object directly in argument memory, or to construct the
// argument elsewhere and copy the bytes during the call.
// If C++ prohibits us from making a copy, construct the arguments directly
// into argument memory.
if (!RD->canPassInRegisters())
return RAA_DirectInMemory;
// Otherwise, construct the argument into a temporary and copy the bytes
// into the outgoing argument memory.
return RAA_Default;
case llvm::Triple::x86_64:
case llvm::Triple::aarch64:
return !RD->canPassInRegisters() ? RAA_Indirect : RAA_Default;
}
llvm_unreachable("invalid enum");
}
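// For example, on 32-bit x86 a class with a user-provided copy
// constructor cannot be passed in registers, so it is constructed
// directly into the outgoing argument slot (RAA_DirectInMemory), whereas
// x86_64 passes such a class by address (RAA_Indirect).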
void MicrosoftCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
const CXXDeleteExpr *DE,
Address Ptr,
QualType ElementType,
const CXXDestructorDecl *Dtor) {
// FIXME: Provide a source location here even though there's no
// CXXMemberCallExpr for dtor call.
bool UseGlobalDelete = DE->isGlobalDelete();
CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
llvm::Value *MDThis = EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, DE);
if (UseGlobalDelete)
CGF.EmitDeleteCall(DE->getOperatorDelete(), MDThis, ElementType);
}
void MicrosoftCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
llvm::Value *Args[] = {
llvm::ConstantPointerNull::get(CGM.Int8PtrTy),
llvm::ConstantPointerNull::get(getThrowInfoType()->getPointerTo())};
llvm::FunctionCallee Fn = getThrowFn();
if (isNoReturn)
CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, Args);
else
CGF.EmitRuntimeCallOrInvoke(Fn, Args);
}
void MicrosoftCXXABI::emitBeginCatch(CodeGenFunction &CGF,
const CXXCatchStmt *S) {
// In the MS ABI, the runtime handles the copy, and the catch handler is
// responsible for destruction.
VarDecl *CatchParam = S->getExceptionDecl();
llvm::BasicBlock *CatchPadBB = CGF.Builder.GetInsertBlock();
llvm::CatchPadInst *CPI =
cast<llvm::CatchPadInst>(CatchPadBB->getFirstNonPHI());
CGF.CurrentFuncletPad = CPI;
// If this is a catch-all or the catch parameter is unnamed, we don't need
// to emit an alloca for the object.
if (!CatchParam || !CatchParam->getDeclName()) {
CGF.EHStack.pushCleanup<CatchRetScope>(NormalCleanup, CPI);
return;
}
CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
CPI->setArgOperand(2, var.getObjectAddress(CGF).getPointer());
CGF.EHStack.pushCleanup<CatchRetScope>(NormalCleanup, CPI);
CGF.EmitAutoVarCleanups(var);
}
/// We need to perform a generic polymorphic operation (like a typeid
/// or a cast), which requires an object with a vfptr. Adjust the
/// address to point to an object with a vfptr.
std::tuple<Address, llvm::Value *, const CXXRecordDecl *>
MicrosoftCXXABI::performBaseAdjustment(CodeGenFunction &CGF, Address Value,
QualType SrcRecordTy) {
Value = CGF.Builder.CreateBitCast(Value, CGF.Int8PtrTy);
const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
const ASTContext &Context = getContext();
// If the class itself has a vfptr, great. This check implicitly
// covers non-virtual base subobjects: a class with its own virtual
// functions would be a candidate to be a primary base.
if (Context.getASTRecordLayout(SrcDecl).hasExtendableVFPtr())
return std::make_tuple(Value, llvm::ConstantInt::get(CGF.Int32Ty, 0),
SrcDecl);
// Okay, one of the vbases must have a vfptr, or else this isn't
// actually a polymorphic class.
const CXXRecordDecl *PolymorphicBase = nullptr;
for (auto &Base : SrcDecl->vbases()) {
const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
if (Context.getASTRecordLayout(BaseDecl).hasExtendableVFPtr()) {
PolymorphicBase = BaseDecl;
break;
}
}
assert(PolymorphicBase && "polymorphic class has no apparent vfptr?");
llvm::Value *Offset =
GetVirtualBaseClassOffset(CGF, Value, SrcDecl, PolymorphicBase);
llvm::Value *Ptr = CGF.Builder.CreateInBoundsGEP(Value.getPointer(), Offset);
CharUnits VBaseAlign =
CGF.CGM.getVBaseAlignment(Value.getAlignment(), SrcDecl, PolymorphicBase);
return std::make_tuple(Address(Ptr, VBaseAlign), Offset, PolymorphicBase);
}
bool MicrosoftCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
QualType SrcRecordTy) {
const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
return IsDeref &&
!getContext().getASTRecordLayout(SrcDecl).hasExtendableVFPtr();
}
static llvm::CallBase *emitRTtypeidCall(CodeGenFunction &CGF,
llvm::Value *Argument) {
llvm::Type *ArgTypes[] = {CGF.Int8PtrTy};
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGF.Int8PtrTy, ArgTypes, false);
llvm::Value *Args[] = {Argument};
llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction(FTy, "__RTtypeid");
return CGF.EmitRuntimeCallOrInvoke(Fn, Args);
}
void MicrosoftCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
llvm::CallBase *Call =
emitRTtypeidCall(CGF, llvm::Constant::getNullValue(CGM.VoidPtrTy));
Call->setDoesNotReturn();
CGF.Builder.CreateUnreachable();
}
llvm::Value *MicrosoftCXXABI::EmitTypeid(CodeGenFunction &CGF,
QualType SrcRecordTy,
Address ThisPtr,
llvm::Type *StdTypeInfoPtrTy) {
std::tie(ThisPtr, std::ignore, std::ignore) =
performBaseAdjustment(CGF, ThisPtr, SrcRecordTy);
llvm::CallBase *Typeid = emitRTtypeidCall(CGF, ThisPtr.getPointer());
return CGF.Builder.CreateBitCast(Typeid, StdTypeInfoPtrTy);
}
bool MicrosoftCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
QualType SrcRecordTy) {
const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
return SrcIsPtr &&
!getContext().getASTRecordLayout(SrcDecl).hasExtendableVFPtr();
}
llvm::Value *MicrosoftCXXABI::EmitDynamicCastCall(
CodeGenFunction &CGF, Address This, QualType SrcRecordTy,
QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
llvm::Type *DestLTy = CGF.ConvertType(DestTy);
llvm::Value *SrcRTTI =
CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
llvm::Value *DestRTTI =
CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());
llvm::Value *Offset;
std::tie(This, Offset, std::ignore) =
performBaseAdjustment(CGF, This, SrcRecordTy);
llvm::Value *ThisPtr = This.getPointer();
Offset = CGF.Builder.CreateTrunc(Offset, CGF.Int32Ty);
// PVOID __RTDynamicCast(
// PVOID inptr,
// LONG VfDelta,
// PVOID SrcType,
// PVOID TargetType,
// BOOL isReference)
llvm::Type *ArgTypes[] = {CGF.Int8PtrTy, CGF.Int32Ty, CGF.Int8PtrTy,
CGF.Int8PtrTy, CGF.Int32Ty};
llvm::FunctionCallee Function = CGF.CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGF.Int8PtrTy, ArgTypes, false),
"__RTDynamicCast");
llvm::Value *Args[] = {
ThisPtr, Offset, SrcRTTI, DestRTTI,
llvm::ConstantInt::get(CGF.Int32Ty, DestTy->isReferenceType())};
ThisPtr = CGF.EmitRuntimeCallOrInvoke(Function, Args);
return CGF.Builder.CreateBitCast(ThisPtr, DestLTy);
}
llvm::Value *
MicrosoftCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
QualType SrcRecordTy,
QualType DestTy) {
std::tie(Value, std::ignore, std::ignore) =
performBaseAdjustment(CGF, Value, SrcRecordTy);
// PVOID __RTCastToVoid(
// PVOID inptr)
llvm::Type *ArgTypes[] = {CGF.Int8PtrTy};
llvm::FunctionCallee Function = CGF.CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGF.Int8PtrTy, ArgTypes, false),
"__RTCastToVoid");
llvm::Value *Args[] = {Value.getPointer()};
return CGF.EmitRuntimeCall(Function, Args);
}
bool MicrosoftCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
return false;
}
llvm::Value *MicrosoftCXXABI::GetVirtualBaseClassOffset(
CodeGenFunction &CGF, Address This, const CXXRecordDecl *ClassDecl,
const CXXRecordDecl *BaseClassDecl) {
const ASTContext &Context = getContext();
int64_t VBPtrChars =
Context.getASTRecordLayout(ClassDecl).getVBPtrOffset().getQuantity();
llvm::Value *VBPtrOffset = llvm::ConstantInt::get(CGM.PtrDiffTy, VBPtrChars);
CharUnits IntSize = Context.getTypeSizeInChars(Context.IntTy);
CharUnits VBTableChars =
IntSize *
CGM.getMicrosoftVTableContext().getVBTableIndex(ClassDecl, BaseClassDecl);
llvm::Value *VBTableOffset =
llvm::ConstantInt::get(CGM.IntTy, VBTableChars.getQuantity());
llvm::Value *VBPtrToNewBase =
GetVBaseOffsetFromVBPtr(CGF, This, VBPtrOffset, VBTableOffset);
VBPtrToNewBase =
CGF.Builder.CreateSExtOrBitCast(VBPtrToNewBase, CGM.PtrDiffTy);
return CGF.Builder.CreateNSWAdd(VBPtrOffset, VBPtrToNewBase);
}
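// Net effect: offset = VBPtrOffset + vbtable[index], i.e. the distance
// from 'this' to its vbptr plus the vbtable entry for BaseClassDecl.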
bool MicrosoftCXXABI::HasThisReturn(GlobalDecl GD) const {
return isa<CXXConstructorDecl>(GD.getDecl());
}
static bool isDeletingDtor(GlobalDecl GD) {
return isa<CXXDestructorDecl>(GD.getDecl()) &&
GD.getDtorType() == Dtor_Deleting;
}
bool MicrosoftCXXABI::hasMostDerivedReturn(GlobalDecl GD) const {
return isDeletingDtor(GD);
}
static bool IsSizeGreaterThan128(const CXXRecordDecl *RD) {
return RD->getASTContext().getTypeSize(RD->getTypeForDecl()) > 128;
}
static bool hasMicrosoftABIRestrictions(const CXXRecordDecl *RD) {
// For AArch64, we use the C++14 definition of an aggregate, so we also
// check for:
//   No private or protected non-static data members.
//   No base classes.
//   No virtual functions.
// Additionally, we need to ensure that there is a trivial copy assignment
// operator, a trivial destructor and no user-provided constructors.
if (RD->hasProtectedFields() || RD->hasPrivateFields())
return true;
if (RD->getNumBases() > 0)
return true;
if (RD->isPolymorphic())
return true;
if (RD->hasNonTrivialCopyAssignment())
return true;
for (const CXXConstructorDecl *Ctor : RD->ctors())
if (Ctor->isUserProvided())
return true;
if (RD->hasNonTrivialDestructor())
return true;
return false;
}
bool MicrosoftCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
if (!RD)
return false;
bool isAArch64 = CGM.getTarget().getTriple().isAArch64();
bool isSimple = !isAArch64 || !hasMicrosoftABIRestrictions(RD);
bool isIndirectReturn =
isAArch64 ? (!RD->canPassInRegisters() ||
IsSizeGreaterThan128(RD))
: !RD->isPOD();
bool isInstanceMethod = FI.isInstanceMethod();
if (isIndirectReturn || !isSimple || isInstanceMethod) {
CharUnits Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
FI.getReturnInfo().setSRetAfterThis(isInstanceMethod);
FI.getReturnInfo().setInReg(isAArch64 &&
!(isSimple && IsSizeGreaterThan128(RD)));
return true;
}
// Otherwise, use the C ABI rules.
return false;
}
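// For example, on 32-bit x86 any non-POD class is returned via a hidden
// sret pointer, and on AArch64 even a class passable in registers takes
// the indirect path above once it is wider than 128 bits.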
llvm::BasicBlock *
MicrosoftCXXABI::EmitCtorCompleteObjectHandler(CodeGenFunction &CGF,
const CXXRecordDecl *RD) {
llvm::Value *IsMostDerivedClass = getStructorImplicitParamValue(CGF);
assert(IsMostDerivedClass &&
"ctor for a class with virtual bases must have an implicit parameter");
llvm::Value *IsCompleteObject =
CGF.Builder.CreateIsNotNull(IsMostDerivedClass, "is_complete_object");
llvm::BasicBlock *CallVbaseCtorsBB = CGF.createBasicBlock("ctor.init_vbases");
llvm::BasicBlock *SkipVbaseCtorsBB = CGF.createBasicBlock("ctor.skip_vbases");
CGF.Builder.CreateCondBr(IsCompleteObject,
CallVbaseCtorsBB, SkipVbaseCtorsBB);
CGF.EmitBlock(CallVbaseCtorsBB);
// Fill in the vbtable pointers here.
EmitVBPtrStores(CGF, RD);
// CGF will put the base ctor calls in this basic block for us later.
return SkipVbaseCtorsBB;
}
llvm::BasicBlock *
MicrosoftCXXABI::EmitDtorCompleteObjectHandler(CodeGenFunction &CGF) {
llvm::Value *IsMostDerivedClass = getStructorImplicitParamValue(CGF);
assert(IsMostDerivedClass &&
"ctor for a class with virtual bases must have an implicit parameter");
llvm::Value *IsCompleteObject =
CGF.Builder.CreateIsNotNull(IsMostDerivedClass, "is_complete_object");
llvm::BasicBlock *CallVbaseDtorsBB = CGF.createBasicBlock("Dtor.dtor_vbases");
llvm::BasicBlock *SkipVbaseDtorsBB = CGF.createBasicBlock("Dtor.skip_vbases");
CGF.Builder.CreateCondBr(IsCompleteObject,
CallVbaseDtorsBB, SkipVbaseDtorsBB);
CGF.EmitBlock(CallVbaseDtorsBB);
// CGF will put the base dtor calls in this basic block for us later.
return SkipVbaseDtorsBB;
}
void MicrosoftCXXABI::initializeHiddenVirtualInheritanceMembers(
CodeGenFunction &CGF, const CXXRecordDecl *RD) {
// In most cases, an override for a vbase virtual method can adjust
// the "this" parameter by applying a constant offset.
// However, this is not enough while a constructor or a destructor of some
// class X is being executed if all the following conditions are met:
// - X has virtual bases, (1)
// - X overrides a virtual method M of a vbase Y, (2)
// - X itself is a vbase of the most derived class.
//
// If (1) and (2) are true, the vtorDisp for vbase Y is a hidden member of X
// which holds the extra amount of "this" adjustment we must do when we use
// the X vftables (i.e. during X ctor or dtor).
// Outside the ctors and dtors, the values of vtorDisps are zero.
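// Hypothetical illustration:
//   struct Y { virtual void f(); };
//   struct X : virtual Y { void f() override; }; // (1) and (2)
//   struct Z : virtual X { };                    // X is a vbase of Z (3)
// While Z's ctor/dtor is running X's, a virtual call to f() through Y's
// vftable must add the stored vtorDisp before the static adjustment.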
const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
typedef ASTRecordLayout::VBaseOffsetsMapTy VBOffsets;
const VBOffsets &VBaseMap = Layout.getVBaseOffsetsMap();
CGBuilderTy &Builder = CGF.Builder;
unsigned AS = getThisAddress(CGF).getAddressSpace();
llvm::Value *Int8This = nullptr; // Initialize lazily.
for (const CXXBaseSpecifier &S : RD->vbases()) {
const CXXRecordDecl *VBase = S.getType()->getAsCXXRecordDecl();
auto I = VBaseMap.find(VBase);
assert(I != VBaseMap.end());
if (!I->second.hasVtorDisp())
continue;
llvm::Value *VBaseOffset =
GetVirtualBaseClassOffset(CGF, getThisAddress(CGF), RD, VBase);
uint64_t ConstantVBaseOffset = I->second.VBaseOffset.getQuantity();
// vtorDisp_for_vbase = vbptr[vbase_idx] - offsetof(RD, vbase).
llvm::Value *VtorDispValue = Builder.CreateSub(
VBaseOffset, llvm::ConstantInt::get(CGM.PtrDiffTy, ConstantVBaseOffset),
"vtordisp.value");
VtorDispValue = Builder.CreateTruncOrBitCast(VtorDispValue, CGF.Int32Ty);
if (!Int8This)
Int8This = Builder.CreateBitCast(getThisValue(CGF),
CGF.Int8Ty->getPointerTo(AS));
llvm::Value *VtorDispPtr = Builder.CreateInBoundsGEP(Int8This, VBaseOffset);
// vtorDisp is always the 32-bits before the vbase in the class layout.
VtorDispPtr = Builder.CreateConstGEP1_32(VtorDispPtr, -4);
VtorDispPtr = Builder.CreateBitCast(
VtorDispPtr, CGF.Int32Ty->getPointerTo(AS), "vtordisp.ptr");
Builder.CreateAlignedStore(VtorDispValue, VtorDispPtr,
CharUnits::fromQuantity(4));
}
}
static bool hasDefaultCXXMethodCC(ASTContext &Context,
const CXXMethodDecl *MD) {
CallingConv ExpectedCallingConv = Context.getDefaultCallingConvention(
/*IsVariadic=*/false, /*IsCXXMethod=*/true);
CallingConv ActualCallingConv =
MD->getType()->getAs<FunctionProtoType>()->getCallConv();
return ExpectedCallingConv == ActualCallingConv;
}
void MicrosoftCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
// There's only one constructor type in this ABI.
CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
// Exported default constructors either have a simple call-site where they use
// the typical calling convention and have a single 'this' pointer for an
// argument -or- they get a wrapper function which appropriately thunks to the
// real default constructor. This thunk is the default constructor closure.
if (D->hasAttr<DLLExportAttr>() && D->isDefaultConstructor())
if (!hasDefaultCXXMethodCC(getContext(), D) || D->getNumParams() != 0) {
llvm::Function *Fn = getAddrOfCXXCtorClosure(D, Ctor_DefaultClosure);
Fn->setLinkage(llvm::GlobalValue::WeakODRLinkage);
CGM.setGVProperties(Fn, D);
}
}
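// For example, a dllexported 'struct S { S(int x = 0); };' gets a weak
// closure that simply calls S::S(0), so importing modules can invoke a
// true zero-argument constructor with the expected calling convention.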
void MicrosoftCXXABI::EmitVBPtrStores(CodeGenFunction &CGF,
const CXXRecordDecl *RD) {
Address This = getThisAddress(CGF);
This = CGF.Builder.CreateElementBitCast(This, CGM.Int8Ty, "this.int8");
const ASTContext &Context = getContext();
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
const VBTableGlobals &VBGlobals = enumerateVBTables(RD);
for (unsigned I = 0, E = VBGlobals.VBTables->size(); I != E; ++I) {
const std::unique_ptr<VPtrInfo> &VBT = (*VBGlobals.VBTables)[I];
llvm::GlobalVariable *GV = VBGlobals.Globals[I];
const ASTRecordLayout &SubobjectLayout =
Context.getASTRecordLayout(VBT->IntroducingObject);
CharUnits Offs = VBT->NonVirtualOffset;
Offs += SubobjectLayout.getVBPtrOffset();
if (VBT->getVBaseWithVPtr())
Offs += Layout.getVBaseClassOffset(VBT->getVBaseWithVPtr());
Address VBPtr = CGF.Builder.CreateConstInBoundsByteGEP(This, Offs);
llvm::Value *GVPtr =
CGF.Builder.CreateConstInBoundsGEP2_32(GV->getValueType(), GV, 0, 0);
VBPtr = CGF.Builder.CreateElementBitCast(VBPtr, GVPtr->getType(),
"vbptr." + VBT->ObjectWithVPtr->getName());
CGF.Builder.CreateStore(GVPtr, VBPtr);
}
}
CGCXXABI::AddedStructorArgs
MicrosoftCXXABI::buildStructorSignature(GlobalDecl GD,
SmallVectorImpl<CanQualType> &ArgTys) {
AddedStructorArgs Added;
// TODO: 'for base' flag
if (isa<CXXDestructorDecl>(GD.getDecl()) &&
GD.getDtorType() == Dtor_Deleting) {
// The scalar deleting destructor takes an implicit int parameter.
ArgTys.push_back(getContext().IntTy);
++Added.Suffix;
}
auto *CD = dyn_cast<CXXConstructorDecl>(GD.getDecl());
if (!CD)
return Added;
// All parameters are already in place except is_most_derived, which goes
// after 'this' if the ctor is variadic and last if it's not.
const CXXRecordDecl *Class = CD->getParent();
const FunctionProtoType *FPT = CD->getType()->castAs<FunctionProtoType>();
if (Class->getNumVBases()) {
if (FPT->isVariadic()) {
ArgTys.insert(ArgTys.begin() + 1, getContext().IntTy);
++Added.Prefix;
} else {
ArgTys.push_back(getContext().IntTy);
++Added.Suffix;
}
}
return Added;
}
void MicrosoftCXXABI::setCXXDestructorDLLStorage(llvm::GlobalValue *GV,
const CXXDestructorDecl *Dtor,
CXXDtorType DT) const {
// Deleting destructor variants are never imported or exported. Give them the
// default storage class.
if (DT == Dtor_Deleting) {
GV->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
} else {
const NamedDecl *ND = Dtor;
CGM.setDLLImportDLLExport(GV, ND);
}
}
llvm::GlobalValue::LinkageTypes MicrosoftCXXABI::getCXXDestructorLinkage(
GVALinkage Linkage, const CXXDestructorDecl *Dtor, CXXDtorType DT) const {
// Internal things are always internal, regardless of attributes. After this,
// we know the thunk is externally visible.
if (Linkage == GVA_Internal)
return llvm::GlobalValue::InternalLinkage;
switch (DT) {
case Dtor_Base:
// The base destructor most closely tracks the user-declared destructor, so
// we delegate back to the normal declarator case.
return CGM.getLLVMLinkageForDeclarator(Dtor, Linkage,
/*IsConstantVariable=*/false);
case Dtor_Complete:
// The complete destructor is like an inline function, but it may be
// imported and therefore must be exported as well. This requires changing
// the linkage if a DLL attribute is present.
if (Dtor->hasAttr<DLLExportAttr>())
return llvm::GlobalValue::WeakODRLinkage;
if (Dtor->hasAttr<DLLImportAttr>())
return llvm::GlobalValue::AvailableExternallyLinkage;
return llvm::GlobalValue::LinkOnceODRLinkage;
case Dtor_Deleting:
// Deleting destructors are like inline functions. They have vague linkage
// and are emitted everywhere they are used. They are internal if the class
// is internal.
return llvm::GlobalValue::LinkOnceODRLinkage;
case Dtor_Comdat:
llvm_unreachable("MS C++ ABI does not support comdat dtors");
}
llvm_unreachable("invalid dtor type");
}
void MicrosoftCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
// The TU defining a dtor is only guaranteed to emit a base destructor. All
// other destructor variants are delegating thunks.
CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));
}
CharUnits
MicrosoftCXXABI::getVirtualFunctionPrologueThisAdjustment(GlobalDecl GD) {
const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
// Complete destructors take a pointer to the complete object as a
// parameter, thus don't need this adjustment.
if (GD.getDtorType() == Dtor_Complete)
return CharUnits();
// There's no Dtor_Base in vftable but it shares the this adjustment with
// the deleting one, so look it up instead.
GD = GlobalDecl(DD, Dtor_Deleting);
}
MethodVFTableLocation ML =
CGM.getMicrosoftVTableContext().getMethodVFTableLocation(GD);
CharUnits Adjustment = ML.VFPtrOffset;
// Normal virtual instance methods need to adjust from the vfptr that first
// defined the virtual method to the virtual base subobject, but destructors
// do not. The vector deleting destructor thunk applies this adjustment for
// us if necessary.
if (isa<CXXDestructorDecl>(MD))
Adjustment = CharUnits::Zero();
if (ML.VBase) {
const ASTRecordLayout &DerivedLayout =
getContext().getASTRecordLayout(MD->getParent());
Adjustment += DerivedLayout.getVBaseClassOffset(ML.VBase);
}
return Adjustment;
}
Address MicrosoftCXXABI::adjustThisArgumentForVirtualFunctionCall(
CodeGenFunction &CGF, GlobalDecl GD, Address This,
bool VirtualCall) {
if (!VirtualCall) {
// If the call of a virtual function is not virtual, we just have to
// compensate for the adjustment the virtual function does in its prologue.
CharUnits Adjustment = getVirtualFunctionPrologueThisAdjustment(GD);
if (Adjustment.isZero())
return This;
This = CGF.Builder.CreateElementBitCast(This, CGF.Int8Ty);
assert(Adjustment.isPositive());
return CGF.Builder.CreateConstByteGEP(This, Adjustment);
}
const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
GlobalDecl LookupGD = GD;
if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
// Complete dtors take a pointer to the complete object,
// thus don't need adjustment.
if (GD.getDtorType() == Dtor_Complete)
return This;
// There's only Dtor_Deleting in vftable but it shares the this adjustment
// with the base one, so look up the deleting one instead.
LookupGD = GlobalDecl(DD, Dtor_Deleting);
}
MethodVFTableLocation ML =
CGM.getMicrosoftVTableContext().getMethodVFTableLocation(LookupGD);
CharUnits StaticOffset = ML.VFPtrOffset;
// Base destructors expect 'this' to point to the beginning of the base
// subobject, not the first vfptr that happens to contain the virtual dtor.
// However, we still need to apply the virtual base adjustment.
if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
StaticOffset = CharUnits::Zero();
Address Result = This;
if (ML.VBase) {
Result = CGF.Builder.CreateElementBitCast(Result, CGF.Int8Ty);
const CXXRecordDecl *Derived = MD->getParent();
const CXXRecordDecl *VBase = ML.VBase;
llvm::Value *VBaseOffset =
GetVirtualBaseClassOffset(CGF, Result, Derived, VBase);
llvm::Value *VBasePtr =
CGF.Builder.CreateInBoundsGEP(Result.getPointer(), VBaseOffset);
CharUnits VBaseAlign =
CGF.CGM.getVBaseAlignment(Result.getAlignment(), Derived, VBase);
Result = Address(VBasePtr, VBaseAlign);
}
if (!StaticOffset.isZero()) {
assert(StaticOffset.isPositive());
Result = CGF.Builder.CreateElementBitCast(Result, CGF.Int8Ty);
if (ML.VBase) {
// Non-virtual adjustment might result in a pointer outside the allocated
// object, e.g. if the final overrider class is laid out after the virtual
// base that declares a method in the most derived class.
// FIXME: Update the code that emits this adjustment in thunks prologues.
Result = CGF.Builder.CreateConstByteGEP(Result, StaticOffset);
} else {
Result = CGF.Builder.CreateConstInBoundsByteGEP(Result, StaticOffset);
}
}
return Result;
}
void MicrosoftCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
QualType &ResTy,
FunctionArgList &Params) {
ASTContext &Context = getContext();
const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));
if (isa<CXXConstructorDecl>(MD) && MD->getParent()->getNumVBases()) {
auto *IsMostDerived = ImplicitParamDecl::Create(
Context, /*DC=*/nullptr, CGF.CurGD.getDecl()->getLocation(),
&Context.Idents.get("is_most_derived"), Context.IntTy,
ImplicitParamDecl::Other);
// The 'most_derived' parameter goes second if the ctor is variadic and last
// if it's not. Dtors can't be variadic.
const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
if (FPT->isVariadic())
Params.insert(Params.begin() + 1, IsMostDerived);
else
Params.push_back(IsMostDerived);
getStructorImplicitParamDecl(CGF) = IsMostDerived;
} else if (isDeletingDtor(CGF.CurGD)) {
auto *ShouldDelete = ImplicitParamDecl::Create(
Context, /*DC=*/nullptr, CGF.CurGD.getDecl()->getLocation(),
&Context.Idents.get("should_call_delete"), Context.IntTy,
ImplicitParamDecl::Other);
Params.push_back(ShouldDelete);
getStructorImplicitParamDecl(CGF) = ShouldDelete;
}
}
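// For example, a constructor of 'struct C : virtual B { };' is emitted as
// C(C *this, int is_most_derived), and a deleting destructor as
// ~C(C *this, int should_call_delete).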
void MicrosoftCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
// Naked functions have no prolog.
if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
return;
// Overridden virtual methods of non-primary bases need to adjust the incoming
// 'this' pointer in the prologue. In this hierarchy, C::b will subtract
// sizeof(void*) to adjust from B* to C*:
// struct A { virtual void a(); };
// struct B { virtual void b(); };
// struct C : A, B { virtual void b(); };
//
// Leave the value stored in the 'this' alloca unadjusted, so that the
// debugger sees the unadjusted value. Microsoft debuggers require this, and
// will apply the ThisAdjustment in the method type information.
// FIXME: Do something better for DWARF debuggers, which won't expect this,
// without making our codegen depend on debug info settings.
llvm::Value *This = loadIncomingCXXThis(CGF);
const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
if (!CGF.CurFuncIsThunk && MD->isVirtual()) {
CharUnits Adjustment = getVirtualFunctionPrologueThisAdjustment(CGF.CurGD);
if (!Adjustment.isZero()) {
unsigned AS = cast<llvm::PointerType>(This->getType())->getAddressSpace();
llvm::Type *charPtrTy = CGF.Int8Ty->getPointerTo(AS),
*thisTy = This->getType();
This = CGF.Builder.CreateBitCast(This, charPtrTy);
assert(Adjustment.isPositive());
This = CGF.Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, This,
-Adjustment.getQuantity());
This = CGF.Builder.CreateBitCast(This, thisTy, "this.adjusted");
}
}
setCXXABIThisValue(CGF, This);
// If this is a function that the ABI specifies returns 'this', initialize
// the return slot to 'this' at the start of the function.
//
// Unlike the setting of return types, this is done within the ABI
// implementation instead of by clients of CGCXXABI because:
// 1) getThisValue is currently protected
// 2) in theory, an ABI could implement 'this' returns some other way;
// HasThisReturn only specifies a contract, not the implementation
if (HasThisReturn(CGF.CurGD))
CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
else if (hasMostDerivedReturn(CGF.CurGD))
CGF.Builder.CreateStore(CGF.EmitCastToVoidPtr(getThisValue(CGF)),
CGF.ReturnValue);
if (isa<CXXConstructorDecl>(MD) && MD->getParent()->getNumVBases()) {
assert(getStructorImplicitParamDecl(CGF) &&
"no implicit parameter for a constructor with virtual bases?");
getStructorImplicitParamValue(CGF)
= CGF.Builder.CreateLoad(
CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)),
"is_most_derived");
}
if (isDeletingDtor(CGF.CurGD)) {
assert(getStructorImplicitParamDecl(CGF) &&
"no implicit parameter for a deleting destructor?");
getStructorImplicitParamValue(CGF)
= CGF.Builder.CreateLoad(
CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)),
"should_call_delete");
}
}
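// For the A/B/C hierarchy in the comment above, the emitted prologue of
// C::b conceptually performs (sketch):
//   this.adjusted = (C *)((char *)this - sizeof(void *));
// while the 'this' alloca keeps the unadjusted B* value for the debugger.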
CGCXXABI::AddedStructorArgs MicrosoftCXXABI::addImplicitConstructorArgs(
CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
bool ForVirtualBase, bool Delegating, CallArgList &Args) {
assert(Type == Ctor_Complete || Type == Ctor_Base);
// Check if we need a 'most_derived' parameter.
if (!D->getParent()->getNumVBases())
return AddedStructorArgs{};
// Add the 'most_derived' argument second if we are variadic or last if not.
const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>();
llvm::Value *MostDerivedArg;
if (Delegating) {
MostDerivedArg = getStructorImplicitParamValue(CGF);
} else {
MostDerivedArg = llvm::ConstantInt::get(CGM.Int32Ty, Type == Ctor_Complete);
}
RValue RV = RValue::get(MostDerivedArg);
if (FPT->isVariadic()) {
Args.insert(Args.begin() + 1, CallArg(RV, getContext().IntTy));
return AddedStructorArgs::prefix(1);
}
Args.add(RV, getContext().IntTy);
return AddedStructorArgs::suffix(1);
}
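// Sketch of the resulting argument value (hypothetical hierarchy): for
//   struct V {}; struct D : virtual V { D(); };
// "D d;" invokes the constructor with is_most_derived = 1, so virtual bases
// get initialized; a base-subobject call made from a more-derived
// constructor passes 0, and a delegating call forwards its own flag.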
void MicrosoftCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
const CXXDestructorDecl *DD,
CXXDtorType Type, bool ForVirtualBase,
bool Delegating, Address This,
QualType ThisTy) {
// Use the base destructor variant in place of the complete destructor variant
// if the class has no virtual bases. This effectively implements some of the
// -mconstructor-aliases optimization, but as part of the MS C++ ABI.
if (Type == Dtor_Complete && DD->getParent()->getNumVBases() == 0)
Type = Dtor_Base;
GlobalDecl GD(DD, Type);
CGCallee Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);
if (DD->isVirtual()) {
assert(Type != CXXDtorType::Dtor_Deleting &&
"The deleting destructor should only be called via a virtual call");
This = adjustThisArgumentForVirtualFunctionCall(CGF, GlobalDecl(DD, Type),
This, false);
}
llvm::BasicBlock *BaseDtorEndBB = nullptr;
if (ForVirtualBase && isa<CXXConstructorDecl>(CGF.CurCodeDecl)) {
BaseDtorEndBB = EmitDtorCompleteObjectHandler(CGF);
}
CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy,
/*ImplicitParam=*/nullptr,
/*ImplicitParamTy=*/QualType(), nullptr);
if (BaseDtorEndBB) {
// The complete object handler should continue to be the remaining
// insertion point, so branch to its end block and resume emission there.
CGF.Builder.CreateBr(BaseDtorEndBB);
CGF.EmitBlock(BaseDtorEndBB);
}
}
void MicrosoftCXXABI::emitVTableTypeMetadata(const VPtrInfo &Info,
const CXXRecordDecl *RD,
llvm::GlobalVariable *VTable) {
if (!CGM.getCodeGenOpts().LTOUnit)
return;
// The location of the first virtual function pointer in the virtual table,
// aka the "address point" on Itanium. This is at offset 0 if RTTI is
// disabled, or sizeof(void*) if RTTI is enabled.
CharUnits AddressPoint =
getContext().getLangOpts().RTTIData
? getContext().toCharUnitsFromBits(
getContext().getTargetInfo().getPointerWidth(0))
: CharUnits::Zero();
if (Info.PathToIntroducingObject.empty()) {
CGM.AddVTableTypeMetadata(VTable, AddressPoint, RD);
return;
}
// Add a bitset entry for the least derived base belonging to this vftable.
CGM.AddVTableTypeMetadata(VTable, AddressPoint,
Info.PathToIntroducingObject.back());
// Add a bitset entry for each derived class that is laid out at the same
// offset as the least derived base.
for (unsigned I = Info.PathToIntroducingObject.size() - 1; I != 0; --I) {
const CXXRecordDecl *DerivedRD = Info.PathToIntroducingObject[I - 1];
const CXXRecordDecl *BaseRD = Info.PathToIntroducingObject[I];
const ASTRecordLayout &Layout =
getContext().getASTRecordLayout(DerivedRD);
CharUnits Offset;
auto VBI = Layout.getVBaseOffsetsMap().find(BaseRD);
if (VBI == Layout.getVBaseOffsetsMap().end())
Offset = Layout.getBaseClassOffset(BaseRD);
else
Offset = VBI->second.VBaseOffset;
if (!Offset.isZero())
return;
CGM.AddVTableTypeMetadata(VTable, AddressPoint, DerivedRD);
}
// Finally do the same for the most derived class.
if (Info.FullOffsetInMDC.isZero())
CGM.AddVTableTypeMetadata(VTable, AddressPoint, RD);
}
void MicrosoftCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
const CXXRecordDecl *RD) {
MicrosoftVTableContext &VFTContext = CGM.getMicrosoftVTableContext();
const VPtrInfoVector &VFPtrs = VFTContext.getVFPtrOffsets(RD);
for (const std::unique_ptr<VPtrInfo>& Info : VFPtrs) {
llvm::GlobalVariable *VTable = getAddrOfVTable(RD, Info->FullOffsetInMDC);
if (VTable->hasInitializer())
continue;
const VTableLayout &VTLayout =
VFTContext.getVFTableLayout(RD, Info->FullOffsetInMDC);
llvm::Constant *RTTI = nullptr;
if (any_of(VTLayout.vtable_components(),
[](const VTableComponent &VTC) { return VTC.isRTTIKind(); }))
RTTI = getMSCompleteObjectLocator(RD, *Info);
ConstantInitBuilder Builder(CGM);
auto Components = Builder.beginStruct();
CGVT.createVTableInitializer(Components, VTLayout, RTTI);
Components.finishAndSetAsInitializer(VTable);
emitVTableTypeMetadata(*Info, RD, VTable);
}
}
bool MicrosoftCXXABI::isVirtualOffsetNeededForVTableField(
CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
return Vptr.NearestVBase != nullptr;
}
llvm::Value *MicrosoftCXXABI::getVTableAddressPointInStructor(
CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
const CXXRecordDecl *NearestVBase) {
llvm::Constant *VTableAddressPoint = getVTableAddressPoint(Base, VTableClass);
if (!VTableAddressPoint) {
assert(Base.getBase()->getNumVBases() &&
!getContext().getASTRecordLayout(Base.getBase()).hasOwnVFPtr());
}
return VTableAddressPoint;
}
static void mangleVFTableName(MicrosoftMangleContext &MangleContext,
const CXXRecordDecl *RD, const VPtrInfo &VFPtr,
SmallString<256> &Name) {
llvm::raw_svector_ostream Out(Name);
MangleContext.mangleCXXVFTable(RD, VFPtr.MangledPath, Out);
}
llvm::Constant *
MicrosoftCXXABI::getVTableAddressPoint(BaseSubobject Base,
const CXXRecordDecl *VTableClass) {
(void)getAddrOfVTable(VTableClass, Base.getBaseOffset());
VFTableIdTy ID(VTableClass, Base.getBaseOffset());
return VFTablesMap[ID];
}
llvm::Constant *MicrosoftCXXABI::getVTableAddressPointForConstExpr(
BaseSubobject Base, const CXXRecordDecl *VTableClass) {
llvm::Constant *VFTable = getVTableAddressPoint(Base, VTableClass);
assert(VFTable && "Couldn't find a vftable for the given base?");
return VFTable;
}
llvm::GlobalVariable *MicrosoftCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
CharUnits VPtrOffset) {
// getAddrOfVTable may return 0 if asked to get an address of a vtable which
// shouldn't be used in the given record type. We want to cache this result in
// VFTablesMap, so a simple zero check is not sufficient.
VFTableIdTy ID(RD, VPtrOffset);
VTablesMapTy::iterator I;
bool Inserted;
std::tie(I, Inserted) = VTablesMap.insert(std::make_pair(ID, nullptr));
if (!Inserted)
return I->second;
llvm::GlobalVariable *&VTable = I->second;
MicrosoftVTableContext &VTContext = CGM.getMicrosoftVTableContext();
const VPtrInfoVector &VFPtrs = VTContext.getVFPtrOffsets(RD);
if (DeferredVFTables.insert(RD).second) {
// We haven't processed this record type before.
// Queue up this vtable for possible deferred emission.
CGM.addDeferredVTable(RD);
#ifndef NDEBUG
// Create all the vftables at once in order to make sure each vftable has
// a unique mangled name.
llvm::StringSet<> ObservedMangledNames;
for (size_t J = 0, F = VFPtrs.size(); J != F; ++J) {
SmallString<256> Name;
mangleVFTableName(getMangleContext(), RD, *VFPtrs[J], Name);
if (!ObservedMangledNames.insert(Name.str()).second)
llvm_unreachable("Already saw this mangling before?");
}
#endif
}
const std::unique_ptr<VPtrInfo> *VFPtrI = std::find_if(
VFPtrs.begin(), VFPtrs.end(), [&](const std::unique_ptr<VPtrInfo>& VPI) {
return VPI->FullOffsetInMDC == VPtrOffset;
});
if (VFPtrI == VFPtrs.end()) {
VFTablesMap[ID] = nullptr;
return nullptr;
}
const std::unique_ptr<VPtrInfo> &VFPtr = *VFPtrI;
SmallString<256> VFTableName;
mangleVFTableName(getMangleContext(), RD, *VFPtr, VFTableName);
// Classes marked __declspec(dllimport) need vftables generated on the
// import-side in order to support features like constexpr. No other
// translation unit relies on the emission of the local vftable;
// translation units are expected to generate them as needed.
//
// Because of this unique behavior, we maintain this logic here instead of
// getVTableLinkage.
llvm::GlobalValue::LinkageTypes VFTableLinkage =
RD->hasAttr<DLLImportAttr>() ? llvm::GlobalValue::LinkOnceODRLinkage
: CGM.getVTableLinkage(RD);
bool VFTableComesFromAnotherTU =
llvm::GlobalValue::isAvailableExternallyLinkage(VFTableLinkage) ||
llvm::GlobalValue::isExternalLinkage(VFTableLinkage);
bool VTableAliasIsRequred =
!VFTableComesFromAnotherTU && getContext().getLangOpts().RTTIData;
if (llvm::GlobalValue *VFTable =
CGM.getModule().getNamedGlobal(VFTableName)) {
VFTablesMap[ID] = VFTable;
VTable = VTableAliasIsRequred
? cast<llvm::GlobalVariable>(
cast<llvm::GlobalAlias>(VFTable)->getBaseObject())
: cast<llvm::GlobalVariable>(VFTable);
return VTable;
}
const VTableLayout &VTLayout =
VTContext.getVFTableLayout(RD, VFPtr->FullOffsetInMDC);
llvm::GlobalValue::LinkageTypes VTableLinkage =
VTableAliasIsRequred ? llvm::GlobalValue::PrivateLinkage : VFTableLinkage;
StringRef VTableName = VTableAliasIsRequred ? StringRef() : VFTableName.str();
llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);
// Create a backing variable for the contents of VTable. The VTable may
// or may not include space for a pointer to RTTI data.
llvm::GlobalValue *VFTable;
VTable = new llvm::GlobalVariable(CGM.getModule(), VTableType,
/*isConstant=*/true, VTableLinkage,
/*Initializer=*/nullptr, VTableName);
VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
llvm::Comdat *C = nullptr;
if (!VFTableComesFromAnotherTU &&
(llvm::GlobalValue::isWeakForLinker(VFTableLinkage) ||
(llvm::GlobalValue::isLocalLinkage(VFTableLinkage) &&
VTableAliasIsRequred)))
C = CGM.getModule().getOrInsertComdat(VFTableName.str());
// Only insert a pointer into the VFTable for RTTI data if we are not
// importing it. We never reference the RTTI data directly so there is no
// need to make room for it.
if (VTableAliasIsRequred) {
llvm::Value *GEPIndices[] = {llvm::ConstantInt::get(CGM.Int32Ty, 0),
llvm::ConstantInt::get(CGM.Int32Ty, 0),
llvm::ConstantInt::get(CGM.Int32Ty, 1)};
// Create a GEP which points just after the first entry in the VFTable,
// this should be the location of the first virtual method.
llvm::Constant *VTableGEP = llvm::ConstantExpr::getInBoundsGetElementPtr(
VTable->getValueType(), VTable, GEPIndices);
if (llvm::GlobalValue::isWeakForLinker(VFTableLinkage)) {
VFTableLinkage = llvm::GlobalValue::ExternalLinkage;
if (C)
C->setSelectionKind(llvm::Comdat::Largest);
}
VFTable = llvm::GlobalAlias::create(CGM.Int8PtrTy,
/*AddressSpace=*/0, VFTableLinkage,
VFTableName.str(), VTableGEP,
&CGM.getModule());
VFTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
} else {
// We don't need a GlobalAlias to be a symbol for the VTable if we won't
// be referencing any RTTI data.
// The GlobalVariable will end up being an appropriate definition of the
// VFTable.
VFTable = VTable;
}
if (C)
VTable->setComdat(C);
if (RD->hasAttr<DLLExportAttr>())
VFTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
VFTablesMap[ID] = VFTable;
return VTable;
}
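// Resulting IR shape (sketch; symbol names illustrative): when RTTI data is
// present and the vftable is not imported, the vftable symbol is emitted as
// a GlobalAlias one entry past the start of a private backing array,
// skipping the complete-object-locator slot:
//   @vftable.backing = private unnamed_addr constant { ... }, comdat
//   @vftable = alias ..., getelementptr(..., 0, 0, 1)
// Otherwise the GlobalVariable itself carries the vftable name.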
CGCallee MicrosoftCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
GlobalDecl GD,
Address This,
llvm::Type *Ty,
SourceLocation Loc) {
CGBuilderTy &Builder = CGF.Builder;
Ty = Ty->getPointerTo()->getPointerTo();
Address VPtr =
adjustThisArgumentForVirtualFunctionCall(CGF, GD, This, true);
auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
llvm::Value *VTable = CGF.GetVTablePtr(VPtr, Ty, MethodDecl->getParent());
MicrosoftVTableContext &VFTContext = CGM.getMicrosoftVTableContext();
MethodVFTableLocation ML = VFTContext.getMethodVFTableLocation(GD);
// Compute the identity of the most derived class whose virtual table is
// located at the MethodVFTableLocation ML.
auto getObjectWithVPtr = [&] {
return llvm::find_if(VFTContext.getVFPtrOffsets(
ML.VBase ? ML.VBase : MethodDecl->getParent()),
[&](const std::unique_ptr<VPtrInfo> &Info) {
return Info->FullOffsetInMDC == ML.VFPtrOffset;
})
->get()
->ObjectWithVPtr;
};
llvm::Value *VFunc;
if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
VFunc = CGF.EmitVTableTypeCheckedLoad(
getObjectWithVPtr(), VTable,
ML.Index * CGM.getContext().getTargetInfo().getPointerWidth(0) / 8);
} else {
if (CGM.getCodeGenOpts().PrepareForLTO)
CGF.EmitTypeMetadataCodeForVCall(getObjectWithVPtr(), VTable, Loc);
llvm::Value *VFuncPtr =
Builder.CreateConstInBoundsGEP1_64(VTable, ML.Index, "vfn");
VFunc = Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());
}
CGCallee Callee(GD, VFunc);
return Callee;
}
llvm::Value *MicrosoftCXXABI::EmitVirtualDestructorCall(
CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
Address This, DeleteOrMemberCallExpr E) {
auto *CE = E.dyn_cast<const CXXMemberCallExpr *>();
auto *D = E.dyn_cast<const CXXDeleteExpr *>();
assert((CE != nullptr) ^ (D != nullptr));
assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
// We have only one destructor in the vftable but can get both behaviors
// by passing an implicit int parameter.
GlobalDecl GD(Dtor, Dtor_Deleting);
const CGFunctionInfo *FInfo =
&CGM.getTypes().arrangeCXXStructorDeclaration(GD);
llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty);
ASTContext &Context = getContext();
llvm::Value *ImplicitParam = llvm::ConstantInt::get(
llvm::IntegerType::getInt32Ty(CGF.getLLVMContext()),
DtorType == Dtor_Deleting);
QualType ThisTy;
if (CE) {
ThisTy = CE->getObjectType();
} else {
ThisTy = D->getDestroyedType();
}
This = adjustThisArgumentForVirtualFunctionCall(CGF, GD, This, true);
RValue RV = CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy,
ImplicitParam, Context.IntTy, CE);
return RV.getScalarVal();
}
const VBTableGlobals &
MicrosoftCXXABI::enumerateVBTables(const CXXRecordDecl *RD) {
// At this layer, we can key the cache off of a single class, which is much
// easier than caching each vbtable individually.
llvm::DenseMap<const CXXRecordDecl*, VBTableGlobals>::iterator Entry;
bool Added;
std::tie(Entry, Added) =
VBTablesMap.insert(std::make_pair(RD, VBTableGlobals()));
VBTableGlobals &VBGlobals = Entry->second;
if (!Added)
return VBGlobals;
MicrosoftVTableContext &Context = CGM.getMicrosoftVTableContext();
VBGlobals.VBTables = &Context.enumerateVBTables(RD);
// Cache the globals for all vbtables so we don't have to recompute the
// mangled names.
llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
for (VPtrInfoVector::const_iterator I = VBGlobals.VBTables->begin(),
E = VBGlobals.VBTables->end();
I != E; ++I) {
VBGlobals.Globals.push_back(getAddrOfVBTable(**I, RD, Linkage));
}
return VBGlobals;
}
llvm::Function *
MicrosoftCXXABI::EmitVirtualMemPtrThunk(const CXXMethodDecl *MD,
const MethodVFTableLocation &ML) {
assert(!isa<CXXConstructorDecl>(MD) && !isa<CXXDestructorDecl>(MD) &&
"can't form pointers to ctors or virtual dtors");
// Calculate the mangled name.
SmallString<256> ThunkName;
llvm::raw_svector_ostream Out(ThunkName);
getMangleContext().mangleVirtualMemPtrThunk(MD, ML, Out);
// If the thunk has been generated previously, just return it.
if (llvm::GlobalValue *GV = CGM.getModule().getNamedValue(ThunkName))
return cast<llvm::Function>(GV);
// Create the llvm::Function.
const CGFunctionInfo &FnInfo =
CGM.getTypes().arrangeUnprototypedMustTailThunk(MD);
llvm::FunctionType *ThunkTy = CGM.getTypes().GetFunctionType(FnInfo);
llvm::Function *ThunkFn =
llvm::Function::Create(ThunkTy, llvm::Function::ExternalLinkage,
ThunkName.str(), &CGM.getModule());
assert(ThunkFn->getName() == ThunkName && "name was uniqued!");
ThunkFn->setLinkage(MD->isExternallyVisible()
? llvm::GlobalValue::LinkOnceODRLinkage
: llvm::GlobalValue::InternalLinkage);
if (MD->isExternallyVisible())
ThunkFn->setComdat(CGM.getModule().getOrInsertComdat(ThunkFn->getName()));
CGM.SetLLVMFunctionAttributes(MD, FnInfo, ThunkFn);
CGM.SetLLVMFunctionAttributesForDefinition(MD, ThunkFn);
// Add the "thunk" attribute so that LLVM knows that the return type is
// meaningless. These thunks can be used to call functions with differing
// return types, and the caller is required to cast the prototype
// appropriately to extract the correct value.
ThunkFn->addFnAttr("thunk");
// These thunks can be compared, so they are not unnamed.
ThunkFn->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::None);
// Start codegen.
CodeGenFunction CGF(CGM);
CGF.CurGD = GlobalDecl(MD);
CGF.CurFuncIsThunk = true;
// Build FunctionArgs, but only include the implicit 'this' parameter
// declaration.
FunctionArgList FunctionArgs;
buildThisParam(CGF, FunctionArgs);
// Start defining the function.
CGF.StartFunction(GlobalDecl(), FnInfo.getReturnType(), ThunkFn, FnInfo,
FunctionArgs, MD->getLocation(), SourceLocation());
setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));
// Load the vfptr and then callee from the vftable. The callee should have
// adjusted 'this' so that the vfptr is at offset zero.
llvm::Value *VTable = CGF.GetVTablePtr(
getThisAddress(CGF), ThunkTy->getPointerTo()->getPointerTo(), MD->getParent());
llvm::Value *VFuncPtr =
CGF.Builder.CreateConstInBoundsGEP1_64(VTable, ML.Index, "vfn");
llvm::Value *Callee =
CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());
CGF.EmitMustTailThunk(MD, getThisValue(CGF), {ThunkTy, Callee});
return ThunkFn;
}
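// Conceptual shape of the emitted thunk (sketch, IR-like pseudocode):
//   define @thunk(%this, ...) "thunk" {
//     %vftable = load from %this     ; caller already adjusted 'this' so the
//                                    ; vfptr is at offset zero
//     %fn = load %vftable[ML.Index]
//     musttail call %fn(%this, ...)  ; forwards all arguments unchanged
//   }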
void MicrosoftCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
const VBTableGlobals &VBGlobals = enumerateVBTables(RD);
for (unsigned I = 0, E = VBGlobals.VBTables->size(); I != E; ++I) {
const std::unique_ptr<VPtrInfo>& VBT = (*VBGlobals.VBTables)[I];
llvm::GlobalVariable *GV = VBGlobals.Globals[I];
if (GV->isDeclaration())
emitVBTableDefinition(*VBT, RD, GV);
}
}
llvm::GlobalVariable *
MicrosoftCXXABI::getAddrOfVBTable(const VPtrInfo &VBT, const CXXRecordDecl *RD,
llvm::GlobalVariable::LinkageTypes Linkage) {
SmallString<256> OutName;
llvm::raw_svector_ostream Out(OutName);
getMangleContext().mangleCXXVBTable(RD, VBT.MangledPath, Out);
StringRef Name = OutName.str();
llvm::ArrayType *VBTableType =
llvm::ArrayType::get(CGM.IntTy, 1 + VBT.ObjectWithVPtr->getNumVBases());
assert(!CGM.getModule().getNamedGlobal(Name) &&
"vbtable with this name already exists: mangling bug?");
CharUnits Alignment =
CGM.getContext().getTypeAlignInChars(CGM.getContext().IntTy);
llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
Name, VBTableType, Linkage, Alignment.getQuantity());
GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
if (RD->hasAttr<DLLImportAttr>())
GV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
else if (RD->hasAttr<DLLExportAttr>())
GV->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
if (!GV->hasExternalLinkage())
emitVBTableDefinition(VBT, RD, GV);
return GV;
}
void MicrosoftCXXABI::emitVBTableDefinition(const VPtrInfo &VBT,
const CXXRecordDecl *RD,
llvm::GlobalVariable *GV) const {
const CXXRecordDecl *ObjectWithVPtr = VBT.ObjectWithVPtr;
assert(RD->getNumVBases() && ObjectWithVPtr->getNumVBases() &&
"should only emit vbtables for classes with vbtables");
const ASTRecordLayout &BaseLayout =
getContext().getASTRecordLayout(VBT.IntroducingObject);
const ASTRecordLayout &DerivedLayout = getContext().getASTRecordLayout(RD);
SmallVector<llvm::Constant *, 4> Offsets(1 + ObjectWithVPtr->getNumVBases(),
nullptr);
// The offset from ObjectWithVPtr's vbptr to itself always leads.
CharUnits VBPtrOffset = BaseLayout.getVBPtrOffset();
Offsets[0] = llvm::ConstantInt::get(CGM.IntTy, -VBPtrOffset.getQuantity());
MicrosoftVTableContext &Context = CGM.getMicrosoftVTableContext();
for (const auto &I : ObjectWithVPtr->vbases()) {
const CXXRecordDecl *VBase = I.getType()->getAsCXXRecordDecl();
CharUnits Offset = DerivedLayout.getVBaseClassOffset(VBase);
assert(!Offset.isNegative());
// Make it relative to the subobject vbptr.
CharUnits CompleteVBPtrOffset = VBT.NonVirtualOffset + VBPtrOffset;
if (VBT.getVBaseWithVPtr())
CompleteVBPtrOffset +=
DerivedLayout.getVBaseClassOffset(VBT.getVBaseWithVPtr());
Offset -= CompleteVBPtrOffset;
unsigned VBIndex = Context.getVBTableIndex(ObjectWithVPtr, VBase);
assert(Offsets[VBIndex] == nullptr && "The same vbindex seen twice?");
Offsets[VBIndex] = llvm::ConstantInt::get(CGM.IntTy, Offset.getQuantity());
}
assert(Offsets.size() ==
cast<llvm::ArrayType>(cast<llvm::PointerType>(GV->getType())
->getElementType())->getNumElements());
llvm::ArrayType *VBTableType =
llvm::ArrayType::get(CGM.IntTy, Offsets.size());
llvm::Constant *Init = llvm::ConstantArray::get(VBTableType, Offsets);
GV->setInitializer(Init);
if (RD->hasAttr<DLLImportAttr>())
GV->setLinkage(llvm::GlobalVariable::AvailableExternallyLinkage);
}
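// Sketch of a resulting vbtable (hypothetical hierarchy): for
//   struct V1 {}; struct V2 {}; struct D : virtual V1, virtual V2 {};
// the vbtable is an array of i32 offsets, each relative to the vbptr:
//   { -vbptr_offset, offset(V1) - vbptr_offset, offset(V2) - vbptr_offset }
// with the subobject's self-offset entry always first.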
llvm::Value *MicrosoftCXXABI::performThisAdjustment(CodeGenFunction &CGF,
Address This,
const ThisAdjustment &TA) {
if (TA.isEmpty())
return This.getPointer();
This = CGF.Builder.CreateElementBitCast(This, CGF.Int8Ty);
llvm::Value *V;
if (TA.Virtual.isEmpty()) {
V = This.getPointer();
} else {
assert(TA.Virtual.Microsoft.VtordispOffset < 0);
// Adjust the this argument based on the vtordisp value.
Address VtorDispPtr =
CGF.Builder.CreateConstInBoundsByteGEP(This,
CharUnits::fromQuantity(TA.Virtual.Microsoft.VtordispOffset));
VtorDispPtr = CGF.Builder.CreateElementBitCast(VtorDispPtr, CGF.Int32Ty);
llvm::Value *VtorDisp = CGF.Builder.CreateLoad(VtorDispPtr, "vtordisp");
V = CGF.Builder.CreateGEP(This.getPointer(),
CGF.Builder.CreateNeg(VtorDisp));
// Unfortunately, having applied the vtordisp means that we no
// longer really have a known alignment for the vbptr step.
// We'll assume the vbptr is pointer-aligned.
if (TA.Virtual.Microsoft.VBPtrOffset) {
// If the final overrider is defined in a virtual base other than the one
// that holds the vfptr, we have to use a vtordispex thunk which looks up
// the vbtable of the derived class.
assert(TA.Virtual.Microsoft.VBPtrOffset > 0);
assert(TA.Virtual.Microsoft.VBOffsetOffset >= 0);
llvm::Value *VBPtr;
llvm::Value *VBaseOffset =
GetVBaseOffsetFromVBPtr(CGF, Address(V, CGF.getPointerAlign()),
-TA.Virtual.Microsoft.VBPtrOffset,
TA.Virtual.Microsoft.VBOffsetOffset, &VBPtr);
V = CGF.Builder.CreateInBoundsGEP(VBPtr, VBaseOffset);
}
}
if (TA.NonVirtual) {
// Non-virtual adjustment might result in a pointer outside the allocated
// object, e.g. if the final overrider class is laid out after the virtual
// base that declares a method in the most derived class.
V = CGF.Builder.CreateConstGEP1_32(V, TA.NonVirtual);
}
// No need to bitcast back; the call CodeGen will handle this.
return V;
}
llvm::Value *
MicrosoftCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
const ReturnAdjustment &RA) {
if (RA.isEmpty())
return Ret.getPointer();
auto OrigTy = Ret.getType();
Ret = CGF.Builder.CreateElementBitCast(Ret, CGF.Int8Ty);
llvm::Value *V = Ret.getPointer();
if (RA.Virtual.Microsoft.VBIndex) {
assert(RA.Virtual.Microsoft.VBIndex > 0);
int32_t IntSize = CGF.getIntSize().getQuantity();
llvm::Value *VBPtr;
llvm::Value *VBaseOffset =
GetVBaseOffsetFromVBPtr(CGF, Ret, RA.Virtual.Microsoft.VBPtrOffset,
IntSize * RA.Virtual.Microsoft.VBIndex, &VBPtr);
V = CGF.Builder.CreateInBoundsGEP(VBPtr, VBaseOffset);
}
if (RA.NonVirtual)
V = CGF.Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, V, RA.NonVirtual);
// Cast back to the original type.
return CGF.Builder.CreateBitCast(V, OrigTy);
}
bool MicrosoftCXXABI::requiresArrayCookie(const CXXDeleteExpr *expr,
QualType elementType) {
// Microsoft seems to completely ignore the possibility of a
// two-argument usual deallocation function.
return elementType.isDestructedType();
}
bool MicrosoftCXXABI::requiresArrayCookie(const CXXNewExpr *expr) {
// Microsoft seems to completely ignore the possibility of a
// two-argument usual deallocation function.
return expr->getAllocatedType().isDestructedType();
}
CharUnits MicrosoftCXXABI::getArrayCookieSizeImpl(QualType type) {
// The array cookie is always a size_t; we then pad that out to the
// alignment of the element type.
ASTContext &Ctx = getContext();
return std::max(Ctx.getTypeSizeInChars(Ctx.getSizeType()),
Ctx.getTypeAlignInChars(type));
}
llvm::Value *MicrosoftCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
Address allocPtr,
CharUnits cookieSize) {
Address numElementsPtr =
CGF.Builder.CreateElementBitCast(allocPtr, CGF.SizeTy);
return CGF.Builder.CreateLoad(numElementsPtr);
}
Address MicrosoftCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
Address newPtr,
llvm::Value *numElements,
const CXXNewExpr *expr,
QualType elementType) {
assert(requiresArrayCookie(expr));
// The size of the cookie.
CharUnits cookieSize = getArrayCookieSizeImpl(elementType);
// Compute an offset to the cookie.
Address cookiePtr = newPtr;
// Write the number of elements into the appropriate slot.
Address numElementsPtr
= CGF.Builder.CreateElementBitCast(cookiePtr, CGF.SizeTy);
CGF.Builder.CreateStore(numElements, numElementsPtr);
// Finally, compute a pointer to the actual data buffer by skipping
// over the cookie completely.
return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
}
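// Resulting allocation layout for "new T[n]" when a cookie is required
// (sketch):
//   [ size_t n | padding up to max(sizeof(size_t), alignof(T)) | T[0] ... ]
// The returned Address points at T[0], just past the cookie.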
static void emitGlobalDtorWithTLRegDtor(CodeGenFunction &CGF, const VarDecl &VD,
llvm::FunctionCallee Dtor,
llvm::Constant *Addr) {
// Create a function which calls the destructor.
llvm::Constant *DtorStub = CGF.createAtExitStub(VD, Dtor, Addr);
// extern "C" int __tlregdtor(void (*f)(void));
llvm::FunctionType *TLRegDtorTy = llvm::FunctionType::get(
CGF.IntTy, DtorStub->getType(), /*isVarArg=*/false);
llvm::FunctionCallee TLRegDtor = CGF.CGM.CreateRuntimeFunction(
TLRegDtorTy, "__tlregdtor", llvm::AttributeList(), /*Local=*/true);
if (llvm::Function *TLRegDtorFn =
dyn_cast<llvm::Function>(TLRegDtor.getCallee()))
TLRegDtorFn->setDoesNotThrow();
CGF.EmitNounwindRuntimeCall(TLRegDtor, DtorStub);
}
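// C-level sketch of what this emits for a TLS variable 'v' of type T
// (stub name hypothetical):
//   static void v_dtor_stub(void) { v.~T(); }
//   __tlregdtor(&v_dtor_stub);   // returns int, ignored here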
void MicrosoftCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
llvm::FunctionCallee Dtor,
llvm::Constant *Addr) {
if (D.isNoDestroy(CGM.getContext()))
return;
if (D.getTLSKind())
return emitGlobalDtorWithTLRegDtor(CGF, D, Dtor, Addr);
// The default behavior is to use atexit.
CGF.registerGlobalDtorWithAtExit(D, Dtor, Addr);
}
void MicrosoftCXXABI::EmitThreadLocalInitFuncs(
CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
ArrayRef<llvm::Function *> CXXThreadLocalInits,
ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
if (CXXThreadLocalInits.empty())
return;
CGM.AppendLinkerOptions(CGM.getTarget().getTriple().getArch() ==
llvm::Triple::x86
? "/include:___dyn_tls_init@12"
: "/include:__dyn_tls_init");
// This will create a GV in the .CRT$XDU section. It will point to our
// initialization function. The CRT will call all of these function
// pointers at start-up time and, eventually, at thread-creation time.
auto AddToXDU = [&CGM](llvm::Function *InitFunc) {
llvm::GlobalVariable *InitFuncPtr = new llvm::GlobalVariable(
CGM.getModule(), InitFunc->getType(), /*isConstant=*/true,
llvm::GlobalVariable::InternalLinkage, InitFunc,
Twine(InitFunc->getName(), "$initializer$"));
InitFuncPtr->setSection(".CRT$XDU");
// This variable has discardable linkage; we have to add it to @llvm.used to
// ensure it won't get discarded.
CGM.addUsedGlobal(InitFuncPtr);
return InitFuncPtr;
};
std::vector<llvm::Function *> NonComdatInits;
for (size_t I = 0, E = CXXThreadLocalInitVars.size(); I != E; ++I) {
llvm::GlobalVariable *GV = cast<llvm::GlobalVariable>(
CGM.GetGlobalValue(CGM.getMangledName(CXXThreadLocalInitVars[I])));
llvm::Function *F = CXXThreadLocalInits[I];
// If the GV is already in a comdat group, then we have to join it.
if (llvm::Comdat *C = GV->getComdat())
AddToXDU(F)->setComdat(C);
else
NonComdatInits.push_back(F);
}
if (!NonComdatInits.empty()) {
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
llvm::Function *InitFunc = CGM.CreateGlobalInitOrDestructFunction(
FTy, "__tls_init", CGM.getTypes().arrangeNullaryFunction(),
SourceLocation(), /*TLS=*/true);
CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(InitFunc, NonComdatInits);
AddToXDU(InitFunc);
}
}
LValue MicrosoftCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
const VarDecl *VD,
QualType LValType) {
CGF.CGM.ErrorUnsupported(VD, "thread wrappers");
return LValue();
}
static ConstantAddress getInitThreadEpochPtr(CodeGenModule &CGM) {
StringRef VarName("_Init_thread_epoch");
CharUnits Align = CGM.getIntAlign();
if (auto *GV = CGM.getModule().getNamedGlobal(VarName))
return ConstantAddress(GV, Align);
auto *GV = new llvm::GlobalVariable(
CGM.getModule(), CGM.IntTy,
/*isConstant=*/false, llvm::GlobalVariable::ExternalLinkage,
/*Initializer=*/nullptr, VarName,
/*InsertBefore=*/nullptr, llvm::GlobalVariable::GeneralDynamicTLSModel);
GV->setAlignment(Align.getQuantity());
return ConstantAddress(GV, Align);
}
static llvm::FunctionCallee getInitThreadHeaderFn(CodeGenModule &CGM) {
llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
CGM.IntTy->getPointerTo(), /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(
FTy, "_Init_thread_header",
llvm::AttributeList::get(CGM.getLLVMContext(),
llvm::AttributeList::FunctionIndex,
llvm::Attribute::NoUnwind),
/*Local=*/true);
}
static llvm::FunctionCallee getInitThreadFooterFn(CodeGenModule &CGM) {
llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
CGM.IntTy->getPointerTo(), /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(
FTy, "_Init_thread_footer",
llvm::AttributeList::get(CGM.getLLVMContext(),
llvm::AttributeList::FunctionIndex,
llvm::Attribute::NoUnwind),
/*Local=*/true);
}
static llvm::FunctionCallee getInitThreadAbortFn(CodeGenModule &CGM) {
llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
CGM.IntTy->getPointerTo(), /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(
FTy, "_Init_thread_abort",
llvm::AttributeList::get(CGM.getLLVMContext(),
llvm::AttributeList::FunctionIndex,
llvm::Attribute::NoUnwind),
/*Local=*/true);
}
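// For reference, the runtime entry points constructed above correspond to
// the C signatures (parameter name illustrative):
//   void _Init_thread_header(int *ptss);
//   void _Init_thread_footer(int *ptss);
//   void _Init_thread_abort(int *ptss);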
namespace {
struct ResetGuardBit final : EHScopeStack::Cleanup {
Address Guard;
unsigned GuardNum;
ResetGuardBit(Address Guard, unsigned GuardNum)
: Guard(Guard), GuardNum(GuardNum) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
// Reset the bit in the mask so that the static variable may be
// reinitialized.
CGBuilderTy &Builder = CGF.Builder;
llvm::LoadInst *LI = Builder.CreateLoad(Guard);
llvm::ConstantInt *Mask =
llvm::ConstantInt::get(CGF.IntTy, ~(1ULL << GuardNum));
Builder.CreateStore(Builder.CreateAnd(LI, Mask), Guard);
}
};
struct CallInitThreadAbort final : EHScopeStack::Cleanup {
llvm::Value *Guard;
CallInitThreadAbort(Address Guard) : Guard(Guard.getPointer()) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
// Calling _Init_thread_abort will reset the guard's state.
CGF.EmitNounwindRuntimeCall(getInitThreadAbortFn(CGF.CGM), Guard);
}
};
}
void MicrosoftCXXABI::EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
llvm::GlobalVariable *GV,
bool PerformInit) {
// MSVC only uses guards for static locals.
if (!D.isStaticLocal()) {
assert(GV->hasWeakLinkage() || GV->hasLinkOnceLinkage());
// GlobalOpt is allowed to discard the initializer, so use linkonce_odr.
llvm::Function *F = CGF.CurFn;
F->setLinkage(llvm::GlobalValue::LinkOnceODRLinkage);
F->setComdat(CGM.getModule().getOrInsertComdat(F->getName()));
CGF.EmitCXXGlobalVarDeclInit(D, GV, PerformInit);
return;
}
bool ThreadlocalStatic = D.getTLSKind();
bool ThreadsafeStatic = getContext().getLangOpts().ThreadsafeStatics;
// Thread-safe static variables which aren't thread-specific have a
// per-variable guard.
bool HasPerVariableGuard = ThreadsafeStatic && !ThreadlocalStatic;
CGBuilderTy &Builder = CGF.Builder;
llvm::IntegerType *GuardTy = CGF.Int32Ty;
llvm::ConstantInt *Zero = llvm::ConstantInt::get(GuardTy, 0);
CharUnits GuardAlign = CharUnits::fromQuantity(4);
// Get the guard variable for this function if we have one already.
GuardInfo *GI = nullptr;
if (ThreadlocalStatic)
GI = &ThreadLocalGuardVariableMap[D.getDeclContext()];
else if (!ThreadsafeStatic)
GI = &GuardVariableMap[D.getDeclContext()];
llvm::GlobalVariable *GuardVar = GI ? GI->Guard : nullptr;
unsigned GuardNum;
if (D.isExternallyVisible()) {
// Externally visible variables have to be numbered in Sema to properly
// handle unreachable VarDecls.
GuardNum = getContext().getStaticLocalNumber(&D);
assert(GuardNum > 0);
GuardNum--;
} else if (HasPerVariableGuard) {
GuardNum = ThreadSafeGuardNumMap[D.getDeclContext()]++;
} else {
// Non-externally visible variables are numbered here in CodeGen.
GuardNum = GI->BitIndex++;
}
if (!HasPerVariableGuard && GuardNum >= 32) {
if (D.isExternallyVisible())
ErrorUnsupportedABI(CGF, "more than 32 guarded initializations");
GuardNum %= 32;
GuardVar = nullptr;
}
if (!GuardVar) {
// Mangle the name for the guard.
SmallString<256> GuardName;
{
llvm::raw_svector_ostream Out(GuardName);
if (HasPerVariableGuard)
getMangleContext().mangleThreadSafeStaticGuardVariable(&D, GuardNum,
Out);
else
getMangleContext().mangleStaticGuardVariable(&D, Out);
}
// Create the guard variable with a zero-initializer. Just absorb linkage,
// visibility and dll storage class from the guarded variable.
GuardVar =
new llvm::GlobalVariable(CGM.getModule(), GuardTy, /*isConstant=*/false,
GV->getLinkage(), Zero, GuardName.str());
GuardVar->setVisibility(GV->getVisibility());
GuardVar->setDLLStorageClass(GV->getDLLStorageClass());
GuardVar->setAlignment(GuardAlign.getQuantity());
if (GuardVar->isWeakForLinker())
GuardVar->setComdat(
CGM.getModule().getOrInsertComdat(GuardVar->getName()));
if (D.getTLSKind())
GuardVar->setThreadLocal(true);
if (GI && !HasPerVariableGuard)
GI->Guard = GuardVar;
}
ConstantAddress GuardAddr(GuardVar, GuardAlign);
assert(GuardVar->getLinkage() == GV->getLinkage() &&
"static local from the same function had different linkage");
if (!HasPerVariableGuard) {
// Pseudo code for the test:
// if (!(GuardVar & MyGuardBit)) {
// GuardVar |= MyGuardBit;
// ... initialize the object ...;
// }
// Test our bit from the guard variable.
llvm::ConstantInt *Bit = llvm::ConstantInt::get(GuardTy, 1ULL << GuardNum);
llvm::LoadInst *LI = Builder.CreateLoad(GuardAddr);
llvm::Value *NeedsInit =
Builder.CreateICmpEQ(Builder.CreateAnd(LI, Bit), Zero);
llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
CGF.EmitCXXGuardedInitBranch(NeedsInit, InitBlock, EndBlock,
CodeGenFunction::GuardKind::VariableGuard, &D);
// Set our bit in the guard variable, emit the initializer, and add a global
// destructor if appropriate.
CGF.EmitBlock(InitBlock);
Builder.CreateStore(Builder.CreateOr(LI, Bit), GuardAddr);
CGF.EHStack.pushCleanup<ResetGuardBit>(EHCleanup, GuardAddr, GuardNum);
CGF.EmitCXXGlobalVarDeclInit(D, GV, PerformInit);
CGF.PopCleanupBlock();
Builder.CreateBr(EndBlock);
// Continue.
CGF.EmitBlock(EndBlock);
} else {
// Pseudo code for the test:
// if (TSS > _Init_thread_epoch) {
// _Init_thread_header(&TSS);
// if (TSS == -1) {
// ... initialize the object ...;
// _Init_thread_footer(&TSS);
// }
// }
//
// The algorithm is almost identical to what can be found in the appendix
// found in N2325.
// This BasicBlock determines whether or not we have any work to do.
llvm::LoadInst *FirstGuardLoad = Builder.CreateLoad(GuardAddr);
FirstGuardLoad->setOrdering(llvm::AtomicOrdering::Unordered);
llvm::LoadInst *InitThreadEpoch =
Builder.CreateLoad(getInitThreadEpochPtr(CGM));
llvm::Value *IsUninitialized =
Builder.CreateICmpSGT(FirstGuardLoad, InitThreadEpoch);
llvm::BasicBlock *AttemptInitBlock = CGF.createBasicBlock("init.attempt");
llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
CGF.EmitCXXGuardedInitBranch(IsUninitialized, AttemptInitBlock, EndBlock,
CodeGenFunction::GuardKind::VariableGuard, &D);
// This BasicBlock attempts to determine whether or not this thread is
// responsible for doing the initialization.
CGF.EmitBlock(AttemptInitBlock);
CGF.EmitNounwindRuntimeCall(getInitThreadHeaderFn(CGM),
GuardAddr.getPointer());
llvm::LoadInst *SecondGuardLoad = Builder.CreateLoad(GuardAddr);
SecondGuardLoad->setOrdering(llvm::AtomicOrdering::Unordered);
llvm::Value *ShouldDoInit =
Builder.CreateICmpEQ(SecondGuardLoad, getAllOnesInt());
llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
Builder.CreateCondBr(ShouldDoInit, InitBlock, EndBlock);
// Ok, we ended up getting selected as the initializing thread.
CGF.EmitBlock(InitBlock);
CGF.EHStack.pushCleanup<CallInitThreadAbort>(EHCleanup, GuardAddr);
CGF.EmitCXXGlobalVarDeclInit(D, GV, PerformInit);
CGF.PopCleanupBlock();
CGF.EmitNounwindRuntimeCall(getInitThreadFooterFn(CGM),
GuardAddr.getPointer());
Builder.CreateBr(EndBlock);
CGF.EmitBlock(EndBlock);
}
}
bool MicrosoftCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
// Null-ness for function memptrs only depends on the first field, which is
// the function pointer. The rest don't matter, so we can zero initialize.
if (MPT->isMemberFunctionPointer())
return true;
// The virtual base adjustment field is always -1 for null, so if we have one
// we can't zero initialize. The field offset is sometimes also -1 if 0 is a
// valid field offset.
const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
MSInheritanceAttr::Spelling Inheritance = RD->getMSInheritanceModel();
return (!MSInheritanceAttr::hasVBTableOffsetField(Inheritance) &&
RD->nullFieldOffsetIsZero());
}
llvm::Type *
MicrosoftCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
MSInheritanceAttr::Spelling Inheritance = RD->getMSInheritanceModel();
llvm::SmallVector<llvm::Type *, 4> fields;
if (MPT->isMemberFunctionPointer())
fields.push_back(CGM.VoidPtrTy); // FunctionPointerOrVirtualThunk
else
fields.push_back(CGM.IntTy); // FieldOffset
if (MSInheritanceAttr::hasNVOffsetField(MPT->isMemberFunctionPointer(),
Inheritance))
fields.push_back(CGM.IntTy);
if (MSInheritanceAttr::hasVBPtrOffsetField(Inheritance))
fields.push_back(CGM.IntTy);
if (MSInheritanceAttr::hasVBTableOffsetField(Inheritance))
fields.push_back(CGM.IntTy); // VirtualBaseAdjustmentOffset
if (fields.size() == 1)
return fields[0];
return llvm::StructType::get(CGM.getLLVMContext(), fields);
}
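// Resulting LLVM types by inheritance model (sketch; data member pointers
// never carry the NV-offset field, per the hasNVOffsetField check above):
//   single:      data  i32                 func  i8*
//   multiple:    data  i32                 func  { i8*, i32 }
//   virtual:     data  { i32, i32 }        func  { i8*, i32, i32 }
//   unspecified: data  { i32, i32, i32 }   func  { i8*, i32, i32, i32 }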
void MicrosoftCXXABI::
GetNullMemberPointerFields(const MemberPointerType *MPT,
llvm::SmallVectorImpl<llvm::Constant *> &fields) {
assert(fields.empty());
const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
MSInheritanceAttr::Spelling Inheritance = RD->getMSInheritanceModel();
if (MPT->isMemberFunctionPointer()) {
// FunctionPointerOrVirtualThunk
fields.push_back(llvm::Constant::getNullValue(CGM.VoidPtrTy));
} else {
if (RD->nullFieldOffsetIsZero())
fields.push_back(getZeroInt()); // FieldOffset
else
fields.push_back(getAllOnesInt()); // FieldOffset
}
if (MSInheritanceAttr::hasNVOffsetField(MPT->isMemberFunctionPointer(),
Inheritance))
fields.push_back(getZeroInt());
if (MSInheritanceAttr::hasVBPtrOffsetField(Inheritance))
fields.push_back(getZeroInt());
if (MSInheritanceAttr::hasVBTableOffsetField(Inheritance))
fields.push_back(getAllOnesInt());
}
llvm::Constant *
MicrosoftCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
llvm::SmallVector<llvm::Constant *, 4> fields;
GetNullMemberPointerFields(MPT, fields);
if (fields.size() == 1)
return fields[0];
llvm::Constant *Res = llvm::ConstantStruct::getAnon(fields);
assert(Res->getType() == ConvertMemberPointerType(MPT));
return Res;
}
llvm::Constant *
MicrosoftCXXABI::EmitFullMemberPointer(llvm::Constant *FirstField,
bool IsMemberFunction,
const CXXRecordDecl *RD,
CharUnits NonVirtualBaseAdjustment,
unsigned VBTableIndex) {
MSInheritanceAttr::Spelling Inheritance = RD->getMSInheritanceModel();
// Single inheritance class member pointers are represented as scalars instead
// of aggregates.
if (MSInheritanceAttr::hasOnlyOneField(IsMemberFunction, Inheritance))
return FirstField;
llvm::SmallVector<llvm::Constant *, 4> fields;
fields.push_back(FirstField);
if (MSInheritanceAttr::hasNVOffsetField(IsMemberFunction, Inheritance))
fields.push_back(llvm::ConstantInt::get(
CGM.IntTy, NonVirtualBaseAdjustment.getQuantity()));
if (MSInheritanceAttr::hasVBPtrOffsetField(Inheritance)) {
CharUnits Offs = CharUnits::Zero();
if (VBTableIndex)
Offs = getContext().getASTRecordLayout(RD).getVBPtrOffset();
fields.push_back(llvm::ConstantInt::get(CGM.IntTy, Offs.getQuantity()));
}
// The rest of the fields are adjusted by conversions to a more derived class.
if (MSInheritanceAttr::hasVBTableOffsetField(Inheritance))
fields.push_back(llvm::ConstantInt::get(CGM.IntTy, VBTableIndex));
return llvm::ConstantStruct::getAnon(fields);
}
llvm::Constant *
MicrosoftCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
CharUnits offset) {
- const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
+ return EmitMemberDataPointer(MPT->getMostRecentCXXRecordDecl(), offset);
+}
+
+llvm::Constant *MicrosoftCXXABI::EmitMemberDataPointer(const CXXRecordDecl *RD,
+ CharUnits offset) {
if (RD->getMSInheritanceModel() ==
MSInheritanceAttr::Keyword_virtual_inheritance)
offset -= getContext().getOffsetOfBaseWithVBPtr(RD);
llvm::Constant *FirstField =
llvm::ConstantInt::get(CGM.IntTy, offset.getQuantity());
return EmitFullMemberPointer(FirstField, /*IsMemberFunction=*/false, RD,
CharUnits::Zero(), /*VBTableIndex=*/0);
}
llvm::Constant *MicrosoftCXXABI::EmitMemberPointer(const APValue &MP,
QualType MPType) {
const MemberPointerType *DstTy = MPType->castAs<MemberPointerType>();
const ValueDecl *MPD = MP.getMemberPointerDecl();
if (!MPD)
return EmitNullMemberPointer(DstTy);
ASTContext &Ctx = getContext();
ArrayRef<const CXXRecordDecl *> MemberPointerPath = MP.getMemberPointerPath();
llvm::Constant *C;
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD)) {
C = EmitMemberFunctionPointer(MD);
} else {
+ // For a pointer to data member, start off with the offset of the field in
+ // the class in which it was declared, and convert from there if necessary.
+ // For indirect field decls, get the outermost anonymous field and use the
+ // parent class.
CharUnits FieldOffset = Ctx.toCharUnitsFromBits(Ctx.getFieldOffset(MPD));
- C = EmitMemberDataPointer(DstTy, FieldOffset);
+ const FieldDecl *FD = dyn_cast<FieldDecl>(MPD);
+ if (!FD)
+ FD = cast<FieldDecl>(*cast<IndirectFieldDecl>(MPD)->chain_begin());
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(FD->getParent());
+ RD = RD->getMostRecentNonInjectedDecl();
+ C = EmitMemberDataPointer(RD, FieldOffset);
}
if (!MemberPointerPath.empty()) {
const CXXRecordDecl *SrcRD = cast<CXXRecordDecl>(MPD->getDeclContext());
const Type *SrcRecTy = Ctx.getTypeDeclType(SrcRD).getTypePtr();
const MemberPointerType *SrcTy =
Ctx.getMemberPointerType(DstTy->getPointeeType(), SrcRecTy)
->castAs<MemberPointerType>();
bool DerivedMember = MP.isMemberPointerToDerivedMember();
SmallVector<const CXXBaseSpecifier *, 4> DerivedToBasePath;
const CXXRecordDecl *PrevRD = SrcRD;
for (const CXXRecordDecl *PathElem : MemberPointerPath) {
const CXXRecordDecl *Base = nullptr;
const CXXRecordDecl *Derived = nullptr;
if (DerivedMember) {
Base = PathElem;
Derived = PrevRD;
} else {
Base = PrevRD;
Derived = PathElem;
}
for (const CXXBaseSpecifier &BS : Derived->bases())
if (BS.getType()->getAsCXXRecordDecl()->getCanonicalDecl() ==
Base->getCanonicalDecl())
DerivedToBasePath.push_back(&BS);
PrevRD = PathElem;
}
assert(DerivedToBasePath.size() == MemberPointerPath.size());
CastKind CK = DerivedMember ? CK_DerivedToBaseMemberPointer
: CK_BaseToDerivedMemberPointer;
C = EmitMemberPointerConversion(SrcTy, DstTy, CK, DerivedToBasePath.begin(),
DerivedToBasePath.end(), C);
}
return C;
}
llvm::Constant *
MicrosoftCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
assert(MD->isInstance() && "Member function must not be static!");
CharUnits NonVirtualBaseAdjustment = CharUnits::Zero();
const CXXRecordDecl *RD = MD->getParent()->getMostRecentNonInjectedDecl();
CodeGenTypes &Types = CGM.getTypes();
unsigned VBTableIndex = 0;
llvm::Constant *FirstField;
const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
if (!MD->isVirtual()) {
llvm::Type *Ty;
// Check whether the function has a computable LLVM signature.
if (Types.isFuncTypeConvertible(FPT)) {
// The function has a computable LLVM signature; use the correct type.
Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
} else {
// Use an arbitrary non-function type to tell GetAddrOfFunction that the
// function type is incomplete.
Ty = CGM.PtrDiffTy;
}
FirstField = CGM.GetAddrOfFunction(MD, Ty);
} else {
auto &VTableContext = CGM.getMicrosoftVTableContext();
MethodVFTableLocation ML = VTableContext.getMethodVFTableLocation(MD);
FirstField = EmitVirtualMemPtrThunk(MD, ML);
// Include the vfptr adjustment if the method is in a non-primary vftable.
NonVirtualBaseAdjustment += ML.VFPtrOffset;
if (ML.VBase)
VBTableIndex = VTableContext.getVBTableIndex(RD, ML.VBase) * 4;
}
if (VBTableIndex == 0 &&
RD->getMSInheritanceModel() ==
MSInheritanceAttr::Keyword_virtual_inheritance)
NonVirtualBaseAdjustment -= getContext().getOffsetOfBaseWithVBPtr(RD);
// The rest of the fields are common with data member pointers.
FirstField = llvm::ConstantExpr::getBitCast(FirstField, CGM.VoidPtrTy);
return EmitFullMemberPointer(FirstField, /*IsMemberFunction=*/true, RD,
NonVirtualBaseAdjustment, VBTableIndex);
}
/// Member pointers are the same if they're either bitwise identical *or* both
/// null. Null-ness for function members is determined by the first field,
/// while for data member pointers we must compare all fields.
llvm::Value *
MicrosoftCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
llvm::Value *L,
llvm::Value *R,
const MemberPointerType *MPT,
bool Inequality) {
CGBuilderTy &Builder = CGF.Builder;
// Handle != comparisons by switching the sense of all boolean operations.
llvm::ICmpInst::Predicate Eq;
llvm::Instruction::BinaryOps And, Or;
if (Inequality) {
Eq = llvm::ICmpInst::ICMP_NE;
And = llvm::Instruction::Or;
Or = llvm::Instruction::And;
} else {
Eq = llvm::ICmpInst::ICMP_EQ;
And = llvm::Instruction::And;
Or = llvm::Instruction::Or;
}
// If this is a single field member pointer (single inheritance), this is a
// single icmp.
const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
MSInheritanceAttr::Spelling Inheritance = RD->getMSInheritanceModel();
if (MSInheritanceAttr::hasOnlyOneField(MPT->isMemberFunctionPointer(),
Inheritance))
return Builder.CreateICmp(Eq, L, R);
// Compare the first field.
llvm::Value *L0 = Builder.CreateExtractValue(L, 0, "lhs.0");
llvm::Value *R0 = Builder.CreateExtractValue(R, 0, "rhs.0");
llvm::Value *Cmp0 = Builder.CreateICmp(Eq, L0, R0, "memptr.cmp.first");
// Compare everything other than the first field.
llvm::Value *Res = nullptr;
llvm::StructType *LType = cast<llvm::StructType>(L->getType());
for (unsigned I = 1, E = LType->getNumElements(); I != E; ++I) {
llvm::Value *LF = Builder.CreateExtractValue(L, I);
llvm::Value *RF = Builder.CreateExtractValue(R, I);
llvm::Value *Cmp = Builder.CreateICmp(Eq, LF, RF, "memptr.cmp.rest");
if (Res)
Res = Builder.CreateBinOp(And, Res, Cmp);
else
Res = Cmp;
}
// Check if the first field is 0 if this is a function pointer.
if (MPT->isMemberFunctionPointer()) {
// (l1 == r1 && ...) || l0 == 0
llvm::Value *Zero = llvm::Constant::getNullValue(L0->getType());
llvm::Value *IsZero = Builder.CreateICmp(Eq, L0, Zero, "memptr.cmp.iszero");
Res = Builder.CreateBinOp(Or, Res, IsZero);
}
// Combine with the comparison of the first field, which must always be true
// for this comparison to succeed.
return Builder.CreateBinOp(And, Res, Cmp0, "memptr.cmp");
}
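// E.g. equality of two virtual-inheritance function member pointers
// { ptr, nvoff, vbidx } lowers to (sketch):
//   (l0 == r0) && ((l1 == r1 && l2 == r2) || l0 == null)
// with all senses flipped (==, &&, || become !=, ||, &&) for inequality.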
llvm::Value *
MicrosoftCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
llvm::Value *MemPtr,
const MemberPointerType *MPT) {
CGBuilderTy &Builder = CGF.Builder;
llvm::SmallVector<llvm::Constant *, 4> fields;
// We only need one field for member functions.
if (MPT->isMemberFunctionPointer())
fields.push_back(llvm::Constant::getNullValue(CGM.VoidPtrTy));
else
GetNullMemberPointerFields(MPT, fields);
assert(!fields.empty());
llvm::Value *FirstField = MemPtr;
if (MemPtr->getType()->isStructTy())
FirstField = Builder.CreateExtractValue(MemPtr, 0);
llvm::Value *Res = Builder.CreateICmpNE(FirstField, fields[0], "memptr.cmp0");
// For function member pointers, we only need to test the function pointer
// field; the other fields, if any, can be garbage.
if (MPT->isMemberFunctionPointer())
return Res;
// Otherwise, emit a series of compares and combine the results.
for (int I = 1, E = fields.size(); I < E; ++I) {
llvm::Value *Field = Builder.CreateExtractValue(MemPtr, I);
llvm::Value *Next = Builder.CreateICmpNE(Field, fields[I], "memptr.cmp");
Res = Builder.CreateOr(Res, Next, "memptr.tobool");
}
return Res;
}
bool MicrosoftCXXABI::MemberPointerConstantIsNull(const MemberPointerType *MPT,
llvm::Constant *Val) {
// Function pointers are null if the pointer in the first field is null.
if (MPT->isMemberFunctionPointer()) {
llvm::Constant *FirstField = Val->getType()->isStructTy() ?
Val->getAggregateElement(0U) : Val;
return FirstField->isNullValue();
}
// If it's not a function pointer and it's zero initializable, we can easily
// check zero.
if (isZeroInitializable(MPT) && Val->isNullValue())
return true;
// Otherwise, break down all the fields for comparison. Hopefully these
// little Constants are reused, while a big null struct might not be.
llvm::SmallVector<llvm::Constant *, 4> Fields;
GetNullMemberPointerFields(MPT, Fields);
if (Fields.size() == 1) {
assert(Val->getType()->isIntegerTy());
return Val == Fields[0];
}
unsigned I, E;
for (I = 0, E = Fields.size(); I != E; ++I) {
if (Val->getAggregateElement(I) != Fields[I])
break;
}
return I == E;
}
llvm::Value *
MicrosoftCXXABI::GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF,
Address This,
llvm::Value *VBPtrOffset,
llvm::Value *VBTableOffset,
llvm::Value **VBPtrOut) {
CGBuilderTy &Builder = CGF.Builder;
// Load the vbtable pointer from the vbptr in the instance.
This = Builder.CreateElementBitCast(This, CGM.Int8Ty);
llvm::Value *VBPtr =
Builder.CreateInBoundsGEP(This.getPointer(), VBPtrOffset, "vbptr");
if (VBPtrOut) *VBPtrOut = VBPtr;
VBPtr = Builder.CreateBitCast(VBPtr,
CGM.Int32Ty->getPointerTo(0)->getPointerTo(This.getAddressSpace()));
CharUnits VBPtrAlign;
if (auto CI = dyn_cast<llvm::ConstantInt>(VBPtrOffset)) {
VBPtrAlign = This.getAlignment().alignmentAtOffset(
CharUnits::fromQuantity(CI->getSExtValue()));
} else {
VBPtrAlign = CGF.getPointerAlign();
}
llvm::Value *VBTable = Builder.CreateAlignedLoad(VBPtr, VBPtrAlign, "vbtable");
// Translate from byte offset to table index; this improves analyzability.
llvm::Value *VBTableIndex = Builder.CreateAShr(
VBTableOffset, llvm::ConstantInt::get(VBTableOffset->getType(), 2),
"vbtindex", /*isExact=*/true);
// Load an i32 offset from the vb-table.
llvm::Value *VBaseOffs = Builder.CreateInBoundsGEP(VBTable, VBTableIndex);
VBaseOffs = Builder.CreateBitCast(VBaseOffs, CGM.Int32Ty->getPointerTo(0));
return Builder.CreateAlignedLoad(VBaseOffs, CharUnits::fromQuantity(4),
"vbase_offs");
}
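// C-like sketch of the lookup performed above:
//   char *vbptr      = (char *)this + VBPtrOffset;
//   int  *vbtable    = *(int **)vbptr;
//   int   vbase_offs = vbtable[VBTableOffset / 4];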
// Returns an adjusted base cast to i8*, since we do more address arithmetic on
// it.
llvm::Value *MicrosoftCXXABI::AdjustVirtualBase(
CodeGenFunction &CGF, const Expr *E, const CXXRecordDecl *RD,
Address Base, llvm::Value *VBTableOffset, llvm::Value *VBPtrOffset) {
CGBuilderTy &Builder = CGF.Builder;
Base = Builder.CreateElementBitCast(Base, CGM.Int8Ty);
llvm::BasicBlock *OriginalBB = nullptr;
llvm::BasicBlock *SkipAdjustBB = nullptr;
llvm::BasicBlock *VBaseAdjustBB = nullptr;
// In the unspecified inheritance model, there might not be a vbtable at all,
// in which case we need to skip the virtual base lookup. If there is a
// vbtable, the first entry is a no-op entry that gives back the original
// base, so look for a virtual base adjustment offset of zero.
if (VBPtrOffset) {
OriginalBB = Builder.GetInsertBlock();
VBaseAdjustBB = CGF.createBasicBlock("memptr.vadjust");
SkipAdjustBB = CGF.createBasicBlock("memptr.skip_vadjust");
llvm::Value *IsVirtual =
Builder.CreateICmpNE(VBTableOffset, getZeroInt(),
"memptr.is_vbase");
Builder.CreateCondBr(IsVirtual, VBaseAdjustBB, SkipAdjustBB);
CGF.EmitBlock(VBaseAdjustBB);
}
// If we weren't given a dynamic vbptr offset, RD should be complete and we'll
// know the vbptr offset.
if (!VBPtrOffset) {
CharUnits offs = CharUnits::Zero();
if (!RD->hasDefinition()) {
DiagnosticsEngine &Diags = CGF.CGM.getDiags();
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error,
"member pointer representation requires a "
"complete class type for %0 to perform this expression");
Diags.Report(E->getExprLoc(), DiagID) << RD << E->getSourceRange();
} else if (RD->getNumVBases())
offs = getContext().getASTRecordLayout(RD).getVBPtrOffset();
VBPtrOffset = llvm::ConstantInt::get(CGM.IntTy, offs.getQuantity());
}
llvm::Value *VBPtr = nullptr;
llvm::Value *VBaseOffs =
GetVBaseOffsetFromVBPtr(CGF, Base, VBPtrOffset, VBTableOffset, &VBPtr);
llvm::Value *AdjustedBase = Builder.CreateInBoundsGEP(VBPtr, VBaseOffs);
// Merge control flow with the case where we didn't have to adjust.
if (VBaseAdjustBB) {
Builder.CreateBr(SkipAdjustBB);
CGF.EmitBlock(SkipAdjustBB);
llvm::PHINode *Phi = Builder.CreatePHI(CGM.Int8PtrTy, 2, "memptr.base");
Phi->addIncoming(Base.getPointer(), OriginalBB);
Phi->addIncoming(AdjustedBase, VBaseAdjustBB);
return Phi;
}
return AdjustedBase;
}
llvm::Value *MicrosoftCXXABI::EmitMemberDataPointerAddress(
CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
const MemberPointerType *MPT) {
assert(MPT->isMemberDataPointer());
unsigned AS = Base.getAddressSpace();
llvm::Type *PType =
CGF.ConvertTypeForMem(MPT->getPointeeType())->getPointerTo(AS);
CGBuilderTy &Builder = CGF.Builder;
const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
MSInheritanceAttr::Spelling Inheritance = RD->getMSInheritanceModel();
// Extract the fields we need, regardless of model. We'll apply them if we
// have them.
llvm::Value *FieldOffset = MemPtr;
llvm::Value *VirtualBaseAdjustmentOffset = nullptr;
llvm::Value *VBPtrOffset = nullptr;
if (MemPtr->getType()->isStructTy()) {
// We need to extract values.
unsigned I = 0;
FieldOffset = Builder.CreateExtractValue(MemPtr, I++);
if (MSInheritanceAttr::hasVBPtrOffsetField(Inheritance))
VBPtrOffset = Builder.CreateExtractValue(MemPtr, I++);
if (MSInheritanceAttr::hasVBTableOffsetField(Inheritance))
VirtualBaseAdjustmentOffset = Builder.CreateExtractValue(MemPtr, I++);
}
llvm::Value *Addr;
if (VirtualBaseAdjustmentOffset) {
Addr = AdjustVirtualBase(CGF, E, RD, Base, VirtualBaseAdjustmentOffset,
VBPtrOffset);
} else {
Addr = Base.getPointer();
}
// Cast to char*.
Addr = Builder.CreateBitCast(Addr, CGF.Int8Ty->getPointerTo(AS));
// Apply the offset, which we assume is non-null.
Addr = Builder.CreateInBoundsGEP(Addr, FieldOffset, "memptr.offset");
// Cast the address to the appropriate pointer type, adopting the address
// space of the base pointer.
return Builder.CreateBitCast(Addr, PType);
}
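// C-like sketch of the emitted access for "base.*mp" (names refer to the
// decomposed member-pointer fields above):
//   char *p = VirtualBaseAdjustmentOffset
//                 ? AdjustVirtualBase(base, ...)   // vbtable lookup
//                 : (char *)base;
//   T *result = (T *)(p + FieldOffset);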
llvm::Value *
MicrosoftCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
const CastExpr *E,
llvm::Value *Src) {
assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
E->getCastKind() == CK_BaseToDerivedMemberPointer ||
E->getCastKind() == CK_ReinterpretMemberPointer);
// Use constant emission if we can.
if (isa<llvm::Constant>(Src))
return EmitMemberPointerConversion(E, cast<llvm::Constant>(Src));
// We may be adding or dropping fields from the member pointer, so we need
// both types and the inheritance models of both records.
const MemberPointerType *SrcTy =
E->getSubExpr()->getType()->castAs<MemberPointerType>();
const MemberPointerType *DstTy = E->getType()->castAs<MemberPointerType>();
bool IsFunc = SrcTy->isMemberFunctionPointer();
// If the classes use the same null representation, reinterpret_cast is a nop.
bool IsReinterpret = E->getCastKind() == CK_ReinterpretMemberPointer;
if (IsReinterpret && IsFunc)
return Src;
CXXRecordDecl *SrcRD = SrcTy->getMostRecentCXXRecordDecl();
CXXRecordDecl *DstRD = DstTy->getMostRecentCXXRecordDecl();
if (IsReinterpret &&
SrcRD->nullFieldOffsetIsZero() == DstRD->nullFieldOffsetIsZero())
return Src;
CGBuilderTy &Builder = CGF.Builder;
// Branch past the conversion if Src is null.
llvm::Value *IsNotNull = EmitMemberPointerIsNotNull(CGF, Src, SrcTy);
llvm::Constant *DstNull = EmitNullMemberPointer(DstTy);
// C++ 5.2.10p9: The null member pointer value is converted to the null member
// pointer value of the destination type.
if (IsReinterpret) {
// For reinterpret casts, sema ensures that src and dst are both functions
// or data and have the same size, which means the LLVM types should match.
assert(Src->getType() == DstNull->getType());
return Builder.CreateSelect(IsNotNull, Src, DstNull);
}
llvm::BasicBlock *OriginalBB = Builder.GetInsertBlock();
llvm::BasicBlock *ConvertBB = CGF.createBasicBlock("memptr.convert");
llvm::BasicBlock *ContinueBB = CGF.createBasicBlock("memptr.converted");
Builder.CreateCondBr(IsNotNull, ConvertBB, ContinueBB);
CGF.EmitBlock(ConvertBB);
llvm::Value *Dst = EmitNonNullMemberPointerConversion(
SrcTy, DstTy, E->getCastKind(), E->path_begin(), E->path_end(), Src,
Builder);
Builder.CreateBr(ContinueBB);
// In the continuation, choose between DstNull and Dst.
CGF.EmitBlock(ContinueBB);
llvm::PHINode *Phi = Builder.CreatePHI(DstNull->getType(), 2, "memptr.converted");
Phi->addIncoming(DstNull, OriginalBB);
Phi->addIncoming(Dst, ConvertBB);
return Phi;
}
llvm::Value *MicrosoftCXXABI::EmitNonNullMemberPointerConversion(
const MemberPointerType *SrcTy, const MemberPointerType *DstTy, CastKind CK,
CastExpr::path_const_iterator PathBegin,
CastExpr::path_const_iterator PathEnd, llvm::Value *Src,
CGBuilderTy &Builder) {
const CXXRecordDecl *SrcRD = SrcTy->getMostRecentCXXRecordDecl();
const CXXRecordDecl *DstRD = DstTy->getMostRecentCXXRecordDecl();
MSInheritanceAttr::Spelling SrcInheritance = SrcRD->getMSInheritanceModel();
MSInheritanceAttr::Spelling DstInheritance = DstRD->getMSInheritanceModel();
bool IsFunc = SrcTy->isMemberFunctionPointer();
bool IsConstant = isa<llvm::Constant>(Src);
// Decompose src.
llvm::Value *FirstField = Src;
llvm::Value *NonVirtualBaseAdjustment = getZeroInt();
llvm::Value *VirtualBaseAdjustmentOffset = getZeroInt();
llvm::Value *VBPtrOffset = getZeroInt();
if (!MSInheritanceAttr::hasOnlyOneField(IsFunc, SrcInheritance)) {
// We need to extract values.
unsigned I = 0;
FirstField = Builder.CreateExtractValue(Src, I++);
if (MSInheritanceAttr::hasNVOffsetField(IsFunc, SrcInheritance))
NonVirtualBaseAdjustment = Builder.CreateExtractValue(Src, I++);
if (MSInheritanceAttr::hasVBPtrOffsetField(SrcInheritance))
VBPtrOffset = Builder.CreateExtractValue(Src, I++);
if (MSInheritanceAttr::hasVBTableOffsetField(SrcInheritance))
VirtualBaseAdjustmentOffset = Builder.CreateExtractValue(Src, I++);
}
bool IsDerivedToBase = (CK == CK_DerivedToBaseMemberPointer);
const MemberPointerType *DerivedTy = IsDerivedToBase ? SrcTy : DstTy;
const CXXRecordDecl *DerivedClass = DerivedTy->getMostRecentCXXRecordDecl();
// For data pointers, we adjust the field offset directly. For functions, we
// have a separate field.
llvm::Value *&NVAdjustField = IsFunc ? NonVirtualBaseAdjustment : FirstField;
// The virtual inheritance model has a quirk: the virtual base table is always
// referenced when dereferencing a member pointer even if the member pointer
// is non-virtual. This is accounted for by adjusting the non-virtual offset
// to point backwards to the top of the MDC from the first VBase. Undo this
// adjustment to normalize the member pointer.
llvm::Value *SrcVBIndexEqZero =
Builder.CreateICmpEQ(VirtualBaseAdjustmentOffset, getZeroInt());
if (SrcInheritance == MSInheritanceAttr::Keyword_virtual_inheritance) {
if (int64_t SrcOffsetToFirstVBase =
getContext().getOffsetOfBaseWithVBPtr(SrcRD).getQuantity()) {
llvm::Value *UndoSrcAdjustment = Builder.CreateSelect(
SrcVBIndexEqZero,
llvm::ConstantInt::get(CGM.IntTy, SrcOffsetToFirstVBase),
getZeroInt());
NVAdjustField = Builder.CreateNSWAdd(NVAdjustField, UndoSrcAdjustment);
}
}
// A non-zero vbindex implies that we are dealing with a source member in a
// floating virtual base in addition to some non-virtual offset. If the
// vbindex is zero, we are dealing with a source that exists in a non-virtual,
// fixed base. The difference between these two cases is that the vbindex +
// nvoffset *always* point to the member regardless of what context they are
// evaluated in so long as the vbindex is adjusted. A member inside a fixed
// base requires explicit nv adjustment.
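// As an illustrative sketch (hypothetical types): given 'struct A { int a; };'
// and 'struct B : virtual A { int b; };', an 'int B::*' designating 'A::a'
// carries a non-zero vbindex, so its nvoffset is interpreted relative to the
// start of A wherever B's vbtable places it; an 'int B::*' designating 'B::b'
// has a zero vbindex, and its nvoffset is a fixed offset within B itself,
// which is why only that case receives the explicit nv adjustment below.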
llvm::Constant *BaseClassOffset = llvm::ConstantInt::get(
CGM.IntTy,
CGM.computeNonVirtualBaseClassOffset(DerivedClass, PathBegin, PathEnd)
.getQuantity());
llvm::Value *NVDisp;
if (IsDerivedToBase)
NVDisp = Builder.CreateNSWSub(NVAdjustField, BaseClassOffset, "adj");
else
NVDisp = Builder.CreateNSWAdd(NVAdjustField, BaseClassOffset, "adj");
NVAdjustField = Builder.CreateSelect(SrcVBIndexEqZero, NVDisp, getZeroInt());
// Update the vbindex to an appropriate value in the destination because
// SrcRD's vbtable might not be a strict prefix of the one in DstRD.
llvm::Value *DstVBIndexEqZero = SrcVBIndexEqZero;
if (MSInheritanceAttr::hasVBTableOffsetField(DstInheritance) &&
MSInheritanceAttr::hasVBTableOffsetField(SrcInheritance)) {
if (llvm::GlobalVariable *VDispMap =
getAddrOfVirtualDisplacementMap(SrcRD, DstRD)) {
llvm::Value *VBIndex = Builder.CreateExactUDiv(
VirtualBaseAdjustmentOffset, llvm::ConstantInt::get(CGM.IntTy, 4));
if (IsConstant) {
llvm::Constant *Mapping = VDispMap->getInitializer();
VirtualBaseAdjustmentOffset =
Mapping->getAggregateElement(cast<llvm::Constant>(VBIndex));
} else {
llvm::Value *Idxs[] = {getZeroInt(), VBIndex};
VirtualBaseAdjustmentOffset =
Builder.CreateAlignedLoad(Builder.CreateInBoundsGEP(VDispMap, Idxs),
CharUnits::fromQuantity(4));
}
DstVBIndexEqZero =
Builder.CreateICmpEQ(VirtualBaseAdjustmentOffset, getZeroInt());
}
}
// Set the VBPtrOffset to zero if the vbindex is zero. Otherwise, initialize
// it to the offset of the vbptr.
if (MSInheritanceAttr::hasVBPtrOffsetField(DstInheritance)) {
llvm::Value *DstVBPtrOffset = llvm::ConstantInt::get(
CGM.IntTy,
getContext().getASTRecordLayout(DstRD).getVBPtrOffset().getQuantity());
VBPtrOffset =
Builder.CreateSelect(DstVBIndexEqZero, getZeroInt(), DstVBPtrOffset);
}
// Likewise, apply a similar adjustment so that dereferencing the member
// pointer correctly accounts for the distance between the start of the first
// virtual base and the top of the MDC.
if (DstInheritance == MSInheritanceAttr::Keyword_virtual_inheritance) {
if (int64_t DstOffsetToFirstVBase =
getContext().getOffsetOfBaseWithVBPtr(DstRD).getQuantity()) {
llvm::Value *DoDstAdjustment = Builder.CreateSelect(
DstVBIndexEqZero,
llvm::ConstantInt::get(CGM.IntTy, DstOffsetToFirstVBase),
getZeroInt());
NVAdjustField = Builder.CreateNSWSub(NVAdjustField, DoDstAdjustment);
}
}
// Recompose dst from the null struct and the adjusted fields from src.
llvm::Value *Dst;
if (MSInheritanceAttr::hasOnlyOneField(IsFunc, DstInheritance)) {
Dst = FirstField;
} else {
Dst = llvm::UndefValue::get(ConvertMemberPointerType(DstTy));
unsigned Idx = 0;
Dst = Builder.CreateInsertValue(Dst, FirstField, Idx++);
if (MSInheritanceAttr::hasNVOffsetField(IsFunc, DstInheritance))
Dst = Builder.CreateInsertValue(Dst, NonVirtualBaseAdjustment, Idx++);
if (MSInheritanceAttr::hasVBPtrOffsetField(DstInheritance))
Dst = Builder.CreateInsertValue(Dst, VBPtrOffset, Idx++);
if (MSInheritanceAttr::hasVBTableOffsetField(DstInheritance))
Dst = Builder.CreateInsertValue(Dst, VirtualBaseAdjustmentOffset, Idx++);
}
return Dst;
}
llvm::Constant *
MicrosoftCXXABI::EmitMemberPointerConversion(const CastExpr *E,
llvm::Constant *Src) {
const MemberPointerType *SrcTy =
E->getSubExpr()->getType()->castAs<MemberPointerType>();
const MemberPointerType *DstTy = E->getType()->castAs<MemberPointerType>();
CastKind CK = E->getCastKind();
return EmitMemberPointerConversion(SrcTy, DstTy, CK, E->path_begin(),
E->path_end(), Src);
}
llvm::Constant *MicrosoftCXXABI::EmitMemberPointerConversion(
const MemberPointerType *SrcTy, const MemberPointerType *DstTy, CastKind CK,
CastExpr::path_const_iterator PathBegin,
CastExpr::path_const_iterator PathEnd, llvm::Constant *Src) {
assert(CK == CK_DerivedToBaseMemberPointer ||
CK == CK_BaseToDerivedMemberPointer ||
CK == CK_ReinterpretMemberPointer);
// If src is null, emit a new null for dst. We can't return src because dst
// might have a new representation.
if (MemberPointerConstantIsNull(SrcTy, Src))
return EmitNullMemberPointer(DstTy);
// We don't need to do anything for reinterpret_casts of non-null member
// pointers. We should only get here when the two type representations have
// the same size.
if (CK == CK_ReinterpretMemberPointer)
return Src;
CGBuilderTy Builder(CGM, CGM.getLLVMContext());
auto *Dst = cast<llvm::Constant>(EmitNonNullMemberPointerConversion(
SrcTy, DstTy, CK, PathBegin, PathEnd, Src, Builder));
return Dst;
}
CGCallee MicrosoftCXXABI::EmitLoadOfMemberFunctionPointer(
CodeGenFunction &CGF, const Expr *E, Address This,
llvm::Value *&ThisPtrForCall, llvm::Value *MemPtr,
const MemberPointerType *MPT) {
assert(MPT->isMemberFunctionPointer());
const FunctionProtoType *FPT =
MPT->getPointeeType()->castAs<FunctionProtoType>();
const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
CGBuilderTy &Builder = CGF.Builder;
MSInheritanceAttr::Spelling Inheritance = RD->getMSInheritanceModel();
// Extract the fields we need, regardless of model. We'll apply them if we
// have them.
llvm::Value *FunctionPointer = MemPtr;
llvm::Value *NonVirtualBaseAdjustment = nullptr;
llvm::Value *VirtualBaseAdjustmentOffset = nullptr;
llvm::Value *VBPtrOffset = nullptr;
if (MemPtr->getType()->isStructTy()) {
// We need to extract values.
unsigned I = 0;
FunctionPointer = Builder.CreateExtractValue(MemPtr, I++);
if (MSInheritanceAttr::hasNVOffsetField(MPT, Inheritance))
NonVirtualBaseAdjustment = Builder.CreateExtractValue(MemPtr, I++);
if (MSInheritanceAttr::hasVBPtrOffsetField(Inheritance))
VBPtrOffset = Builder.CreateExtractValue(MemPtr, I++);
if (MSInheritanceAttr::hasVBTableOffsetField(Inheritance))
VirtualBaseAdjustmentOffset = Builder.CreateExtractValue(MemPtr, I++);
}
if (VirtualBaseAdjustmentOffset) {
ThisPtrForCall = AdjustVirtualBase(CGF, E, RD, This,
VirtualBaseAdjustmentOffset, VBPtrOffset);
} else {
ThisPtrForCall = This.getPointer();
}
if (NonVirtualBaseAdjustment) {
// Apply the adjustment and cast back to the original struct type.
llvm::Value *Ptr = Builder.CreateBitCast(ThisPtrForCall, CGF.Int8PtrTy);
Ptr = Builder.CreateInBoundsGEP(Ptr, NonVirtualBaseAdjustment);
ThisPtrForCall = Builder.CreateBitCast(Ptr, ThisPtrForCall->getType(),
"this.adjusted");
}
FunctionPointer =
Builder.CreateBitCast(FunctionPointer, FTy->getPointerTo());
CGCallee Callee(FPT, FunctionPointer);
return Callee;
}
CGCXXABI *clang::CodeGen::CreateMicrosoftCXXABI(CodeGenModule &CGM) {
return new MicrosoftCXXABI(CGM);
}
// MS RTTI Overview:
// The run-time type information emitted by cl.exe contains 5 distinct types of
// structures. Many of them reference each other.
//
// TypeInfo: Static classes that are returned by typeid.
//
// CompleteObjectLocator: Referenced by vftables. They contain information
// required for dynamic casting, including OffsetFromTop. They also contain
// a reference to the TypeInfo for the type and a reference to the
// CompleteHierarchyDescriptor for the type.
//
// ClassHierarchyDescriptor: Contains information about a class hierarchy.
// Used during dynamic_cast to walk a class hierarchy. References a base
// class array and the size of said array.
//
// BaseClassArray: Contains a list of classes in a hierarchy. BaseClassArray is
// somewhat of a misnomer because the most derived class is also in the list
// as well as multiple copies of virtual bases (if they occur multiple times
// in the hierarchy). The BaseClassArray contains one BaseClassDescriptor for
// every path in the hierarchy, in pre-order depth first order. Note that we
// do not declare a specific llvm type for BaseClassArray; it's merely an array
// of BaseClassDescriptor pointers.
//
// BaseClassDescriptor: Contains information about a class in a class hierarchy.
// BaseClassDescriptor is also somewhat of a misnomer for the same reason that
// BaseClassArray is. It contains information about a class within a
// hierarchy, such as whether this base is ambiguous and its offset in the
// vbtable. The names of the BaseClassDescriptors have all of their fields
// mangled into them so they can be aggressively deduplicated by the linker.
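//
// As a hedged illustration (hypothetical types): given 'struct A { virtual
// ~A(); }; struct B { virtual ~B(); }; struct C : A, B {};', typeid(C) yields
// C's TypeInfo, each of C's two vftables references its own
// CompleteObjectLocator, and C's ClassHierarchyDescriptor references a
// BaseClassArray holding one BaseClassDescriptor for each of C, A and B,
// serialized in pre-order depth first order (C, A, B).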
static llvm::GlobalVariable *getTypeInfoVTable(CodeGenModule &CGM) {
StringRef MangledName("??_7type_info@@6B@");
if (auto VTable = CGM.getModule().getNamedGlobal(MangledName))
return VTable;
return new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
/*isConstant=*/true,
llvm::GlobalVariable::ExternalLinkage,
/*Initializer=*/nullptr, MangledName);
}
namespace {
/// A helper struct that stores information about a class in a class
/// hierarchy. The information stored in these structs is used during
/// the generation of ClassHierarchyDescriptors and BaseClassDescriptors.
// During RTTI creation, MSRTTIClasses are stored in a contiguous array with
// implicit depth first pre-order tree connectivity. getFirstChild and
// getNextChild allow us to walk the tree efficiently.
struct MSRTTIClass {
enum {
IsPrivateOnPath = 1 | 8,
IsAmbiguous = 2,
IsPrivate = 4,
IsVirtual = 16,
HasHierarchyDescriptor = 64
};
MSRTTIClass(const CXXRecordDecl *RD) : RD(RD) {}
uint32_t initialize(const MSRTTIClass *Parent,
const CXXBaseSpecifier *Specifier);
MSRTTIClass *getFirstChild() { return this + 1; }
static MSRTTIClass *getNextChild(MSRTTIClass *Child) {
return Child + 1 + Child->NumBases;
}
const CXXRecordDecl *RD, *VirtualRoot;
uint32_t Flags, NumBases, OffsetInVBase;
};
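// A minimal sketch of the implied layout (hypothetical hierarchy): for
// 'struct D : B1, B2' where 'struct B1 : A', serializeClassHierarchy yields
// the array {D, B1, A, B2} in pre-order; D's getFirstChild() is B1, and since
// B1's NumBases is 1, getNextChild(B1) skips past A and lands on B2.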
/// Recursively initialize the base class array.
uint32_t MSRTTIClass::initialize(const MSRTTIClass *Parent,
const CXXBaseSpecifier *Specifier) {
Flags = HasHierarchyDescriptor;
if (!Parent) {
VirtualRoot = nullptr;
OffsetInVBase = 0;
} else {
if (Specifier->getAccessSpecifier() != AS_public)
Flags |= IsPrivate | IsPrivateOnPath;
if (Specifier->isVirtual()) {
Flags |= IsVirtual;
VirtualRoot = RD;
OffsetInVBase = 0;
} else {
if (Parent->Flags & IsPrivateOnPath)
Flags |= IsPrivateOnPath;
VirtualRoot = Parent->VirtualRoot;
OffsetInVBase = Parent->OffsetInVBase + RD->getASTContext()
.getASTRecordLayout(Parent->RD).getBaseClassOffset(RD).getQuantity();
}
}
NumBases = 0;
MSRTTIClass *Child = getFirstChild();
for (const CXXBaseSpecifier &Base : RD->bases()) {
NumBases += Child->initialize(this, &Base) + 1;
Child = getNextChild(Child);
}
return NumBases;
}
static llvm::GlobalValue::LinkageTypes getLinkageForRTTI(QualType Ty) {
switch (Ty->getLinkage()) {
case NoLinkage:
case InternalLinkage:
case UniqueExternalLinkage:
return llvm::GlobalValue::InternalLinkage;
case VisibleNoLinkage:
case ModuleInternalLinkage:
case ModuleLinkage:
case ExternalLinkage:
return llvm::GlobalValue::LinkOnceODRLinkage;
}
llvm_unreachable("Invalid linkage!");
}
/// An ephemeral helper class for building MS RTTI types. It caches some
/// calls to the module and information about the most derived class in a
/// hierarchy.
struct MSRTTIBuilder {
enum {
HasBranchingHierarchy = 1,
HasVirtualBranchingHierarchy = 2,
HasAmbiguousBases = 4
};
MSRTTIBuilder(MicrosoftCXXABI &ABI, const CXXRecordDecl *RD)
: CGM(ABI.CGM), Context(CGM.getContext()),
VMContext(CGM.getLLVMContext()), Module(CGM.getModule()), RD(RD),
Linkage(getLinkageForRTTI(CGM.getContext().getTagDeclType(RD))),
ABI(ABI) {}
llvm::GlobalVariable *getBaseClassDescriptor(const MSRTTIClass &Classes);
llvm::GlobalVariable *
getBaseClassArray(SmallVectorImpl<MSRTTIClass> &Classes);
llvm::GlobalVariable *getClassHierarchyDescriptor();
llvm::GlobalVariable *getCompleteObjectLocator(const VPtrInfo &Info);
CodeGenModule &CGM;
ASTContext &Context;
llvm::LLVMContext &VMContext;
llvm::Module &Module;
const CXXRecordDecl *RD;
llvm::GlobalVariable::LinkageTypes Linkage;
MicrosoftCXXABI &ABI;
};
} // namespace
/// Recursively serializes a class hierarchy in pre-order depth first
/// order.
static void serializeClassHierarchy(SmallVectorImpl<MSRTTIClass> &Classes,
const CXXRecordDecl *RD) {
Classes.push_back(MSRTTIClass(RD));
for (const CXXBaseSpecifier &Base : RD->bases())
serializeClassHierarchy(Classes, Base.getType()->getAsCXXRecordDecl());
}
/// Find ambiguity among base classes.
static void
detectAmbiguousBases(SmallVectorImpl<MSRTTIClass> &Classes) {
llvm::SmallPtrSet<const CXXRecordDecl *, 8> VirtualBases;
llvm::SmallPtrSet<const CXXRecordDecl *, 8> UniqueBases;
llvm::SmallPtrSet<const CXXRecordDecl *, 8> AmbiguousBases;
for (MSRTTIClass *Class = &Classes.front(); Class <= &Classes.back();) {
if ((Class->Flags & MSRTTIClass::IsVirtual) &&
!VirtualBases.insert(Class->RD).second) {
Class = MSRTTIClass::getNextChild(Class);
continue;
}
if (!UniqueBases.insert(Class->RD).second)
AmbiguousBases.insert(Class->RD);
Class++;
}
if (AmbiguousBases.empty())
return;
for (MSRTTIClass &Class : Classes)
if (AmbiguousBases.count(Class.RD))
Class.Flags |= MSRTTIClass::IsAmbiguous;
}
llvm::GlobalVariable *MSRTTIBuilder::getClassHierarchyDescriptor() {
SmallString<256> MangledName;
{
llvm::raw_svector_ostream Out(MangledName);
ABI.getMangleContext().mangleCXXRTTIClassHierarchyDescriptor(RD, Out);
}
// Check to see if we've already declared this ClassHierarchyDescriptor.
if (auto CHD = Module.getNamedGlobal(MangledName))
return CHD;
// Serialize the class hierarchy and initialize the CHD Fields.
SmallVector<MSRTTIClass, 8> Classes;
serializeClassHierarchy(Classes, RD);
Classes.front().initialize(/*Parent=*/nullptr, /*Specifier=*/nullptr);
detectAmbiguousBases(Classes);
int Flags = 0;
for (auto Class : Classes) {
if (Class.RD->getNumBases() > 1)
Flags |= HasBranchingHierarchy;
// Note: cl.exe does not calculate "HasAmbiguousBases" correctly. We
// believe the field isn't actually used.
if (Class.Flags & MSRTTIClass::IsAmbiguous)
Flags |= HasAmbiguousBases;
}
if ((Flags & HasBranchingHierarchy) && RD->getNumVBases() != 0)
Flags |= HasVirtualBranchingHierarchy;
// These gep indices are used to get the address of the first element of the
// base class array.
llvm::Value *GEPIndices[] = {llvm::ConstantInt::get(CGM.IntTy, 0),
llvm::ConstantInt::get(CGM.IntTy, 0)};
// Forward-declare the class hierarchy descriptor
auto Type = ABI.getClassHierarchyDescriptorType();
auto CHD = new llvm::GlobalVariable(Module, Type, /*isConstant=*/true, Linkage,
/*Initializer=*/nullptr,
MangledName);
if (CHD->isWeakForLinker())
CHD->setComdat(CGM.getModule().getOrInsertComdat(CHD->getName()));
auto *Bases = getBaseClassArray(Classes);
// Initialize the base class ClassHierarchyDescriptor.
llvm::Constant *Fields[] = {
llvm::ConstantInt::get(CGM.IntTy, 0), // reserved by the runtime
llvm::ConstantInt::get(CGM.IntTy, Flags),
llvm::ConstantInt::get(CGM.IntTy, Classes.size()),
ABI.getImageRelativeConstant(llvm::ConstantExpr::getInBoundsGetElementPtr(
Bases->getValueType(), Bases,
llvm::ArrayRef<llvm::Value *>(GEPIndices))),
};
CHD->setInitializer(llvm::ConstantStruct::get(Type, Fields));
return CHD;
}
llvm::GlobalVariable *
MSRTTIBuilder::getBaseClassArray(SmallVectorImpl<MSRTTIClass> &Classes) {
SmallString<256> MangledName;
{
llvm::raw_svector_ostream Out(MangledName);
ABI.getMangleContext().mangleCXXRTTIBaseClassArray(RD, Out);
}
// Forward-declare the base class array.
// cl.exe pads the base class array with 1 (in 32 bit mode) or 4 (in 64 bit
// mode) bytes of padding. We provide a pointer sized amount of padding by
// adding +1 to Classes.size(). The sections have pointer alignment and are
// marked pick-any so it shouldn't matter.
llvm::Type *PtrType = ABI.getImageRelativeType(
ABI.getBaseClassDescriptorType()->getPointerTo());
auto *ArrType = llvm::ArrayType::get(PtrType, Classes.size() + 1);
auto *BCA =
new llvm::GlobalVariable(Module, ArrType,
/*isConstant=*/true, Linkage,
/*Initializer=*/nullptr, MangledName);
if (BCA->isWeakForLinker())
BCA->setComdat(CGM.getModule().getOrInsertComdat(BCA->getName()));
// Initialize the BaseClassArray.
SmallVector<llvm::Constant *, 8> BaseClassArrayData;
for (MSRTTIClass &Class : Classes)
BaseClassArrayData.push_back(
ABI.getImageRelativeConstant(getBaseClassDescriptor(Class)));
BaseClassArrayData.push_back(llvm::Constant::getNullValue(PtrType));
BCA->setInitializer(llvm::ConstantArray::get(ArrType, BaseClassArrayData));
return BCA;
}
llvm::GlobalVariable *
MSRTTIBuilder::getBaseClassDescriptor(const MSRTTIClass &Class) {
// Compute the fields for the BaseClassDescriptor. They are computed up front
// because they are mangled into the name of the object.
uint32_t OffsetInVBTable = 0;
int32_t VBPtrOffset = -1;
if (Class.VirtualRoot) {
auto &VTableContext = CGM.getMicrosoftVTableContext();
OffsetInVBTable = VTableContext.getVBTableIndex(RD, Class.VirtualRoot) * 4;
VBPtrOffset = Context.getASTRecordLayout(RD).getVBPtrOffset().getQuantity();
}
SmallString<256> MangledName;
{
llvm::raw_svector_ostream Out(MangledName);
ABI.getMangleContext().mangleCXXRTTIBaseClassDescriptor(
Class.RD, Class.OffsetInVBase, VBPtrOffset, OffsetInVBTable,
Class.Flags, Out);
}
// Check to see if we've already declared this object.
if (auto BCD = Module.getNamedGlobal(MangledName))
return BCD;
// Forward-declare the base class descriptor.
auto Type = ABI.getBaseClassDescriptorType();
auto BCD =
new llvm::GlobalVariable(Module, Type, /*isConstant=*/true, Linkage,
/*Initializer=*/nullptr, MangledName);
if (BCD->isWeakForLinker())
BCD->setComdat(CGM.getModule().getOrInsertComdat(BCD->getName()));
// Initialize the BaseClassDescriptor.
llvm::Constant *Fields[] = {
ABI.getImageRelativeConstant(
ABI.getAddrOfRTTIDescriptor(Context.getTypeDeclType(Class.RD))),
llvm::ConstantInt::get(CGM.IntTy, Class.NumBases),
llvm::ConstantInt::get(CGM.IntTy, Class.OffsetInVBase),
llvm::ConstantInt::get(CGM.IntTy, VBPtrOffset),
llvm::ConstantInt::get(CGM.IntTy, OffsetInVBTable),
llvm::ConstantInt::get(CGM.IntTy, Class.Flags),
ABI.getImageRelativeConstant(
MSRTTIBuilder(ABI, Class.RD).getClassHierarchyDescriptor()),
};
BCD->setInitializer(llvm::ConstantStruct::get(Type, Fields));
return BCD;
}
llvm::GlobalVariable *
MSRTTIBuilder::getCompleteObjectLocator(const VPtrInfo &Info) {
SmallString<256> MangledName;
{
llvm::raw_svector_ostream Out(MangledName);
ABI.getMangleContext().mangleCXXRTTICompleteObjectLocator(RD, Info.MangledPath, Out);
}
// Check to see if we've already computed this complete object locator.
if (auto COL = Module.getNamedGlobal(MangledName))
return COL;
// Compute the fields of the complete object locator.
int OffsetToTop = Info.FullOffsetInMDC.getQuantity();
int VFPtrOffset = 0;
// The offset includes the vtordisp if one exists.
if (const CXXRecordDecl *VBase = Info.getVBaseWithVPtr())
if (Context.getASTRecordLayout(RD)
.getVBaseOffsetsMap()
.find(VBase)
->second.hasVtorDisp())
VFPtrOffset = Info.NonVirtualOffset.getQuantity() + 4;
// Forward-declare the complete object locator.
llvm::StructType *Type = ABI.getCompleteObjectLocatorType();
auto COL = new llvm::GlobalVariable(Module, Type, /*isConstant=*/true, Linkage,
/*Initializer=*/nullptr, MangledName);
// Initialize the CompleteObjectLocator.
llvm::Constant *Fields[] = {
llvm::ConstantInt::get(CGM.IntTy, ABI.isImageRelative()),
llvm::ConstantInt::get(CGM.IntTy, OffsetToTop),
llvm::ConstantInt::get(CGM.IntTy, VFPtrOffset),
ABI.getImageRelativeConstant(
CGM.GetAddrOfRTTIDescriptor(Context.getTypeDeclType(RD))),
ABI.getImageRelativeConstant(getClassHierarchyDescriptor()),
ABI.getImageRelativeConstant(COL),
};
llvm::ArrayRef<llvm::Constant *> FieldsRef(Fields);
if (!ABI.isImageRelative())
FieldsRef = FieldsRef.drop_back();
COL->setInitializer(llvm::ConstantStruct::get(Type, FieldsRef));
if (COL->isWeakForLinker())
COL->setComdat(CGM.getModule().getOrInsertComdat(COL->getName()));
return COL;
}
static QualType decomposeTypeForEH(ASTContext &Context, QualType T,
bool &IsConst, bool &IsVolatile,
bool &IsUnaligned) {
T = Context.getExceptionObjectType(T);
// C++14 [except.handle]p3:
// A handler is a match for an exception object of type E if [...]
// - the handler is of type cv T or const T& where T is a pointer type and
// E is a pointer type that can be converted to T by [...]
// - a qualification conversion
IsConst = false;
IsVolatile = false;
IsUnaligned = false;
QualType PointeeType = T->getPointeeType();
if (!PointeeType.isNull()) {
IsConst = PointeeType.isConstQualified();
IsVolatile = PointeeType.isVolatileQualified();
IsUnaligned = PointeeType.getQualifiers().hasUnaligned();
}
// Member pointer types like "const int A::*" are represented by having RTTI
// for "int A::*" and separately storing the const qualifier.
if (const auto *MPTy = T->getAs<MemberPointerType>())
T = Context.getMemberPointerType(PointeeType.getUnqualifiedType(),
MPTy->getClass());
// Pointer types like "const int * const *" are represented by having RTTI
// for "const int **" and separately storing the const qualifier.
if (T->isPointerType())
T = Context.getPointerType(PointeeType.getUnqualifiedType());
return T;
}
CatchTypeInfo
MicrosoftCXXABI::getAddrOfCXXCatchHandlerType(QualType Type,
QualType CatchHandlerType) {
// TypeDescriptors for exceptions never have qualified pointer types;
// qualifiers are stored separately in order to support qualification
// conversions.
bool IsConst, IsVolatile, IsUnaligned;
Type =
decomposeTypeForEH(getContext(), Type, IsConst, IsVolatile, IsUnaligned);
bool IsReference = CatchHandlerType->isReferenceType();
uint32_t Flags = 0;
if (IsConst)
Flags |= 1;
if (IsVolatile)
Flags |= 2;
if (IsUnaligned)
Flags |= 4;
if (IsReference)
Flags |= 8;
return CatchTypeInfo{getAddrOfRTTIDescriptor(Type)->stripPointerCasts(),
Flags};
}
/// Gets a TypeDescriptor. Returns an llvm::Constant * rather than an
/// llvm::GlobalVariable * because different type descriptors have different
/// types, and need to be abstracted. They are abstracted by casting the
/// address to an Int8PtrTy.
llvm::Constant *MicrosoftCXXABI::getAddrOfRTTIDescriptor(QualType Type) {
SmallString<256> MangledName;
{
llvm::raw_svector_ostream Out(MangledName);
getMangleContext().mangleCXXRTTI(Type, Out);
}
// Check to see if we've already declared this TypeDescriptor.
if (llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(MangledName))
return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
// Note for the future: If we would ever like to do deferred emission of
// RTTI, check whether emitting vtables opportunistically needs any adjustment.
// Compute the fields for the TypeDescriptor.
SmallString<256> TypeInfoString;
{
llvm::raw_svector_ostream Out(TypeInfoString);
getMangleContext().mangleCXXRTTIName(Type, Out);
}
// Declare and initialize the TypeDescriptor.
llvm::Constant *Fields[] = {
getTypeInfoVTable(CGM), // VFPtr
llvm::ConstantPointerNull::get(CGM.Int8PtrTy), // Runtime data
llvm::ConstantDataArray::getString(CGM.getLLVMContext(), TypeInfoString)};
llvm::StructType *TypeDescriptorType =
getTypeDescriptorType(TypeInfoString);
auto *Var = new llvm::GlobalVariable(
CGM.getModule(), TypeDescriptorType, /*isConstant=*/false,
getLinkageForRTTI(Type),
llvm::ConstantStruct::get(TypeDescriptorType, Fields),
MangledName);
if (Var->isWeakForLinker())
Var->setComdat(CGM.getModule().getOrInsertComdat(Var->getName()));
return llvm::ConstantExpr::getBitCast(Var, CGM.Int8PtrTy);
}
/// Gets or creates a Microsoft CompleteObjectLocator.
llvm::GlobalVariable *
MicrosoftCXXABI::getMSCompleteObjectLocator(const CXXRecordDecl *RD,
const VPtrInfo &Info) {
return MSRTTIBuilder(*this, RD).getCompleteObjectLocator(Info);
}
void MicrosoftCXXABI::emitCXXStructor(GlobalDecl GD) {
if (auto *ctor = dyn_cast<CXXConstructorDecl>(GD.getDecl())) {
// There are no constructor variants; always emit the complete constructor.
llvm::Function *Fn =
CGM.codegenCXXStructor(GD.getWithCtorType(Ctor_Complete));
CGM.maybeSetTrivialComdat(*ctor, *Fn);
return;
}
auto *dtor = cast<CXXDestructorDecl>(GD.getDecl());
// Emit the base destructor if the base and complete (vbase) destructors are
// equivalent. This effectively implements -mconstructor-aliases as part of
// the ABI.
if (GD.getDtorType() == Dtor_Complete &&
dtor->getParent()->getNumVBases() == 0)
GD = GD.getWithDtorType(Dtor_Base);
// The base destructor is equivalent to the base destructor of its
// base class if there is exactly one non-virtual base class with a
// non-trivial destructor, there are no fields with a non-trivial
// destructor, and the body of the destructor is trivial.
if (GD.getDtorType() == Dtor_Base && !CGM.TryEmitBaseDestructorAsAlias(dtor))
return;
llvm::Function *Fn = CGM.codegenCXXStructor(GD);
if (Fn->isWeakForLinker())
Fn->setComdat(CGM.getModule().getOrInsertComdat(Fn->getName()));
}
llvm::Function *
MicrosoftCXXABI::getAddrOfCXXCtorClosure(const CXXConstructorDecl *CD,
CXXCtorType CT) {
assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);
// Calculate the mangled name.
SmallString<256> ThunkName;
llvm::raw_svector_ostream Out(ThunkName);
getMangleContext().mangleCXXCtor(CD, CT, Out);
// If the thunk has been generated previously, just return it.
if (llvm::GlobalValue *GV = CGM.getModule().getNamedValue(ThunkName))
return cast<llvm::Function>(GV);
// Create the llvm::Function.
const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeMSCtorClosure(CD, CT);
llvm::FunctionType *ThunkTy = CGM.getTypes().GetFunctionType(FnInfo);
const CXXRecordDecl *RD = CD->getParent();
QualType RecordTy = getContext().getRecordType(RD);
llvm::Function *ThunkFn = llvm::Function::Create(
ThunkTy, getLinkageForRTTI(RecordTy), ThunkName.str(), &CGM.getModule());
ThunkFn->setCallingConv(static_cast<llvm::CallingConv::ID>(
FnInfo.getEffectiveCallingConvention()));
if (ThunkFn->isWeakForLinker())
ThunkFn->setComdat(CGM.getModule().getOrInsertComdat(ThunkFn->getName()));
bool IsCopy = CT == Ctor_CopyingClosure;
// Start codegen.
CodeGenFunction CGF(CGM);
CGF.CurGD = GlobalDecl(CD, Ctor_Complete);
// Build FunctionArgs.
FunctionArgList FunctionArgs;
// A constructor always starts with a 'this' pointer as its first argument.
buildThisParam(CGF, FunctionArgs);
// Following the 'this' pointer is a reference to the source object that we
// are copying from.
ImplicitParamDecl SrcParam(
getContext(), /*DC=*/nullptr, SourceLocation(),
&getContext().Idents.get("src"),
getContext().getLValueReferenceType(RecordTy,
/*SpelledAsLValue=*/true),
ImplicitParamDecl::Other);
if (IsCopy)
FunctionArgs.push_back(&SrcParam);
// Constructors for classes which utilize virtual bases have an additional
// parameter which indicates whether or not it is being delegated to by a more
// derived constructor.
ImplicitParamDecl IsMostDerived(getContext(), /*DC=*/nullptr,
SourceLocation(),
&getContext().Idents.get("is_most_derived"),
getContext().IntTy, ImplicitParamDecl::Other);
// Only add the parameter to the list if the class has virtual bases.
if (RD->getNumVBases() > 0)
FunctionArgs.push_back(&IsMostDerived);
// Start defining the function.
auto NL = ApplyDebugLocation::CreateEmpty(CGF);
CGF.StartFunction(GlobalDecl(), FnInfo.getReturnType(), ThunkFn, FnInfo,
FunctionArgs, CD->getLocation(), SourceLocation());
// Create a scope with an artificial location for the body of this function.
auto AL = ApplyDebugLocation::CreateArtificial(CGF);
setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));
llvm::Value *This = getThisValue(CGF);
llvm::Value *SrcVal =
IsCopy ? CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&SrcParam), "src")
: nullptr;
CallArgList Args;
// Push the this ptr.
Args.add(RValue::get(This), CD->getThisType());
// Push the src ptr.
if (SrcVal)
Args.add(RValue::get(SrcVal), SrcParam.getType());
// Add the rest of the default arguments.
SmallVector<const Stmt *, 4> ArgVec;
ArrayRef<ParmVarDecl *> params = CD->parameters().drop_front(IsCopy ? 1 : 0);
for (const ParmVarDecl *PD : params) {
assert(PD->hasDefaultArg() && "ctor closure lacks default args");
ArgVec.push_back(PD->getDefaultArg());
}
CodeGenFunction::RunCleanupsScope Cleanups(CGF);
const auto *FPT = CD->getType()->castAs<FunctionProtoType>();
CGF.EmitCallArgs(Args, FPT, llvm::makeArrayRef(ArgVec), CD, IsCopy ? 1 : 0);
// Insert any ABI-specific implicit constructor arguments.
AddedStructorArgs ExtraArgs =
addImplicitConstructorArgs(CGF, CD, Ctor_Complete,
/*ForVirtualBase=*/false,
/*Delegating=*/false, Args);
// Call the constructor with our arguments.
llvm::Constant *CalleePtr =
CGM.getAddrOfCXXStructor(GlobalDecl(CD, Ctor_Complete));
CGCallee Callee =
CGCallee::forDirect(CalleePtr, GlobalDecl(CD, Ctor_Complete));
const CGFunctionInfo &CalleeInfo = CGM.getTypes().arrangeCXXConstructorCall(
Args, CD, Ctor_Complete, ExtraArgs.Prefix, ExtraArgs.Suffix);
CGF.EmitCall(CalleeInfo, Callee, ReturnValueSlot(), Args);
Cleanups.ForceCleanup();
// Emit the ret instruction, and remove any temporary instructions created
// for the aid of CodeGen.
CGF.FinishFunction(SourceLocation());
return ThunkFn;
}
llvm::Constant *MicrosoftCXXABI::getCatchableType(QualType T,
uint32_t NVOffset,
int32_t VBPtrOffset,
uint32_t VBIndex) {
assert(!T->isReferenceType());
CXXRecordDecl *RD = T->getAsCXXRecordDecl();
const CXXConstructorDecl *CD =
RD ? CGM.getContext().getCopyConstructorForExceptionObject(RD) : nullptr;
CXXCtorType CT = Ctor_Complete;
if (CD)
if (!hasDefaultCXXMethodCC(getContext(), CD) || CD->getNumParams() != 1)
CT = Ctor_CopyingClosure;
uint32_t Size = getContext().getTypeSizeInChars(T).getQuantity();
SmallString<256> MangledName;
{
llvm::raw_svector_ostream Out(MangledName);
getMangleContext().mangleCXXCatchableType(T, CD, CT, Size, NVOffset,
VBPtrOffset, VBIndex, Out);
}
if (llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(MangledName))
return getImageRelativeConstant(GV);
// The TypeDescriptor is used by the runtime to determine if a catch handler
// is appropriate for the exception object.
llvm::Constant *TD = getImageRelativeConstant(getAddrOfRTTIDescriptor(T));
// The runtime is responsible for calling the copy constructor if the
// exception is caught by value.
llvm::Constant *CopyCtor;
if (CD) {
if (CT == Ctor_CopyingClosure)
CopyCtor = getAddrOfCXXCtorClosure(CD, Ctor_CopyingClosure);
else
CopyCtor = CGM.getAddrOfCXXStructor(GlobalDecl(CD, Ctor_Complete));
CopyCtor = llvm::ConstantExpr::getBitCast(CopyCtor, CGM.Int8PtrTy);
} else {
CopyCtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);
}
CopyCtor = getImageRelativeConstant(CopyCtor);
bool IsScalar = !RD;
bool HasVirtualBases = false;
bool IsStdBadAlloc = false; // std::bad_alloc is special for some reason.
QualType PointeeType = T;
if (T->isPointerType())
PointeeType = T->getPointeeType();
if (const CXXRecordDecl *RD = PointeeType->getAsCXXRecordDecl()) {
HasVirtualBases = RD->getNumVBases() > 0;
if (IdentifierInfo *II = RD->getIdentifier())
IsStdBadAlloc = II->isStr("bad_alloc") && RD->isInStdNamespace();
}
// Encode the relevant CatchableType properties into the Flags bitfield.
// FIXME: Figure out how bits 2 or 8 can get set.
uint32_t Flags = 0;
if (IsScalar)
Flags |= 1;
if (HasVirtualBases)
Flags |= 4;
if (IsStdBadAlloc)
Flags |= 16;
llvm::Constant *Fields[] = {
llvm::ConstantInt::get(CGM.IntTy, Flags), // Flags
TD, // TypeDescriptor
llvm::ConstantInt::get(CGM.IntTy, NVOffset), // NonVirtualAdjustment
llvm::ConstantInt::get(CGM.IntTy, VBPtrOffset), // OffsetToVBPtr
llvm::ConstantInt::get(CGM.IntTy, VBIndex), // VBTableIndex
llvm::ConstantInt::get(CGM.IntTy, Size), // Size
CopyCtor // CopyCtor
};
llvm::StructType *CTType = getCatchableTypeType();
auto *GV = new llvm::GlobalVariable(
CGM.getModule(), CTType, /*isConstant=*/true, getLinkageForRTTI(T),
llvm::ConstantStruct::get(CTType, Fields), MangledName);
GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
GV->setSection(".xdata");
if (GV->isWeakForLinker())
GV->setComdat(CGM.getModule().getOrInsertComdat(GV->getName()));
return getImageRelativeConstant(GV);
}
llvm::GlobalVariable *MicrosoftCXXABI::getCatchableTypeArray(QualType T) {
assert(!T->isReferenceType());
// See if we've already generated a CatchableTypeArray for this type before.
llvm::GlobalVariable *&CTA = CatchableTypeArrays[T];
if (CTA)
return CTA;
// Ensure that we don't have duplicate entries in our CatchableTypeArray by
// using a SmallSetVector. Duplicates may arise due to virtual bases
// occurring more than once in the hierarchy.
llvm::SmallSetVector<llvm::Constant *, 2> CatchableTypes;
// C++14 [except.handle]p3:
// A handler is a match for an exception object of type E if [...]
// - the handler is of type cv T or cv T& and T is an unambiguous public
// base class of E, or
// - the handler is of type cv T or const T& where T is a pointer type and
// E is a pointer type that can be converted to T by [...]
// - a standard pointer conversion (4.10) not involving conversions to
// pointers to private or protected or ambiguous classes
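// As a hedged example: for 'throw (Derived *)p', where Derived publicly and
// unambiguously derives from Base, the CatchableTypeArray receives entries
// for 'Derived *', 'Base *', and (see [conv.ptr] below) 'void *', so a
// handler taking any of those pointer types matches.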
const CXXRecordDecl *MostDerivedClass = nullptr;
bool IsPointer = T->isPointerType();
if (IsPointer)
MostDerivedClass = T->getPointeeType()->getAsCXXRecordDecl();
else
MostDerivedClass = T->getAsCXXRecordDecl();
// Collect all the unambiguous public bases of the MostDerivedClass.
if (MostDerivedClass) {
const ASTContext &Context = getContext();
const ASTRecordLayout &MostDerivedLayout =
Context.getASTRecordLayout(MostDerivedClass);
MicrosoftVTableContext &VTableContext = CGM.getMicrosoftVTableContext();
SmallVector<MSRTTIClass, 8> Classes;
serializeClassHierarchy(Classes, MostDerivedClass);
Classes.front().initialize(/*Parent=*/nullptr, /*Specifier=*/nullptr);
detectAmbiguousBases(Classes);
for (const MSRTTIClass &Class : Classes) {
// Skip any ambiguous or private bases.
if (Class.Flags &
(MSRTTIClass::IsPrivateOnPath | MSRTTIClass::IsAmbiguous))
continue;
// Write down how to convert from a derived pointer to a base pointer.
uint32_t OffsetInVBTable = 0;
int32_t VBPtrOffset = -1;
if (Class.VirtualRoot) {
OffsetInVBTable =
VTableContext.getVBTableIndex(MostDerivedClass, Class.VirtualRoot)*4;
VBPtrOffset = MostDerivedLayout.getVBPtrOffset().getQuantity();
}
// Turn our record back into a pointer if the exception object is a
// pointer.
QualType RTTITy = QualType(Class.RD->getTypeForDecl(), 0);
if (IsPointer)
RTTITy = Context.getPointerType(RTTITy);
CatchableTypes.insert(getCatchableType(RTTITy, Class.OffsetInVBase,
VBPtrOffset, OffsetInVBTable));
}
}
// C++14 [except.handle]p3:
// A handler is a match for an exception object of type E if
// - The handler is of type cv T or cv T& and E and T are the same type
// (ignoring the top-level cv-qualifiers)
CatchableTypes.insert(getCatchableType(T));
// C++14 [except.handle]p3:
// A handler is a match for an exception object of type E if
// - the handler is of type cv T or const T& where T is a pointer type and
// E is a pointer type that can be converted to T by [...]
// - a standard pointer conversion (4.10) not involving conversions to
// pointers to private or protected or ambiguous classes
//
// C++14 [conv.ptr]p2:
// A prvalue of type "pointer to cv T," where T is an object type, can be
// converted to a prvalue of type "pointer to cv void".
if (IsPointer && T->getPointeeType()->isObjectType())
CatchableTypes.insert(getCatchableType(getContext().VoidPtrTy));
// C++14 [except.handle]p3:
// A handler is a match for an exception object of type E if [...]
// - the handler is of type cv T or const T& where T is a pointer or
// pointer to member type and E is std::nullptr_t.
//
// We cannot possibly list all possible pointer types here, making this
// implementation incompatible with the standard. However, MSVC includes an
// entry for pointer-to-void in this case. Let's do the same.
if (T->isNullPtrType())
CatchableTypes.insert(getCatchableType(getContext().VoidPtrTy));
uint32_t NumEntries = CatchableTypes.size();
llvm::Type *CTType =
getImageRelativeType(getCatchableTypeType()->getPointerTo());
llvm::ArrayType *AT = llvm::ArrayType::get(CTType, NumEntries);
llvm::StructType *CTAType = getCatchableTypeArrayType(NumEntries);
llvm::Constant *Fields[] = {
llvm::ConstantInt::get(CGM.IntTy, NumEntries), // NumEntries
llvm::ConstantArray::get(
AT, llvm::makeArrayRef(CatchableTypes.begin(),
CatchableTypes.end())) // CatchableTypes
};
SmallString<256> MangledName;
{
llvm::raw_svector_ostream Out(MangledName);
getMangleContext().mangleCXXCatchableTypeArray(T, NumEntries, Out);
}
CTA = new llvm::GlobalVariable(
CGM.getModule(), CTAType, /*isConstant=*/true, getLinkageForRTTI(T),
llvm::ConstantStruct::get(CTAType, Fields), MangledName);
CTA->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
CTA->setSection(".xdata");
if (CTA->isWeakForLinker())
CTA->setComdat(CGM.getModule().getOrInsertComdat(CTA->getName()));
return CTA;
}
llvm::GlobalVariable *MicrosoftCXXABI::getThrowInfo(QualType T) {
bool IsConst, IsVolatile, IsUnaligned;
T = decomposeTypeForEH(getContext(), T, IsConst, IsVolatile, IsUnaligned);
// The CatchableTypeArray enumerates the various (CV-unqualified) types that
// the exception object may be caught as.
llvm::GlobalVariable *CTA = getCatchableTypeArray(T);
// The first field in a CatchableTypeArray is the number of CatchableTypes.
// This is used as a component of the mangled name which means that we need to
// know what it is in order to see if we have previously generated the
// ThrowInfo.
uint32_t NumEntries =
cast<llvm::ConstantInt>(CTA->getInitializer()->getAggregateElement(0U))
->getLimitedValue();
SmallString<256> MangledName;
{
llvm::raw_svector_ostream Out(MangledName);
getMangleContext().mangleCXXThrowInfo(T, IsConst, IsVolatile, IsUnaligned,
NumEntries, Out);
}
// Reuse a previously generated ThrowInfo if we have generated an appropriate
// one before.
if (llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(MangledName))
return GV;
// The RTTI TypeDescriptor uses an unqualified type but catch clauses must
// be at least as CV qualified. Encode this requirement into the Flags
// bitfield.
uint32_t Flags = 0;
if (IsConst)
Flags |= 1;
if (IsVolatile)
Flags |= 2;
if (IsUnaligned)
Flags |= 4;
// The cleanup-function (a destructor) must be called when the exception
// object's lifetime ends.
llvm::Constant *CleanupFn = llvm::Constant::getNullValue(CGM.Int8PtrTy);
if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
if (CXXDestructorDecl *DtorD = RD->getDestructor())
if (!DtorD->isTrivial())
CleanupFn = llvm::ConstantExpr::getBitCast(
CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete)),
CGM.Int8PtrTy);
// This is unused as far as we can tell; initialize it to null.
llvm::Constant *ForwardCompat =
getImageRelativeConstant(llvm::Constant::getNullValue(CGM.Int8PtrTy));
llvm::Constant *PointerToCatchableTypes = getImageRelativeConstant(
llvm::ConstantExpr::getBitCast(CTA, CGM.Int8PtrTy));
llvm::StructType *TIType = getThrowInfoType();
llvm::Constant *Fields[] = {
llvm::ConstantInt::get(CGM.IntTy, Flags), // Flags
getImageRelativeConstant(CleanupFn), // CleanupFn
ForwardCompat, // ForwardCompat
PointerToCatchableTypes // CatchableTypeArray
};
auto *GV = new llvm::GlobalVariable(
CGM.getModule(), TIType, /*isConstant=*/true, getLinkageForRTTI(T),
llvm::ConstantStruct::get(TIType, Fields), StringRef(MangledName));
GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
GV->setSection(".xdata");
if (GV->isWeakForLinker())
GV->setComdat(CGM.getModule().getOrInsertComdat(GV->getName()));
return GV;
}
void MicrosoftCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
const Expr *SubExpr = E->getSubExpr();
QualType ThrowType = SubExpr->getType();
// The exception object lives on the stack and its address is passed to the
// runtime function.
Address AI = CGF.CreateMemTemp(ThrowType);
CGF.EmitAnyExprToMem(SubExpr, AI, ThrowType.getQualifiers(),
/*IsInit=*/true);
// The so-called ThrowInfo is used to describe how the exception object may be
// caught.
llvm::GlobalVariable *TI = getThrowInfo(ThrowType);
// Call into the runtime to throw the exception.
llvm::Value *Args[] = {
CGF.Builder.CreateBitCast(AI.getPointer(), CGM.Int8PtrTy),
TI
};
CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(), Args);
}
std::pair<llvm::Value *, const CXXRecordDecl *>
MicrosoftCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
const CXXRecordDecl *RD) {
std::tie(This, std::ignore, RD) =
performBaseAdjustment(CGF, This, QualType(RD->getTypeForDecl(), 0));
return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD};
}
Index: stable/12/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp
===================================================================
--- stable/12/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp (revision 356464)
+++ stable/12/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp (revision 356465)
@@ -1,695 +1,695 @@
//===--- ARM.cpp - ARM (not AArch64) Helpers for Tools ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "ARM.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/TargetParser.h"
using namespace clang::driver;
using namespace clang::driver::tools;
using namespace clang;
using namespace llvm::opt;
// Get SubArch (vN).
int arm::getARMSubArchVersionNumber(const llvm::Triple &Triple) {
llvm::StringRef Arch = Triple.getArchName();
return llvm::ARM::parseArchVersion(Arch);
}
// True if M-profile.
bool arm::isARMMProfile(const llvm::Triple &Triple) {
llvm::StringRef Arch = Triple.getArchName();
return llvm::ARM::parseArchProfile(Arch) == llvm::ARM::ProfileKind::M;
}
// Get Arch/CPU from args.
void arm::getARMArchCPUFromArgs(const ArgList &Args, llvm::StringRef &Arch,
llvm::StringRef &CPU, bool FromAs) {
if (const Arg *A = Args.getLastArg(clang::driver::options::OPT_mcpu_EQ))
CPU = A->getValue();
if (const Arg *A = Args.getLastArg(options::OPT_march_EQ))
Arch = A->getValue();
if (!FromAs)
return;
for (const Arg *A :
Args.filtered(options::OPT_Wa_COMMA, options::OPT_Xassembler)) {
StringRef Value = A->getValue();
if (Value.startswith("-mcpu="))
CPU = Value.substr(6);
if (Value.startswith("-march="))
Arch = Value.substr(7);
}
}
// Handle -mhwdiv=.
// FIXME: Use ARMTargetParser.
static void getARMHWDivFeatures(const Driver &D, const Arg *A,
const ArgList &Args, StringRef HWDiv,
std::vector<StringRef> &Features) {
unsigned HWDivID = llvm::ARM::parseHWDiv(HWDiv);
if (!llvm::ARM::getHWDivFeatures(HWDivID, Features))
D.Diag(clang::diag::err_drv_clang_unsupported) << A->getAsString(Args);
}
// Handle -mfpu=.
static void getARMFPUFeatures(const Driver &D, const Arg *A,
const ArgList &Args, StringRef FPU,
std::vector<StringRef> &Features) {
unsigned FPUID = llvm::ARM::parseFPU(FPU);
if (!llvm::ARM::getFPUFeatures(FPUID, Features))
D.Diag(clang::diag::err_drv_clang_unsupported) << A->getAsString(Args);
}
// Decode ARM features from string like +[no]featureA+[no]featureB+...
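// For instance, -march=armv8-a+crypto+nofp hands this routine the suffix
// "crypto+nofp", enabling the crypto extension and disabling the FPU.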
static bool DecodeARMFeatures(const Driver &D, StringRef text,
StringRef CPU, llvm::ARM::ArchKind ArchKind,
std::vector<StringRef> &Features) {
SmallVector<StringRef, 8> Split;
text.split(Split, StringRef("+"), -1, false);
for (StringRef Feature : Split) {
if (!appendArchExtFeatures(CPU, ArchKind, Feature, Features))
return false;
}
return true;
}
static void DecodeARMFeaturesFromCPU(const Driver &D, StringRef CPU,
std::vector<StringRef> &Features) {
CPU = CPU.split("+").first;
if (CPU != "generic") {
llvm::ARM::ArchKind ArchKind = llvm::ARM::parseCPUArch(CPU);
unsigned Extension = llvm::ARM::getDefaultExtensions(CPU, ArchKind);
llvm::ARM::getExtensionFeatures(Extension, Features);
}
}
// Check if -march is valid by checking if it can be canonicalised and parsed.
// getARMArch is used here instead of just checking the -march value in order
// to handle -march=native correctly.
static void checkARMArchName(const Driver &D, const Arg *A, const ArgList &Args,
llvm::StringRef ArchName, llvm::StringRef CPUName,
std::vector<StringRef> &Features,
const llvm::Triple &Triple) {
std::pair<StringRef, StringRef> Split = ArchName.split("+");
std::string MArch = arm::getARMArch(ArchName, Triple);
llvm::ARM::ArchKind ArchKind = llvm::ARM::parseArch(MArch);
if (ArchKind == llvm::ARM::ArchKind::INVALID ||
(Split.second.size() && !DecodeARMFeatures(
D, Split.second, CPUName, ArchKind, Features)))
D.Diag(clang::diag::err_drv_clang_unsupported) << A->getAsString(Args);
}
// Check -mcpu=. Needs ArchName to handle -mcpu=generic.
static void checkARMCPUName(const Driver &D, const Arg *A, const ArgList &Args,
llvm::StringRef CPUName, llvm::StringRef ArchName,
std::vector<StringRef> &Features,
const llvm::Triple &Triple) {
std::pair<StringRef, StringRef> Split = CPUName.split("+");
std::string CPU = arm::getARMTargetCPU(CPUName, ArchName, Triple);
llvm::ARM::ArchKind ArchKind =
arm::getLLVMArchKindForARM(CPU, ArchName, Triple);
if (ArchKind == llvm::ARM::ArchKind::INVALID ||
(Split.second.size() && !DecodeARMFeatures(
D, Split.second, CPU, ArchKind, Features)))
D.Diag(clang::diag::err_drv_clang_unsupported) << A->getAsString(Args);
}
bool arm::useAAPCSForMachO(const llvm::Triple &T) {
// The backend is hardwired to assume AAPCS for M-class processors, ensure
// the frontend matches that.
return T.getEnvironment() == llvm::Triple::EABI ||
T.getOS() == llvm::Triple::UnknownOS || isARMMProfile(T);
}
// Select mode for reading thread pointer (-mtp=soft/cp15).
arm::ReadTPMode arm::getReadTPMode(const ToolChain &TC, const ArgList &Args) {
if (Arg *A = Args.getLastArg(options::OPT_mtp_mode_EQ)) {
const Driver &D = TC.getDriver();
arm::ReadTPMode ThreadPointer =
llvm::StringSwitch<arm::ReadTPMode>(A->getValue())
.Case("cp15", ReadTPMode::Cp15)
.Case("soft", ReadTPMode::Soft)
.Default(ReadTPMode::Invalid);
if (ThreadPointer != ReadTPMode::Invalid)
return ThreadPointer;
if (StringRef(A->getValue()).empty())
D.Diag(diag::err_drv_missing_arg_mtp) << A->getAsString(Args);
else
D.Diag(diag::err_drv_invalid_mtp) << A->getAsString(Args);
return ReadTPMode::Invalid;
}
return ReadTPMode::Soft;
}
// Select the float ABI as determined by -msoft-float, -mhard-float, and
// -mfloat-abi=.
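// As a rough summary: "soft" selects software FP emulation, "softfp" permits
// FP instructions while keeping the soft-float calling convention, and "hard"
// passes FP arguments in FP registers.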
arm::FloatABI arm::getARMFloatABI(const ToolChain &TC, const ArgList &Args) {
const Driver &D = TC.getDriver();
const llvm::Triple &Triple = TC.getEffectiveTriple();
auto SubArch = getARMSubArchVersionNumber(Triple);
arm::FloatABI ABI = FloatABI::Invalid;
if (Arg *A =
Args.getLastArg(options::OPT_msoft_float, options::OPT_mhard_float,
options::OPT_mfloat_abi_EQ)) {
if (A->getOption().matches(options::OPT_msoft_float)) {
ABI = FloatABI::Soft;
} else if (A->getOption().matches(options::OPT_mhard_float)) {
ABI = FloatABI::Hard;
} else {
ABI = llvm::StringSwitch<arm::FloatABI>(A->getValue())
.Case("soft", FloatABI::Soft)
.Case("softfp", FloatABI::SoftFP)
.Case("hard", FloatABI::Hard)
.Default(FloatABI::Invalid);
if (ABI == FloatABI::Invalid && !StringRef(A->getValue()).empty()) {
D.Diag(diag::err_drv_invalid_mfloat_abi) << A->getAsString(Args);
ABI = FloatABI::Soft;
}
}
// It is incorrect to select hard float ABI on MachO platforms if the ABI is
// "apcs-gnu".
if (Triple.isOSBinFormatMachO() && !useAAPCSForMachO(Triple) &&
ABI == FloatABI::Hard) {
D.Diag(diag::err_drv_unsupported_opt_for_target) << A->getAsString(Args)
<< Triple.getArchName();
}
}
// If unspecified, choose the default based on the platform.
if (ABI == FloatABI::Invalid) {
switch (Triple.getOS()) {
case llvm::Triple::Darwin:
case llvm::Triple::MacOSX:
case llvm::Triple::IOS:
case llvm::Triple::TvOS: {
// Darwin defaults to "softfp" for v6 and v7.
ABI = (SubArch == 6 || SubArch == 7) ? FloatABI::SoftFP : FloatABI::Soft;
ABI = Triple.isWatchABI() ? FloatABI::Hard : ABI;
break;
}
case llvm::Triple::WatchOS:
ABI = FloatABI::Hard;
break;
// FIXME: this is invalid for WindowsCE
case llvm::Triple::Win32:
ABI = FloatABI::Hard;
break;
case llvm::Triple::NetBSD:
switch (Triple.getEnvironment()) {
case llvm::Triple::EABIHF:
case llvm::Triple::GNUEABIHF:
ABI = FloatABI::Hard;
break;
default:
ABI = FloatABI::Soft;
break;
}
break;
case llvm::Triple::FreeBSD:
switch (Triple.getEnvironment()) {
case llvm::Triple::GNUEABIHF:
ABI = FloatABI::Hard;
break;
default:
// FreeBSD defaults to soft float
ABI = FloatABI::Soft;
break;
}
break;
case llvm::Triple::OpenBSD:
ABI = FloatABI::SoftFP;
break;
default:
switch (Triple.getEnvironment()) {
case llvm::Triple::GNUEABIHF:
case llvm::Triple::MuslEABIHF:
case llvm::Triple::EABIHF:
ABI = FloatABI::Hard;
break;
case llvm::Triple::GNUEABI:
case llvm::Triple::MuslEABI:
case llvm::Triple::EABI:
// EABI is always AAPCS, and if it was not marked 'hard', it's softfp
ABI = FloatABI::SoftFP;
break;
case llvm::Triple::Android:
ABI = (SubArch >= 7) ? FloatABI::SoftFP : FloatABI::Soft;
break;
default:
// Assume "soft", but warn the user we are guessing.
if (Triple.isOSBinFormatMachO() &&
Triple.getSubArch() == llvm::Triple::ARMSubArch_v7em)
ABI = FloatABI::Hard;
else
ABI = FloatABI::Soft;
if (Triple.getOS() != llvm::Triple::UnknownOS ||
!Triple.isOSBinFormatMachO())
D.Diag(diag::warn_drv_assuming_mfloat_abi_is) << "soft";
break;
}
}
}
assert(ABI != FloatABI::Invalid && "must select an ABI");
return ABI;
}
void arm::getARMTargetFeatures(const ToolChain &TC,
const llvm::Triple &Triple,
const ArgList &Args,
ArgStringList &CmdArgs,
std::vector<StringRef> &Features,
bool ForAS) {
const Driver &D = TC.getDriver();
bool KernelOrKext =
Args.hasArg(options::OPT_mkernel, options::OPT_fapple_kext);
arm::FloatABI ABI = arm::getARMFloatABI(TC, Args);
arm::ReadTPMode ThreadPointer = arm::getReadTPMode(TC, Args);
const Arg *WaCPU = nullptr, *WaFPU = nullptr;
const Arg *WaHDiv = nullptr, *WaArch = nullptr;
// This vector will accumulate features from the architecture
// extension suffixes on -mcpu and -march (e.g. the 'bar' in
// -mcpu=foo+bar). We want to apply those after the features derived
// from the FPU, in case -mfpu generates a negative feature which
// the +bar is supposed to override.
std::vector<StringRef> ExtensionFeatures;
if (!ForAS) {
// FIXME: Note, this is a hack, the LLVM backend doesn't actually use these
// yet (it uses the -mfloat-abi and -msoft-float options), and it is
// stripped out by the ARM target. We should probably pass this a new
// -target-option, which is handled by the -cc1/-cc1as invocation.
//
// FIXME2: For consistency, it would be ideal if we set up the target
// machine state the same when using the frontend or the assembler. We don't
// currently do that for the assembler, we pass the options directly to the
// backend and never even instantiate the frontend TargetInfo. If we did,
// and used its handleTargetFeatures hook, then we could ensure the
// assembler and the frontend behave the same.
// Use software floating point operations?
if (ABI == arm::FloatABI::Soft)
Features.push_back("+soft-float");
// Use software floating point argument passing?
if (ABI != arm::FloatABI::Hard)
Features.push_back("+soft-float-abi");
} else {
// Here, we make sure that -Wa,-mfpu/cpu/arch/hwdiv will be passed down
// to the assembler correctly.
for (const Arg *A :
Args.filtered(options::OPT_Wa_COMMA, options::OPT_Xassembler)) {
StringRef Value = A->getValue();
if (Value.startswith("-mfpu=")) {
WaFPU = A;
} else if (Value.startswith("-mcpu=")) {
WaCPU = A;
} else if (Value.startswith("-mhwdiv=")) {
WaHDiv = A;
} else if (Value.startswith("-march=")) {
WaArch = A;
}
}
}
if (ThreadPointer == arm::ReadTPMode::Cp15)
Features.push_back("+read-tp-hard");
const Arg *ArchArg = Args.getLastArg(options::OPT_march_EQ);
const Arg *CPUArg = Args.getLastArg(options::OPT_mcpu_EQ);
StringRef ArchName;
StringRef CPUName;
// Check -mcpu. ClangAs gives preference to -Wa,-mcpu=.
if (WaCPU) {
if (CPUArg)
D.Diag(clang::diag::warn_drv_unused_argument)
<< CPUArg->getAsString(Args);
CPUName = StringRef(WaCPU->getValue()).substr(6);
CPUArg = WaCPU;
} else if (CPUArg)
CPUName = CPUArg->getValue();
// Check -march. ClangAs gives preference to -Wa,-march=.
if (WaArch) {
if (ArchArg)
D.Diag(clang::diag::warn_drv_unused_argument)
<< ArchArg->getAsString(Args);
ArchName = StringRef(WaArch->getValue()).substr(7);
checkARMArchName(D, WaArch, Args, ArchName, CPUName,
ExtensionFeatures, Triple);
// FIXME: Set Arch.
D.Diag(clang::diag::warn_drv_unused_argument) << WaArch->getAsString(Args);
} else if (ArchArg) {
ArchName = ArchArg->getValue();
checkARMArchName(D, ArchArg, Args, ArchName, CPUName,
ExtensionFeatures, Triple);
}
// Add CPU features for generic CPUs
if (CPUName == "native") {
llvm::StringMap<bool> HostFeatures;
if (llvm::sys::getHostCPUFeatures(HostFeatures))
for (auto &F : HostFeatures)
Features.push_back(
Args.MakeArgString((F.second ? "+" : "-") + F.first()));
} else if (!CPUName.empty()) {
// This sets the default features for the specified CPU. We certainly don't
// want to override the features that have been explicitly specified on the
// command line. Therefore, process them directly instead of appending them
// at the end later.
DecodeARMFeaturesFromCPU(D, CPUName, Features);
}
if (CPUArg)
checkARMCPUName(D, CPUArg, Args, CPUName, ArchName,
ExtensionFeatures, Triple);
// Honor -mfpu=. ClangAs gives preference to -Wa,-mfpu=.
const Arg *FPUArg = Args.getLastArg(options::OPT_mfpu_EQ);
if (WaFPU) {
if (FPUArg)
D.Diag(clang::diag::warn_drv_unused_argument)
<< FPUArg->getAsString(Args);
getARMFPUFeatures(D, WaFPU, Args, StringRef(WaFPU->getValue()).substr(6),
Features);
} else if (FPUArg) {
getARMFPUFeatures(D, FPUArg, Args, FPUArg->getValue(), Features);
} else if (Triple.isAndroid() && getARMSubArchVersionNumber(Triple) >= 7) {
const char *AndroidFPU = "neon";
if (!llvm::ARM::getFPUFeatures(llvm::ARM::parseFPU(AndroidFPU), Features))
D.Diag(clang::diag::err_drv_clang_unsupported)
<< std::string("-mfpu=") + AndroidFPU;
}
// Now that we've finished accumulating features from arch, cpu and fpu,
// we can append the ones for architecture extensions that we
// collected separately.
Features.insert(std::end(Features),
std::begin(ExtensionFeatures), std::end(ExtensionFeatures));
// Honor -mhwdiv=. ClangAs gives preference to -Wa,-mhwdiv=.
const Arg *HDivArg = Args.getLastArg(options::OPT_mhwdiv_EQ);
if (WaHDiv) {
if (HDivArg)
D.Diag(clang::diag::warn_drv_unused_argument)
<< HDivArg->getAsString(Args);
getARMHWDivFeatures(D, WaHDiv, Args,
StringRef(WaHDiv->getValue()).substr(8), Features);
} else if (HDivArg)
getARMHWDivFeatures(D, HDivArg, Args, HDivArg->getValue(), Features);
// Handle (arch-dependent) fp16fml/fullfp16 relationship.
// Must happen before any features are disabled due to soft-float.
// FIXME: this fp16fml option handling will be reimplemented after the
// TargetParser rewrite.
const auto ItRNoFullFP16 = std::find(Features.rbegin(), Features.rend(), "-fullfp16");
const auto ItRFP16FML = std::find(Features.rbegin(), Features.rend(), "+fp16fml");
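// Note: the searches above use reverse iterators, so a *smaller* iterator
// value below means the feature was found *later* in Features, i.e. it is
// the most recent (winning) occurrence.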
if (Triple.getSubArch() == llvm::Triple::SubArchType::ARMSubArch_v8_4a) {
const auto ItRFullFP16 = std::find(Features.rbegin(), Features.rend(), "+fullfp16");
if (ItRFullFP16 < ItRNoFullFP16 && ItRFullFP16 < ItRFP16FML) {
// The only entangled feature that can appear to the right of this +fullfp16 is -fp16fml.
// Only append the +fp16fml if there is no -fp16fml after the +fullfp16.
if (std::find(Features.rbegin(), ItRFullFP16, "-fp16fml") == ItRFullFP16)
Features.push_back("+fp16fml");
}
else
goto fp16_fml_fallthrough;
}
else {
fp16_fml_fallthrough:
// In both of these cases, putting the 'other' feature on the end of the vector will
// result in the same effect as placing it immediately after the current feature.
if (ItRNoFullFP16 < ItRFP16FML)
Features.push_back("-fp16fml");
else if (ItRNoFullFP16 > ItRFP16FML)
Features.push_back("+fullfp16");
}
// Setting -msoft-float/-mfloat-abi=soft effectively disables the FPU (GCC
// ignores the -mfpu options in this case).
// Note that the ABI can also be set implicitly by the target selected.
if (ABI == arm::FloatABI::Soft) {
llvm::ARM::getFPUFeatures(llvm::ARM::FK_NONE, Features);
// Disable all features relating to hardware FP.
// FIXME: Disabling fpregs should be enough all by itself, since all
// the other FP features are dependent on it. However
// there is currently no easy way to test this in clang, so for
// now just be explicit and disable all known dependent features
// as well.
for (std::string Feature : {
- "vfp2", "vfp2sp",
+ "vfp2", "vfp2sp", "vfp2d16", "vfp2d16sp",
"vfp3", "vfp3sp", "vfp3d16", "vfp3d16sp",
"vfp4", "vfp4sp", "vfp4d16", "vfp4d16sp",
"fp-armv8", "fp-armv8sp", "fp-armv8d16", "fp-armv8d16sp",
"fullfp16", "neon", "crypto", "dotprod", "fp16fml",
"fp64", "d32", "fpregs"})
Features.push_back(Args.MakeArgString("-" + Feature));
}
// En/disable crc code generation.
if (Arg *A = Args.getLastArg(options::OPT_mcrc, options::OPT_mnocrc)) {
if (A->getOption().matches(options::OPT_mcrc))
Features.push_back("+crc");
else
Features.push_back("-crc");
}
// For Arch >= ARMv8.0: crypto = sha2 + aes
// FIXME: this needs reimplementation after the TargetParser rewrite
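// e.g. -march=armv8.1a+crypto enables both +sha2 and +aes below, unless a
// +nosha2 or +noaes modifier is also present in the arch string.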
if (ArchName.find_lower("armv8a") != StringRef::npos ||
ArchName.find_lower("armv8.1a") != StringRef::npos ||
ArchName.find_lower("armv8.2a") != StringRef::npos ||
ArchName.find_lower("armv8.3a") != StringRef::npos ||
ArchName.find_lower("armv8.4a") != StringRef::npos) {
if (ArchName.find_lower("+crypto") != StringRef::npos) {
if (ArchName.find_lower("+nosha2") == StringRef::npos)
Features.push_back("+sha2");
if (ArchName.find_lower("+noaes") == StringRef::npos)
Features.push_back("+aes");
} else if (ArchName.find_lower("-crypto") != StringRef::npos) {
if (ArchName.find_lower("+sha2") == StringRef::npos)
Features.push_back("-sha2");
if (ArchName.find_lower("+aes") == StringRef::npos)
Features.push_back("-aes");
}
}
// CMSE: The check for a v8-M target (required for -mcmse to be applicable) is performed later.
if (Args.getLastArg(options::OPT_mcmse))
Features.push_back("+8msecext");
// Look for the last occurrence of -mlong-calls or -mno-long-calls. If
// neither options are specified, see if we are compiling for kernel/kext and
// decide whether to pass "+long-calls" based on the OS and its version.
if (Arg *A = Args.getLastArg(options::OPT_mlong_calls,
options::OPT_mno_long_calls)) {
if (A->getOption().matches(options::OPT_mlong_calls))
Features.push_back("+long-calls");
} else if (KernelOrKext && (!Triple.isiOS() || Triple.isOSVersionLT(6)) &&
!Triple.isWatchOS()) {
Features.push_back("+long-calls");
}
// Generate execute-only output (no data access to code sections).
// This only makes sense for the compiler, not for the assembler.
if (!ForAS) {
// Supported only on ARMv6T2 and ARMv7 and above.
// Cannot be combined with -mno-movt or -mlong-calls
if (Arg *A = Args.getLastArg(options::OPT_mexecute_only, options::OPT_mno_execute_only)) {
if (A->getOption().matches(options::OPT_mexecute_only)) {
if (getARMSubArchVersionNumber(Triple) < 7 &&
llvm::ARM::parseArch(Triple.getArchName()) != llvm::ARM::ArchKind::ARMV6T2)
D.Diag(diag::err_target_unsupported_execute_only) << Triple.getArchName();
else if (Arg *B = Args.getLastArg(options::OPT_mno_movt))
D.Diag(diag::err_opt_not_valid_with_opt) << A->getAsString(Args) << B->getAsString(Args);
// Long calls create constant pool entries and have not yet been fixed up
// to play nicely with execute-only. Hence, they cannot be used in
// execute-only code for now.
else if (Arg *B = Args.getLastArg(options::OPT_mlong_calls, options::OPT_mno_long_calls)) {
if (B->getOption().matches(options::OPT_mlong_calls))
D.Diag(diag::err_opt_not_valid_with_opt) << A->getAsString(Args) << B->getAsString(Args);
}
Features.push_back("+execute-only");
}
}
}
// Kernel code has more strict alignment requirements.
if (KernelOrKext)
Features.push_back("+strict-align");
else if (Arg *A = Args.getLastArg(options::OPT_mno_unaligned_access,
options::OPT_munaligned_access)) {
if (A->getOption().matches(options::OPT_munaligned_access)) {
// No v6M core supports unaligned memory access (v6M ARM ARM A3.2).
if (Triple.getSubArch() == llvm::Triple::SubArchType::ARMSubArch_v6m)
D.Diag(diag::err_target_unsupported_unaligned) << "v6m";
// v8M Baseline follows on from v6M, so doesn't support unaligned memory
// access either.
else if (Triple.getSubArch() == llvm::Triple::SubArchType::ARMSubArch_v8m_baseline)
D.Diag(diag::err_target_unsupported_unaligned) << "v8m.base";
} else
Features.push_back("+strict-align");
} else {
// Assume pre-ARMv6 doesn't support unaligned accesses.
//
// ARMv6 may or may not support unaligned accesses depending on the
// SCTLR.U bit, which is architecture-specific. We assume ARMv6
// Darwin and NetBSD targets support unaligned accesses, and others don't.
//
// ARMv7 always has SCTLR.U set to 1, but it has a new SCTLR.A bit
// which raises an alignment fault on unaligned accesses. Linux
// defaults this bit to 0 and handles it as a system-wide (not
// per-process) setting. It is therefore safe to assume that ARMv7+
// Linux targets support unaligned accesses. The same goes for NaCl.
//
// The above behavior is consistent with GCC.
int VersionNum = getARMSubArchVersionNumber(Triple);
if (Triple.isOSDarwin() || Triple.isOSNetBSD()) {
if (VersionNum < 6 ||
Triple.getSubArch() == llvm::Triple::SubArchType::ARMSubArch_v6m)
Features.push_back("+strict-align");
} else if (Triple.isOSLinux() || Triple.isOSNaCl()) {
if (VersionNum < 7)
Features.push_back("+strict-align");
} else
Features.push_back("+strict-align");
}
// llvm does not support reserving registers in general. There is support
// for reserving r9 on ARM though (defined as a platform-specific register
// in ARM EABI).
if (Args.hasArg(options::OPT_ffixed_r9))
Features.push_back("+reserve-r9");
// The kext linker doesn't know how to deal with movw/movt.
if (KernelOrKext || Args.hasArg(options::OPT_mno_movt))
Features.push_back("+no-movt");
if (Args.hasArg(options::OPT_mno_neg_immediates))
Features.push_back("+no-neg-immediates");
}
const std::string arm::getARMArch(StringRef Arch, const llvm::Triple &Triple) {
std::string MArch;
if (!Arch.empty())
MArch = Arch;
else
MArch = Triple.getArchName();
MArch = StringRef(MArch).split("+").first.lower();
// Handle -march=native.
if (MArch == "native") {
std::string CPU = llvm::sys::getHostCPUName();
if (CPU != "generic") {
// Translate the native cpu into the architecture suffix for that CPU.
StringRef Suffix = arm::getLLVMArchSuffixForARM(CPU, MArch, Triple);
// If there is no valid architecture suffix for this CPU we don't know how
// to handle it, so return no architecture.
if (Suffix.empty())
MArch = "";
else
MArch = std::string("arm") + Suffix.str();
}
}
return MArch;
}
/// Get the (LLVM) name of the minimum ARM CPU for the arch we are targeting.
StringRef arm::getARMCPUForMArch(StringRef Arch, const llvm::Triple &Triple) {
std::string MArch = getARMArch(Arch, Triple);
// getARMCPUForArch defaults to the triple if MArch is empty, but empty MArch
// here means an -march=native that we can't handle, so instead return no CPU.
if (MArch.empty())
return StringRef();
// We need to return an empty string here on invalid MArch values as the
// various places that call this function can't cope with a null result.
return Triple.getARMCPUForArch(MArch);
}
/// getARMTargetCPU - Get the (LLVM) name of the ARM cpu we are targeting.
std::string arm::getARMTargetCPU(StringRef CPU, StringRef Arch,
const llvm::Triple &Triple) {
// FIXME: Warn on inconsistent use of -mcpu and -march.
// If we have -mcpu=, use that.
if (!CPU.empty()) {
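// Strip any architecture extension suffix and normalize case, so e.g.
// "Cortex-A57+crc" becomes "cortex-a57".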
std::string MCPU = StringRef(CPU).split("+").first.lower();
// Handle -mcpu=native.
if (MCPU == "native")
return llvm::sys::getHostCPUName();
else
return MCPU;
}
return getARMCPUForMArch(Arch, Triple);
}
/// getLLVMArchKindForARM - Get the LLVM ArchKind value to use for a
/// particular CPU (or Arch, if CPU is generic). This is needed to
/// pass to functions like llvm::ARM::getDefaultFPU which need an
/// ArchKind as well as a CPU name.
llvm::ARM::ArchKind arm::getLLVMArchKindForARM(StringRef CPU, StringRef Arch,
const llvm::Triple &Triple) {
llvm::ARM::ArchKind ArchKind;
if (CPU == "generic") {
std::string ARMArch = tools::arm::getARMArch(Arch, Triple);
ArchKind = llvm::ARM::parseArch(ARMArch);
if (ArchKind == llvm::ARM::ArchKind::INVALID)
// In the case of a generic Arch, i.e. "arm",
// extract the arch from the default CPU of the Triple.
ArchKind = llvm::ARM::parseCPUArch(Triple.getARMCPUForArch(ARMArch));
} else {
// FIXME: horrible hack to get around the fact that Cortex-A7 is only an
// armv7k triple if it's actually been specified via "-arch armv7k".
ArchKind = (Arch == "armv7k" || Arch == "thumbv7k")
? llvm::ARM::ArchKind::ARMV7K
: llvm::ARM::parseCPUArch(CPU);
}
return ArchKind;
}
/// getLLVMArchSuffixForARM - Get the LLVM arch name to use for a particular
/// CPU (or Arch, if CPU is generic).
// FIXME: This is redundant with -mcpu; why does LLVM use this?
StringRef arm::getLLVMArchSuffixForARM(StringRef CPU, StringRef Arch,
const llvm::Triple &Triple) {
llvm::ARM::ArchKind ArchKind = getLLVMArchKindForARM(CPU, Arch, Triple);
if (ArchKind == llvm::ARM::ArchKind::INVALID)
return "";
return llvm::ARM::getSubArch(ArchKind);
}
void arm::appendBE8LinkFlag(const ArgList &Args, ArgStringList &CmdArgs,
const llvm::Triple &Triple) {
if (Args.hasArg(options::OPT_r))
return;
// ARMv7 (and later) and ARMv6-M do not support BE-32, so instruct the linker
// to generate BE-8 executables.
if (arm::getARMSubArchVersionNumber(Triple) >= 7 || arm::isARMMProfile(Triple))
CmdArgs.push_back("--be8");
}
Index: stable/12/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp
===================================================================
--- stable/12/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp (revision 356464)
+++ stable/12/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp (revision 356465)
@@ -1,1045 +1,1048 @@
//===--- Linux.cpp - Linux ToolChain Implementations ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "Linux.h"
#include "Arch/ARM.h"
#include "Arch/Mips.h"
#include "Arch/PPC.h"
#include "Arch/RISCV.h"
#include "CommonArgs.h"
#include "clang/Config/config.h"
#include "clang/Driver/Distro.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/SanitizerArgs.h"
#include "llvm/Option/ArgList.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/VirtualFileSystem.h"
#include <system_error>
using namespace clang::driver;
using namespace clang::driver::toolchains;
using namespace clang;
using namespace llvm::opt;
using tools::addPathIfExists;
/// Get our best guess at the multiarch triple for a target.
///
/// Debian-based systems are starting to use a multiarch setup where they use
/// a target-triple directory in the library and header search paths.
/// Unfortunately, this triple does not align with the vanilla target triple,
/// so we provide a rough mapping here.
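///
/// For example, Debian-based distributions install x86-64 libraries under
/// /lib/x86_64-linux-gnu even though the compiler's target triple is
/// typically x86_64-pc-linux-gnu or x86_64-unknown-linux-gnu.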
static std::string getMultiarchTriple(const Driver &D,
const llvm::Triple &TargetTriple,
StringRef SysRoot) {
llvm::Triple::EnvironmentType TargetEnvironment =
TargetTriple.getEnvironment();
bool IsAndroid = TargetTriple.isAndroid();
bool IsMipsR6 = TargetTriple.getSubArch() == llvm::Triple::MipsSubArch_r6;
bool IsMipsN32Abi = TargetTriple.getEnvironment() == llvm::Triple::GNUABIN32;
// For most architectures, just use whatever we have rather than trying to be
// clever.
switch (TargetTriple.getArch()) {
default:
break;
// We use the existence of '/lib/<triple>' as a directory to detect some
// common linux triples that don't quite match the Clang triple for both
// 32-bit and 64-bit targets. Multiarch fixes its install triples to these
// regardless of what the actual target triple is.
case llvm::Triple::arm:
case llvm::Triple::thumb:
if (IsAndroid) {
return "arm-linux-androideabi";
} else if (TargetEnvironment == llvm::Triple::GNUEABIHF) {
if (D.getVFS().exists(SysRoot + "/lib/arm-linux-gnueabihf"))
return "arm-linux-gnueabihf";
} else {
if (D.getVFS().exists(SysRoot + "/lib/arm-linux-gnueabi"))
return "arm-linux-gnueabi";
}
break;
case llvm::Triple::armeb:
case llvm::Triple::thumbeb:
if (TargetEnvironment == llvm::Triple::GNUEABIHF) {
if (D.getVFS().exists(SysRoot + "/lib/armeb-linux-gnueabihf"))
return "armeb-linux-gnueabihf";
} else {
if (D.getVFS().exists(SysRoot + "/lib/armeb-linux-gnueabi"))
return "armeb-linux-gnueabi";
}
break;
case llvm::Triple::x86:
if (IsAndroid)
return "i686-linux-android";
if (D.getVFS().exists(SysRoot + "/lib/i386-linux-gnu"))
return "i386-linux-gnu";
break;
case llvm::Triple::x86_64:
if (IsAndroid)
return "x86_64-linux-android";
// We don't want this for x32, otherwise it will match x86_64 libs
if (TargetEnvironment != llvm::Triple::GNUX32 &&
D.getVFS().exists(SysRoot + "/lib/x86_64-linux-gnu"))
return "x86_64-linux-gnu";
break;
case llvm::Triple::aarch64:
if (IsAndroid)
return "aarch64-linux-android";
if (D.getVFS().exists(SysRoot + "/lib/aarch64-linux-gnu"))
return "aarch64-linux-gnu";
break;
case llvm::Triple::aarch64_be:
if (D.getVFS().exists(SysRoot + "/lib/aarch64_be-linux-gnu"))
return "aarch64_be-linux-gnu";
break;
case llvm::Triple::mips: {
std::string MT = IsMipsR6 ? "mipsisa32r6-linux-gnu" : "mips-linux-gnu";
if (D.getVFS().exists(SysRoot + "/lib/" + MT))
return MT;
break;
}
case llvm::Triple::mipsel: {
if (IsAndroid)
return "mipsel-linux-android";
std::string MT = IsMipsR6 ? "mipsisa32r6el-linux-gnu" : "mipsel-linux-gnu";
if (D.getVFS().exists(SysRoot + "/lib/" + MT))
return MT;
break;
}
case llvm::Triple::mips64: {
std::string MT = std::string(IsMipsR6 ? "mipsisa64r6" : "mips64") +
"-linux-" + (IsMipsN32Abi ? "gnuabin32" : "gnuabi64");
if (D.getVFS().exists(SysRoot + "/lib/" + MT))
return MT;
if (D.getVFS().exists(SysRoot + "/lib/mips64-linux-gnu"))
return "mips64-linux-gnu";
break;
}
case llvm::Triple::mips64el: {
if (IsAndroid)
return "mips64el-linux-android";
std::string MT = std::string(IsMipsR6 ? "mipsisa64r6el" : "mips64el") +
"-linux-" + (IsMipsN32Abi ? "gnuabin32" : "gnuabi64");
if (D.getVFS().exists(SysRoot + "/lib/" + MT))
return MT;
if (D.getVFS().exists(SysRoot + "/lib/mips64el-linux-gnu"))
return "mips64el-linux-gnu";
break;
}
case llvm::Triple::ppc:
if (D.getVFS().exists(SysRoot + "/lib/powerpc-linux-gnuspe"))
return "powerpc-linux-gnuspe";
if (D.getVFS().exists(SysRoot + "/lib/powerpc-linux-gnu"))
return "powerpc-linux-gnu";
break;
case llvm::Triple::ppc64:
if (D.getVFS().exists(SysRoot + "/lib/powerpc64-linux-gnu"))
return "powerpc64-linux-gnu";
break;
case llvm::Triple::ppc64le:
if (D.getVFS().exists(SysRoot + "/lib/powerpc64le-linux-gnu"))
return "powerpc64le-linux-gnu";
break;
case llvm::Triple::sparc:
if (D.getVFS().exists(SysRoot + "/lib/sparc-linux-gnu"))
return "sparc-linux-gnu";
break;
case llvm::Triple::sparcv9:
if (D.getVFS().exists(SysRoot + "/lib/sparc64-linux-gnu"))
return "sparc64-linux-gnu";
break;
case llvm::Triple::systemz:
if (D.getVFS().exists(SysRoot + "/lib/s390x-linux-gnu"))
return "s390x-linux-gnu";
break;
}
return TargetTriple.str();
}
static StringRef getOSLibDir(const llvm::Triple &Triple, const ArgList &Args) {
if (Triple.isMIPS()) {
if (Triple.isAndroid()) {
StringRef CPUName;
StringRef ABIName;
tools::mips::getMipsCPUAndABI(Args, Triple, CPUName, ABIName);
if (CPUName == "mips32r6")
return "libr6";
if (CPUName == "mips32r2")
return "libr2";
}
// lib32 directory has a special meaning on MIPS targets.
// It contains N32 ABI binaries. Use this folder if we produce
// code for the N32 ABI only.
if (tools::mips::hasMipsAbiArg(Args, "n32"))
return "lib32";
return Triple.isArch32Bit() ? "lib" : "lib64";
}
// It happens that only x86 and PPC use the 'lib32' variant of oslibdir, and
// using that variant while targeting other architectures causes problems
// because the libraries are laid out in shared system roots that can't cope
// with a 'lib32' library search path being considered. So we only enable
// them when we know we may need it.
//
// FIXME: This is a bit of a hack. We should really unify this code for
// reasoning about oslibdir spellings with the lib dir spellings in the
// GCCInstallationDetector, but that is a more significant refactoring.
if (Triple.getArch() == llvm::Triple::x86 ||
Triple.getArch() == llvm::Triple::ppc)
return "lib32";
if (Triple.getArch() == llvm::Triple::x86_64 &&
Triple.getEnvironment() == llvm::Triple::GNUX32)
return "libx32";
if (Triple.getArch() == llvm::Triple::riscv32)
return "lib32";
return Triple.isArch32Bit() ? "lib" : "lib64";
}
static void addMultilibsFilePaths(const Driver &D, const MultilibSet &Multilibs,
const Multilib &Multilib,
StringRef InstallPath,
ToolChain::path_list &Paths) {
if (const auto &PathsCallback = Multilibs.filePathsCallback())
for (const auto &Path : PathsCallback(Multilib))
addPathIfExists(D, InstallPath + Path, Paths);
}
Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
: Generic_ELF(D, Triple, Args) {
GCCInstallation.init(Triple, Args);
Multilibs = GCCInstallation.getMultilibs();
SelectedMultilib = GCCInstallation.getMultilib();
llvm::Triple::ArchType Arch = Triple.getArch();
std::string SysRoot = computeSysRoot();
// Cross-compiling binutils and GCC installations (vanilla and openSUSE at
// least) put various tools in a triple-prefixed directory off of the parent
// of the GCC installation. We use the GCC triple here to ensure that we end
// up with tools that support the same amount of cross compiling as the
// detected GCC installation. For example, if we find a GCC installation
// targeting x86_64, but it is a bi-arch GCC installation, it can also be
// used to target i386.
// FIXME: This seems unlikely to be Linux-specific.
ToolChain::path_list &PPaths = getProgramPaths();
if (GCCInstallation.isValid()) {
PPaths.push_back(Twine(GCCInstallation.getParentLibPath() + "/../" +
GCCInstallation.getTriple().str() + "/bin")
.str());
}
Distro Distro(D.getVFS());
if (Distro.IsAlpineLinux() || Triple.isAndroid()) {
ExtraOpts.push_back("-z");
ExtraOpts.push_back("now");
}
if (Distro.IsOpenSUSE() || Distro.IsUbuntu() || Distro.IsAlpineLinux() ||
Triple.isAndroid()) {
ExtraOpts.push_back("-z");
ExtraOpts.push_back("relro");
}
// The lld default page size is too large for AArch64, which produces much
// larger .so files and images for arm64 device targets. Use 4KB page size
// for Android arm64 targets instead.
if (Triple.isAArch64() && Triple.isAndroid()) {
ExtraOpts.push_back("-z");
ExtraOpts.push_back("max-page-size=4096");
}
if (GCCInstallation.getParentLibPath().find("opt/rh/devtoolset") !=
StringRef::npos)
// With devtoolset on RHEL, we want to add a bin directory that is relative
// to the detected gcc install, because if we are using devtoolset gcc then
// we want to use other tools from devtoolset (e.g. ld) instead of the
// standard system tools.
PPaths.push_back(Twine(GCCInstallation.getParentLibPath() +
"/../bin").str());
if (Arch == llvm::Triple::arm || Arch == llvm::Triple::thumb)
ExtraOpts.push_back("-X");
const bool IsAndroid = Triple.isAndroid();
const bool IsMips = Triple.isMIPS();
const bool IsHexagon = Arch == llvm::Triple::hexagon;
const bool IsRISCV = Triple.isRISCV();
if (IsMips && !SysRoot.empty())
ExtraOpts.push_back("--sysroot=" + SysRoot);
// Do not use 'gnu' hash style for Mips targets because .gnu.hash
// and the MIPS ABI require .dynsym to be sorted in different ways.
// .gnu.hash needs symbols to be grouped by hash code whereas the MIPS
// ABI requires a mapping between the GOT and the symbol table.
// The Android loader does not support .gnu.hash until API 23.
// The Hexagon linker/loader does not support .gnu.hash.
if (!IsMips && !IsHexagon) {
if (Distro.IsRedhat() || Distro.IsOpenSUSE() || Distro.IsAlpineLinux() ||
(Distro.IsUbuntu() && Distro >= Distro::UbuntuMaverick) ||
(IsAndroid && !Triple.isAndroidVersionLT(23)))
ExtraOpts.push_back("--hash-style=gnu");
if (Distro.IsDebian() || Distro.IsOpenSUSE() ||
Distro == Distro::UbuntuLucid || Distro == Distro::UbuntuJaunty ||
Distro == Distro::UbuntuKarmic ||
(IsAndroid && Triple.isAndroidVersionLT(23)))
ExtraOpts.push_back("--hash-style=both");
}
#ifdef ENABLE_LINKER_BUILD_ID
ExtraOpts.push_back("--build-id");
#endif
if (IsAndroid || Distro.IsOpenSUSE())
ExtraOpts.push_back("--enable-new-dtags");
// The selection of paths to try here is designed to match the patterns which
// the GCC driver itself uses, as this is part of the GCC-compatible driver.
// This was determined by running GCC in a fake filesystem, creating all
// possible permutations of these directories, and seeing which ones it added
// to the link paths.
path_list &Paths = getFilePaths();
const std::string OSLibDir = getOSLibDir(Triple, Args);
const std::string MultiarchTriple = getMultiarchTriple(D, Triple, SysRoot);
// Add the multilib suffixed paths where they are available.
if (GCCInstallation.isValid()) {
const llvm::Triple &GCCTriple = GCCInstallation.getTriple();
const std::string &LibPath = GCCInstallation.getParentLibPath();
// Add toolchain / multilib specific file paths.
addMultilibsFilePaths(D, Multilibs, SelectedMultilib,
GCCInstallation.getInstallPath(), Paths);
// Sourcery CodeBench MIPS toolchain holds some libraries under
// a biarch-like suffix of the GCC installation.
addPathIfExists(
D, GCCInstallation.getInstallPath() + SelectedMultilib.gccSuffix(),
Paths);
// GCC cross compiling toolchains will install target libraries which ship
// as part of the toolchain under <prefix>/<triple>/<libdir> rather than as
// any part of the GCC installation in
// <prefix>/<libdir>/gcc/<triple>/<version>. This decision is somewhat
// debatable, but is the reality today. We need to search this tree even
// when we have a sysroot somewhere else. It is the responsibility of
// whoever is doing the cross build targeting a sysroot using a GCC
// installation that is *not* within the system root to ensure two things:
//
// 1) Any DSOs that are linked in from this tree or from the install path
// above must be present on the system root and found via an
// appropriate rpath.
// 2) There must not be libraries installed into
// <prefix>/<triple>/<libdir> unless they should be preferred over
// those within the system root.
//
// Note that this matches the GCC behavior. See the below comment for where
// Clang diverges from GCC's behavior.
addPathIfExists(D, LibPath + "/../" + GCCTriple.str() + "/lib/../" +
OSLibDir + SelectedMultilib.osSuffix(),
Paths);
// If the GCC installation we found is inside of the sysroot, we want to
// prefer libraries installed in the parent prefix of the GCC installation.
// It is important to *not* use these paths when the GCC installation is
// outside of the system root as that can pick up unintended libraries.
// This usually happens when there is an external cross compiler on the
// host system, and a more minimal sysroot available that is the target of
// the cross. Note that GCC does include some of these directories in some
// configurations but this seems somewhere between questionable and simply
// a bug.
if (StringRef(LibPath).startswith(SysRoot)) {
addPathIfExists(D, LibPath + "/" + MultiarchTriple, Paths);
addPathIfExists(D, LibPath + "/../" + OSLibDir, Paths);
}
}
// Similar to the logic for GCC above, if we are currently running Clang
// inside of the requested system root, add its parent library paths to
// those searched.
// FIXME: It's not clear whether we should use the driver's installed
// directory ('Dir' below) or the ResourceDir.
if (StringRef(D.Dir).startswith(SysRoot)) {
addPathIfExists(D, D.Dir + "/../lib/" + MultiarchTriple, Paths);
addPathIfExists(D, D.Dir + "/../" + OSLibDir, Paths);
}
addPathIfExists(D, SysRoot + "/lib/" + MultiarchTriple, Paths);
addPathIfExists(D, SysRoot + "/lib/../" + OSLibDir, Paths);
if (IsAndroid) {
// Android sysroots contain a library directory for each supported OS
// version as well as some unversioned libraries in the usual multiarch
// directory.
unsigned Major;
unsigned Minor;
unsigned Micro;
Triple.getEnvironmentVersion(Major, Minor, Micro);
addPathIfExists(D,
SysRoot + "/usr/lib/" + MultiarchTriple + "/" +
llvm::to_string(Major),
Paths);
}
addPathIfExists(D, SysRoot + "/usr/lib/" + MultiarchTriple, Paths);
// 64-bit OpenEmbedded sysroots may not have a /usr/lib dir, so /usr/lib64
// cannot be found when it is referenced as /usr/lib/../lib64. We handle
// that case here.
if (Triple.getVendor() == llvm::Triple::OpenEmbedded &&
Triple.isArch64Bit())
addPathIfExists(D, SysRoot + "/usr/" + OSLibDir, Paths);
else
addPathIfExists(D, SysRoot + "/usr/lib/../" + OSLibDir, Paths);
if (IsRISCV) {
StringRef ABIName = tools::riscv::getRISCVABI(Args, Triple);
addPathIfExists(D, SysRoot + "/" + OSLibDir + "/" + ABIName, Paths);
addPathIfExists(D, SysRoot + "/usr/" + OSLibDir + "/" + ABIName, Paths);
}
// Try walking via the GCC triple path in case of biarch or multiarch GCC
// installations with strange symlinks.
if (GCCInstallation.isValid()) {
addPathIfExists(D,
SysRoot + "/usr/lib/" + GCCInstallation.getTriple().str() +
"/../../" + OSLibDir,
Paths);
// Add the 'other' biarch variant path
Multilib BiarchSibling;
if (GCCInstallation.getBiarchSibling(BiarchSibling)) {
addPathIfExists(D, GCCInstallation.getInstallPath() +
BiarchSibling.gccSuffix(),
Paths);
}
// See comments above on the multilib variant for details of why this is
// included even from outside the sysroot.
const std::string &LibPath = GCCInstallation.getParentLibPath();
const llvm::Triple &GCCTriple = GCCInstallation.getTriple();
const Multilib &Multilib = GCCInstallation.getMultilib();
addPathIfExists(D, LibPath + "/../" + GCCTriple.str() + "/lib" +
Multilib.osSuffix(),
Paths);
// See comments above on the multilib variant for details of why this is
// only included from within the sysroot.
if (StringRef(LibPath).startswith(SysRoot))
addPathIfExists(D, LibPath, Paths);
}
// Similar to the logic for GCC above, if we are currently running Clang
// inside of the requested system root, add its parent library path to those
// searched.
// FIXME: It's not clear whether we should use the driver's installed
// directory ('Dir' below) or the ResourceDir.
if (StringRef(D.Dir).startswith(SysRoot))
addPathIfExists(D, D.Dir + "/../lib", Paths);
addPathIfExists(D, SysRoot + "/lib", Paths);
addPathIfExists(D, SysRoot + "/usr/lib", Paths);
}
ToolChain::CXXStdlibType Linux::GetDefaultCXXStdlibType() const {
if (getTriple().isAndroid())
return ToolChain::CST_Libcxx;
return ToolChain::CST_Libstdcxx;
}
bool Linux::HasNativeLLVMSupport() const { return true; }
Tool *Linux::buildLinker() const { return new tools::gnutools::Linker(*this); }
Tool *Linux::buildAssembler() const {
return new tools::gnutools::Assembler(*this);
}
std::string Linux::computeSysRoot() const {
if (!getDriver().SysRoot.empty())
return getDriver().SysRoot;
if (getTriple().isAndroid()) {
// Android toolchains typically include a sysroot at ../sysroot relative to
// the clang binary.
const StringRef ClangDir = getDriver().getInstalledDir();
std::string AndroidSysRootPath = (ClangDir + "/../sysroot").str();
if (getVFS().exists(AndroidSysRootPath))
return AndroidSysRootPath;
}
if (!GCCInstallation.isValid() || !getTriple().isMIPS())
return std::string();
// Standalone MIPS toolchains use different names for the sysroot folder
// and put it in different places. Here we try to check some known
// variants.
const StringRef InstallDir = GCCInstallation.getInstallPath();
const StringRef TripleStr = GCCInstallation.getTriple().str();
const Multilib &Multilib = GCCInstallation.getMultilib();
std::string Path =
(InstallDir + "/../../../../" + TripleStr + "/libc" + Multilib.osSuffix())
.str();
if (getVFS().exists(Path))
return Path;
Path = (InstallDir + "/../../../../sysroot" + Multilib.osSuffix()).str();
if (getVFS().exists(Path))
return Path;
return std::string();
}
std::string Linux::getDynamicLinker(const ArgList &Args) const {
const llvm::Triple::ArchType Arch = getArch();
const llvm::Triple &Triple = getTriple();
const Distro Distro(getDriver().getVFS());
if (Triple.isAndroid())
return Triple.isArch64Bit() ? "/system/bin/linker64" : "/system/bin/linker";
if (Triple.isMusl()) {
std::string ArchName;
bool IsArm = false;
switch (Arch) {
case llvm::Triple::arm:
case llvm::Triple::thumb:
ArchName = "arm";
IsArm = true;
break;
case llvm::Triple::armeb:
case llvm::Triple::thumbeb:
ArchName = "armeb";
IsArm = true;
break;
default:
ArchName = Triple.getArchName().str();
}
if (IsArm &&
(Triple.getEnvironment() == llvm::Triple::MuslEABIHF ||
tools::arm::getARMFloatABI(*this, Args) == tools::arm::FloatABI::Hard))
ArchName += "hf";
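// e.g. a hard-float ARM musl target resolves to /lib/ld-musl-armhf.so.1.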
return "/lib/ld-musl-" + ArchName + ".so.1";
}
std::string LibDir;
std::string Loader;
switch (Arch) {
default:
llvm_unreachable("unsupported architecture");
case llvm::Triple::aarch64:
LibDir = "lib";
Loader = "ld-linux-aarch64.so.1";
break;
case llvm::Triple::aarch64_be:
LibDir = "lib";
Loader = "ld-linux-aarch64_be.so.1";
break;
case llvm::Triple::arm:
case llvm::Triple::thumb:
case llvm::Triple::armeb:
case llvm::Triple::thumbeb: {
const bool HF =
Triple.getEnvironment() == llvm::Triple::GNUEABIHF ||
tools::arm::getARMFloatABI(*this, Args) == tools::arm::FloatABI::Hard;
LibDir = "lib";
Loader = HF ? "ld-linux-armhf.so.3" : "ld-linux.so.3";
break;
}
case llvm::Triple::mips:
case llvm::Triple::mipsel:
case llvm::Triple::mips64:
case llvm::Triple::mips64el: {
bool IsNaN2008 = tools::mips::isNaN2008(Args, Triple);
LibDir = "lib" + tools::mips::getMipsABILibSuffix(Args, Triple);
if (tools::mips::isUCLibc(Args))
Loader = IsNaN2008 ? "ld-uClibc-mipsn8.so.0" : "ld-uClibc.so.0";
else if (!Triple.hasEnvironment() &&
Triple.getVendor() == llvm::Triple::VendorType::MipsTechnologies)
Loader =
Triple.isLittleEndian() ? "ld-musl-mipsel.so.1" : "ld-musl-mips.so.1";
else
Loader = IsNaN2008 ? "ld-linux-mipsn8.so.1" : "ld.so.1";
break;
}
case llvm::Triple::ppc:
LibDir = "lib";
Loader = "ld.so.1";
break;
case llvm::Triple::ppc64:
LibDir = "lib64";
Loader =
(tools::ppc::hasPPCAbiArg(Args, "elfv2")) ? "ld64.so.2" : "ld64.so.1";
break;
case llvm::Triple::ppc64le:
LibDir = "lib64";
Loader =
(tools::ppc::hasPPCAbiArg(Args, "elfv1")) ? "ld64.so.1" : "ld64.so.2";
break;
case llvm::Triple::riscv32: {
StringRef ABIName = tools::riscv::getRISCVABI(Args, Triple);
LibDir = "lib";
Loader = ("ld-linux-riscv32-" + ABIName + ".so.1").str();
break;
}
case llvm::Triple::riscv64: {
StringRef ABIName = tools::riscv::getRISCVABI(Args, Triple);
LibDir = "lib";
Loader = ("ld-linux-riscv64-" + ABIName + ".so.1").str();
break;
}
case llvm::Triple::sparc:
case llvm::Triple::sparcel:
LibDir = "lib";
Loader = "ld-linux.so.2";
break;
case llvm::Triple::sparcv9:
LibDir = "lib64";
Loader = "ld-linux.so.2";
break;
case llvm::Triple::systemz:
LibDir = "lib";
Loader = "ld64.so.1";
break;
case llvm::Triple::x86:
LibDir = "lib";
Loader = "ld-linux.so.2";
break;
case llvm::Triple::x86_64: {
bool X32 = Triple.getEnvironment() == llvm::Triple::GNUX32;
LibDir = X32 ? "libx32" : "lib64";
Loader = X32 ? "ld-linux-x32.so.2" : "ld-linux-x86-64.so.2";
break;
}
}
if (Distro == Distro::Exherbo &&
(Triple.getVendor() == llvm::Triple::UnknownVendor ||
Triple.getVendor() == llvm::Triple::PC))
return "/usr/" + Triple.str() + "/lib/" + Loader;
return "/" + LibDir + "/" + Loader;
}
void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
const Driver &D = getDriver();
std::string SysRoot = computeSysRoot();
if (DriverArgs.hasArg(clang::driver::options::OPT_nostdinc))
return;
if (!DriverArgs.hasArg(options::OPT_nostdlibinc))
addSystemInclude(DriverArgs, CC1Args, SysRoot + "/usr/local/include");
- if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
- SmallString<128> P(D.ResourceDir);
- llvm::sys::path::append(P, "include");
- addSystemInclude(DriverArgs, CC1Args, P);
- }
+ SmallString<128> ResourceDirInclude(D.ResourceDir);
+ llvm::sys::path::append(ResourceDirInclude, "include");
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc) &&
+ (!getTriple().isMusl() || DriverArgs.hasArg(options::OPT_nostdlibinc)))
+ addSystemInclude(DriverArgs, CC1Args, ResourceDirInclude);
if (DriverArgs.hasArg(options::OPT_nostdlibinc))
return;
// Check for configure-time C include directories.
StringRef CIncludeDirs(C_INCLUDE_DIRS);
if (CIncludeDirs != "") {
SmallVector<StringRef, 5> dirs;
CIncludeDirs.split(dirs, ":");
for (StringRef dir : dirs) {
StringRef Prefix =
llvm::sys::path::is_absolute(dir) ? StringRef(SysRoot) : "";
addExternCSystemInclude(DriverArgs, CC1Args, Prefix + dir);
}
return;
}
// Lacking those, try to detect the correct set of system includes for the
// target triple.
// Add include directories specific to the selected multilib set and multilib.
if (GCCInstallation.isValid()) {
const auto &Callback = Multilibs.includeDirsCallback();
if (Callback) {
for (const auto &Path : Callback(GCCInstallation.getMultilib()))
addExternCSystemIncludeIfExists(
DriverArgs, CC1Args, GCCInstallation.getInstallPath() + Path);
}
}
// Implement generic Debian multiarch support.
const StringRef X86_64MultiarchIncludeDirs[] = {
"/usr/include/x86_64-linux-gnu",
// FIXME: These are older forms of multiarch. It's not clear that they're
// in use in any released version of Debian, so we should consider
// removing them.
"/usr/include/i686-linux-gnu/64", "/usr/include/i486-linux-gnu/64"};
const StringRef X86MultiarchIncludeDirs[] = {
"/usr/include/i386-linux-gnu",
// FIXME: These are older forms of multiarch. It's not clear that they're
// in use in any released version of Debian, so we should consider
// removing them.
"/usr/include/x86_64-linux-gnu/32", "/usr/include/i686-linux-gnu",
"/usr/include/i486-linux-gnu"};
const StringRef AArch64MultiarchIncludeDirs[] = {
"/usr/include/aarch64-linux-gnu"};
const StringRef ARMMultiarchIncludeDirs[] = {
"/usr/include/arm-linux-gnueabi"};
const StringRef ARMHFMultiarchIncludeDirs[] = {
"/usr/include/arm-linux-gnueabihf"};
const StringRef ARMEBMultiarchIncludeDirs[] = {
"/usr/include/armeb-linux-gnueabi"};
const StringRef ARMEBHFMultiarchIncludeDirs[] = {
"/usr/include/armeb-linux-gnueabihf"};
const StringRef MIPSMultiarchIncludeDirs[] = {"/usr/include/mips-linux-gnu"};
const StringRef MIPSELMultiarchIncludeDirs[] = {
"/usr/include/mipsel-linux-gnu"};
const StringRef MIPS64MultiarchIncludeDirs[] = {
"/usr/include/mips64-linux-gnuabi64"};
const StringRef MIPS64ELMultiarchIncludeDirs[] = {
"/usr/include/mips64el-linux-gnuabi64"};
const StringRef MIPSN32MultiarchIncludeDirs[] = {
"/usr/include/mips64-linux-gnuabin32"};
const StringRef MIPSN32ELMultiarchIncludeDirs[] = {
"/usr/include/mips64el-linux-gnuabin32"};
const StringRef MIPSR6MultiarchIncludeDirs[] = {
"/usr/include/mipsisa32-linux-gnu"};
const StringRef MIPSR6ELMultiarchIncludeDirs[] = {
"/usr/include/mipsisa32r6el-linux-gnu"};
const StringRef MIPS64R6MultiarchIncludeDirs[] = {
"/usr/include/mipsisa64r6-linux-gnuabi64"};
const StringRef MIPS64R6ELMultiarchIncludeDirs[] = {
"/usr/include/mipsisa64r6el-linux-gnuabi64"};
const StringRef MIPSN32R6MultiarchIncludeDirs[] = {
"/usr/include/mipsisa64r6-linux-gnuabin32"};
const StringRef MIPSN32R6ELMultiarchIncludeDirs[] = {
"/usr/include/mipsisa64r6el-linux-gnuabin32"};
const StringRef PPCMultiarchIncludeDirs[] = {
"/usr/include/powerpc-linux-gnu",
"/usr/include/powerpc-linux-gnuspe"};
const StringRef PPC64MultiarchIncludeDirs[] = {
"/usr/include/powerpc64-linux-gnu"};
const StringRef PPC64LEMultiarchIncludeDirs[] = {
"/usr/include/powerpc64le-linux-gnu"};
const StringRef SparcMultiarchIncludeDirs[] = {
"/usr/include/sparc-linux-gnu"};
const StringRef Sparc64MultiarchIncludeDirs[] = {
"/usr/include/sparc64-linux-gnu"};
const StringRef SYSTEMZMultiarchIncludeDirs[] = {
"/usr/include/s390x-linux-gnu"};
ArrayRef<StringRef> MultiarchIncludeDirs;
switch (getTriple().getArch()) {
case llvm::Triple::x86_64:
MultiarchIncludeDirs = X86_64MultiarchIncludeDirs;
break;
case llvm::Triple::x86:
MultiarchIncludeDirs = X86MultiarchIncludeDirs;
break;
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_be:
MultiarchIncludeDirs = AArch64MultiarchIncludeDirs;
break;
case llvm::Triple::arm:
case llvm::Triple::thumb:
if (getTriple().getEnvironment() == llvm::Triple::GNUEABIHF)
MultiarchIncludeDirs = ARMHFMultiarchIncludeDirs;
else
MultiarchIncludeDirs = ARMMultiarchIncludeDirs;
break;
case llvm::Triple::armeb:
case llvm::Triple::thumbeb:
if (getTriple().getEnvironment() == llvm::Triple::GNUEABIHF)
MultiarchIncludeDirs = ARMEBHFMultiarchIncludeDirs;
else
MultiarchIncludeDirs = ARMEBMultiarchIncludeDirs;
break;
case llvm::Triple::mips:
if (getTriple().getSubArch() == llvm::Triple::MipsSubArch_r6)
MultiarchIncludeDirs = MIPSR6MultiarchIncludeDirs;
else
MultiarchIncludeDirs = MIPSMultiarchIncludeDirs;
break;
case llvm::Triple::mipsel:
if (getTriple().getSubArch() == llvm::Triple::MipsSubArch_r6)
MultiarchIncludeDirs = MIPSR6ELMultiarchIncludeDirs;
else
MultiarchIncludeDirs = MIPSELMultiarchIncludeDirs;
break;
case llvm::Triple::mips64:
if (getTriple().getSubArch() == llvm::Triple::MipsSubArch_r6)
if (getTriple().getEnvironment() == llvm::Triple::GNUABIN32)
MultiarchIncludeDirs = MIPSN32R6MultiarchIncludeDirs;
else
MultiarchIncludeDirs = MIPS64R6MultiarchIncludeDirs;
else if (getTriple().getEnvironment() == llvm::Triple::GNUABIN32)
MultiarchIncludeDirs = MIPSN32MultiarchIncludeDirs;
else
MultiarchIncludeDirs = MIPS64MultiarchIncludeDirs;
break;
case llvm::Triple::mips64el:
if (getTriple().getSubArch() == llvm::Triple::MipsSubArch_r6)
if (getTriple().getEnvironment() == llvm::Triple::GNUABIN32)
MultiarchIncludeDirs = MIPSN32R6ELMultiarchIncludeDirs;
else
MultiarchIncludeDirs = MIPS64R6ELMultiarchIncludeDirs;
else if (getTriple().getEnvironment() == llvm::Triple::GNUABIN32)
MultiarchIncludeDirs = MIPSN32ELMultiarchIncludeDirs;
else
MultiarchIncludeDirs = MIPS64ELMultiarchIncludeDirs;
break;
case llvm::Triple::ppc:
MultiarchIncludeDirs = PPCMultiarchIncludeDirs;
break;
case llvm::Triple::ppc64:
MultiarchIncludeDirs = PPC64MultiarchIncludeDirs;
break;
case llvm::Triple::ppc64le:
MultiarchIncludeDirs = PPC64LEMultiarchIncludeDirs;
break;
case llvm::Triple::sparc:
MultiarchIncludeDirs = SparcMultiarchIncludeDirs;
break;
case llvm::Triple::sparcv9:
MultiarchIncludeDirs = Sparc64MultiarchIncludeDirs;
break;
case llvm::Triple::systemz:
MultiarchIncludeDirs = SYSTEMZMultiarchIncludeDirs;
break;
default:
break;
}
const std::string AndroidMultiarchIncludeDir =
std::string("/usr/include/") +
getMultiarchTriple(D, getTriple(), SysRoot);
const StringRef AndroidMultiarchIncludeDirs[] = {AndroidMultiarchIncludeDir};
if (getTriple().isAndroid())
MultiarchIncludeDirs = AndroidMultiarchIncludeDirs;
for (StringRef Dir : MultiarchIncludeDirs) {
if (D.getVFS().exists(SysRoot + Dir)) {
addExternCSystemInclude(DriverArgs, CC1Args, SysRoot + Dir);
break;
}
}
if (getTriple().getOS() == llvm::Triple::RTEMS)
return;
// Add an include of '/include' directly. This isn't provided by default by
// system GCCs, but is often used with cross-compiling GCCs, and harmless to
// add even when Clang is acting as-if it were a system compiler.
addExternCSystemInclude(DriverArgs, CC1Args, SysRoot + "/include");
addExternCSystemInclude(DriverArgs, CC1Args, SysRoot + "/usr/include");
+
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc) && getTriple().isMusl())
+ addSystemInclude(DriverArgs, CC1Args, ResourceDirInclude);
}
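// Scan 'base' for versioned libc++ include directories named "v<N>" (e.g.
// "v1") and return the path with the highest version number, or an empty
// string if none is found.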
static std::string DetectLibcxxIncludePath(llvm::vfs::FileSystem &vfs,
StringRef base) {
std::error_code EC;
int MaxVersion = 0;
std::string MaxVersionString = "";
for (llvm::vfs::directory_iterator LI = vfs.dir_begin(base, EC), LE;
!EC && LI != LE; LI = LI.increment(EC)) {
StringRef VersionText = llvm::sys::path::filename(LI->path());
int Version;
if (VersionText[0] == 'v' &&
!VersionText.slice(1, StringRef::npos).getAsInteger(10, Version)) {
if (Version > MaxVersion) {
MaxVersion = Version;
MaxVersionString = VersionText;
}
}
}
return MaxVersion ? (base + "/" + MaxVersionString).str() : "";
}
void Linux::addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const {
const std::string& SysRoot = computeSysRoot();
const std::string LibCXXIncludePathCandidates[] = {
DetectLibcxxIncludePath(getVFS(), getDriver().Dir + "/../include/c++"),
// If this is a development (non-installed) clang, libcxx will
// not be found at ../include/c++, but it is likely to be found at
// one of the following two locations:
DetectLibcxxIncludePath(getVFS(), SysRoot + "/usr/local/include/c++"),
DetectLibcxxIncludePath(getVFS(), SysRoot + "/usr/include/c++") };
for (const auto &IncludePath : LibCXXIncludePathCandidates) {
if (IncludePath.empty() || !getVFS().exists(IncludePath))
continue;
// Use the first candidate that exists.
addSystemInclude(DriverArgs, CC1Args, IncludePath);
return;
}
}
void Linux::addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const {
// We need a detected GCC installation on Linux to provide libstdc++'s
// headers.
if (!GCCInstallation.isValid())
return;
// By default, look for the C++ headers in an include directory adjacent to
// the lib directory of the GCC installation. Note that this is expected to be
// equivalent to '/usr/include/c++/X.Y' in almost all cases.
StringRef LibDir = GCCInstallation.getParentLibPath();
StringRef InstallDir = GCCInstallation.getInstallPath();
StringRef TripleStr = GCCInstallation.getTriple().str();
const Multilib &Multilib = GCCInstallation.getMultilib();
const std::string GCCMultiarchTriple = getMultiarchTriple(
getDriver(), GCCInstallation.getTriple(), getDriver().SysRoot);
const std::string TargetMultiarchTriple =
getMultiarchTriple(getDriver(), getTriple(), getDriver().SysRoot);
const GCCVersion &Version = GCCInstallation.getVersion();
// The primary search for libstdc++ supports multiarch variants.
if (addLibStdCXXIncludePaths(LibDir.str() + "/../include",
"/c++/" + Version.Text, TripleStr,
GCCMultiarchTriple, TargetMultiarchTriple,
Multilib.includeSuffix(), DriverArgs, CC1Args))
return;
// Otherwise, fall back on a bunch of options which don't use multiarch
// layouts for simplicity.
const std::string LibStdCXXIncludePathCandidates[] = {
// Gentoo is weird and places its headers inside the GCC install,
// so if the first attempt to find the headers fails, try these patterns.
InstallDir.str() + "/include/g++-v" + Version.Text,
InstallDir.str() + "/include/g++-v" + Version.MajorStr + "." +
Version.MinorStr,
InstallDir.str() + "/include/g++-v" + Version.MajorStr,
// Android standalone toolchain has C++ headers in yet another place.
LibDir.str() + "/../" + TripleStr.str() + "/include/c++/" + Version.Text,
// Freescale SDK C++ headers are directly in <sysroot>/usr/include/c++,
// without a subdirectory corresponding to the gcc version.
LibDir.str() + "/../include/c++",
// Cray's gcc installation puts headers under "g++" without a
// version suffix.
LibDir.str() + "/../include/g++",
};
for (const auto &IncludePath : LibStdCXXIncludePathCandidates) {
if (addLibStdCXXIncludePaths(IncludePath, /*Suffix*/ "", TripleStr,
/*GCCMultiarchTriple*/ "",
/*TargetMultiarchTriple*/ "",
Multilib.includeSuffix(), DriverArgs, CC1Args))
break;
}
}
void Linux::AddCudaIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
CudaInstallation.AddCudaIncludeArgs(DriverArgs, CC1Args);
}
void Linux::AddIAMCUIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
if (GCCInstallation.isValid()) {
CC1Args.push_back("-isystem");
CC1Args.push_back(DriverArgs.MakeArgString(
GCCInstallation.getParentLibPath() + "/../" +
GCCInstallation.getTriple().str() + "/include"));
}
}
bool Linux::isPIEDefault() const {
return (getTriple().isAndroid() && !getTriple().isAndroidVersionLT(16)) ||
getTriple().isMusl() || getSanitizerArgs().requiresPIE();
}
bool Linux::isNoExecStackDefault() const {
return getTriple().isAndroid();
}
bool Linux::IsMathErrnoDefault() const {
if (getTriple().isAndroid())
return false;
return Generic_ELF::IsMathErrnoDefault();
}
SanitizerMask Linux::getSupportedSanitizers() const {
const bool IsX86 = getTriple().getArch() == llvm::Triple::x86;
const bool IsX86_64 = getTriple().getArch() == llvm::Triple::x86_64;
const bool IsMIPS = getTriple().isMIPS32();
const bool IsMIPS64 = getTriple().isMIPS64();
const bool IsPowerPC64 = getTriple().getArch() == llvm::Triple::ppc64 ||
getTriple().getArch() == llvm::Triple::ppc64le;
const bool IsAArch64 = getTriple().getArch() == llvm::Triple::aarch64 ||
getTriple().getArch() == llvm::Triple::aarch64_be;
const bool IsArmArch = getTriple().getArch() == llvm::Triple::arm ||
getTriple().getArch() == llvm::Triple::thumb ||
getTriple().getArch() == llvm::Triple::armeb ||
getTriple().getArch() == llvm::Triple::thumbeb;
SanitizerMask Res = ToolChain::getSupportedSanitizers();
Res |= SanitizerKind::Address;
Res |= SanitizerKind::PointerCompare;
Res |= SanitizerKind::PointerSubtract;
Res |= SanitizerKind::Fuzzer;
Res |= SanitizerKind::FuzzerNoLink;
Res |= SanitizerKind::KernelAddress;
Res |= SanitizerKind::Memory;
Res |= SanitizerKind::Vptr;
Res |= SanitizerKind::SafeStack;
if (IsX86_64 || IsMIPS64 || IsAArch64)
Res |= SanitizerKind::DataFlow;
if (IsX86_64 || IsMIPS64 || IsAArch64 || IsX86 || IsArmArch || IsPowerPC64)
Res |= SanitizerKind::Leak;
if (IsX86_64 || IsMIPS64 || IsAArch64 || IsPowerPC64)
Res |= SanitizerKind::Thread;
if (IsX86_64)
Res |= SanitizerKind::KernelMemory;
if (IsX86 || IsX86_64)
Res |= SanitizerKind::Function;
if (IsX86_64 || IsMIPS64 || IsAArch64 || IsX86 || IsMIPS || IsArmArch ||
IsPowerPC64)
Res |= SanitizerKind::Scudo;
if (IsX86_64 || IsAArch64) {
Res |= SanitizerKind::HWAddress;
Res |= SanitizerKind::KernelHWAddress;
}
if (IsAArch64)
Res |= SanitizerKind::MemTag;
return Res;
}
void Linux::addProfileRTLibs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const {
if (!needsProfileRT(Args)) return;
// Add linker option -u__llvm_runtime_variable to cause runtime
// initialization module to be linked in.
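// (concretely, -u__llvm_profile_runtime, the hook variable defined by the
// profiling runtime).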
if ((!Args.hasArg(options::OPT_coverage)) &&
(!Args.hasArg(options::OPT_ftest_coverage)))
CmdArgs.push_back(Args.MakeArgString(
Twine("-u", llvm::getInstrProfRuntimeHookVarName())));
ToolChain::addProfileRTLibs(Args, CmdArgs);
}
Index: stable/12/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp
===================================================================
--- stable/12/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp (revision 356464)
+++ stable/12/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp (revision 356465)
@@ -1,17447 +1,17452 @@
//===--- SemaDecl.cpp - Semantic Analysis for Declarations ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements semantic analysis for declarations.
//
//===----------------------------------------------------------------------===//
#include "TypeLocBuilder.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/CommentDiagnostic.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/HeaderSearch.h" // TODO: Sema shouldn't depend on Lex
#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
#include "clang/Lex/ModuleLoader.h" // TODO: Sema shouldn't depend on Lex
#include "clang/Lex/Preprocessor.h" // Included for isCodeCompletionEnabled()
#include "clang/Sema/CXXFieldCollector.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/DelayedDiagnostic.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Triple.h"
#include <algorithm>
#include <cstring>
#include <functional>
using namespace clang;
using namespace sema;
Sema::DeclGroupPtrTy Sema::ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType) {
if (OwnedType) {
Decl *Group[2] = { OwnedType, Ptr };
return DeclGroupPtrTy::make(DeclGroupRef::Create(Context, Group, 2));
}
return DeclGroupPtrTy::make(DeclGroupRef(Ptr));
}
namespace {
class TypeNameValidatorCCC final : public CorrectionCandidateCallback {
public:
TypeNameValidatorCCC(bool AllowInvalid, bool WantClass = false,
bool AllowTemplates = false,
bool AllowNonTemplates = true)
: AllowInvalidDecl(AllowInvalid), WantClassName(WantClass),
AllowTemplates(AllowTemplates), AllowNonTemplates(AllowNonTemplates) {
WantExpressionKeywords = false;
WantCXXNamedCasts = false;
WantRemainingKeywords = false;
}
bool ValidateCandidate(const TypoCorrection &candidate) override {
if (NamedDecl *ND = candidate.getCorrectionDecl()) {
if (!AllowInvalidDecl && ND->isInvalidDecl())
return false;
if (getAsTypeTemplateDecl(ND))
return AllowTemplates;
bool IsType = isa<TypeDecl>(ND) || isa<ObjCInterfaceDecl>(ND);
if (!IsType)
return false;
if (AllowNonTemplates)
return true;
// An injected-class-name of a class template (specialization) is valid
// as a template or as a non-template.
if (AllowTemplates) {
auto *RD = dyn_cast<CXXRecordDecl>(ND);
if (!RD || !RD->isInjectedClassName())
return false;
RD = cast<CXXRecordDecl>(RD->getDeclContext());
return RD->getDescribedClassTemplate() ||
isa<ClassTemplateSpecializationDecl>(RD);
}
return false;
}
return !WantClassName && candidate.isKeyword();
}
std::unique_ptr<CorrectionCandidateCallback> clone() override {
return llvm::make_unique<TypeNameValidatorCCC>(*this);
}
private:
bool AllowInvalidDecl;
bool WantClassName;
bool AllowTemplates;
bool AllowNonTemplates;
};
} // end anonymous namespace
/// Determine whether the token kind starts a simple-type-specifier.
bool Sema::isSimpleTypeSpecifier(tok::TokenKind Kind) const {
switch (Kind) {
// FIXME: Take into account the current language when deciding whether a
// token kind is a valid type specifier
case tok::kw_short:
case tok::kw_long:
case tok::kw___int64:
case tok::kw___int128:
case tok::kw_signed:
case tok::kw_unsigned:
case tok::kw_void:
case tok::kw_char:
case tok::kw_int:
case tok::kw_half:
case tok::kw_float:
case tok::kw_double:
case tok::kw__Float16:
case tok::kw___float128:
case tok::kw_wchar_t:
case tok::kw_bool:
case tok::kw___underlying_type:
case tok::kw___auto_type:
return true;
case tok::annot_typename:
case tok::kw_char16_t:
case tok::kw_char32_t:
case tok::kw_typeof:
case tok::annot_decltype:
case tok::kw_decltype:
return getLangOpts().CPlusPlus;
case tok::kw_char8_t:
return getLangOpts().Char8;
default:
break;
}
return false;
}
namespace {
enum class UnqualifiedTypeNameLookupResult {
NotFound,
FoundNonType,
FoundType
};
} // end anonymous namespace
/// Tries to perform unqualified lookup of the type decls in bases for
/// dependent class.
/// \return \a NotFound if no decls are found, \a FoundNonType if a non-type
/// decl is found, \a FoundType if only type decls are found.
static UnqualifiedTypeNameLookupResult
lookupUnqualifiedTypeNameInBase(Sema &S, const IdentifierInfo &II,
SourceLocation NameLoc,
const CXXRecordDecl *RD) {
if (!RD->hasDefinition())
return UnqualifiedTypeNameLookupResult::NotFound;
// Look for type decls in base classes.
UnqualifiedTypeNameLookupResult FoundTypeDecl =
UnqualifiedTypeNameLookupResult::NotFound;
for (const auto &Base : RD->bases()) {
const CXXRecordDecl *BaseRD = nullptr;
if (auto *BaseTT = Base.getType()->getAs<TagType>())
BaseRD = BaseTT->getAsCXXRecordDecl();
else if (auto *TST = Base.getType()->getAs<TemplateSpecializationType>()) {
// Look for type decls in dependent base classes that have known primary
// templates.
if (!TST || !TST->isDependentType())
continue;
auto *TD = TST->getTemplateName().getAsTemplateDecl();
if (!TD)
continue;
if (auto *BasePrimaryTemplate =
dyn_cast_or_null<CXXRecordDecl>(TD->getTemplatedDecl())) {
if (BasePrimaryTemplate->getCanonicalDecl() != RD->getCanonicalDecl())
BaseRD = BasePrimaryTemplate;
else if (auto *CTD = dyn_cast<ClassTemplateDecl>(TD)) {
if (const ClassTemplatePartialSpecializationDecl *PS =
CTD->findPartialSpecialization(Base.getType()))
if (PS->getCanonicalDecl() != RD->getCanonicalDecl())
BaseRD = PS;
}
}
}
if (BaseRD) {
for (NamedDecl *ND : BaseRD->lookup(&II)) {
if (!isa<TypeDecl>(ND))
return UnqualifiedTypeNameLookupResult::FoundNonType;
FoundTypeDecl = UnqualifiedTypeNameLookupResult::FoundType;
}
if (FoundTypeDecl == UnqualifiedTypeNameLookupResult::NotFound) {
switch (lookupUnqualifiedTypeNameInBase(S, II, NameLoc, BaseRD)) {
case UnqualifiedTypeNameLookupResult::FoundNonType:
return UnqualifiedTypeNameLookupResult::FoundNonType;
case UnqualifiedTypeNameLookupResult::FoundType:
FoundTypeDecl = UnqualifiedTypeNameLookupResult::FoundType;
break;
case UnqualifiedTypeNameLookupResult::NotFound:
break;
}
}
}
}
return FoundTypeDecl;
}
static ParsedType recoverFromTypeInKnownDependentBase(Sema &S,
const IdentifierInfo &II,
SourceLocation NameLoc) {
// Lookup in the parent class template context, if any.
const CXXRecordDecl *RD = nullptr;
UnqualifiedTypeNameLookupResult FoundTypeDecl =
UnqualifiedTypeNameLookupResult::NotFound;
for (DeclContext *DC = S.CurContext;
DC && FoundTypeDecl == UnqualifiedTypeNameLookupResult::NotFound;
DC = DC->getParent()) {
// Look for type decls in dependent base classes that have known primary
// templates.
RD = dyn_cast<CXXRecordDecl>(DC);
if (RD && RD->getDescribedClassTemplate())
FoundTypeDecl = lookupUnqualifiedTypeNameInBase(S, II, NameLoc, RD);
}
if (FoundTypeDecl != UnqualifiedTypeNameLookupResult::FoundType)
return nullptr;
// We found some types in dependent base classes. Recover as if the user
// wrote 'typename MyClass::II' instead of 'II'. We'll fully resolve the
// lookup during template instantiation.
S.Diag(NameLoc, diag::ext_found_via_dependent_bases_lookup) << &II;
ASTContext &Context = S.Context;
auto *NNS = NestedNameSpecifier::Create(Context, nullptr, false,
cast<Type>(Context.getRecordType(RD)));
QualType T = Context.getDependentNameType(ETK_Typename, NNS, &II);
CXXScopeSpec SS;
SS.MakeTrivial(Context, NNS, SourceRange(NameLoc));
TypeLocBuilder Builder;
DependentNameTypeLoc DepTL = Builder.push<DependentNameTypeLoc>(T);
DepTL.setNameLoc(NameLoc);
DepTL.setElaboratedKeywordLoc(SourceLocation());
DepTL.setQualifierLoc(SS.getWithLocInContext(Context));
return S.CreateParsedType(T, Builder.getTypeSourceInfo(Context, T));
}
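// An illustrative source pattern for the recovery above (assumes
// -fms-compatibility; not from this file):
//
//   template <typename T> struct Base { typedef int type; };
//   template <typename T> struct Derived : Base<T> {
//     type member; // unqualified lookup of 'type' fails, but Base<T> has a
//                  // known primary template, so we recover as if the user
//                  // wrote 'typename Derived::type'.
//   };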
/// If the identifier refers to a type name within this scope,
/// return the declaration of that type.
///
/// This routine performs ordinary name lookup of the identifier II
/// within the given scope, with optional C++ scope specifier SS, to
/// determine whether the name refers to a type. If so, returns an
/// opaque pointer (actually a QualType) corresponding to that
/// type. Otherwise, returns NULL.
ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS,
bool isClassName, bool HasTrailingDot,
ParsedType ObjectTypePtr,
bool IsCtorOrDtorName,
bool WantNontrivialTypeSourceInfo,
bool IsClassTemplateDeductionContext,
IdentifierInfo **CorrectedII) {
// FIXME: Consider allowing this outside C++1z mode as an extension.
bool AllowDeducedTemplate = IsClassTemplateDeductionContext &&
getLangOpts().CPlusPlus17 && !IsCtorOrDtorName &&
!isClassName && !HasTrailingDot;
// Determine where we will perform name lookup.
DeclContext *LookupCtx = nullptr;
if (ObjectTypePtr) {
QualType ObjectType = ObjectTypePtr.get();
if (ObjectType->isRecordType())
LookupCtx = computeDeclContext(ObjectType);
} else if (SS && SS->isNotEmpty()) {
LookupCtx = computeDeclContext(*SS, false);
if (!LookupCtx) {
if (isDependentScopeSpecifier(*SS)) {
// C++ [temp.res]p3:
// A qualified-id that refers to a type and in which the
// nested-name-specifier depends on a template-parameter (14.6.2)
// shall be prefixed by the keyword typename to indicate that the
// qualified-id denotes a type, forming an
// elaborated-type-specifier (7.1.5.3).
//
// We therefore do not perform any name lookup if the result would
// refer to a member of an unknown specialization.
if (!isClassName && !IsCtorOrDtorName)
return nullptr;
// We know from the grammar that this name refers to a type,
// so build a dependent node to describe the type.
if (WantNontrivialTypeSourceInfo)
return ActOnTypenameType(S, SourceLocation(), *SS, II, NameLoc).get();
NestedNameSpecifierLoc QualifierLoc = SS->getWithLocInContext(Context);
QualType T = CheckTypenameType(ETK_None, SourceLocation(), QualifierLoc,
II, NameLoc);
return ParsedType::make(T);
}
return nullptr;
}
if (!LookupCtx->isDependentContext() &&
RequireCompleteDeclContext(*SS, LookupCtx))
return nullptr;
}
// FIXME: LookupNestedNameSpecifierName isn't the right kind of
// lookup for class-names.
LookupNameKind Kind = isClassName ? LookupNestedNameSpecifierName :
LookupOrdinaryName;
LookupResult Result(*this, &II, NameLoc, Kind);
if (LookupCtx) {
// Perform "qualified" name lookup into the declaration context we
// computed, which is either the type of the base of a member access
// expression or the declaration context associated with a prior
// nested-name-specifier.
LookupQualifiedName(Result, LookupCtx);
if (ObjectTypePtr && Result.empty()) {
// C++ [basic.lookup.classref]p3:
// If the unqualified-id is ~type-name, the type-name is looked up
// in the context of the entire postfix-expression. If the type T of
// the object expression is of a class type C, the type-name is also
// looked up in the scope of class C. At least one of the lookups shall
// find a name that refers to (possibly cv-qualified) T.
LookupName(Result, S);
}
} else {
// Perform unqualified name lookup.
LookupName(Result, S);
// For unqualified lookup in a class template in MSVC mode, look into
// dependent base classes where the primary class template is known.
if (Result.empty() && getLangOpts().MSVCCompat && (!SS || SS->isEmpty())) {
if (ParsedType TypeInBase =
recoverFromTypeInKnownDependentBase(*this, II, NameLoc))
return TypeInBase;
}
}
NamedDecl *IIDecl = nullptr;
switch (Result.getResultKind()) {
case LookupResult::NotFound:
case LookupResult::NotFoundInCurrentInstantiation:
if (CorrectedII) {
TypeNameValidatorCCC CCC(/*AllowInvalid=*/true, isClassName,
AllowDeducedTemplate);
TypoCorrection Correction = CorrectTypo(Result.getLookupNameInfo(), Kind,
S, SS, CCC, CTK_ErrorRecovery);
IdentifierInfo *NewII = Correction.getCorrectionAsIdentifierInfo();
TemplateTy Template;
bool MemberOfUnknownSpecialization;
UnqualifiedId TemplateName;
TemplateName.setIdentifier(NewII, NameLoc);
NestedNameSpecifier *NNS = Correction.getCorrectionSpecifier();
CXXScopeSpec NewSS, *NewSSPtr = SS;
if (SS && NNS) {
NewSS.MakeTrivial(Context, NNS, SourceRange(NameLoc));
NewSSPtr = &NewSS;
}
if (Correction && (NNS || NewII != &II) &&
// Ignore a correction to a template type as the to-be-corrected
// identifier is not a template (typo correction for template names
// is handled elsewhere).
!(getLangOpts().CPlusPlus && NewSSPtr &&
isTemplateName(S, *NewSSPtr, false, TemplateName, nullptr, false,
Template, MemberOfUnknownSpecialization))) {
ParsedType Ty = getTypeName(*NewII, NameLoc, S, NewSSPtr,
isClassName, HasTrailingDot, ObjectTypePtr,
IsCtorOrDtorName,
WantNontrivialTypeSourceInfo,
IsClassTemplateDeductionContext);
if (Ty) {
diagnoseTypo(Correction,
PDiag(diag::err_unknown_type_or_class_name_suggest)
<< Result.getLookupName() << isClassName);
if (SS && NNS)
SS->MakeTrivial(Context, NNS, SourceRange(NameLoc));
*CorrectedII = NewII;
return Ty;
}
}
}
// If typo correction failed or was not performed, fall through.
LLVM_FALLTHROUGH;
case LookupResult::FoundOverloaded:
case LookupResult::FoundUnresolvedValue:
Result.suppressDiagnostics();
return nullptr;
case LookupResult::Ambiguous:
// Recover from type-hiding ambiguities by hiding the type. We'll
// do the lookup again when looking for an object, and we can
// diagnose the error then. If we don't do this, then the error
// about hiding the type will be immediately followed by an error
// that only makes sense if the identifier was treated like a type.
if (Result.getAmbiguityKind() == LookupResult::AmbiguousTagHiding) {
Result.suppressDiagnostics();
return nullptr;
}
// Look to see if we have a type anywhere in the list of results.
for (LookupResult::iterator Res = Result.begin(), ResEnd = Result.end();
Res != ResEnd; ++Res) {
if (isa<TypeDecl>(*Res) || isa<ObjCInterfaceDecl>(*Res) ||
(AllowDeducedTemplate && getAsTypeTemplateDecl(*Res))) {
if (!IIDecl ||
(*Res)->getLocation().getRawEncoding() <
IIDecl->getLocation().getRawEncoding())
IIDecl = *Res;
}
}
if (!IIDecl) {
// None of the entities we found is a type, so there is no way
// to even assume that the result is a type. In this case, don't
// complain about the ambiguity. The parser will either try to
// perform this lookup again (e.g., as an object name), which
// will produce the ambiguity, or will complain that it expected
// a type name.
Result.suppressDiagnostics();
return nullptr;
}
// We found a type within the ambiguous lookup; diagnose the
// ambiguity and then return that type. This might be the right
// answer, or it might not be, but it suppresses any attempt to
// perform the name lookup again.
break;
case LookupResult::Found:
IIDecl = Result.getFoundDecl();
break;
}
assert(IIDecl && "Didn't find decl");
QualType T;
if (TypeDecl *TD = dyn_cast<TypeDecl>(IIDecl)) {
// C++ [class.qual]p2: A lookup that would find the injected-class-name
// instead names the constructors of the class, except when naming a class.
// This is ill-formed when we're not actually forming a ctor or dtor name.
auto *LookupRD = dyn_cast_or_null<CXXRecordDecl>(LookupCtx);
auto *FoundRD = dyn_cast<CXXRecordDecl>(TD);
if (!isClassName && !IsCtorOrDtorName && LookupRD && FoundRD &&
FoundRD->isInjectedClassName() &&
declaresSameEntity(LookupRD, cast<Decl>(FoundRD->getParent())))
Diag(NameLoc, diag::err_out_of_line_qualified_id_type_names_constructor)
<< &II << /*Type*/1;
DiagnoseUseOfDecl(IIDecl, NameLoc);
T = Context.getTypeDeclType(TD);
MarkAnyDeclReferenced(TD->getLocation(), TD, /*OdrUse=*/false);
} else if (ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(IIDecl)) {
(void)DiagnoseUseOfDecl(IDecl, NameLoc);
if (!HasTrailingDot)
T = Context.getObjCInterfaceType(IDecl);
} else if (AllowDeducedTemplate) {
if (auto *TD = getAsTypeTemplateDecl(IIDecl))
T = Context.getDeducedTemplateSpecializationType(TemplateName(TD),
QualType(), false);
}
if (T.isNull()) {
// If it's not plausibly a type, suppress diagnostics.
Result.suppressDiagnostics();
return nullptr;
}
// NOTE: avoid constructing an ElaboratedType(Loc) if this is a
// constructor or destructor name (in such a case, the scope specifier
// will be attached to the enclosing Expr or Decl node).
if (SS && SS->isNotEmpty() && !IsCtorOrDtorName &&
!isa<ObjCInterfaceDecl>(IIDecl)) {
if (WantNontrivialTypeSourceInfo) {
// Construct a type with type-source information.
TypeLocBuilder Builder;
Builder.pushTypeSpec(T).setNameLoc(NameLoc);
T = getElaboratedType(ETK_None, *SS, T);
ElaboratedTypeLoc ElabTL = Builder.push<ElaboratedTypeLoc>(T);
ElabTL.setElaboratedKeywordLoc(SourceLocation());
ElabTL.setQualifierLoc(SS->getWithLocInContext(Context));
return CreateParsedType(T, Builder.getTypeSourceInfo(Context, T));
} else {
T = getElaboratedType(ETK_None, *SS, T);
}
}
return ParsedType::make(T);
}
// Builds a fake NNS for the given decl context.
static NestedNameSpecifier *
synthesizeCurrentNestedNameSpecifier(ASTContext &Context, DeclContext *DC) {
for (;; DC = DC->getLookupParent()) {
DC = DC->getPrimaryContext();
auto *ND = dyn_cast<NamespaceDecl>(DC);
if (ND && !ND->isInline() && !ND->isAnonymousNamespace())
return NestedNameSpecifier::Create(Context, nullptr, ND);
else if (auto *RD = dyn_cast<CXXRecordDecl>(DC))
return NestedNameSpecifier::Create(Context, nullptr, RD->isTemplateDecl(),
RD->getTypeForDecl());
else if (isa<TranslationUnitDecl>(DC))
return NestedNameSpecifier::GlobalSpecifier(Context);
}
llvm_unreachable("something isn't in TU scope?");
}
/// Find the parent class with dependent bases of the innermost enclosing method
/// context. Do not look for enclosing CXXRecordDecls directly, or we will end
/// up allowing unqualified dependent type names at class-level, which MSVC
/// correctly rejects.
static const CXXRecordDecl *
findRecordWithDependentBasesOfEnclosingMethod(const DeclContext *DC) {
for (; DC && DC->isDependentContext(); DC = DC->getLookupParent()) {
DC = DC->getPrimaryContext();
if (const auto *MD = dyn_cast<CXXMethodDecl>(DC))
if (MD->getParent()->hasAnyDependentBases())
return MD->getParent();
}
return nullptr;
}
ParsedType Sema::ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg) {
assert(getLangOpts().MSVCCompat && "shouldn't be called in non-MSVC mode");
NestedNameSpecifier *NNS = nullptr;
if (IsTemplateTypeArg && getCurScope()->isTemplateParamScope()) {
// If we weren't able to parse a default template argument, delay lookup
// until instantiation time by making a non-dependent DependentTypeName. We
// pretend we saw a NestedNameSpecifier referring to the current scope, and
// lookup is retried.
// FIXME: This hurts our diagnostic quality, since we get errors like "no
// type named 'Foo' in 'current_namespace'" when the user didn't write any
// name specifiers.
NNS = synthesizeCurrentNestedNameSpecifier(Context, CurContext);
Diag(NameLoc, diag::ext_ms_delayed_template_argument) << &II;
} else if (const CXXRecordDecl *RD =
findRecordWithDependentBasesOfEnclosingMethod(CurContext)) {
// Build a DependentNameType that will perform lookup into RD at
// instantiation time.
NNS = NestedNameSpecifier::Create(Context, nullptr, RD->isTemplateDecl(),
RD->getTypeForDecl());
// Diagnose that this identifier was undeclared, and retry the lookup during
// template instantiation.
Diag(NameLoc, diag::ext_undeclared_unqual_id_with_dependent_base) << &II
<< RD;
} else {
// This is not a situation that we should recover from.
return ParsedType();
}
QualType T = Context.getDependentNameType(ETK_None, NNS, &II);
// Build type location information. We synthesized the qualifier, so we have
// to build a fake NestedNameSpecifierLoc.
NestedNameSpecifierLocBuilder NNSLocBuilder;
NNSLocBuilder.MakeTrivial(Context, NNS, SourceRange(NameLoc));
NestedNameSpecifierLoc QualifierLoc = NNSLocBuilder.getWithLocInContext(Context);
TypeLocBuilder Builder;
DependentNameTypeLoc DepTL = Builder.push<DependentNameTypeLoc>(T);
DepTL.setNameLoc(NameLoc);
DepTL.setElaboratedKeywordLoc(SourceLocation());
DepTL.setQualifierLoc(QualifierLoc);
return CreateParsedType(T, Builder.getTypeSourceInfo(Context, T));
}
/// isTagName() - This method is called *for error recovery purposes only*
/// to determine if the specified name is a valid tag name ("struct foo"). If
/// so, this returns the TST for the tag corresponding to it (TST_enum,
/// TST_union, TST_struct, TST_interface, TST_class). This is used to diagnose
/// cases in C where the user forgot to specify the tag.
DeclSpec::TST Sema::isTagName(IdentifierInfo &II, Scope *S) {
// Do a tag name lookup in this scope.
LookupResult R(*this, &II, SourceLocation(), LookupTagName);
LookupName(R, S, false);
R.suppressDiagnostics();
if (R.getResultKind() == LookupResult::Found)
if (const TagDecl *TD = R.getAsSingle<TagDecl>()) {
switch (TD->getTagKind()) {
case TTK_Struct: return DeclSpec::TST_struct;
case TTK_Interface: return DeclSpec::TST_interface;
case TTK_Union: return DeclSpec::TST_union;
case TTK_Class: return DeclSpec::TST_class;
case TTK_Enum: return DeclSpec::TST_enum;
}
}
return DeclSpec::TST_unspecified;
}
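// Illustrative C input for the recovery above (hypothetical):
//
//   struct foo { int x; };
//   foo *f; // 'foo' alone is not a type in C; isTagName returns TST_struct,
//           // letting the parser diagnose the missing 'struct' keyword.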
/// isMicrosoftMissingTypename - In Microsoft mode, within class scope,
/// if a CXXScopeSpec's type is equal to the type of one of the base classes
/// then downgrade the missing typename error to a warning.
/// This is needed for MSVC compatibility; Example:
/// @code
/// template<class T> class A {
/// public:
/// typedef int TYPE;
/// };
/// template<class T> class B : public A<T> {
/// public:
/// A<T>::TYPE a; // no typename required because A<T> is a base class.
/// };
/// @endcode
bool Sema::isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S) {
if (CurContext->isRecord()) {
if (SS->getScopeRep()->getKind() == NestedNameSpecifier::Super)
return true;
const Type *Ty = SS->getScopeRep()->getAsType();
CXXRecordDecl *RD = cast<CXXRecordDecl>(CurContext);
for (const auto &Base : RD->bases())
if (Ty && Context.hasSameUnqualifiedType(QualType(Ty, 1), Base.getType()))
return true;
return S->isFunctionPrototypeScope();
}
return CurContext->isFunctionOrMethod() || S->isFunctionPrototypeScope();
}
void Sema::DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName) {
// Don't report typename errors for editor placeholders.
if (II->isEditorPlaceholder())
return;
// We don't have anything to suggest (yet).
SuggestedType = nullptr;
// There may have been a typo in the name of the type. Look up typo
// results, in case we have something that we can suggest.
TypeNameValidatorCCC CCC(/*AllowInvalid=*/false, /*WantClass=*/false,
/*AllowTemplates=*/IsTemplateName,
/*AllowNonTemplates=*/!IsTemplateName);
if (TypoCorrection Corrected =
CorrectTypo(DeclarationNameInfo(II, IILoc), LookupOrdinaryName, S, SS,
CCC, CTK_ErrorRecovery)) {
// FIXME: Support error recovery for the template-name case.
bool CanRecover = !IsTemplateName;
if (Corrected.isKeyword()) {
// We corrected to a keyword.
diagnoseTypo(Corrected,
PDiag(IsTemplateName ? diag::err_no_template_suggest
: diag::err_unknown_typename_suggest)
<< II);
II = Corrected.getCorrectionAsIdentifierInfo();
} else {
// We found a similarly-named type or interface; suggest that.
if (!SS || !SS->isSet()) {
diagnoseTypo(Corrected,
PDiag(IsTemplateName ? diag::err_no_template_suggest
: diag::err_unknown_typename_suggest)
<< II, CanRecover);
} else if (DeclContext *DC = computeDeclContext(*SS, false)) {
std::string CorrectedStr(Corrected.getAsString(getLangOpts()));
bool DroppedSpecifier = Corrected.WillReplaceSpecifier() &&
II->getName().equals(CorrectedStr);
diagnoseTypo(Corrected,
PDiag(IsTemplateName
? diag::err_no_member_template_suggest
: diag::err_unknown_nested_typename_suggest)
<< II << DC << DroppedSpecifier << SS->getRange(),
CanRecover);
} else {
llvm_unreachable("could not have corrected a typo here");
}
if (!CanRecover)
return;
CXXScopeSpec tmpSS;
if (Corrected.getCorrectionSpecifier())
tmpSS.MakeTrivial(Context, Corrected.getCorrectionSpecifier(),
SourceRange(IILoc));
// FIXME: Support class template argument deduction here.
SuggestedType =
getTypeName(*Corrected.getCorrectionAsIdentifierInfo(), IILoc, S,
tmpSS.isSet() ? &tmpSS : SS, false, false, nullptr,
/*IsCtorOrDtorName=*/false,
/*WantNontrivialTypeSourceInfo=*/true);
}
return;
}
if (getLangOpts().CPlusPlus && !IsTemplateName) {
// See if II is a class template that the user forgot to pass arguments to.
UnqualifiedId Name;
Name.setIdentifier(II, IILoc);
CXXScopeSpec EmptySS;
TemplateTy TemplateResult;
bool MemberOfUnknownSpecialization;
if (isTemplateName(S, SS ? *SS : EmptySS, /*hasTemplateKeyword=*/false,
Name, nullptr, true, TemplateResult,
MemberOfUnknownSpecialization) == TNK_Type_template) {
diagnoseMissingTemplateArguments(TemplateResult.get(), IILoc);
return;
}
}
// FIXME: Should we move the logic that tries to recover from a missing tag
// (struct, union, enum) from Parser::ParseImplicitInt here, instead?
if (!SS || (!SS->isSet() && !SS->isInvalid()))
Diag(IILoc, IsTemplateName ? diag::err_no_template
: diag::err_unknown_typename)
<< II;
else if (DeclContext *DC = computeDeclContext(*SS, false))
Diag(IILoc, IsTemplateName ? diag::err_no_member_template
: diag::err_typename_nested_not_found)
<< II << DC << SS->getRange();
else if (isDependentScopeSpecifier(*SS)) {
unsigned DiagID = diag::err_typename_missing;
if (getLangOpts().MSVCCompat && isMicrosoftMissingTypename(SS, S))
DiagID = diag::ext_typename_missing;
Diag(SS->getRange().getBegin(), DiagID)
<< SS->getScopeRep() << II->getName()
<< SourceRange(SS->getRange().getBegin(), IILoc)
<< FixItHint::CreateInsertion(SS->getRange().getBegin(), "typename ");
SuggestedType = ActOnTypenameType(S, SourceLocation(),
*SS, *II, IILoc).get();
} else {
assert(SS && SS->isInvalid() &&
"Invalid scope specifier has already been diagnosed");
}
}
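// Two illustrative inputs for the diagnostics above (hypothetical):
//
//   std::vectro<int> v; // typo-corrected: "did you mean 'vector'?", and
//                       // SuggestedType is filled in so parsing continues.
//   vector v;           // (C++) class template used without arguments:
//                       // diagnosed via diagnoseMissingTemplateArguments.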
/// Determine whether the given result set contains either a type name or a
/// template name usable here (i.e., when the next token is '<').
static bool isResultTypeOrTemplate(LookupResult &R, const Token &NextToken) {
bool CheckTemplate = R.getSema().getLangOpts().CPlusPlus &&
NextToken.is(tok::less);
for (LookupResult::iterator I = R.begin(), IEnd = R.end(); I != IEnd; ++I) {
if (isa<TypeDecl>(*I) || isa<ObjCInterfaceDecl>(*I))
return true;
if (CheckTemplate && isa<TemplateDecl>(*I))
return true;
}
return false;
}
static bool isTagTypeWithMissingTag(Sema &SemaRef, LookupResult &Result,
Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name,
SourceLocation NameLoc) {
LookupResult R(SemaRef, Name, NameLoc, Sema::LookupTagName);
SemaRef.LookupParsedName(R, S, &SS);
if (TagDecl *Tag = R.getAsSingle<TagDecl>()) {
StringRef FixItTagName;
switch (Tag->getTagKind()) {
case TTK_Class:
FixItTagName = "class ";
break;
case TTK_Enum:
FixItTagName = "enum ";
break;
case TTK_Struct:
FixItTagName = "struct ";
break;
case TTK_Interface:
FixItTagName = "__interface ";
break;
case TTK_Union:
FixItTagName = "union ";
break;
}
StringRef TagName = FixItTagName.drop_back();
SemaRef.Diag(NameLoc, diag::err_use_of_tag_name_without_tag)
<< Name << TagName << SemaRef.getLangOpts().CPlusPlus
<< FixItHint::CreateInsertion(NameLoc, FixItTagName);
for (LookupResult::iterator I = Result.begin(), IEnd = Result.end();
I != IEnd; ++I)
SemaRef.Diag((*I)->getLocation(), diag::note_decl_hiding_tag_type)
<< Name << TagName;
// Replace lookup results with just the tag decl.
Result.clear(Sema::LookupTagName);
SemaRef.LookupParsedName(Result, S, &SS);
return true;
}
return false;
}
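// The classic case the fix-it above handles (hypothetical C++ input):
//
//   struct stat { int st_mode; };
//   int stat(const char *path, struct stat *buf);
//   void f() {
//     stat s; // 'stat' names the function here; we diagnose and insert
//             // "struct " so the hidden tag type is used instead.
//   }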
/// Build a ParsedType for a simple-type-specifier with a nested-name-specifier.
static ParsedType buildNestedType(Sema &S, CXXScopeSpec &SS,
QualType T, SourceLocation NameLoc) {
ASTContext &Context = S.Context;
TypeLocBuilder Builder;
Builder.pushTypeSpec(T).setNameLoc(NameLoc);
T = S.getElaboratedType(ETK_None, SS, T);
ElaboratedTypeLoc ElabTL = Builder.push<ElaboratedTypeLoc>(T);
ElabTL.setElaboratedKeywordLoc(SourceLocation());
ElabTL.setQualifierLoc(SS.getWithLocInContext(Context));
return S.CreateParsedType(T, Builder.getTypeSourceInfo(Context, T));
}
Sema::NameClassification
Sema::ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name,
SourceLocation NameLoc, const Token &NextToken,
bool IsAddressOfOperand, CorrectionCandidateCallback *CCC) {
DeclarationNameInfo NameInfo(Name, NameLoc);
ObjCMethodDecl *CurMethod = getCurMethodDecl();
if (NextToken.is(tok::coloncolon)) {
NestedNameSpecInfo IdInfo(Name, NameLoc, NextToken.getLocation());
BuildCXXNestedNameSpecifier(S, IdInfo, false, SS, nullptr, false);
} else if (getLangOpts().CPlusPlus && SS.isSet() &&
isCurrentClassName(*Name, S, &SS)) {
// Per [class.qual]p2, this names the constructors of SS, not the
// injected-class-name. We don't have a classification for that.
// There's not much point caching this result, since the parser
// will reject it later.
return NameClassification::Unknown();
}
LookupResult Result(*this, Name, NameLoc, LookupOrdinaryName);
LookupParsedName(Result, S, &SS, !CurMethod);
// For unqualified lookup in a class template in MSVC mode, look into
// dependent base classes where the primary class template is known.
if (Result.empty() && SS.isEmpty() && getLangOpts().MSVCCompat) {
if (ParsedType TypeInBase =
recoverFromTypeInKnownDependentBase(*this, *Name, NameLoc))
return TypeInBase;
}
// Perform lookup for Objective-C instance variables (including automatically
// synthesized instance variables), if we're in an Objective-C method.
// FIXME: This lookup really, really needs to be folded in to the normal
// unqualified lookup mechanism.
if (!SS.isSet() && CurMethod && !isResultTypeOrTemplate(Result, NextToken)) {
ExprResult E = LookupInObjCMethod(Result, S, Name, true);
if (E.get() || E.isInvalid())
return E;
}
bool SecondTry = false;
bool IsFilteredTemplateName = false;
Corrected:
switch (Result.getResultKind()) {
case LookupResult::NotFound:
// If an unqualified-id is followed by a '(', then we have a function
// call.
if (!SS.isSet() && NextToken.is(tok::l_paren)) {
// In C++, this is an ADL-only call.
// FIXME: Reference?
if (getLangOpts().CPlusPlus)
return BuildDeclarationNameExpr(SS, Result, /*ADL=*/true);
// C90 6.3.2.2:
// If the expression that precedes the parenthesized argument list in a
// function call consists solely of an identifier, and if no
// declaration is visible for this identifier, the identifier is
// implicitly declared exactly as if, in the innermost block containing
// the function call, the declaration
//
// extern int identifier ();
//
// appeared.
//
// We also allow this in C99 as an extension.
if (NamedDecl *D = ImplicitlyDefineFunction(NameLoc, *Name, S)) {
Result.addDecl(D);
Result.resolveKind();
return BuildDeclarationNameExpr(SS, Result, /*ADL=*/false);
}
}
if (getLangOpts().CPlusPlus2a && !SS.isSet() && NextToken.is(tok::less)) {
// In C++20 onwards, this could be an ADL-only call to a function
// template, and we're required to assume that this is a template name.
//
// FIXME: Find a way to still do typo correction in this case.
TemplateName Template =
Context.getAssumedTemplateName(NameInfo.getName());
return NameClassification::UndeclaredTemplate(Template);
}
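// An illustrative C++20 input for the assumed-template rule above
// (hypothetical):
//
//   namespace N { struct X {}; template <typename T> void f(T); }
//   void g(N::X x) { f<int>(x); } // 'f' is undeclared here; it is assumed
//                                 // to name a template and found by ADL.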
// In C, we first see whether there is a tag type by the same name, in
// which case it's likely that the user just forgot to write "enum",
// "struct", or "union".
if (!getLangOpts().CPlusPlus && !SecondTry &&
isTagTypeWithMissingTag(*this, Result, S, SS, Name, NameLoc)) {
break;
}
// Perform typo correction to determine if there is another name that is
// close to this name.
if (!SecondTry && CCC) {
SecondTry = true;
if (TypoCorrection Corrected =
CorrectTypo(Result.getLookupNameInfo(), Result.getLookupKind(), S,
&SS, *CCC, CTK_ErrorRecovery)) {
unsigned UnqualifiedDiag = diag::err_undeclared_var_use_suggest;
unsigned QualifiedDiag = diag::err_no_member_suggest;
NamedDecl *FirstDecl = Corrected.getFoundDecl();
NamedDecl *UnderlyingFirstDecl = Corrected.getCorrectionDecl();
if (getLangOpts().CPlusPlus && NextToken.is(tok::less) &&
UnderlyingFirstDecl && isa<TemplateDecl>(UnderlyingFirstDecl)) {
UnqualifiedDiag = diag::err_no_template_suggest;
QualifiedDiag = diag::err_no_member_template_suggest;
} else if (UnderlyingFirstDecl &&
(isa<TypeDecl>(UnderlyingFirstDecl) ||
isa<ObjCInterfaceDecl>(UnderlyingFirstDecl) ||
isa<ObjCCompatibleAliasDecl>(UnderlyingFirstDecl))) {
UnqualifiedDiag = diag::err_unknown_typename_suggest;
QualifiedDiag = diag::err_unknown_nested_typename_suggest;
}
if (SS.isEmpty()) {
diagnoseTypo(Corrected, PDiag(UnqualifiedDiag) << Name);
} else { // FIXME: is this even reachable? Test it.
std::string CorrectedStr(Corrected.getAsString(getLangOpts()));
bool DroppedSpecifier = Corrected.WillReplaceSpecifier() &&
Name->getName().equals(CorrectedStr);
diagnoseTypo(Corrected, PDiag(QualifiedDiag)
<< Name << computeDeclContext(SS, false)
<< DroppedSpecifier << SS.getRange());
}
// Update the name, so that the caller has the new name.
Name = Corrected.getCorrectionAsIdentifierInfo();
// Typo correction corrected to a keyword.
if (Corrected.isKeyword())
return Name;
// Also update the LookupResult...
// FIXME: This should probably go away at some point
Result.clear();
Result.setLookupName(Corrected.getCorrection());
if (FirstDecl)
Result.addDecl(FirstDecl);
// If we found an Objective-C instance variable, let
// LookupInObjCMethod build the appropriate expression to
// reference the ivar.
// FIXME: This is a gross hack.
if (ObjCIvarDecl *Ivar = Result.getAsSingle<ObjCIvarDecl>()) {
Result.clear();
ExprResult E(LookupInObjCMethod(Result, S, Ivar->getIdentifier()));
return E;
}
goto Corrected;
}
}
// We failed to correct; just fall through and let the parser deal with it.
Result.suppressDiagnostics();
return NameClassification::Unknown();
case LookupResult::NotFoundInCurrentInstantiation: {
// We performed name lookup into the current instantiation, and there were
// dependent bases, so we treat this result the same way as any other
// dependent nested-name-specifier.
// C++ [temp.res]p2:
// A name used in a template declaration or definition and that is
// dependent on a template-parameter is assumed not to name a type
// unless the applicable name lookup finds a type name or the name is
// qualified by the keyword typename.
//
// FIXME: If the next token is '<', we might want to ask the parser to
// perform some heroics to see if we actually have a
// template-argument-list, which would indicate a missing 'template'
// keyword here.
return ActOnDependentIdExpression(SS, /*TemplateKWLoc=*/SourceLocation(),
NameInfo, IsAddressOfOperand,
/*TemplateArgs=*/nullptr);
}
case LookupResult::Found:
case LookupResult::FoundOverloaded:
case LookupResult::FoundUnresolvedValue:
break;
case LookupResult::Ambiguous:
if (getLangOpts().CPlusPlus && NextToken.is(tok::less) &&
hasAnyAcceptableTemplateNames(Result, /*AllowFunctionTemplates=*/true,
/*AllowDependent=*/false)) {
// C++ [temp.local]p3:
// A lookup that finds an injected-class-name (10.2) can result in an
// ambiguity in certain cases (for example, if it is found in more than
// one base class). If all of the injected-class-names that are found
// refer to specializations of the same class template, and if the name
// is followed by a template-argument-list, the reference refers to the
// class template itself and not a specialization thereof, and is not
// ambiguous.
//
// This filtering can make an ambiguous result into an unambiguous one,
// so try again after filtering out template names.
FilterAcceptableTemplateNames(Result);
if (!Result.isAmbiguous()) {
IsFilteredTemplateName = true;
break;
}
}
// Diagnose the ambiguity and return an error.
return NameClassification::Error();
}
if (getLangOpts().CPlusPlus && NextToken.is(tok::less) &&
(IsFilteredTemplateName ||
hasAnyAcceptableTemplateNames(
Result, /*AllowFunctionTemplates=*/true,
/*AllowDependent=*/false,
/*AllowNonTemplateFunctions*/ !SS.isSet() &&
getLangOpts().CPlusPlus2a))) {
// C++ [temp.names]p3:
// After name lookup (3.4) finds that a name is a template-name or that
// an operator-function-id or a literal-operator-id refers to a set of
// overloaded functions any member of which is a function template if
// this is followed by a <, the < is always taken as the delimiter of a
// template-argument-list and never as the less-than operator.
// C++2a [temp.names]p2:
// A name is also considered to refer to a template if it is an
// unqualified-id followed by a < and name lookup finds either one
// or more functions or finds nothing.
if (!IsFilteredTemplateName)
FilterAcceptableTemplateNames(Result);
bool IsFunctionTemplate;
bool IsVarTemplate;
TemplateName Template;
if (Result.end() - Result.begin() > 1) {
IsFunctionTemplate = true;
Template = Context.getOverloadedTemplateName(Result.begin(),
Result.end());
} else if (!Result.empty()) {
auto *TD = cast<TemplateDecl>(getAsTemplateNameDecl(
*Result.begin(), /*AllowFunctionTemplates=*/true,
/*AllowDependent=*/false));
IsFunctionTemplate = isa<FunctionTemplateDecl>(TD);
IsVarTemplate = isa<VarTemplateDecl>(TD);
if (SS.isSet() && !SS.isInvalid())
Template =
Context.getQualifiedTemplateName(SS.getScopeRep(),
/*TemplateKeyword=*/false, TD);
else
Template = TemplateName(TD);
} else {
// All results were non-template functions. This is a function template
// name.
IsFunctionTemplate = true;
Template = Context.getAssumedTemplateName(NameInfo.getName());
}
if (IsFunctionTemplate) {
// Function templates always go through overload resolution, at which
// point we'll perform the various checks (e.g., accessibility) we need
// to based on which function we selected.
Result.suppressDiagnostics();
return NameClassification::FunctionTemplate(Template);
}
return IsVarTemplate ? NameClassification::VarTemplate(Template)
: NameClassification::TypeTemplate(Template);
}
NamedDecl *FirstDecl = (*Result.begin())->getUnderlyingDecl();
if (TypeDecl *Type = dyn_cast<TypeDecl>(FirstDecl)) {
DiagnoseUseOfDecl(Type, NameLoc);
MarkAnyDeclReferenced(Type->getLocation(), Type, /*OdrUse=*/false);
QualType T = Context.getTypeDeclType(Type);
if (SS.isNotEmpty())
return buildNestedType(*this, SS, T, NameLoc);
return ParsedType::make(T);
}
ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(FirstDecl);
if (!Class) {
// FIXME: It's unfortunate that we don't have a Type node for handling this.
if (ObjCCompatibleAliasDecl *Alias =
dyn_cast<ObjCCompatibleAliasDecl>(FirstDecl))
Class = Alias->getClassInterface();
}
if (Class) {
DiagnoseUseOfDecl(Class, NameLoc);
if (NextToken.is(tok::period)) {
// Interface.<something> is parsed as a property reference expression.
// Just return "unknown" as a fall-through for now.
Result.suppressDiagnostics();
return NameClassification::Unknown();
}
QualType T = Context.getObjCInterfaceType(Class);
return ParsedType::make(T);
}
// We can have a type template here if we're classifying a template argument.
if (isa<TemplateDecl>(FirstDecl) && !isa<FunctionTemplateDecl>(FirstDecl) &&
!isa<VarTemplateDecl>(FirstDecl))
return NameClassification::TypeTemplate(
TemplateName(cast<TemplateDecl>(FirstDecl)));
// Check for a tag type hidden by a non-type decl in a few cases where it
// seems likely a type is wanted instead of the non-type that was found.
bool NextIsOp = NextToken.isOneOf(tok::amp, tok::star);
if ((NextToken.is(tok::identifier) ||
(NextIsOp &&
FirstDecl->getUnderlyingDecl()->isFunctionOrFunctionTemplate())) &&
isTagTypeWithMissingTag(*this, Result, S, SS, Name, NameLoc)) {
TypeDecl *Type = Result.getAsSingle<TypeDecl>();
DiagnoseUseOfDecl(Type, NameLoc);
QualType T = Context.getTypeDeclType(Type);
if (SS.isNotEmpty())
return buildNestedType(*this, SS, T, NameLoc);
return ParsedType::make(T);
}
if (FirstDecl->isCXXClassMember())
return BuildPossibleImplicitMemberExpr(SS, SourceLocation(), Result,
nullptr, S);
bool ADL = UseArgumentDependentLookup(SS, Result, NextToken.is(tok::l_paren));
return BuildDeclarationNameExpr(SS, Result, ADL);
}
Sema::TemplateNameKindForDiagnostics
Sema::getTemplateNameKindForDiagnostics(TemplateName Name) {
auto *TD = Name.getAsTemplateDecl();
if (!TD)
return TemplateNameKindForDiagnostics::DependentTemplate;
if (isa<ClassTemplateDecl>(TD))
return TemplateNameKindForDiagnostics::ClassTemplate;
if (isa<FunctionTemplateDecl>(TD))
return TemplateNameKindForDiagnostics::FunctionTemplate;
if (isa<VarTemplateDecl>(TD))
return TemplateNameKindForDiagnostics::VarTemplate;
if (isa<TypeAliasTemplateDecl>(TD))
return TemplateNameKindForDiagnostics::AliasTemplate;
if (isa<TemplateTemplateParmDecl>(TD))
return TemplateNameKindForDiagnostics::TemplateTemplateParam;
if (isa<ConceptDecl>(TD))
return TemplateNameKindForDiagnostics::Concept;
return TemplateNameKindForDiagnostics::DependentTemplate;
}
// Determines the context to return to after temporarily entering a
// context. This depends in an unnecessarily complicated way on the
// exact ordering of callbacks from the parser.
DeclContext *Sema::getContainingDC(DeclContext *DC) {
// Functions defined inline within classes aren't parsed until we've
// finished parsing the top-level class, so the top-level class is
// the context we'll need to return to.
// A lambda call operator whose parent is a class must not be treated
// as an inline member function. At class scope, a lambda can legally
// appear only in an in-class member initializer or a default argument;
// these are parsed once the class has been marked complete, so the
// containing context would be the nested class (when the lambda is
// defined in one). If the class is not complete, then the lambda is
// being used in an ill-formed fashion (such as to specify the width of
// a bit-field, or in an array bound), in which case we still want to
// return the lexically containing DC (which could be a nested class).
if (isa<FunctionDecl>(DC) && !isLambdaCallOperator(DC)) {
DC = DC->getLexicalParent();
// A function not defined within a class will always return to its
// lexical context.
if (!isa<CXXRecordDecl>(DC))
return DC;
// A C++ inline method/friend is parsed *after* the topmost class
// it was declared in is fully parsed ("complete"); the topmost
// class is the context we need to return to.
while (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(DC->getLexicalParent()))
DC = RD;
// Return the declaration context of the topmost class the inline method is
// declared in.
return DC;
}
return DC->getLexicalParent();
}
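// A sketch of the lambda case described above (illustrative only):
//
//   struct Outer { struct Inner {
//     int n = [] { return 42; }(); // parsed once Inner is complete; the
//                                  // context to return to is Inner itself,
//                                  // not the topmost class Outer.
//   }; };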
void Sema::PushDeclContext(Scope *S, DeclContext *DC) {
assert(getContainingDC(DC) == CurContext &&
"The next DeclContext should be lexically contained in the current one.");
CurContext = DC;
S->setEntity(DC);
}
void Sema::PopDeclContext() {
assert(CurContext && "DeclContext imbalance!");
CurContext = getContainingDC(CurContext);
assert(CurContext && "Popped translation unit!");
}
Sema::SkippedDefinitionContext Sema::ActOnTagStartSkippedDefinition(Scope *S,
Decl *D) {
// Unlike PushDeclContext, the context to which we return is not necessarily
// the containing DC of TD, because the new context will be some pre-existing
// TagDecl definition instead of a fresh one.
auto Result = static_cast<SkippedDefinitionContext>(CurContext);
CurContext = cast<TagDecl>(D)->getDefinition();
assert(CurContext && "skipping definition of undefined tag");
// Start lookups from the parent of the current context; we don't want to look
// into the pre-existing complete definition.
S->setEntity(CurContext->getLookupParent());
return Result;
}
void Sema::ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context) {
CurContext = static_cast<decltype(CurContext)>(Context);
}
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
///
void Sema::EnterDeclaratorContext(Scope *S, DeclContext *DC) {
// C++0x [basic.lookup.unqual]p13:
// A name used in the definition of a static data member of class
// X (after the qualified-id of the static member) is looked up as
// if the name was used in a member function of X.
// C++0x [basic.lookup.unqual]p14:
// If a variable member of a namespace is defined outside of the
// scope of its namespace then any name used in the definition of
// the variable member (after the declarator-id) is looked up as
// if the definition of the variable member occurred in its
// namespace.
// Both of these imply that we should push a scope whose context
// is the semantic context of the declaration. We can't use
// PushDeclContext here because that context is not necessarily
// lexically contained in the current context. Fortunately,
// the containing scope should have the appropriate information.
assert(!S->getEntity() && "scope already has entity");
#ifndef NDEBUG
Scope *Ancestor = S->getParent();
while (!Ancestor->getEntity()) Ancestor = Ancestor->getParent();
assert(Ancestor->getEntity() == CurContext && "ancestor context mismatch");
#endif
CurContext = DC;
S->setEntity(DC);
}
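// The out-of-line pattern the standard wording above describes
// (illustrative):
//
//   struct X { typedef int T; static T n; };
//   X::T X::n = T(); // after the declarator-id 'X::n', 'T' is looked up as
//                    // if inside a member function of X.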
void Sema::ExitDeclaratorContext(Scope *S) {
assert(S->getEntity() == CurContext && "Context imbalance!");
// Switch back to the lexical context. The safety of this is
// enforced by an assert in EnterDeclaratorContext.
Scope *Ancestor = S->getParent();
while (!Ancestor->getEntity()) Ancestor = Ancestor->getParent();
CurContext = Ancestor->getEntity();
// We don't need to do anything with the scope, which is going to
// disappear.
}
void Sema::ActOnReenterFunctionContext(Scope* S, Decl *D) {
// We assume that the caller has already called
// ActOnReenterTemplateScope so getTemplatedDecl() works.
FunctionDecl *FD = D->getAsFunction();
if (!FD)
return;
// Same implementation as PushDeclContext, but enters the context
// from the lexical parent, rather than the top-level class.
assert(CurContext == FD->getLexicalParent() &&
"The next DeclContext should be lexically contained in the current one.");
CurContext = FD;
S->setEntity(CurContext);
for (unsigned P = 0, NumParams = FD->getNumParams(); P < NumParams; ++P) {
ParmVarDecl *Param = FD->getParamDecl(P);
// If the parameter has an identifier, then add it to the scope
if (Param->getIdentifier()) {
S->AddDecl(Param);
IdResolver.AddDecl(Param);
}
}
}
void Sema::ActOnExitFunctionContext() {
// Same implementation as PopDeclContext, but returns to the lexical parent,
// rather than the top-level class.
assert(CurContext && "DeclContext imbalance!");
CurContext = CurContext->getLexicalParent();
assert(CurContext && "Popped translation unit!");
}
/// Determine whether we allow overloading of the function
/// PrevDecl with another declaration.
///
/// This routine determines whether overloading is possible, not
/// whether some new function is actually an overload. It will return
/// true in C++ (where we can always provide overloads) or, as an
/// extension, in C when the previous function is already an
/// overloaded function declaration or has the "overloadable"
/// attribute.
static bool AllowOverloadingOfFunction(LookupResult &Previous,
ASTContext &Context,
const FunctionDecl *New) {
if (Context.getLangOpts().CPlusPlus)
return true;
if (Previous.getResultKind() == LookupResult::FoundOverloaded)
return true;
return Previous.getResultKind() == LookupResult::Found &&
(Previous.getFoundDecl()->hasAttr<OverloadableAttr>() ||
New->hasAttr<OverloadableAttr>());
}
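// A sketch of the C extension mentioned above (not from this file):
//
//   __attribute__((overloadable)) float area(float r);
//   __attribute__((overloadable)) int area(int w, int h); // OK even in C:
//                        // the previous declaration is overloadable too.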
/// Add this decl to the scope shadowed decl chains.
void Sema::PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext) {
// Move up the scope chain until we find the nearest enclosing
// non-transparent context. The declaration will be introduced into this
// scope.
while (S->getEntity() && S->getEntity()->isTransparentContext())
S = S->getParent();
// Add scoped declarations into their context, so that they can be
// found later. Declarations without a context won't be inserted
// into any context.
if (AddToContext)
CurContext->addDecl(D);
// Out-of-line definitions shouldn't be pushed into scope in C++, unless they
// are function-local declarations.
if (getLangOpts().CPlusPlus && D->isOutOfLine() &&
!D->getDeclContext()->getRedeclContext()->Equals(
D->getLexicalDeclContext()->getRedeclContext()) &&
!D->getLexicalDeclContext()->isFunctionOrMethod())
return;
// Template instantiations should also not be pushed into scope.
if (isa<FunctionDecl>(D) &&
cast<FunctionDecl>(D)->isFunctionTemplateSpecialization())
return;
// If this replaces anything in the current scope, remove the old declaration
// from the scope and from the identifier chain.
IdentifierResolver::iterator I = IdResolver.begin(D->getDeclName()),
IEnd = IdResolver.end();
for (; I != IEnd; ++I) {
if (S->isDeclScope(*I) && D->declarationReplaces(*I)) {
S->RemoveDecl(*I);
IdResolver.RemoveDecl(*I);
// Should only need to replace one decl.
break;
}
}
S->AddDecl(D);
if (isa<LabelDecl>(D) && !cast<LabelDecl>(D)->isGnuLocal()) {
// Implicitly-generated labels may end up getting generated in an order that
// isn't strictly lexical, which breaks name lookup. Be careful to insert
// the label at the appropriate place in the identifier chain.
for (I = IdResolver.begin(D->getDeclName()); I != IEnd; ++I) {
DeclContext *IDC = (*I)->getLexicalDeclContext()->getRedeclContext();
if (IDC == CurContext) {
if (!S->isDeclScope(*I))
continue;
} else if (IDC->Encloses(CurContext))
break;
}
IdResolver.InsertDeclAfter(I, D);
} else {
IdResolver.AddDecl(D);
}
}
bool Sema::isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S,
bool AllowInlineNamespace) {
return IdResolver.isDeclInScope(D, Ctx, S, AllowInlineNamespace);
}
Scope *Sema::getScopeForDeclContext(Scope *S, DeclContext *DC) {
DeclContext *TargetDC = DC->getPrimaryContext();
do {
if (DeclContext *ScopeDC = S->getEntity())
if (ScopeDC->getPrimaryContext() == TargetDC)
return S;
} while ((S = S->getParent()));
return nullptr;
}
static bool isOutOfScopePreviousDeclaration(NamedDecl *,
DeclContext*,
ASTContext&);
/// Filters out lookup results that don't fall within the given scope
/// as determined by isDeclInScope.
void Sema::FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage,
bool AllowInlineNamespace) {
LookupResult::Filter F = R.makeFilter();
while (F.hasNext()) {
NamedDecl *D = F.next();
if (isDeclInScope(D, Ctx, S, AllowInlineNamespace))
continue;
if (ConsiderLinkage && isOutOfScopePreviousDeclaration(D, Ctx, Context))
continue;
F.erase();
}
F.done();
}
/// We've determined that \p New is a redeclaration of \p Old. Check that they
/// have compatible owning modules.
bool Sema::CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old) {
// FIXME: The Modules TS is not clear about how friend declarations are
// to be treated. It's not meaningful to have different owning modules for
// linkage in redeclarations of the same entity, so for now allow the
// redeclaration and change the owning modules to match.
if (New->getFriendObjectKind() &&
Old->getOwningModuleForLinkage() != New->getOwningModuleForLinkage()) {
New->setLocalOwningModule(Old->getOwningModule());
makeMergedDefinitionVisible(New);
return false;
}
Module *NewM = New->getOwningModule();
Module *OldM = Old->getOwningModule();
if (NewM && NewM->Kind == Module::PrivateModuleFragment)
NewM = NewM->Parent;
if (OldM && OldM->Kind == Module::PrivateModuleFragment)
OldM = OldM->Parent;
if (NewM == OldM)
return false;
bool NewIsModuleInterface = NewM && NewM->isModulePurview();
bool OldIsModuleInterface = OldM && OldM->isModulePurview();
if (NewIsModuleInterface || OldIsModuleInterface) {
// C++ Modules TS [basic.def.odr] 6.2/6.7 [sic]:
// if a declaration of D [...] appears in the purview of a module, all
// other such declarations shall appear in the purview of the same module
Diag(New->getLocation(), diag::err_mismatched_owning_module)
<< New
<< NewIsModuleInterface
<< (NewIsModuleInterface ? NewM->getFullModuleName() : "")
<< OldIsModuleInterface
<< (OldIsModuleInterface ? OldM->getFullModuleName() : "");
Diag(Old->getLocation(), diag::note_previous_declaration);
New->setInvalidDecl();
return true;
}
return false;
}
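// An illustrative ill-formed pattern for the check above (module and file
// names are hypothetical):
//
//   // a.cppm: export module A; export int n;
//   // b.cppm: export module B; import A;
//   //         int n; // declaration of 'n' owned by B follows a declaration
//   //                // owned by A -> err_mismatched_owning_module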
static bool isUsingDecl(NamedDecl *D) {
return isa<UsingShadowDecl>(D) ||
isa<UnresolvedUsingTypenameDecl>(D) ||
isa<UnresolvedUsingValueDecl>(D);
}
/// Removes using shadow declarations from the lookup results.
static void RemoveUsingDecls(LookupResult &R) {
LookupResult::Filter F = R.makeFilter();
while (F.hasNext())
if (isUsingDecl(F.next()))
F.erase();
F.done();
}
/// Check for this common pattern:
/// @code
/// class S {
/// S(const S&); // DO NOT IMPLEMENT
/// void operator=(const S&); // DO NOT IMPLEMENT
/// };
/// @endcode
static bool IsDisallowedCopyOrAssign(const CXXMethodDecl *D) {
// FIXME: Should check for private access too but access is set after we get
// the decl here.
if (D->doesThisDeclarationHaveABody())
return false;
if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(D))
return CD->isCopyConstructor();
return D->isCopyAssignmentOperator();
}
// We need this to handle
//
// typedef struct {
// void *foo() { return 0; }
// } A;
//
// When we see foo we don't know whether, after the typedef, we will get 'A'
// or '*A', for example. If 'A', foo will have external linkage; if '*A', foo
// will have no linkage. Since we can't know until we get to the end of the
// typedef, this function finds out if D might have non-external linkage.
// Callers should verify at the end of the TU whether D has external linkage.
bool Sema::mightHaveNonExternalLinkage(const DeclaratorDecl *D) {
const DeclContext *DC = D->getDeclContext();
while (!DC->isTranslationUnit()) {
if (const RecordDecl *RD = dyn_cast<RecordDecl>(DC)) {
if (!RD->hasNameForLinkage())
return true;
}
DC = DC->getParent();
}
return !D->isExternallyVisible();
}
// FIXME: This needs to be refactored; some other isInMainFile users want
// these semantics.
static bool isMainFileLoc(const Sema &S, SourceLocation Loc) {
if (S.TUKind != TU_Complete)
return false;
return S.SourceMgr.isInMainFile(Loc);
}
bool Sema::ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const {
assert(D);
if (D->isInvalidDecl() || D->isUsed() || D->hasAttr<UnusedAttr>())
return false;
// Ignore all entities declared within templates, and out-of-line definitions
// of members of class templates.
if (D->getDeclContext()->isDependentContext() ||
D->getLexicalDeclContext()->isDependentContext())
return false;
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
return false;
// A non-out-of-line declaration of a member specialization was implicitly
// instantiated; it's the out-of-line declaration that we're interested in.
if (FD->getTemplateSpecializationKind() == TSK_ExplicitSpecialization &&
FD->getMemberSpecializationInfo() && !FD->isOutOfLine())
return false;
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
if (MD->isVirtual() || IsDisallowedCopyOrAssign(MD))
return false;
} else {
// 'static inline' functions are defined in headers; don't warn.
if (FD->isInlined() && !isMainFileLoc(*this, FD->getLocation()))
return false;
}
if (FD->doesThisDeclarationHaveABody() &&
Context.DeclMustBeEmitted(FD))
return false;
} else if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
// Constants and utility variables are defined in headers with internal
// linkage; don't warn. (Unlike functions, there isn't a convenient marker
// like "inline".)
if (!isMainFileLoc(*this, VD->getLocation()))
return false;
if (Context.DeclMustBeEmitted(VD))
return false;
if (VD->isStaticDataMember() &&
VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
return false;
if (VD->isStaticDataMember() &&
VD->getTemplateSpecializationKind() == TSK_ExplicitSpecialization &&
VD->getMemberSpecializationInfo() && !VD->isOutOfLine())
return false;
if (VD->isInline() && !isMainFileLoc(*this, VD->getLocation()))
return false;
} else {
return false;
}
// Only warn for unused decls internal to the translation unit.
// FIXME: This seems like a bogus check; it suppresses -Wunused-function
// for inline functions defined in the main source file, for instance.
return mightHaveNonExternalLinkage(D);
}
void Sema::MarkUnusedFileScopedDecl(const DeclaratorDecl *D) {
if (!D)
return;
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
const FunctionDecl *First = FD->getFirstDecl();
if (FD != First && ShouldWarnIfUnusedFileScopedDecl(First))
return; // First should already be in the vector.
}
if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
const VarDecl *First = VD->getFirstDecl();
if (VD != First && ShouldWarnIfUnusedFileScopedDecl(First))
return; // First should already be in the vector.
}
if (ShouldWarnIfUnusedFileScopedDecl(D))
UnusedFileScopedDecls.push_back(D);
}
static bool ShouldDiagnoseUnusedDecl(const NamedDecl *D) {
if (D->isInvalidDecl())
return false;
bool Referenced = false;
if (auto *DD = dyn_cast<DecompositionDecl>(D)) {
// For a decomposition declaration, warn if none of the bindings are
// referenced, instead of if the variable itself is referenced (which
// it is, by the bindings' expressions).
for (auto *BD : DD->bindings()) {
if (BD->isReferenced()) {
Referenced = true;
break;
}
}
} else if (!D->getDeclName()) {
return false;
} else if (D->isReferenced() || D->isUsed()) {
Referenced = true;
}
if (Referenced || D->hasAttr<UnusedAttr>() ||
D->hasAttr<ObjCPreciseLifetimeAttr>())
return false;
if (isa<LabelDecl>(D))
return true;
// Except for labels, we only care about unused decls that are local to
// functions.
bool WithinFunction = D->getDeclContext()->isFunctionOrMethod();
if (const auto *R = dyn_cast<CXXRecordDecl>(D->getDeclContext()))
// For dependent types, the diagnostic is deferred.
WithinFunction =
WithinFunction || (R->isLocalClass() && !R->isDependentType());
if (!WithinFunction)
return false;
if (isa<TypedefNameDecl>(D))
return true;
// White-list anything that isn't a local variable.
if (!isa<VarDecl>(D) || isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D))
return false;
// Types of valid local variables should be complete, so this should succeed.
if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
// White-list anything with an __attribute__((unused)) type.
const auto *Ty = VD->getType().getTypePtr();
// Only look at the outermost level of typedef.
if (const TypedefType *TT = Ty->getAs<TypedefType>()) {
if (TT->getDecl()->hasAttr<UnusedAttr>())
return false;
}
// If we failed to complete the type for some reason, or if the type is
// dependent, don't diagnose the variable.
if (Ty->isIncompleteType() || Ty->isDependentType())
return false;
// Look at the element type to ensure that the warning behaviour is
// consistent for both scalars and arrays.
Ty = Ty->getBaseElementTypeUnsafe();
if (const TagType *TT = Ty->getAs<TagType>()) {
const TagDecl *Tag = TT->getDecl();
if (Tag->hasAttr<UnusedAttr>())
return false;
if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Tag)) {
if (!RD->hasTrivialDestructor() && !RD->hasAttr<WarnUnusedAttr>())
return false;
if (const Expr *Init = VD->getInit()) {
if (const ExprWithCleanups *Cleanups =
dyn_cast<ExprWithCleanups>(Init))
Init = Cleanups->getSubExpr();
const CXXConstructExpr *Construct =
dyn_cast<CXXConstructExpr>(Init);
if (Construct && !Construct->isElidable()) {
CXXConstructorDecl *CD = Construct->getConstructor();
if (!CD->isTrivial() && !RD->hasAttr<WarnUnusedAttr>() &&
(VD->getInit()->isValueDependent() || !VD->evaluateValue()))
return false;
}
}
}
}
// TODO: __attribute__((unused)) templates?
}
return true;
}
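// Decomposition-declaration behavior sketched above (illustrative; 'use' is
// a hypothetical function, and std::make_pair assumes <utility>):
//
//   auto [a, b] = std::make_pair(1, 2);
//   use(a); // 'b' is never referenced, but because one binding is used the
//           // decomposition as a whole is not diagnosed as unused.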
static void GenerateFixForUnusedDecl(const NamedDecl *D, ASTContext &Ctx,
FixItHint &Hint) {
if (isa<LabelDecl>(D)) {
SourceLocation AfterColon = Lexer::findLocationAfterToken(
D->getEndLoc(), tok::colon, Ctx.getSourceManager(), Ctx.getLangOpts(),
true);
if (AfterColon.isInvalid())
return;
Hint = FixItHint::CreateRemoval(
CharSourceRange::getCharRange(D->getBeginLoc(), AfterColon));
}
}
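// For an unused label, the fix-it above removes the label and its colon
// (illustrative):
//
//   void f() {
//   done:; // warn_unused_label; the fix-it removes "done:".
//   }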
void Sema::DiagnoseUnusedNestedTypedefs(const RecordDecl *D) {
if (D->getTypeForDecl()->isDependentType())
return;
for (auto *TmpD : D->decls()) {
if (const auto *T = dyn_cast<TypedefNameDecl>(TmpD))
DiagnoseUnusedDecl(T);
else if (const auto *R = dyn_cast<RecordDecl>(TmpD))
DiagnoseUnusedNestedTypedefs(R);
}
}
/// DiagnoseUnusedDecl - Emit warnings about declarations that are not used
/// unless they are marked attr(unused).
void Sema::DiagnoseUnusedDecl(const NamedDecl *D) {
if (!ShouldDiagnoseUnusedDecl(D))
return;
if (auto *TD = dyn_cast<TypedefNameDecl>(D)) {
// typedefs can be referenced later on, so the diagnostics are emitted
// at end-of-translation-unit.
UnusedLocalTypedefNameCandidates.insert(TD);
return;
}
FixItHint Hint;
GenerateFixForUnusedDecl(D, Context, Hint);
unsigned DiagID;
if (isa<VarDecl>(D) && cast<VarDecl>(D)->isExceptionVariable())
DiagID = diag::warn_unused_exception_param;
else if (isa<LabelDecl>(D))
DiagID = diag::warn_unused_label;
else
DiagID = diag::warn_unused_variable;
Diag(D->getLocation(), DiagID) << D << Hint;
}
static void CheckPoppedLabel(LabelDecl *L, Sema &S) {
// Verify that we have no forward references left. If so, there was a goto
// or address of a label taken, but no definition of it. A forward-referenced
// label is indicated by a null sub-statement, or, for an MS inline assembly
// label, by remaining unresolved.
bool Diagnose = false;
if (L->isMSAsmLabel())
Diagnose = !L->isResolvedMSAsmLabel();
else
Diagnose = L->getStmt() == nullptr;
if (Diagnose)
S.Diag(L->getLocation(), diag::err_undeclared_label_use) << L->getDeclName();
}
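// Illustrative input for the check above:
//
//   void f() {
//     goto finish; // when f's scope is popped, 'finish' has no definition,
//                  // so we emit err_undeclared_label_use.
//   }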
void Sema::ActOnPopScope(SourceLocation Loc, Scope *S) {
S->mergeNRVOIntoParent();
if (S->decl_empty()) return;
assert((S->getFlags() & (Scope::DeclScope | Scope::TemplateParamScope)) &&
"Scope shouldn't contain decls!");
for (auto *TmpD : S->decls()) {
assert(TmpD && "This decl didn't get pushed??");
assert(isa<NamedDecl>(TmpD) && "Decl isn't NamedDecl?");
NamedDecl *D = cast<NamedDecl>(TmpD);
// Diagnose unused variables in this scope.
if (!S->hasUnrecoverableErrorOccurred()) {
DiagnoseUnusedDecl(D);
if (const auto *RD = dyn_cast<RecordDecl>(D))
DiagnoseUnusedNestedTypedefs(RD);
}
if (!D->getDeclName()) continue;
// If this was a forward reference to a label, verify it was defined.
if (LabelDecl *LD = dyn_cast<LabelDecl>(D))
CheckPoppedLabel(LD, *this);
// Remove this name from our lexical scope, and warn on it if we haven't
// already.
IdResolver.RemoveDecl(D);
auto ShadowI = ShadowingDecls.find(D);
if (ShadowI != ShadowingDecls.end()) {
if (const auto *FD = dyn_cast<FieldDecl>(ShadowI->second)) {
Diag(D->getLocation(), diag::warn_ctor_parm_shadows_field)
<< D << FD << FD->getParent();
Diag(FD->getLocation(), diag::note_previous_declaration);
}
ShadowingDecls.erase(ShadowI);
}
}
}
/// Look for an Objective-C class in the translation unit.
///
/// \param Id The name of the Objective-C class we're looking for. If
/// typo-correction fixes this name, the Id will be updated
/// to the fixed name.
///
/// \param IdLoc The location of the name in the translation unit.
///
/// \param DoTypoCorrection If true, this routine will attempt typo correction
/// if there is no class with the given name.
///
/// \returns The declaration of the named Objective-C class, or NULL if the
/// class could not be found.
ObjCInterfaceDecl *Sema::getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool DoTypoCorrection) {
// The third "scope" argument is 0 since we aren't enabling lazy built-in
// creation from this context.
NamedDecl *IDecl = LookupSingleName(TUScope, Id, IdLoc, LookupOrdinaryName);
if (!IDecl && DoTypoCorrection) {
// Perform typo correction at the given location, but only if we
// find an Objective-C class name.
DeclFilterCCC<ObjCInterfaceDecl> CCC{};
if (TypoCorrection C =
CorrectTypo(DeclarationNameInfo(Id, IdLoc), LookupOrdinaryName,
TUScope, nullptr, CCC, CTK_ErrorRecovery)) {
diagnoseTypo(C, PDiag(diag::err_undef_interface_suggest) << Id);
IDecl = C.getCorrectionDeclAs<ObjCInterfaceDecl>();
Id = IDecl->getIdentifier();
}
}
ObjCInterfaceDecl *Def = dyn_cast_or_null<ObjCInterfaceDecl>(IDecl);
// This routine must always return a class definition, if any.
if (Def && Def->getDefinition())
Def = Def->getDefinition();
return Def;
}
/// getNonFieldDeclScope - Retrieves the innermost scope, starting
/// from S, where a non-field would be declared. This routine copes
/// with the difference between C and C++ scoping rules in structs and
/// unions. For example, the following code is well-formed in C but
/// ill-formed in C++:
/// @code
/// struct S6 {
/// enum { BAR } e;
/// };
///
/// void test_S6() {
/// struct S6 a;
/// a.e = BAR;
/// }
/// @endcode
/// For the declaration of BAR, this routine will return a different
/// scope. The scope S will be the scope of the unnamed enumeration
/// within S6. In C++, this routine will return the scope associated
/// with S6, because the enumeration's scope is a transparent
/// context but structures can contain non-field names. In C, this
/// routine will return the translation unit scope, since the
/// enumeration's scope is a transparent context and structures cannot
/// contain non-field names.
Scope *Sema::getNonFieldDeclScope(Scope *S) {
while (((S->getFlags() & Scope::DeclScope) == 0) ||
(S->getEntity() && S->getEntity()->isTransparentContext()) ||
(S->isClassScope() && !getLangOpts().CPlusPlus))
S = S->getParent();
return S;
}
/// Looks up the declaration of "struct objc_super" and
/// saves it for later use in building the builtin declarations of
/// objc_msgSendSuper and objc_msgSendSuper_stret. If no such
/// pre-existing declaration exists, no action takes place.
static void LookupPredefedObjCSuperType(Sema &ThisSema, Scope *S,
IdentifierInfo *II) {
if (!II->isStr("objc_msgSendSuper"))
return;
ASTContext &Context = ThisSema.Context;
LookupResult Result(ThisSema, &Context.Idents.get("objc_super"),
SourceLocation(), Sema::LookupTagName);
ThisSema.LookupName(Result, S);
if (Result.getResultKind() == LookupResult::Found)
if (const TagDecl *TD = Result.getAsSingle<TagDecl>())
Context.setObjCSuperType(Context.getTagDeclType(TD));
}
static StringRef getHeaderName(Builtin::Context &BuiltinInfo, unsigned ID,
ASTContext::GetBuiltinTypeError Error) {
switch (Error) {
case ASTContext::GE_None:
return "";
case ASTContext::GE_Missing_type:
return BuiltinInfo.getHeaderName(ID);
case ASTContext::GE_Missing_stdio:
return "stdio.h";
case ASTContext::GE_Missing_setjmp:
return "setjmp.h";
case ASTContext::GE_Missing_ucontext:
return "ucontext.h";
}
llvm_unreachable("unhandled error kind");
}
/// LazilyCreateBuiltin - The specified Builtin-ID was first used at
/// file scope. Lazily create a decl for it. ForRedeclaration is true
/// if we're creating this built-in in anticipation of redeclaring the
/// built-in.
NamedDecl *Sema::LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc) {
LookupPredefedObjCSuperType(*this, S, II);
ASTContext::GetBuiltinTypeError Error;
QualType R = Context.GetBuiltinType(ID, Error);
if (Error) {
if (!ForRedeclaration)
return nullptr;
// If the builtin has no associated type, we were simply unable to find a
// type for it; don't emit a warning in that case.
if (Error == ASTContext::GE_Missing_type)
return nullptr;
// If we could not find a type for setjmp it is because the jmp_buf type was
// not defined prior to the setjmp declaration.
if (Error == ASTContext::GE_Missing_setjmp) {
Diag(Loc, diag::warn_implicit_decl_no_jmp_buf)
<< Context.BuiltinInfo.getName(ID);
return nullptr;
}
// Generally, we emit a warning that the declaration requires the
// appropriate header.
Diag(Loc, diag::warn_implicit_decl_requires_sysheader)
<< getHeaderName(Context.BuiltinInfo, ID, Error)
<< Context.BuiltinInfo.getName(ID);
return nullptr;
}
if (!ForRedeclaration &&
(Context.BuiltinInfo.isPredefinedLibFunction(ID) ||
Context.BuiltinInfo.isHeaderDependentFunction(ID))) {
Diag(Loc, diag::ext_implicit_lib_function_decl)
<< Context.BuiltinInfo.getName(ID) << R;
if (Context.BuiltinInfo.getHeaderName(ID) &&
!Diags.isIgnored(diag::ext_implicit_lib_function_decl, Loc))
Diag(Loc, diag::note_include_header_or_declare)
<< Context.BuiltinInfo.getHeaderName(ID)
<< Context.BuiltinInfo.getName(ID);
}
if (R.isNull())
return nullptr;
DeclContext *Parent = Context.getTranslationUnitDecl();
if (getLangOpts().CPlusPlus) {
LinkageSpecDecl *CLinkageDecl =
LinkageSpecDecl::Create(Context, Parent, Loc, Loc,
LinkageSpecDecl::lang_c, false);
CLinkageDecl->setImplicit();
Parent->addDecl(CLinkageDecl);
Parent = CLinkageDecl;
}
FunctionDecl *New = FunctionDecl::Create(Context,
Parent,
Loc, Loc, II, R, /*TInfo=*/nullptr,
SC_Extern,
false,
R->isFunctionProtoType());
New->setImplicit();
// Create Decl objects for each parameter, adding them to the
// FunctionDecl.
if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(R)) {
SmallVector<ParmVarDecl*, 16> Params;
for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
ParmVarDecl *parm =
ParmVarDecl::Create(Context, New, SourceLocation(), SourceLocation(),
nullptr, FT->getParamType(i), /*TInfo=*/nullptr,
SC_None, nullptr);
parm->setScopeInfo(0, i);
Params.push_back(parm);
}
New->setParams(Params);
}
AddKnownFunctionAttributes(New);
RegisterLocallyScopedExternCDecl(New, S);
// TUScope is the translation-unit scope to insert this function into.
// FIXME: This is hideous. We need to teach PushOnScopeChains to
// relate Scopes to DeclContexts, and probably eliminate CurContext
// entirely, but we're not there yet.
DeclContext *SavedContext = CurContext;
CurContext = Parent;
PushOnScopeChains(New, TUScope);
CurContext = SavedContext;
return New;
}
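// For example, compiling as C89 (an illustrative sketch):
//
//   int main(void) {
//     printf("hi\n");  // warning: implicitly declaring library function
//                      // 'printf' with type 'int (const char *, ...)';
//                      // note: include the header <stdio.h> or explicitly
//                      // provide a declaration for 'printf'
//   }
//
// The code above synthesizes the FunctionDecl (wrapped in an implicit
// extern "C" linkage-spec in C++) and its ParmVarDecls, so later calls
// type-check against the known builtin signature.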
/// Typedef declarations don't have linkage, but they still denote the same
/// entity if their types are the same.
/// FIXME: This is notionally doing the same thing as ASTReaderDecl's
/// isSameEntity.
static void filterNonConflictingPreviousTypedefDecls(Sema &S,
TypedefNameDecl *Decl,
LookupResult &Previous) {
// This is only interesting when modules are enabled.
if (!S.getLangOpts().Modules && !S.getLangOpts().ModulesLocalVisibility)
return;
// Empty sets are uninteresting.
if (Previous.empty())
return;
LookupResult::Filter Filter = Previous.makeFilter();
while (Filter.hasNext()) {
NamedDecl *Old = Filter.next();
// Non-hidden declarations are never ignored.
if (S.isVisible(Old))
continue;
// Declarations of the same entity are not ignored, even if they have
// different linkages.
if (auto *OldTD = dyn_cast<TypedefNameDecl>(Old)) {
if (S.Context.hasSameType(OldTD->getUnderlyingType(),
Decl->getUnderlyingType()))
continue;
// If both declarations give a tag declaration a typedef name for linkage
// purposes, then they declare the same entity.
if (OldTD->getAnonDeclWithTypedefName(/*AnyRedecl*/true) &&
Decl->getAnonDeclWithTypedefName())
continue;
}
Filter.erase();
}
Filter.done();
}
bool Sema::isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New) {
QualType OldType;
if (TypedefNameDecl *OldTypedef = dyn_cast<TypedefNameDecl>(Old))
OldType = OldTypedef->getUnderlyingType();
else
OldType = Context.getTypeDeclType(Old);
QualType NewType = New->getUnderlyingType();
if (NewType->isVariablyModifiedType()) {
// Must not redefine a typedef with a variably-modified type.
int Kind = isa<TypeAliasDecl>(Old) ? 1 : 0;
Diag(New->getLocation(), diag::err_redefinition_variably_modified_typedef)
<< Kind << NewType;
if (Old->getLocation().isValid())
notePreviousDefinition(Old, New->getLocation());
New->setInvalidDecl();
return true;
}
if (OldType != NewType &&
!OldType->isDependentType() &&
!NewType->isDependentType() &&
!Context.hasSameType(OldType, NewType)) {
int Kind = isa<TypeAliasDecl>(Old) ? 1 : 0;
Diag(New->getLocation(), diag::err_redefinition_different_typedef)
<< Kind << NewType << OldType;
if (Old->getLocation().isValid())
notePreviousDefinition(Old, New->getLocation());
New->setInvalidDecl();
return true;
}
return false;
}
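// For example (illustrative only; diagnostic wording approximate):
//
//   typedef int T;
//   typedef long T;  // error: typedef redefinition with different types
//                    // ('long' vs 'int')
//
//   void h(int n) {
//     typedef int VLA[n];
//     typedef int VLA[n];  // error: redefinition of typedef for
//   }                      // variably-modified type 'int [n]'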
/// MergeTypedefNameDecl - We just parsed a typedef 'New' which has the
/// same name and scope as a previous declaration 'Old'. Figure out
/// how to resolve this situation, merging decls or emitting
/// diagnostics as appropriate. If there was an error, set New to be invalid.
///
void Sema::MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls) {
// If the new decl is known invalid already, don't bother doing any
// merging checks.
if (New->isInvalidDecl()) return;
// Allow multiple definitions for ObjC built-in typedefs.
// FIXME: Verify the underlying types are equivalent!
if (getLangOpts().ObjC) {
const IdentifierInfo *TypeID = New->getIdentifier();
switch (TypeID->getLength()) {
default: break;
case 2:
{
if (!TypeID->isStr("id"))
break;
QualType T = New->getUnderlyingType();
if (!T->isPointerType())
break;
if (!T->isVoidPointerType()) {
QualType PT = T->getAs<PointerType>()->getPointeeType();
if (!PT->isStructureType())
break;
}
Context.setObjCIdRedefinitionType(T);
// Install the built-in type for 'id', ignoring the current definition.
New->setTypeForDecl(Context.getObjCIdType().getTypePtr());
return;
}
case 5:
if (!TypeID->isStr("Class"))
break;
Context.setObjCClassRedefinitionType(New->getUnderlyingType());
// Install the built-in type for 'Class', ignoring the current definition.
New->setTypeForDecl(Context.getObjCClassType().getTypePtr());
return;
case 3:
if (!TypeID->isStr("SEL"))
break;
Context.setObjCSelRedefinitionType(New->getUnderlyingType());
// Install the built-in type for 'SEL', ignoring the current definition.
New->setTypeForDecl(Context.getObjCSelType().getTypePtr());
return;
}
// Fall through - the typedef name was not a builtin type.
}
// Verify the old decl was also a type.
TypeDecl *Old = OldDecls.getAsSingle<TypeDecl>();
if (!Old) {
Diag(New->getLocation(), diag::err_redefinition_different_kind)
<< New->getDeclName();
NamedDecl *OldD = OldDecls.getRepresentativeDecl();
if (OldD->getLocation().isValid())
notePreviousDefinition(OldD, New->getLocation());
return New->setInvalidDecl();
}
// If the old declaration is invalid, just give up here.
if (Old->isInvalidDecl())
return New->setInvalidDecl();
if (auto *OldTD = dyn_cast<TypedefNameDecl>(Old)) {
auto *OldTag = OldTD->getAnonDeclWithTypedefName(/*AnyRedecl*/true);
auto *NewTag = New->getAnonDeclWithTypedefName();
NamedDecl *Hidden = nullptr;
if (OldTag && NewTag &&
OldTag->getCanonicalDecl() != NewTag->getCanonicalDecl() &&
!hasVisibleDefinition(OldTag, &Hidden)) {
// There is a definition of this tag, but it is not visible. Use it
// instead of our tag.
New->setTypeForDecl(OldTD->getTypeForDecl());
if (OldTD->isModed())
New->setModedTypeSourceInfo(OldTD->getTypeSourceInfo(),
OldTD->getUnderlyingType());
else
New->setTypeSourceInfo(OldTD->getTypeSourceInfo());
// Make the old tag definition visible.
makeMergedDefinitionVisible(Hidden);
// If this was an unscoped enumeration, yank all of its enumerators
// out of the scope.
if (isa<EnumDecl>(NewTag)) {
Scope *EnumScope = getNonFieldDeclScope(S);
for (auto *D : NewTag->decls()) {
auto *ED = cast<EnumConstantDecl>(D);
assert(EnumScope->isDeclScope(ED));
EnumScope->RemoveDecl(ED);
IdResolver.RemoveDecl(ED);
ED->getLexicalDeclContext()->removeDecl(ED);
}
}
}
}
// If the typedef types are not identical, reject them in all languages and
// with any extensions enabled.
if (isIncompatibleTypedef(Old, New))
return;
// The types match. Link up the redeclaration chain and merge attributes if
// the old declaration was a typedef.
if (TypedefNameDecl *Typedef = dyn_cast<TypedefNameDecl>(Old)) {
New->setPreviousDecl(Typedef);
mergeDeclAttributes(New, Old);
}
if (getLangOpts().MicrosoftExt)
return;
if (getLangOpts().CPlusPlus) {
// C++ [dcl.typedef]p2:
// In a given non-class scope, a typedef specifier can be used to
// redefine the name of any type declared in that scope to refer
// to the type to which it already refers.
if (!isa<CXXRecordDecl>(CurContext))
return;
// C++0x [dcl.typedef]p4:
// In a given class scope, a typedef specifier can be used to redefine
// any class-name declared in that scope that is not also a typedef-name
// to refer to the type to which it already refers.
//
// This wording came in via DR424, which was a correction to the
// wording in DR56, which accidentally banned code like:
//
// struct S {
// typedef struct A { } A;
// };
//
// in the C++03 standard. We implement the C++0x semantics, which
// allow the above but disallow
//
// struct S {
// typedef int I;
// typedef int I;
// };
//
// since that was the intent of DR56.
if (!isa<TypedefNameDecl>(Old))
return;
Diag(New->getLocation(), diag::err_redefinition)
<< New->getDeclName();
notePreviousDefinition(Old, New->getLocation());
return New->setInvalidDecl();
}
// Modules always permit redefinition of typedefs, as does C11.
if (getLangOpts().Modules || getLangOpts().C11)
return;
// If we have a redefinition of a typedef in C, emit a warning. This warning
// is normally mapped to an error, but can be controlled with
// -Wtypedef-redefinition. If either the original or the redefinition is
// in a system header, don't emit this for compatibility with GCC.
if (getDiagnostics().getSuppressSystemWarnings() &&
// Some standard types are defined implicitly in Clang (e.g. OpenCL).
(Old->isImplicit() ||
Context.getSourceManager().isInSystemHeader(Old->getLocation()) ||
Context.getSourceManager().isInSystemHeader(New->getLocation())))
return;
Diag(New->getLocation(), diag::ext_redefinition_of_typedef)
<< New->getDeclName();
notePreviousDefinition(Old, New->getLocation());
}
/// DeclHasAttr - returns true if the decl D already has the target
/// attribute.
static bool DeclHasAttr(const Decl *D, const Attr *A) {
const OwnershipAttr *OA = dyn_cast<OwnershipAttr>(A);
const AnnotateAttr *Ann = dyn_cast<AnnotateAttr>(A);
for (const auto *i : D->attrs())
if (i->getKind() == A->getKind()) {
if (Ann) {
if (Ann->getAnnotation() == cast<AnnotateAttr>(i)->getAnnotation())
return true;
continue;
}
// FIXME: Don't hardcode this check
if (OA && isa<OwnershipAttr>(i))
return OA->getOwnKind() == cast<OwnershipAttr>(i)->getOwnKind();
return true;
}
return false;
}
static bool isAttributeTargetADefinition(Decl *D) {
if (VarDecl *VD = dyn_cast<VarDecl>(D))
return VD->isThisDeclarationADefinition();
if (TagDecl *TD = dyn_cast<TagDecl>(D))
return TD->isCompleteDefinition() || TD->isBeingDefined();
return true;
}
/// Merge alignment attributes from \p Old to \p New, taking into account the
/// special semantics of C11's _Alignas specifier and C++11's alignas attribute.
///
/// \return \c true if any attributes were added to \p New.
static bool mergeAlignedAttrs(Sema &S, NamedDecl *New, Decl *Old) {
// Look for alignas attributes on Old, and pick out whichever attribute
// specifies the strictest alignment requirement.
AlignedAttr *OldAlignasAttr = nullptr;
AlignedAttr *OldStrictestAlignAttr = nullptr;
unsigned OldAlign = 0;
for (auto *I : Old->specific_attrs<AlignedAttr>()) {
// FIXME: We have no way of representing inherited dependent alignments
// in a case like:
// template<int A, int B> struct alignas(A) X;
// template<int A, int B> struct alignas(B) X {};
// For now, we just ignore any alignas attributes which are not on the
// definition in such a case.
if (I->isAlignmentDependent())
return false;
if (I->isAlignas())
OldAlignasAttr = I;
unsigned Align = I->getAlignment(S.Context);
if (Align > OldAlign) {
OldAlign = Align;
OldStrictestAlignAttr = I;
}
}
// Look for alignas attributes on New.
AlignedAttr *NewAlignasAttr = nullptr;
unsigned NewAlign = 0;
for (auto *I : New->specific_attrs<AlignedAttr>()) {
if (I->isAlignmentDependent())
return false;
if (I->isAlignas())
NewAlignasAttr = I;
unsigned Align = I->getAlignment(S.Context);
if (Align > NewAlign)
NewAlign = Align;
}
if (OldAlignasAttr && NewAlignasAttr && OldAlign != NewAlign) {
// Both declarations have 'alignas' attributes. We require them to match.
// C++11 [dcl.align]p6 and C11 6.7.5/7 both come close to saying this, but
// fall short. (If two declarations both have alignas, they must both match
// every definition, and so must match each other if there is a definition.)
// If either declaration only contains 'alignas(0)' specifiers, then it
// specifies the natural alignment for the type.
if (OldAlign == 0 || NewAlign == 0) {
QualType Ty;
if (ValueDecl *VD = dyn_cast<ValueDecl>(New))
Ty = VD->getType();
else
Ty = S.Context.getTagDeclType(cast<TagDecl>(New));
if (OldAlign == 0)
OldAlign = S.Context.getTypeAlign(Ty);
if (NewAlign == 0)
NewAlign = S.Context.getTypeAlign(Ty);
}
if (OldAlign != NewAlign) {
S.Diag(NewAlignasAttr->getLocation(), diag::err_alignas_mismatch)
<< (unsigned)S.Context.toCharUnitsFromBits(OldAlign).getQuantity()
<< (unsigned)S.Context.toCharUnitsFromBits(NewAlign).getQuantity();
S.Diag(OldAlignasAttr->getLocation(), diag::note_previous_declaration);
}
}
if (OldAlignasAttr && !NewAlignasAttr && isAttributeTargetADefinition(New)) {
// C++11 [dcl.align]p6:
// if any declaration of an entity has an alignment-specifier,
// every defining declaration of that entity shall specify an
// equivalent alignment.
// C11 6.7.5/7:
// If the definition of an object does not have an alignment
// specifier, any other declaration of that object shall also
// have no alignment specifier.
S.Diag(New->getLocation(), diag::err_alignas_missing_on_definition)
<< OldAlignasAttr;
S.Diag(OldAlignasAttr->getLocation(), diag::note_alignas_on_declaration)
<< OldAlignasAttr;
}
bool AnyAdded = false;
// Ensure we have an attribute representing the strictest alignment.
if (OldAlign > NewAlign) {
AlignedAttr *Clone = OldStrictestAlignAttr->clone(S.Context);
Clone->setInherited(true);
New->addAttr(Clone);
AnyAdded = true;
}
// Ensure we have an alignas attribute if the old declaration had one.
if (OldAlignasAttr && !NewAlignasAttr &&
!(AnyAdded && OldStrictestAlignAttr->isAlignas())) {
AlignedAttr *Clone = OldAlignasAttr->clone(S.Context);
Clone->setInherited(true);
New->addAttr(Clone);
AnyAdded = true;
}
return AnyAdded;
}
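// For example (illustrative only; diagnostic wording approximate):
//
//   struct alignas(16) S;
//   struct alignas(32) S {};  // error: redeclaration has different
//                             // alignment requirement (16 vs 32 bytes)
//
//   struct alignas(8) U;
//   struct U {};  // error: 'alignas' must be specified on definition if it
//                 // is specified on any declaration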
static bool mergeDeclAttribute(Sema &S, NamedDecl *D,
const InheritableAttr *Attr,
Sema::AvailabilityMergeKind AMK) {
// This function copies an attribute Attr from a previous declaration to the
// new declaration D if the new declaration doesn't itself have that attribute
// yet or if that attribute allows duplicates.
// If you're adding a new attribute that requires logic different from
// "use explicit attribute on decl if present, else use attribute from
// previous decl", for example if the attribute needs to be consistent
// between redeclarations, you need to call a custom merge function here.
InheritableAttr *NewAttr = nullptr;
unsigned AttrSpellingListIndex = Attr->getSpellingListIndex();
if (const auto *AA = dyn_cast<AvailabilityAttr>(Attr))
NewAttr = S.mergeAvailabilityAttr(
D, AA->getRange(), AA->getPlatform(), AA->isImplicit(),
AA->getIntroduced(), AA->getDeprecated(), AA->getObsoleted(),
AA->getUnavailable(), AA->getMessage(), AA->getStrict(),
AA->getReplacement(), AMK, AA->getPriority(), AttrSpellingListIndex);
else if (const auto *VA = dyn_cast<VisibilityAttr>(Attr))
NewAttr = S.mergeVisibilityAttr(D, VA->getRange(), VA->getVisibility(),
AttrSpellingListIndex);
else if (const auto *VA = dyn_cast<TypeVisibilityAttr>(Attr))
NewAttr = S.mergeTypeVisibilityAttr(D, VA->getRange(), VA->getVisibility(),
AttrSpellingListIndex);
else if (const auto *ImportA = dyn_cast<DLLImportAttr>(Attr))
NewAttr = S.mergeDLLImportAttr(D, ImportA->getRange(),
AttrSpellingListIndex);
else if (const auto *ExportA = dyn_cast<DLLExportAttr>(Attr))
NewAttr = S.mergeDLLExportAttr(D, ExportA->getRange(),
AttrSpellingListIndex);
else if (const auto *FA = dyn_cast<FormatAttr>(Attr))
NewAttr = S.mergeFormatAttr(D, FA->getRange(), FA->getType(),
FA->getFormatIdx(), FA->getFirstArg(),
AttrSpellingListIndex);
else if (const auto *SA = dyn_cast<SectionAttr>(Attr))
NewAttr = S.mergeSectionAttr(D, SA->getRange(), SA->getName(),
AttrSpellingListIndex);
else if (const auto *CSA = dyn_cast<CodeSegAttr>(Attr))
NewAttr = S.mergeCodeSegAttr(D, CSA->getRange(), CSA->getName(),
AttrSpellingListIndex);
else if (const auto *IA = dyn_cast<MSInheritanceAttr>(Attr))
NewAttr = S.mergeMSInheritanceAttr(D, IA->getRange(), IA->getBestCase(),
AttrSpellingListIndex,
IA->getSemanticSpelling());
else if (const auto *AA = dyn_cast<AlwaysInlineAttr>(Attr))
NewAttr = S.mergeAlwaysInlineAttr(D, AA->getRange(),
&S.Context.Idents.get(AA->getSpelling()),
AttrSpellingListIndex);
else if (S.getLangOpts().CUDA && isa<FunctionDecl>(D) &&
(isa<CUDAHostAttr>(Attr) || isa<CUDADeviceAttr>(Attr) ||
isa<CUDAGlobalAttr>(Attr))) {
// CUDA target attributes are part of function signature for
// overloading purposes and must not be merged.
return false;
} else if (const auto *MA = dyn_cast<MinSizeAttr>(Attr))
NewAttr = S.mergeMinSizeAttr(D, MA->getRange(), AttrSpellingListIndex);
else if (const auto *OA = dyn_cast<OptimizeNoneAttr>(Attr))
NewAttr = S.mergeOptimizeNoneAttr(D, OA->getRange(), AttrSpellingListIndex);
else if (const auto *InternalLinkageA = dyn_cast<InternalLinkageAttr>(Attr))
NewAttr = S.mergeInternalLinkageAttr(D, *InternalLinkageA);
else if (const auto *CommonA = dyn_cast<CommonAttr>(Attr))
NewAttr = S.mergeCommonAttr(D, *CommonA);
else if (isa<AlignedAttr>(Attr))
// AlignedAttrs are handled separately, because we need to handle all
// such attributes on a declaration at the same time.
NewAttr = nullptr;
else if ((isa<DeprecatedAttr>(Attr) || isa<UnavailableAttr>(Attr)) &&
(AMK == Sema::AMK_Override ||
AMK == Sema::AMK_ProtocolImplementation))
NewAttr = nullptr;
else if (const auto *UA = dyn_cast<UuidAttr>(Attr))
NewAttr = S.mergeUuidAttr(D, UA->getRange(), AttrSpellingListIndex,
UA->getGuid());
else if (const auto *SLHA = dyn_cast<SpeculativeLoadHardeningAttr>(Attr))
NewAttr = S.mergeSpeculativeLoadHardeningAttr(D, *SLHA);
else if (const auto *SLHA = dyn_cast<NoSpeculativeLoadHardeningAttr>(Attr))
NewAttr = S.mergeNoSpeculativeLoadHardeningAttr(D, *SLHA);
else if (Attr->shouldInheritEvenIfAlreadyPresent() || !DeclHasAttr(D, Attr))
NewAttr = cast<InheritableAttr>(Attr->clone(S.Context));
if (NewAttr) {
NewAttr->setInherited(true);
D->addAttr(NewAttr);
if (isa<MSInheritanceAttr>(NewAttr))
S.Consumer.AssignInheritanceModel(cast<CXXRecordDecl>(D));
return true;
}
return false;
}
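// For instance (a minimal sketch):
//
//   __attribute__((deprecated)) void f();
//   void f();          // the deprecated attribute is cloned onto this
//                      // redeclaration and marked as inherited
//   void g() { f(); }  // warning: 'f' is deprecated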
static const NamedDecl *getDefinition(const Decl *D) {
if (const TagDecl *TD = dyn_cast<TagDecl>(D))
return TD->getDefinition();
if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
const VarDecl *Def = VD->getDefinition();
if (Def)
return Def;
return VD->getActingDefinition();
}
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
return FD->getDefinition();
return nullptr;
}
static bool hasAttribute(const Decl *D, attr::Kind Kind) {
for (const auto *Attribute : D->attrs())
if (Attribute->getKind() == Kind)
return true;
return false;
}
/// checkNewAttributesAfterDef - If we already have a definition, check that
/// there are no new attributes in this declaration.
static void checkNewAttributesAfterDef(Sema &S, Decl *New, const Decl *Old) {
if (!New->hasAttrs())
return;
const NamedDecl *Def = getDefinition(Old);
if (!Def || Def == New)
return;
AttrVec &NewAttributes = New->getAttrs();
for (unsigned I = 0, E = NewAttributes.size(); I != E;) {
const Attr *NewAttribute = NewAttributes[I];
if (isa<AliasAttr>(NewAttribute) || isa<IFuncAttr>(NewAttribute)) {
if (FunctionDecl *FD = dyn_cast<FunctionDecl>(New)) {
Sema::SkipBodyInfo SkipBody;
S.CheckForFunctionRedefinition(FD, cast<FunctionDecl>(Def), &SkipBody);
// If we're skipping this definition, drop the "alias" attribute.
if (SkipBody.ShouldSkip) {
NewAttributes.erase(NewAttributes.begin() + I);
--E;
continue;
}
} else {
VarDecl *VD = cast<VarDecl>(New);
unsigned Diag = cast<VarDecl>(Def)->isThisDeclarationADefinition() ==
VarDecl::TentativeDefinition
? diag::err_alias_after_tentative
: diag::err_redefinition;
S.Diag(VD->getLocation(), Diag) << VD->getDeclName();
if (Diag == diag::err_redefinition)
S.notePreviousDefinition(Def, VD->getLocation());
else
S.Diag(Def->getLocation(), diag::note_previous_definition);
VD->setInvalidDecl();
}
++I;
continue;
}
if (const VarDecl *VD = dyn_cast<VarDecl>(Def)) {
// Tentative definitions are only interesting for the alias check above.
if (VD->isThisDeclarationADefinition() != VarDecl::Definition) {
++I;
continue;
}
}
if (hasAttribute(Def, NewAttribute->getKind())) {
++I;
continue; // regular attr merging will take care of validating this.
}
if (isa<C11NoReturnAttr>(NewAttribute)) {
// C's _Noreturn is allowed to be added to a function after it is defined.
++I;
continue;
} else if (const AlignedAttr *AA = dyn_cast<AlignedAttr>(NewAttribute)) {
if (AA->isAlignas()) {
// C++11 [dcl.align]p6:
// if any declaration of an entity has an alignment-specifier,
// every defining declaration of that entity shall specify an
// equivalent alignment.
// C11 6.7.5/7:
// If the definition of an object does not have an alignment
// specifier, any other declaration of that object shall also
// have no alignment specifier.
S.Diag(Def->getLocation(), diag::err_alignas_missing_on_definition)
<< AA;
S.Diag(NewAttribute->getLocation(), diag::note_alignas_on_declaration)
<< AA;
NewAttributes.erase(NewAttributes.begin() + I);
--E;
continue;
}
}
S.Diag(NewAttribute->getLocation(),
diag::warn_attribute_precede_definition);
S.Diag(Def->getLocation(), diag::note_previous_definition);
NewAttributes.erase(NewAttributes.begin() + I);
--E;
}
}
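// For example (illustrative only):
//
//   void f() {}
//   __attribute__((constructor)) void f();  // warning: attribute
//                                           // declaration must precede
//                                           // definition
//
// The late attribute is then dropped from the redeclaration, since the
// definition has already been processed without it.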
/// mergeDeclAttributes - Copy attributes from the Old decl to the New one.
void Sema::mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK) {
if (UsedAttr *OldAttr = Old->getMostRecentDecl()->getAttr<UsedAttr>()) {
UsedAttr *NewAttr = OldAttr->clone(Context);
NewAttr->setInherited(true);
New->addAttr(NewAttr);
}
if (!Old->hasAttrs() && !New->hasAttrs())
return;
// Attributes declared post-definition are currently ignored.
checkNewAttributesAfterDef(*this, New, Old);
if (AsmLabelAttr *NewA = New->getAttr<AsmLabelAttr>()) {
if (AsmLabelAttr *OldA = Old->getAttr<AsmLabelAttr>()) {
if (OldA->getLabel() != NewA->getLabel()) {
// This redeclaration changes __asm__ label.
Diag(New->getLocation(), diag::err_different_asm_label);
Diag(OldA->getLocation(), diag::note_previous_declaration);
}
} else if (Old->isUsed()) {
// This redeclaration adds an __asm__ label to a declaration that has
// already been ODR-used.
Diag(New->getLocation(), diag::err_late_asm_label_name)
<< isa<FunctionDecl>(Old) << New->getAttr<AsmLabelAttr>()->getRange();
}
}
// Re-declaration cannot add abi_tag's.
if (const auto *NewAbiTagAttr = New->getAttr<AbiTagAttr>()) {
if (const auto *OldAbiTagAttr = Old->getAttr<AbiTagAttr>()) {
for (const auto &NewTag : NewAbiTagAttr->tags()) {
if (std::find(OldAbiTagAttr->tags_begin(), OldAbiTagAttr->tags_end(),
NewTag) == OldAbiTagAttr->tags_end()) {
Diag(NewAbiTagAttr->getLocation(),
diag::err_new_abi_tag_on_redeclaration)
<< NewTag;
Diag(OldAbiTagAttr->getLocation(), diag::note_previous_declaration);
}
}
} else {
Diag(NewAbiTagAttr->getLocation(), diag::err_abi_tag_on_redeclaration);
Diag(Old->getLocation(), diag::note_previous_declaration);
}
}
// This redeclaration adds a section attribute.
if (New->hasAttr<SectionAttr>() && !Old->hasAttr<SectionAttr>()) {
if (auto *VD = dyn_cast<VarDecl>(New)) {
if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly) {
Diag(New->getLocation(), diag::warn_attribute_section_on_redeclaration);
Diag(Old->getLocation(), diag::note_previous_declaration);
}
}
}
// Redeclaration adds code-seg attribute.
const auto *NewCSA = New->getAttr<CodeSegAttr>();
if (NewCSA && !Old->hasAttr<CodeSegAttr>() &&
!NewCSA->isImplicit() && isa<CXXMethodDecl>(New)) {
Diag(New->getLocation(), diag::warn_mismatched_section)
<< 0 /*codeseg*/;
Diag(Old->getLocation(), diag::note_previous_declaration);
}
if (!Old->hasAttrs())
return;
bool foundAny = New->hasAttrs();
// Ensure that any moving of objects within the allocated map is done before
// we process them.
if (!foundAny) New->setAttrs(AttrVec());
for (auto *I : Old->specific_attrs<InheritableAttr>()) {
// Ignore deprecated/unavailable/availability attributes if requested.
AvailabilityMergeKind LocalAMK = AMK_None;
if (isa<DeprecatedAttr>(I) ||
isa<UnavailableAttr>(I) ||
isa<AvailabilityAttr>(I)) {
switch (AMK) {
case AMK_None:
continue;
case AMK_Redeclaration:
case AMK_Override:
case AMK_ProtocolImplementation:
LocalAMK = AMK;
break;
}
}
// Already handled.
if (isa<UsedAttr>(I))
continue;
if (mergeDeclAttribute(*this, New, I, LocalAMK))
foundAny = true;
}
if (mergeAlignedAttrs(*this, New, Old))
foundAny = true;
if (!foundAny) New->dropAttrs();
}
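// A sketch of the __asm__-label checks above (illustrative only):
//
//   int g(void) __asm__("g_v1");
//   int g(void) __asm__("g_v2");  // error: conflicting asm label
//
// Similarly, adding an asm label once the declaration has already been
// ODR-used is an error, since references may already have been emitted
// under the previous name.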
/// mergeParamDeclAttributes - Copy attributes from the old parameter
/// to the new one.
static void mergeParamDeclAttributes(ParmVarDecl *newDecl,
const ParmVarDecl *oldDecl,
Sema &S) {
// C++11 [dcl.attr.depend]p2:
// The first declaration of a function shall specify the
// carries_dependency attribute for its declarator-id if any declaration
// of the function specifies the carries_dependency attribute.
const CarriesDependencyAttr *CDA = newDecl->getAttr<CarriesDependencyAttr>();
if (CDA && !oldDecl->hasAttr<CarriesDependencyAttr>()) {
S.Diag(CDA->getLocation(),
diag::err_carries_dependency_missing_on_first_decl) << 1/*Param*/;
// Find the first declaration of the parameter.
// FIXME: Should we build redeclaration chains for function parameters?
const FunctionDecl *FirstFD =
cast<FunctionDecl>(oldDecl->getDeclContext())->getFirstDecl();
const ParmVarDecl *FirstVD =
FirstFD->getParamDecl(oldDecl->getFunctionScopeIndex());
S.Diag(FirstVD->getLocation(),
diag::note_carries_dependency_missing_first_decl) << 1/*Param*/;
}
if (!oldDecl->hasAttrs())
return;
bool foundAny = newDecl->hasAttrs();
// Ensure that any moving of objects within the allocated map is
// done before we process them.
if (!foundAny) newDecl->setAttrs(AttrVec());
for (const auto *I : oldDecl->specific_attrs<InheritableParamAttr>()) {
if (!DeclHasAttr(newDecl, I)) {
InheritableAttr *newAttr =
cast<InheritableParamAttr>(I->clone(S.Context));
newAttr->setInherited(true);
newDecl->addAttr(newAttr);
foundAny = true;
}
}
if (!foundAny) newDecl->dropAttrs();
}
static void mergeParamDeclTypes(ParmVarDecl *NewParam,
const ParmVarDecl *OldParam,
Sema &S) {
if (auto Oldnullability = OldParam->getType()->getNullability(S.Context)) {
if (auto Newnullability = NewParam->getType()->getNullability(S.Context)) {
if (*Oldnullability != *Newnullability) {
S.Diag(NewParam->getLocation(), diag::warn_mismatched_nullability_attr)
<< DiagNullabilityKind(
*Newnullability,
((NewParam->getObjCDeclQualifier() & Decl::OBJC_TQ_CSNullability)
!= 0))
<< DiagNullabilityKind(
*Oldnullability,
((OldParam->getObjCDeclQualifier() & Decl::OBJC_TQ_CSNullability)
!= 0));
S.Diag(OldParam->getLocation(), diag::note_previous_declaration);
}
} else {
QualType NewT = NewParam->getType();
NewT = S.Context.getAttributedType(
AttributedType::getNullabilityAttrKind(*Oldnullability),
NewT, NewT);
NewParam->setType(NewT);
}
}
}
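// For example (illustrative only; wording approximate):
//
//   void f(int * _Nonnull p);
//   void f(int * _Nullable p) {}  // warning: nullability specifier
//                                 // '_Nullable' conflicts with existing
//                                 // specifier '_Nonnull'
//
// If only the old parameter carries a nullability qualifier, it is merged
// onto the new parameter's type as sugar instead of being diagnosed.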
namespace {
/// Used in MergeFunctionDecl to keep track of function parameters in
/// C.
struct GNUCompatibleParamWarning {
ParmVarDecl *OldParm;
ParmVarDecl *NewParm;
QualType PromotedType;
};
} // end anonymous namespace
/// getSpecialMember - get the special member enum for a method.
Sema::CXXSpecialMember Sema::getSpecialMember(const CXXMethodDecl *MD) {
if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
if (Ctor->isDefaultConstructor())
return Sema::CXXDefaultConstructor;
if (Ctor->isCopyConstructor())
return Sema::CXXCopyConstructor;
if (Ctor->isMoveConstructor())
return Sema::CXXMoveConstructor;
} else if (isa<CXXDestructorDecl>(MD)) {
return Sema::CXXDestructor;
} else if (MD->isCopyAssignmentOperator()) {
return Sema::CXXCopyAssignment;
} else if (MD->isMoveAssignmentOperator()) {
return Sema::CXXMoveAssignment;
}
return Sema::CXXInvalid;
}
// Determine whether the previous declaration was a definition, an implicit
// declaration, or a plain declaration.
template <typename T>
static std::pair<diag::kind, SourceLocation>
getNoteDiagForInvalidRedeclaration(const T *Old, const T *New) {
diag::kind PrevDiag;
SourceLocation OldLocation = Old->getLocation();
if (Old->isThisDeclarationADefinition())
PrevDiag = diag::note_previous_definition;
else if (Old->isImplicit()) {
PrevDiag = diag::note_previous_implicit_declaration;
if (OldLocation.isInvalid())
OldLocation = New->getLocation();
} else
PrevDiag = diag::note_previous_declaration;
return std::make_pair(PrevDiag, OldLocation);
}
/// canRedefineFunction - checks if a function can be redefined. Currently,
/// only extern inline functions can be redefined, and even then only in
/// GNU89 mode.
static bool canRedefineFunction(const FunctionDecl *FD,
const LangOptions& LangOpts) {
return ((FD->hasAttr<GNUInlineAttr>() || LangOpts.GNUInline) &&
!LangOpts.CPlusPlus &&
FD->isInlineSpecified() &&
FD->getStorageClass() == SC_Extern);
}
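// For example, with -std=gnu89 (illustrative only):
//
//   extern inline int twice(int x) { return 2 * x; }
//   int twice(int x) { return x + x; }  // OK in gnu89: the extern inline
//                                       // body is just an inline substitute
//
// In C99 or C++ modes the second definition would be a redefinition error.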
const AttributedType *Sema::getCallingConvAttributedType(QualType T) const {
const AttributedType *AT = T->getAs<AttributedType>();
while (AT && !AT->isCallingConv())
AT = AT->getModifiedType()->getAs<AttributedType>();
return AT;
}
template <typename T>
static bool haveIncompatibleLanguageLinkages(const T *Old, const T *New) {
const DeclContext *DC = Old->getDeclContext();
if (DC->isRecord())
return false;
LanguageLinkage OldLinkage = Old->getLanguageLinkage();
if (OldLinkage == CXXLanguageLinkage && New->isInExternCContext())
return true;
if (OldLinkage == CLanguageLinkage && New->isInExternCXXContext())
return true;
return false;
}
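// For example (illustrative only):
//
//   extern "C" int f();
//   int f();             // OK: redeclaration keeps the C language linkage
//
//   int g();
//   extern "C" int g();  // error: declaration of 'g' has a different
//                        // language linkage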
template<typename T> static bool isExternC(T *D) { return D->isExternC(); }
static bool isExternC(VarTemplateDecl *) { return false; }
/// Check whether a redeclaration of an entity introduced by a
/// using-declaration is valid, given that we know it's not an overload
/// (nor a hidden tag declaration).
template<typename ExpectedDecl>
static bool checkUsingShadowRedecl(Sema &S, UsingShadowDecl *OldS,
ExpectedDecl *New) {
// C++11 [basic.scope.declarative]p4:
// Given a set of declarations in a single declarative region, each of
// which specifies the same unqualified name,
// -- they shall all refer to the same entity, or all refer to functions
// and function templates; or
// -- exactly one declaration shall declare a class name or enumeration
// name that is not a typedef name and the other declarations shall all
// refer to the same variable or enumerator, or all refer to functions
// and function templates; in this case the class name or enumeration
// name is hidden (3.3.10).
// C++11 [namespace.udecl]p14:
// If a function declaration in namespace scope or block scope has the
// same name and the same parameter-type-list as a function introduced
// by a using-declaration, and the declarations do not declare the same
// function, the program is ill-formed.
auto *Old = dyn_cast<ExpectedDecl>(OldS->getTargetDecl());
if (Old &&
!Old->getDeclContext()->getRedeclContext()->Equals(
New->getDeclContext()->getRedeclContext()) &&
!(isExternC(Old) && isExternC(New)))
Old = nullptr;
if (!Old) {
S.Diag(New->getLocation(), diag::err_using_decl_conflict_reverse);
S.Diag(OldS->getTargetDecl()->getLocation(), diag::note_using_decl_target);
S.Diag(OldS->getUsingDecl()->getLocation(), diag::note_using_decl) << 0;
return true;
}
return false;
}
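// For example (illustrative only):
//
//   namespace N { void f(int); }
//   using N::f;
//   void f(int);  // error: declaration conflicts with target of using
//                 // declaration already in scope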
static bool hasIdenticalPassObjectSizeAttrs(const FunctionDecl *A,
const FunctionDecl *B) {
assert(A->getNumParams() == B->getNumParams());
auto AttrEq = [](const ParmVarDecl *A, const ParmVarDecl *B) {
const auto *AttrA = A->getAttr<PassObjectSizeAttr>();
const auto *AttrB = B->getAttr<PassObjectSizeAttr>();
if (AttrA == AttrB)
return true;
return AttrA && AttrB && AttrA->getType() == AttrB->getType() &&
AttrA->isDynamic() == AttrB->isDynamic();
};
return std::equal(A->param_begin(), A->param_end(), B->param_begin(), AttrEq);
}
/// If necessary, adjust the semantic declaration context for a qualified
/// declaration to name the correct inline namespace within the qualifier.
static void adjustDeclContextForDeclaratorDecl(DeclaratorDecl *NewD,
DeclaratorDecl *OldD) {
// The only case where we need to update the DeclContext is when
// redeclaration lookup for a qualified name finds a declaration
// in an inline namespace within the context named by the qualifier:
//
// inline namespace N { int f(); }
// int ::f(); // Sema DC needs adjusting from :: to N::.
//
// For unqualified declarations, the semantic context *can* change
// along the redeclaration chain (for local extern declarations,
// extern "C" declarations, and friend declarations in particular).
if (!NewD->getQualifier())
return;
// NewD is probably already in the right context.
auto *NamedDC = NewD->getDeclContext()->getRedeclContext();
auto *SemaDC = OldD->getDeclContext()->getRedeclContext();
if (NamedDC->Equals(SemaDC))
return;
assert((NamedDC->InEnclosingNamespaceSetOf(SemaDC) ||
NewD->isInvalidDecl() || OldD->isInvalidDecl()) &&
"unexpected context for redeclaration");
auto *LexDC = NewD->getLexicalDeclContext();
auto FixSemaDC = [=](NamedDecl *D) {
if (!D)
return;
D->setDeclContext(SemaDC);
D->setLexicalDeclContext(LexDC);
};
FixSemaDC(NewD);
if (auto *FD = dyn_cast<FunctionDecl>(NewD))
FixSemaDC(FD->getDescribedFunctionTemplate());
else if (auto *VD = dyn_cast<VarDecl>(NewD))
FixSemaDC(VD->getDescribedVarTemplate());
}
/// MergeFunctionDecl - We just parsed a function 'New' from
/// declarator D which has the same name and scope as a previous
/// declaration 'Old'. Figure out how to resolve this situation,
/// merging decls or emitting diagnostics as appropriate.
///
/// In C++, New and Old must be declarations that are not
/// overloaded. Use IsOverload to determine whether New and Old are
/// overloaded, and to select the Old declaration that New should be
/// merged with.
///
/// Returns true if there was an error, false otherwise.
bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD,
Scope *S, bool MergeTypeWithOld) {
// Verify the old decl was also a function.
FunctionDecl *Old = OldD->getAsFunction();
if (!Old) {
if (UsingShadowDecl *Shadow = dyn_cast<UsingShadowDecl>(OldD)) {
if (New->getFriendObjectKind()) {
Diag(New->getLocation(), diag::err_using_decl_friend);
Diag(Shadow->getTargetDecl()->getLocation(),
diag::note_using_decl_target);
Diag(Shadow->getUsingDecl()->getLocation(),
diag::note_using_decl) << 0;
return true;
}
// Check whether the two declarations might declare the same function.
if (checkUsingShadowRedecl<FunctionDecl>(*this, Shadow, New))
return true;
OldD = Old = cast<FunctionDecl>(Shadow->getTargetDecl());
} else {
Diag(New->getLocation(), diag::err_redefinition_different_kind)
<< New->getDeclName();
notePreviousDefinition(OldD, New->getLocation());
return true;
}
}
// If the old declaration is invalid, just give up here.
if (Old->isInvalidDecl())
return true;
// Disallow redeclaration of some builtins.
if (!getASTContext().canBuiltinBeRedeclared(Old)) {
Diag(New->getLocation(), diag::err_builtin_redeclare) << Old->getDeclName();
Diag(Old->getLocation(), diag::note_previous_builtin_declaration)
<< Old << Old->getType();
return true;
}
diag::kind PrevDiag;
SourceLocation OldLocation;
std::tie(PrevDiag, OldLocation) =
getNoteDiagForInvalidRedeclaration(Old, New);
// Don't complain about this if we're in GNU89 mode and the old function
// is an extern inline function.
// Don't complain about specializations. They are not supposed to have
// storage classes.
if (!isa<CXXMethodDecl>(New) && !isa<CXXMethodDecl>(Old) &&
New->getStorageClass() == SC_Static &&
Old->hasExternalFormalLinkage() &&
!New->getTemplateSpecializationInfo() &&
!canRedefineFunction(Old, getLangOpts())) {
if (getLangOpts().MicrosoftExt) {
Diag(New->getLocation(), diag::ext_static_non_static) << New;
Diag(OldLocation, PrevDiag);
} else {
Diag(New->getLocation(), diag::err_static_non_static) << New;
Diag(OldLocation, PrevDiag);
return true;
}
}
if (New->hasAttr<InternalLinkageAttr>() &&
!Old->hasAttr<InternalLinkageAttr>()) {
Diag(New->getLocation(), diag::err_internal_linkage_redeclaration)
<< New->getDeclName();
notePreviousDefinition(Old, New->getLocation());
New->dropAttr<InternalLinkageAttr>();
}
if (CheckRedeclarationModuleOwnership(New, Old))
return true;
if (!getLangOpts().CPlusPlus) {
bool OldOvl = Old->hasAttr<OverloadableAttr>();
if (OldOvl != New->hasAttr<OverloadableAttr>() && !Old->isImplicit()) {
Diag(New->getLocation(), diag::err_attribute_overloadable_mismatch)
<< New << OldOvl;
// Try our best to find a decl that actually has the overloadable
// attribute for the note. In most cases (e.g. programs with only one
// broken declaration/definition), this won't matter.
//
// FIXME: We could do this if we juggled some extra state in
// OverloadableAttr, rather than just removing it.
const Decl *DiagOld = Old;
if (OldOvl) {
auto OldIter = llvm::find_if(Old->redecls(), [](const Decl *D) {
const auto *A = D->getAttr<OverloadableAttr>();
return A && !A->isImplicit();
});
// If we've implicitly added *all* of the overloadable attrs to this
// chain, emitting a "previous redecl" note is pointless.
DiagOld = OldIter == Old->redecls_end() ? nullptr : *OldIter;
}
if (DiagOld)
Diag(DiagOld->getLocation(),
diag::note_attribute_overloadable_prev_overload)
<< OldOvl;
if (OldOvl)
New->addAttr(OverloadableAttr::CreateImplicit(Context));
else
New->dropAttr<OverloadableAttr>();
}
}
// If a function is first declared with a calling convention, but is later
// declared or defined without one, all following decls assume the calling
// convention of the first.
//
// It's OK if a function is first declared without a calling convention,
// but is later declared or defined with the default calling convention.
//
// To test if either decl has an explicit calling convention, we look for
// AttributedType sugar nodes on the type as written. If they are missing or
// were canonicalized away, we assume the calling convention was implicit.
//
// Note also that we DO NOT return at this point, because we still have
// other tests to run.
QualType OldQType = Context.getCanonicalType(Old->getType());
QualType NewQType = Context.getCanonicalType(New->getType());
const FunctionType *OldType = cast<FunctionType>(OldQType);
const FunctionType *NewType = cast<FunctionType>(NewQType);
FunctionType::ExtInfo OldTypeInfo = OldType->getExtInfo();
FunctionType::ExtInfo NewTypeInfo = NewType->getExtInfo();
bool RequiresAdjustment = false;
if (OldTypeInfo.getCC() != NewTypeInfo.getCC()) {
FunctionDecl *First = Old->getFirstDecl();
const FunctionType *FT =
First->getType().getCanonicalType()->castAs<FunctionType>();
FunctionType::ExtInfo FI = FT->getExtInfo();
bool NewCCExplicit = getCallingConvAttributedType(New->getType());
if (!NewCCExplicit) {
// Inherit the CC from the previous declaration if it was specified
// there but not here.
NewTypeInfo = NewTypeInfo.withCallingConv(OldTypeInfo.getCC());
RequiresAdjustment = true;
} else if (New->getBuiltinID()) {
// Calling conventions on a builtin aren't really useful, and setting a
// default calling convention or cdecl'ing some builtin redeclarations is
// common, so warn about and then ignore the calling convention on the
// redeclaration.
Diag(New->getLocation(), diag::warn_cconv_unsupported)
<< FunctionType::getNameForCallConv(NewTypeInfo.getCC())
<< (int)CallingConventionIgnoredReason::BuiltinFunction;
NewTypeInfo = NewTypeInfo.withCallingConv(OldTypeInfo.getCC());
RequiresAdjustment = true;
} else {
// Calling conventions aren't compatible, so complain.
bool FirstCCExplicit = getCallingConvAttributedType(First->getType());
Diag(New->getLocation(), diag::err_cconv_change)
<< FunctionType::getNameForCallConv(NewTypeInfo.getCC())
<< !FirstCCExplicit
<< (!FirstCCExplicit ? "" :
FunctionType::getNameForCallConv(FI.getCC()));
// Put the note on the first decl, since it is the one that matters.
Diag(First->getLocation(), diag::note_previous_declaration);
return true;
}
}
// FIXME: diagnose the other way around?
if (OldTypeInfo.getNoReturn() && !NewTypeInfo.getNoReturn()) {
NewTypeInfo = NewTypeInfo.withNoReturn(true);
RequiresAdjustment = true;
}
// Merge regparm attribute.
if (OldTypeInfo.getHasRegParm() != NewTypeInfo.getHasRegParm() ||
OldTypeInfo.getRegParm() != NewTypeInfo.getRegParm()) {
if (NewTypeInfo.getHasRegParm()) {
Diag(New->getLocation(), diag::err_regparm_mismatch)
<< NewType->getRegParmType()
<< OldType->getRegParmType();
Diag(OldLocation, diag::note_previous_declaration);
return true;
}
NewTypeInfo = NewTypeInfo.withRegParm(OldTypeInfo.getRegParm());
RequiresAdjustment = true;
}
// Merge ns_returns_retained attribute.
if (OldTypeInfo.getProducesResult() != NewTypeInfo.getProducesResult()) {
if (NewTypeInfo.getProducesResult()) {
Diag(New->getLocation(), diag::err_function_attribute_mismatch)
<< "'ns_returns_retained'";
Diag(OldLocation, diag::note_previous_declaration);
return true;
}
NewTypeInfo = NewTypeInfo.withProducesResult(true);
RequiresAdjustment = true;
}
if (OldTypeInfo.getNoCallerSavedRegs() !=
NewTypeInfo.getNoCallerSavedRegs()) {
if (NewTypeInfo.getNoCallerSavedRegs()) {
AnyX86NoCallerSavedRegistersAttr *Attr =
New->getAttr<AnyX86NoCallerSavedRegistersAttr>();
Diag(New->getLocation(), diag::err_function_attribute_mismatch) << Attr;
Diag(OldLocation, diag::note_previous_declaration);
return true;
}
NewTypeInfo = NewTypeInfo.withNoCallerSavedRegs(true);
RequiresAdjustment = true;
}
if (RequiresAdjustment) {
const FunctionType *AdjustedType = New->getType()->getAs<FunctionType>();
AdjustedType = Context.adjustFunctionType(AdjustedType, NewTypeInfo);
New->setType(QualType(AdjustedType, 0));
NewQType = Context.getCanonicalType(New->getType());
}
// If this redeclaration makes the function inline, we may need to add it to
// UndefinedButUsed.
if (!Old->isInlined() && New->isInlined() &&
!New->hasAttr<GNUInlineAttr>() &&
!getLangOpts().GNUInline &&
Old->isUsed(false) &&
!Old->isDefined() && !New->isThisDeclarationADefinition())
UndefinedButUsed.insert(std::make_pair(Old->getCanonicalDecl(),
SourceLocation()));
// If this redeclaration makes it newly gnu_inline, we don't want to warn
// about it.
if (New->hasAttr<GNUInlineAttr>() &&
Old->isInlined() && !Old->hasAttr<GNUInlineAttr>()) {
UndefinedButUsed.erase(Old->getCanonicalDecl());
}
// If pass_object_size params don't match up perfectly, this isn't a valid
// redeclaration.
if (Old->getNumParams() > 0 && Old->getNumParams() == New->getNumParams() &&
!hasIdenticalPassObjectSizeAttrs(Old, New)) {
Diag(New->getLocation(), diag::err_different_pass_object_size_params)
<< New->getDeclName();
Diag(OldLocation, PrevDiag) << Old << Old->getType();
return true;
}
if (getLangOpts().CPlusPlus) {
// C++1z [over.load]p2
// Certain function declarations cannot be overloaded:
// -- Function declarations that differ only in the return type,
// the exception specification, or both cannot be overloaded.
// Check the exception specifications match. This may recompute the type of
// both Old and New if it resolved exception specifications, so grab the
// types again after this. Because this updates the type, we do this before
// any of the other checks below, which may update the "de facto" NewQType
// but do not necessarily update the type of New.
if (CheckEquivalentExceptionSpec(Old, New))
return true;
OldQType = Context.getCanonicalType(Old->getType());
NewQType = Context.getCanonicalType(New->getType());
// Go back to the type source info to compare the declared return types,
// per C++1y [dcl.type.auto]p13:
// Redeclarations or specializations of a function or function template
// with a declared return type that uses a placeholder type shall also
// use that placeholder, not a deduced type.
QualType OldDeclaredReturnType = Old->getDeclaredReturnType();
QualType NewDeclaredReturnType = New->getDeclaredReturnType();
if (!Context.hasSameType(OldDeclaredReturnType, NewDeclaredReturnType) &&
canFullyTypeCheckRedeclaration(New, Old, NewDeclaredReturnType,
OldDeclaredReturnType)) {
QualType ResQT;
if (NewDeclaredReturnType->isObjCObjectPointerType() &&
OldDeclaredReturnType->isObjCObjectPointerType())
// FIXME: This does the wrong thing for a deduced return type.
ResQT = Context.mergeObjCGCQualifiers(NewQType, OldQType);
if (ResQT.isNull()) {
if (New->isCXXClassMember() && New->isOutOfLine())
Diag(New->getLocation(), diag::err_member_def_does_not_match_ret_type)
<< New << New->getReturnTypeSourceRange();
else
Diag(New->getLocation(), diag::err_ovl_diff_return_type)
<< New->getReturnTypeSourceRange();
Diag(OldLocation, PrevDiag) << Old << Old->getType()
<< Old->getReturnTypeSourceRange();
return true;
}
else
NewQType = ResQT;
}
QualType OldReturnType = OldType->getReturnType();
QualType NewReturnType = cast<FunctionType>(NewQType)->getReturnType();
if (OldReturnType != NewReturnType) {
// If this function has a deduced return type and has already been
// defined, copy the deduced value from the old declaration.
AutoType *OldAT = Old->getReturnType()->getContainedAutoType();
if (OldAT && OldAT->isDeduced()) {
New->setType(
SubstAutoType(New->getType(),
OldAT->isDependentType() ? Context.DependentTy
: OldAT->getDeducedType()));
NewQType = Context.getCanonicalType(
SubstAutoType(NewQType,
OldAT->isDependentType() ? Context.DependentTy
: OldAT->getDeducedType()));
}
}
const CXXMethodDecl *OldMethod = dyn_cast<CXXMethodDecl>(Old);
CXXMethodDecl *NewMethod = dyn_cast<CXXMethodDecl>(New);
if (OldMethod && NewMethod) {
// Preserve triviality.
NewMethod->setTrivial(OldMethod->isTrivial());
// MSVC allows explicit template specialization at class scope:
// 2 CXXMethodDecls referring to the same function will be injected.
// We don't want a redeclaration error.
bool IsClassScopeExplicitSpecialization =
OldMethod->isFunctionTemplateSpecialization() &&
NewMethod->isFunctionTemplateSpecialization();
bool isFriend = NewMethod->getFriendObjectKind();
if (!isFriend && NewMethod->getLexicalDeclContext()->isRecord() &&
!IsClassScopeExplicitSpecialization) {
// -- Member function declarations with the same name and the
// same parameter types cannot be overloaded if any of them
// is a static member function declaration.
if (OldMethod->isStatic() != NewMethod->isStatic()) {
Diag(New->getLocation(), diag::err_ovl_static_nonstatic_member);
Diag(OldLocation, PrevDiag) << Old << Old->getType();
return true;
}
// C++ [class.mem]p1:
// [...] A member shall not be declared twice in the
// member-specification, except that a nested class or member
// class template can be declared and then later defined.
if (!inTemplateInstantiation()) {
unsigned NewDiag;
if (isa<CXXConstructorDecl>(OldMethod))
NewDiag = diag::err_constructor_redeclared;
else if (isa<CXXDestructorDecl>(NewMethod))
NewDiag = diag::err_destructor_redeclared;
else if (isa<CXXConversionDecl>(NewMethod))
NewDiag = diag::err_conv_function_redeclared;
else
NewDiag = diag::err_member_redeclared;
Diag(New->getLocation(), NewDiag);
} else {
Diag(New->getLocation(), diag::err_member_redeclared_in_instantiation)
<< New << New->getType();
}
Diag(OldLocation, PrevDiag) << Old << Old->getType();
return true;
// Complain if this is an explicit declaration of a special
// member that was initially declared implicitly.
//
// As an exception, it's okay to befriend such methods in order
// to permit the implicit constructor/destructor/operator calls.
} else if (OldMethod->isImplicit()) {
if (isFriend) {
NewMethod->setImplicit();
} else {
Diag(NewMethod->getLocation(),
diag::err_definition_of_implicitly_declared_member)
<< New << getSpecialMember(OldMethod);
return true;
}
} else if (OldMethod->getFirstDecl()->isExplicitlyDefaulted() && !isFriend) {
Diag(NewMethod->getLocation(),
diag::err_definition_of_explicitly_defaulted_member)
<< getSpecialMember(OldMethod);
return true;
}
}
// C++11 [dcl.attr.noreturn]p1:
// The first declaration of a function shall specify the noreturn
// attribute if any declaration of that function specifies the noreturn
// attribute.
const CXX11NoReturnAttr *NRA = New->getAttr<CXX11NoReturnAttr>();
if (NRA && !Old->hasAttr<CXX11NoReturnAttr>()) {
Diag(NRA->getLocation(), diag::err_noreturn_missing_on_first_decl);
Diag(Old->getFirstDecl()->getLocation(),
diag::note_noreturn_missing_first_decl);
}
// C++11 [dcl.attr.depend]p2:
// The first declaration of a function shall specify the
// carries_dependency attribute for its declarator-id if any declaration
// of the function specifies the carries_dependency attribute.
const CarriesDependencyAttr *CDA = New->getAttr<CarriesDependencyAttr>();
if (CDA && !Old->hasAttr<CarriesDependencyAttr>()) {
Diag(CDA->getLocation(),
diag::err_carries_dependency_missing_on_first_decl) << 0/*Function*/;
Diag(Old->getFirstDecl()->getLocation(),
diag::note_carries_dependency_missing_first_decl) << 0/*Function*/;
}
// (C++98 8.3.5p3):
// All declarations for a function shall agree exactly in both the
// return type and the parameter-type-list.
// We also want to respect all the extended bits except noreturn.
// noreturn should now match unless the old type info didn't have it.
QualType OldQTypeForComparison = OldQType;
if (!OldTypeInfo.getNoReturn() && NewTypeInfo.getNoReturn()) {
auto *OldType = OldQType->castAs<FunctionProtoType>();
const FunctionType *OldTypeForComparison
= Context.adjustFunctionType(OldType, OldTypeInfo.withNoReturn(true));
OldQTypeForComparison = QualType(OldTypeForComparison, 0);
assert(OldQTypeForComparison.isCanonical());
}
if (haveIncompatibleLanguageLinkages(Old, New)) {
// As a special case, retain the language linkage from previous
// declarations of a friend function as an extension.
//
// This liberal interpretation of C++ [class.friend]p3 matches GCC/MSVC
// and is useful because there's otherwise no way to specify language
// linkage within class scope.
//
// Check cautiously as the friend object kind isn't yet complete.
if (New->getFriendObjectKind() != Decl::FOK_None) {
Diag(New->getLocation(), diag::ext_retained_language_linkage) << New;
Diag(OldLocation, PrevDiag);
} else {
Diag(New->getLocation(), diag::err_different_language_linkage) << New;
Diag(OldLocation, PrevDiag);
return true;
}
}
- if (OldQTypeForComparison == NewQType)
+ // If the function types are compatible, merge the declarations. Ignore the
+ // exception specifier because it was already checked above in
+ // CheckEquivalentExceptionSpec, and we don't want follow-on diagnostics
+ // about incompatible types under -fms-compatibility.
+ if (Context.hasSameFunctionTypeIgnoringExceptionSpec(OldQTypeForComparison,
+ NewQType))
return MergeCompatibleFunctionDecls(New, Old, S, MergeTypeWithOld);
// If the types are imprecise (due to dependent constructs in friends or
// local extern declarations), it's OK if they differ. We'll check again
// during instantiation.
if (!canFullyTypeCheckRedeclaration(New, Old, NewQType, OldQType))
return false;
// Fall through for conflicting redeclarations and redefinitions.
}
// C: Function types need to be compatible, not identical. This handles
// duplicate function decls like "void f(int); void f(enum X);" properly.
if (!getLangOpts().CPlusPlus &&
Context.typesAreCompatible(OldQType, NewQType)) {
const FunctionType *OldFuncType = OldQType->getAs<FunctionType>();
const FunctionType *NewFuncType = NewQType->getAs<FunctionType>();
const FunctionProtoType *OldProto = nullptr;
if (MergeTypeWithOld && isa<FunctionNoProtoType>(NewFuncType) &&
(OldProto = dyn_cast<FunctionProtoType>(OldFuncType))) {
// The old declaration provided a function prototype, but the
// new declaration does not. Merge in the prototype.
assert(!OldProto->hasExceptionSpec() && "Exception spec in C");
SmallVector<QualType, 16> ParamTypes(OldProto->param_types());
NewQType =
Context.getFunctionType(NewFuncType->getReturnType(), ParamTypes,
OldProto->getExtProtoInfo());
New->setType(NewQType);
New->setHasInheritedPrototype();
// Synthesize parameters with the same types.
SmallVector<ParmVarDecl*, 16> Params;
for (const auto &ParamType : OldProto->param_types()) {
ParmVarDecl *Param = ParmVarDecl::Create(Context, New, SourceLocation(),
SourceLocation(), nullptr,
ParamType, /*TInfo=*/nullptr,
SC_None, nullptr);
Param->setScopeInfo(0, Params.size());
Param->setImplicit();
Params.push_back(Param);
}
New->setParams(Params);
}
return MergeCompatibleFunctionDecls(New, Old, S, MergeTypeWithOld);
}
// GNU C permits a K&R definition to follow a prototype declaration
// if the declared types of the parameters in the K&R definition
// match the types in the prototype declaration, even when the
// promoted types of the parameters from the K&R definition differ
// from the types in the prototype. GCC then keeps the types from
// the prototype.
//
// If a variadic prototype is followed by a non-variadic K&R definition,
// the K&R definition becomes variadic. This is sort of an edge case, but
// it's legal per the standard depending on how you read C99 6.7.5.3p15 and
// C99 6.9.1p8.
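// e.g. (an illustrative sketch of the extension):
//   void h(short x);       // prototype
//   void h(x) short x; {}  // K&R definition; 'x' promotes to int, but the
//                          // prototype's 'void (short)' is what is kept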
if (!getLangOpts().CPlusPlus &&
Old->hasPrototype() && !New->hasPrototype() &&
New->getType()->getAs<FunctionProtoType>() &&
Old->getNumParams() == New->getNumParams()) {
SmallVector<QualType, 16> ArgTypes;
SmallVector<GNUCompatibleParamWarning, 16> Warnings;
const FunctionProtoType *OldProto
= Old->getType()->getAs<FunctionProtoType>();
const FunctionProtoType *NewProto
= New->getType()->getAs<FunctionProtoType>();
// Determine whether this is the GNU C extension.
QualType MergedReturn = Context.mergeTypes(OldProto->getReturnType(),
NewProto->getReturnType());
bool LooseCompatible = !MergedReturn.isNull();
for (unsigned Idx = 0, End = Old->getNumParams();
LooseCompatible && Idx != End; ++Idx) {
ParmVarDecl *OldParm = Old->getParamDecl(Idx);
ParmVarDecl *NewParm = New->getParamDecl(Idx);
if (Context.typesAreCompatible(OldParm->getType(),
NewProto->getParamType(Idx))) {
ArgTypes.push_back(NewParm->getType());
} else if (Context.typesAreCompatible(OldParm->getType(),
NewParm->getType(),
/*CompareUnqualified=*/true)) {
GNUCompatibleParamWarning Warn = { OldParm, NewParm,
NewProto->getParamType(Idx) };
Warnings.push_back(Warn);
ArgTypes.push_back(NewParm->getType());
} else
LooseCompatible = false;
}
if (LooseCompatible) {
for (unsigned Warn = 0; Warn < Warnings.size(); ++Warn) {
Diag(Warnings[Warn].NewParm->getLocation(),
diag::ext_param_promoted_not_compatible_with_prototype)
<< Warnings[Warn].PromotedType
<< Warnings[Warn].OldParm->getType();
if (Warnings[Warn].OldParm->getLocation().isValid())
Diag(Warnings[Warn].OldParm->getLocation(),
diag::note_previous_declaration);
}
if (MergeTypeWithOld)
New->setType(Context.getFunctionType(MergedReturn, ArgTypes,
OldProto->getExtProtoInfo()));
return MergeCompatibleFunctionDecls(New, Old, S, MergeTypeWithOld);
}
// Fall through to diagnose conflicting types.
}
// A function that has already been declared has been redeclared or
// defined with a different type; show an appropriate diagnostic.
// If the previous declaration was an implicitly-generated builtin
// declaration, then at the very least we should use a specialized note.
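// e.g. (illustrative):
//   int printf(int);  // warning: incompatible redeclaration of library
//                     // function 'printf'; not a hard error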
unsigned BuiltinID;
if (Old->isImplicit() && (BuiltinID = Old->getBuiltinID())) {
// If it's actually a library-defined builtin function like 'malloc'
// or 'printf', just warn about the incompatible redeclaration.
if (Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID)) {
Diag(New->getLocation(), diag::warn_redecl_library_builtin) << New;
Diag(OldLocation, diag::note_previous_builtin_declaration)
<< Old << Old->getType();
// If this is a global redeclaration, just forget hereafter
// about the "builtin-ness" of the function.
//
// Doing this for local extern declarations is problematic. If
// the builtin declaration remains visible, a second invalid
// local declaration will produce a hard error; if it doesn't
// remain visible, a single bogus local redeclaration (which is
// actually only a warning) could break all the downstream code.
if (!New->getLexicalDeclContext()->isFunctionOrMethod())
New->getIdentifier()->revertBuiltin();
return false;
}
PrevDiag = diag::note_previous_builtin_declaration;
}
Diag(New->getLocation(), diag::err_conflicting_types) << New->getDeclName();
Diag(OldLocation, PrevDiag) << Old << Old->getType();
return true;
}
/// Completes the merge of two function declarations that are
/// known to be compatible.
///
/// This routine handles the merging of attributes and other
/// properties of function declarations from the old declaration to
/// the new declaration, once we know that New is in fact a
/// redeclaration of Old.
///
/// \returns false
bool Sema::MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld) {
// Merge the attributes
mergeDeclAttributes(New, Old);
// Merge "pure" flag.
if (Old->isPure())
New->setPure();
// Merge "used" flag.
if (Old->getMostRecentDecl()->isUsed(false))
New->setIsUsed();
// Merge attributes from the parameters. These can mismatch with K&R
// declarations.
if (New->getNumParams() == Old->getNumParams())
for (unsigned i = 0, e = New->getNumParams(); i != e; ++i) {
ParmVarDecl *NewParam = New->getParamDecl(i);
ParmVarDecl *OldParam = Old->getParamDecl(i);
mergeParamDeclAttributes(NewParam, OldParam, *this);
mergeParamDeclTypes(NewParam, OldParam, *this);
}
if (getLangOpts().CPlusPlus)
return MergeCXXFunctionDecl(New, Old, S);
// Merge the function types so that we get the composite types for the return
// and argument types. Per C11 6.2.7/4, only update the type if the old decl
// was visible.
QualType Merged = Context.mergeTypes(Old->getType(), New->getType());
if (!Merged.isNull() && MergeTypeWithOld)
New->setType(Merged);
return false;
}
void Sema::mergeObjCMethodDecls(ObjCMethodDecl *newMethod,
ObjCMethodDecl *oldMethod) {
// Merge the attributes, including deprecated/unavailable
AvailabilityMergeKind MergeKind =
isa<ObjCProtocolDecl>(oldMethod->getDeclContext())
? AMK_ProtocolImplementation
: isa<ObjCImplDecl>(newMethod->getDeclContext()) ? AMK_Redeclaration
: AMK_Override;
mergeDeclAttributes(newMethod, oldMethod, MergeKind);
// Merge attributes from the parameters.
ObjCMethodDecl::param_const_iterator oi = oldMethod->param_begin(),
oe = oldMethod->param_end();
for (ObjCMethodDecl::param_iterator
ni = newMethod->param_begin(), ne = newMethod->param_end();
ni != ne && oi != oe; ++ni, ++oi)
mergeParamDeclAttributes(*ni, *oi, *this);
CheckObjCMethodOverride(newMethod, oldMethod);
}
static void diagnoseVarDeclTypeMismatch(Sema &S, VarDecl *New, VarDecl* Old) {
assert(!S.Context.hasSameType(New->getType(), Old->getType()));
S.Diag(New->getLocation(), New->isThisDeclarationADefinition()
? diag::err_redefinition_different_type
: diag::err_redeclaration_different_type)
<< New->getDeclName() << New->getType() << Old->getType();
diag::kind PrevDiag;
SourceLocation OldLocation;
std::tie(PrevDiag, OldLocation)
= getNoteDiagForInvalidRedeclaration(Old, New);
S.Diag(OldLocation, PrevDiag);
New->setInvalidDecl();
}
/// MergeVarDeclTypes - We parsed a variable 'New' which has the same name and
/// scope as a previous declaration 'Old'. Figure out how to merge their types,
/// emitting diagnostics as appropriate.
///
/// Declarations using the auto type specifier (C++ [decl.spec.auto]) call back
/// to here in AddInitializerToDecl. We can't check them before the initializer
/// is attached.
void Sema::MergeVarDeclTypes(VarDecl *New, VarDecl *Old,
bool MergeTypeWithOld) {
if (New->isInvalidDecl() || Old->isInvalidDecl())
return;
QualType MergedT;
if (getLangOpts().CPlusPlus) {
if (New->getType()->isUndeducedType()) {
// We don't know what the new type is until the initializer is attached.
return;
} else if (Context.hasSameType(New->getType(), Old->getType())) {
// These could still be something that needs exception specs checked.
return MergeVarDeclExceptionSpecs(New, Old);
}
// C++ [basic.link]p10:
// [...] the types specified by all declarations referring to a given
// object or function shall be identical, except that declarations for an
// array object can specify array types that differ by the presence or
// absence of a major array bound (8.3.4).
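// e.g. (illustrative):
//   extern int a[];    // incomplete array type
//   extern int a[10];  // OK, merged type is int[10]
//   extern int a[20];  // error: redeclaration with a different bound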
else if (Old->getType()->isArrayType() && New->getType()->isArrayType()) {
const ArrayType *OldArray = Context.getAsArrayType(Old->getType());
const ArrayType *NewArray = Context.getAsArrayType(New->getType());
// We are merging a variable declaration New into Old. If it has an array
// bound, and that bound differs from Old's bound, we should diagnose the
// mismatch.
if (!NewArray->isIncompleteArrayType() && !NewArray->isDependentType()) {
for (VarDecl *PrevVD = Old->getMostRecentDecl(); PrevVD;
PrevVD = PrevVD->getPreviousDecl()) {
const ArrayType *PrevVDTy = Context.getAsArrayType(PrevVD->getType());
if (PrevVDTy->isIncompleteArrayType() || PrevVDTy->isDependentType())
continue;
if (!Context.hasSameType(NewArray, PrevVDTy))
return diagnoseVarDeclTypeMismatch(*this, New, PrevVD);
}
}
if (OldArray->isIncompleteArrayType() && NewArray->isArrayType()) {
if (Context.hasSameType(OldArray->getElementType(),
NewArray->getElementType()))
MergedT = New->getType();
}
// FIXME: Check visibility. New is hidden but has a complete type. If New
// has no array bound, it should not inherit one from Old, if Old is not
// visible.
else if (OldArray->isArrayType() && NewArray->isIncompleteArrayType()) {
if (Context.hasSameType(OldArray->getElementType(),
NewArray->getElementType()))
MergedT = Old->getType();
}
}
else if (New->getType()->isObjCObjectPointerType() &&
Old->getType()->isObjCObjectPointerType()) {
MergedT = Context.mergeObjCGCQualifiers(New->getType(),
Old->getType());
}
} else {
// C 6.2.7p2:
// All declarations that refer to the same object or function shall have
// compatible type.
MergedT = Context.mergeTypes(New->getType(), Old->getType());
}
if (MergedT.isNull()) {
// It's OK if we couldn't merge types if either type is dependent, for a
// block-scope variable. In other cases (static data members of class
// templates, variable templates, ...), we require the types to be
// equivalent.
// FIXME: The C++ standard doesn't say anything about this.
if ((New->getType()->isDependentType() ||
Old->getType()->isDependentType()) && New->isLocalVarDecl()) {
// If the old type was dependent, we can't merge with it, so the new type
// becomes dependent for now. We'll reproduce the original type when we
// instantiate the TypeSourceInfo for the variable.
if (!New->getType()->isDependentType() && MergeTypeWithOld)
New->setType(Context.DependentTy);
return;
}
return diagnoseVarDeclTypeMismatch(*this, New, Old);
}
// Don't actually update the type on the new declaration if the old
// declaration was an extern declaration in a different scope.
if (MergeTypeWithOld)
New->setType(MergedT);
}
static bool mergeTypeWithPrevious(Sema &S, VarDecl *NewVD, VarDecl *OldVD,
LookupResult &Previous) {
// C11 6.2.7p4:
// For an identifier with internal or external linkage declared
// in a scope in which a prior declaration of that identifier is
// visible, if the prior declaration specifies internal or
// external linkage, the type of the identifier at the later
// declaration becomes the composite type.
//
// If the variable isn't visible, we do not merge with its type.
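// e.g. (illustrative, C):
//   extern int a[];
//   extern int a[5];  // composite type of 'a' becomes int[5], but only
//                     // because the prior declaration is visible here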
if (Previous.isShadowed())
return false;
if (S.getLangOpts().CPlusPlus) {
// C++11 [dcl.array]p3:
// If there is a preceding declaration of the entity in the same
// scope in which the bound was specified, an omitted array bound
// is taken to be the same as in that earlier declaration.
return NewVD->isPreviousDeclInSameBlockScope() ||
(!OldVD->getLexicalDeclContext()->isFunctionOrMethod() &&
!NewVD->getLexicalDeclContext()->isFunctionOrMethod());
} else {
// If the old declaration was function-local, don't merge with its
// type unless we're in the same function.
return !OldVD->getLexicalDeclContext()->isFunctionOrMethod() ||
OldVD->getLexicalDeclContext() == NewVD->getLexicalDeclContext();
}
}
/// MergeVarDecl - We just parsed a variable 'New' which has the same name
/// and scope as a previous declaration 'Old'. Figure out how to resolve this
/// situation, merging decls or emitting diagnostics as appropriate.
///
/// Tentative definition rules (C99 6.9.2p2) are checked by
/// FinalizeDeclaratorGroup. Unfortunately, we can't analyze tentative
/// definitions here, since the initializer hasn't been attached.
///
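/// An illustrative tentative-definition case (C only):
/// @code
/// int i;      // tentative definition
/// int i = 3;  // the actual definition; no redefinition error in C
/// @endcode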
void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous) {
// If the new decl is already invalid, don't do any other checking.
if (New->isInvalidDecl())
return;
if (!shouldLinkPossiblyHiddenDecl(Previous, New))
return;
VarTemplateDecl *NewTemplate = New->getDescribedVarTemplate();
// Verify the old decl was also a variable or variable template.
VarDecl *Old = nullptr;
VarTemplateDecl *OldTemplate = nullptr;
if (Previous.isSingleResult()) {
if (NewTemplate) {
OldTemplate = dyn_cast<VarTemplateDecl>(Previous.getFoundDecl());
Old = OldTemplate ? OldTemplate->getTemplatedDecl() : nullptr;
if (auto *Shadow =
dyn_cast<UsingShadowDecl>(Previous.getRepresentativeDecl()))
if (checkUsingShadowRedecl<VarTemplateDecl>(*this, Shadow, NewTemplate))
return New->setInvalidDecl();
} else {
Old = dyn_cast<VarDecl>(Previous.getFoundDecl());
if (auto *Shadow =
dyn_cast<UsingShadowDecl>(Previous.getRepresentativeDecl()))
if (checkUsingShadowRedecl<VarDecl>(*this, Shadow, New))
return New->setInvalidDecl();
}
}
if (!Old) {
Diag(New->getLocation(), diag::err_redefinition_different_kind)
<< New->getDeclName();
notePreviousDefinition(Previous.getRepresentativeDecl(),
New->getLocation());
return New->setInvalidDecl();
}
// Ensure the template parameters are compatible.
if (NewTemplate &&
!TemplateParameterListsAreEqual(NewTemplate->getTemplateParameters(),
OldTemplate->getTemplateParameters(),
/*Complain=*/true, TPL_TemplateMatch))
return New->setInvalidDecl();
// C++ [class.mem]p1:
// A member shall not be declared twice in the member-specification [...]
//
// Here, we need only consider static data members.
if (Old->isStaticDataMember() && !New->isOutOfLine()) {
Diag(New->getLocation(), diag::err_duplicate_member)
<< New->getIdentifier();
Diag(Old->getLocation(), diag::note_previous_declaration);
New->setInvalidDecl();
}
mergeDeclAttributes(New, Old);
// Warn if an already-declared variable is made a weak_import in a subsequent
// declaration
if (New->hasAttr<WeakImportAttr>() &&
Old->getStorageClass() == SC_None &&
!Old->hasAttr<WeakImportAttr>()) {
Diag(New->getLocation(), diag::warn_weak_import) << New->getDeclName();
notePreviousDefinition(Old, New->getLocation());
// Remove weak_import attribute on new declaration.
New->dropAttr<WeakImportAttr>();
}
if (New->hasAttr<InternalLinkageAttr>() &&
!Old->hasAttr<InternalLinkageAttr>()) {
Diag(New->getLocation(), diag::err_internal_linkage_redeclaration)
<< New->getDeclName();
notePreviousDefinition(Old, New->getLocation());
New->dropAttr<InternalLinkageAttr>();
}
// Merge the types.
VarDecl *MostRecent = Old->getMostRecentDecl();
if (MostRecent != Old) {
MergeVarDeclTypes(New, MostRecent,
mergeTypeWithPrevious(*this, New, MostRecent, Previous));
if (New->isInvalidDecl())
return;
}
MergeVarDeclTypes(New, Old, mergeTypeWithPrevious(*this, New, Old, Previous));
if (New->isInvalidDecl())
return;
diag::kind PrevDiag;
SourceLocation OldLocation;
std::tie(PrevDiag, OldLocation) =
getNoteDiagForInvalidRedeclaration(Old, New);
// [dcl.stc]p8: Check if we have a non-static decl followed by a static.
if (New->getStorageClass() == SC_Static &&
!New->isStaticDataMember() &&
Old->hasExternalFormalLinkage()) {
if (getLangOpts().MicrosoftExt) {
Diag(New->getLocation(), diag::ext_static_non_static)
<< New->getDeclName();
Diag(OldLocation, PrevDiag);
} else {
Diag(New->getLocation(), diag::err_static_non_static)
<< New->getDeclName();
Diag(OldLocation, PrevDiag);
return New->setInvalidDecl();
}
}
// C99 6.2.2p4:
// For an identifier declared with the storage-class specifier
// extern in a scope in which a prior declaration of that
// identifier is visible,23) if the prior declaration specifies
// internal or external linkage, the linkage of the identifier at
// the later declaration is the same as the linkage specified at
// the prior declaration. If no prior declaration is visible, or
// if the prior declaration specifies no linkage, then the
// identifier has external linkage.
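// e.g. (illustrative):
//   static int x;  // internal linkage
//   extern int x;  // OK: 'x' keeps the prior (internal) linkage
// but without 'extern' the redeclaration is an error:
//   static int y;
//   int y;         // error: non-static declaration follows static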
if (New->hasExternalStorage() && Old->hasLinkage())
/* Okay */;
else if (New->getCanonicalDecl()->getStorageClass() != SC_Static &&
!New->isStaticDataMember() &&
Old->getCanonicalDecl()->getStorageClass() == SC_Static) {
Diag(New->getLocation(), diag::err_non_static_static) << New->getDeclName();
Diag(OldLocation, PrevDiag);
return New->setInvalidDecl();
}
// Check if extern is followed by non-extern and vice-versa.
if (New->hasExternalStorage() &&
!Old->hasLinkage() && Old->isLocalVarDeclOrParm()) {
Diag(New->getLocation(), diag::err_extern_non_extern) << New->getDeclName();
Diag(OldLocation, PrevDiag);
return New->setInvalidDecl();
}
if (Old->hasLinkage() && New->isLocalVarDeclOrParm() &&
!New->hasExternalStorage()) {
Diag(New->getLocation(), diag::err_non_extern_extern) << New->getDeclName();
Diag(OldLocation, PrevDiag);
return New->setInvalidDecl();
}
if (CheckRedeclarationModuleOwnership(New, Old))
return;
// Variables with external linkage are analyzed in FinalizeDeclaratorGroup.
// FIXME: The test for external storage here seems wrong? We still
// need to check for mismatches.
if (!New->hasExternalStorage() && !New->isFileVarDecl() &&
// Don't complain about out-of-line definitions of static members.
!(Old->getLexicalDeclContext()->isRecord() &&
!New->getLexicalDeclContext()->isRecord())) {
Diag(New->getLocation(), diag::err_redefinition) << New->getDeclName();
Diag(OldLocation, PrevDiag);
return New->setInvalidDecl();
}
if (New->isInline() && !Old->getMostRecentDecl()->isInline()) {
if (VarDecl *Def = Old->getDefinition()) {
// C++1z [dcl.fcn.spec]p4:
// If the definition of a variable appears in a translation unit before
// its first declaration as inline, the program is ill-formed.
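// e.g. (illustrative, C++17):
//   int n;
//   inline int n;  // error: definition precedes first 'inline' declaration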
Diag(New->getLocation(), diag::err_inline_decl_follows_def) << New;
Diag(Def->getLocation(), diag::note_previous_definition);
}
}
// If this redeclaration makes the variable inline, we may need to add it to
// UndefinedButUsed.
if (!Old->isInline() && New->isInline() && Old->isUsed(false) &&
!Old->getDefinition() && !New->isThisDeclarationADefinition())
UndefinedButUsed.insert(std::make_pair(Old->getCanonicalDecl(),
SourceLocation()));
if (New->getTLSKind() != Old->getTLSKind()) {
if (!Old->getTLSKind()) {
Diag(New->getLocation(), diag::err_thread_non_thread) << New->getDeclName();
Diag(OldLocation, PrevDiag);
} else if (!New->getTLSKind()) {
Diag(New->getLocation(), diag::err_non_thread_thread) << New->getDeclName();
Diag(OldLocation, PrevDiag);
} else {
// Do not allow redeclaration to change the variable between requiring
// static and dynamic initialization.
// FIXME: GCC allows this, but uses the TLS keyword on the first
// declaration to determine the kind. Do we need to be compatible here?
Diag(New->getLocation(), diag::err_thread_thread_different_kind)
<< New->getDeclName() << (New->getTLSKind() == VarDecl::TLS_Dynamic);
Diag(OldLocation, PrevDiag);
}
}
// C++ doesn't have tentative definitions, so go right ahead and check here.
if (getLangOpts().CPlusPlus &&
New->isThisDeclarationADefinition() == VarDecl::Definition) {
if (Old->isStaticDataMember() && Old->getCanonicalDecl()->isInline() &&
Old->getCanonicalDecl()->isConstexpr()) {
// This definition won't be a definition any more once it's been merged.
Diag(New->getLocation(),
diag::warn_deprecated_redundant_constexpr_static_def);
} else if (VarDecl *Def = Old->getDefinition()) {
if (checkVarDeclRedefinition(Def, New))
return;
}
}
if (haveIncompatibleLanguageLinkages(Old, New)) {
Diag(New->getLocation(), diag::err_different_language_linkage) << New;
Diag(OldLocation, PrevDiag);
New->setInvalidDecl();
return;
}
// Merge "used" flag.
if (Old->getMostRecentDecl()->isUsed(false))
New->setIsUsed();
// Keep a chain of previous declarations.
New->setPreviousDecl(Old);
if (NewTemplate)
NewTemplate->setPreviousDecl(OldTemplate);
adjustDeclContextForDeclaratorDecl(New, Old);
// Inherit access appropriately.
New->setAccess(Old->getAccess());
if (NewTemplate)
NewTemplate->setAccess(New->getAccess());
if (Old->isInline())
New->setImplicitlyInline();
}
void Sema::notePreviousDefinition(const NamedDecl *Old, SourceLocation New) {
SourceManager &SrcMgr = getSourceManager();
auto FNewDecLoc = SrcMgr.getDecomposedLoc(New);
auto FOldDecLoc = SrcMgr.getDecomposedLoc(Old->getLocation());
auto *FNew = SrcMgr.getFileEntryForID(FNewDecLoc.first);
auto *FOld = SrcMgr.getFileEntryForID(FOldDecLoc.first);
auto &HSI = PP.getHeaderSearchInfo();
StringRef HdrFilename =
SrcMgr.getFilename(SrcMgr.getSpellingLoc(Old->getLocation()));
auto noteFromModuleOrInclude = [&](Module *Mod,
SourceLocation IncLoc) -> bool {
// Redefinition errors with modules are common with non-modular mapped
// headers, for example: a non-modular header H in module A that also gets
// included directly in a TU. Pointing twice to the same header/definition
// is confusing; try to give better diagnostics when modules are enabled.
if (IncLoc.isValid()) {
if (Mod) {
Diag(IncLoc, diag::note_redefinition_modules_same_file)
<< HdrFilename.str() << Mod->getFullModuleName();
if (!Mod->DefinitionLoc.isInvalid())
Diag(Mod->DefinitionLoc, diag::note_defined_here)
<< Mod->getFullModuleName();
} else {
Diag(IncLoc, diag::note_redefinition_include_same_file)
<< HdrFilename.str();
}
return true;
}
return false;
};
// Is it the same file and same offset? Provide more information on why
// this leads to a redefinition error.
bool EmittedDiag = false;
if (FNew == FOld && FNewDecLoc.second == FOldDecLoc.second) {
SourceLocation OldIncLoc = SrcMgr.getIncludeLoc(FOldDecLoc.first);
SourceLocation NewIncLoc = SrcMgr.getIncludeLoc(FNewDecLoc.first);
EmittedDiag = noteFromModuleOrInclude(Old->getOwningModule(), OldIncLoc);
EmittedDiag |= noteFromModuleOrInclude(getCurrentModule(), NewIncLoc);
// If the header has no guards, emit a note suggesting one.
if (FOld && !HSI.isFileMultipleIncludeGuarded(FOld))
Diag(Old->getLocation(), diag::note_use_ifdef_guards);
if (EmittedDiag)
return;
}
// The redefinition comes from different files, or we couldn't do better above.
if (Old->getLocation().isValid())
Diag(Old->getLocation(), diag::note_previous_definition);
}
/// We've just determined that \p Old and \p New both appear to be definitions
/// of the same variable. Either diagnose or fix the problem.
bool Sema::checkVarDeclRedefinition(VarDecl *Old, VarDecl *New) {
if (!hasVisibleDefinition(Old) &&
(New->getFormalLinkage() == InternalLinkage ||
New->isInline() ||
New->getDescribedVarTemplate() ||
New->getNumTemplateParameterLists() ||
New->getDeclContext()->isDependentContext())) {
// The previous definition is hidden, and multiple definitions are
// permitted (in separate TUs). Demote this to a declaration.
New->demoteThisDefinitionToDeclaration();
// Make the canonical definition visible.
if (auto *OldTD = Old->getDescribedVarTemplate())
makeMergedDefinitionVisible(OldTD);
makeMergedDefinitionVisible(Old);
return false;
} else {
Diag(New->getLocation(), diag::err_redefinition) << New;
notePreviousDefinition(Old, New->getLocation());
New->setInvalidDecl();
return true;
}
}
/// ParsedFreeStandingDeclSpec - This method is invoked when a declspec with
/// no declarator (e.g. "struct foo;") is parsed.
Decl *
Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord) {
return ParsedFreeStandingDeclSpec(S, AS, DS, MultiTemplateParamsArg(), false,
AnonRecord);
}
// The MS ABI changed between VS2013 and VS2015 with regard to numbers used to
// disambiguate entities defined in different scopes.
// While the VS2015 ABI fixes potential miscompiles, it also breaks
// compatibility.
// We will pick our mangling number depending on which version of MSVC is being
// targeted.
static unsigned getMSManglingNumber(const LangOptions &LO, Scope *S) {
return LO.isCompatibleWithMSVC(LangOptions::MSVC2015)
? S->getMSCurManglingNumber()
: S->getMSLastManglingNumber();
}
void Sema::handleTagNumbering(const TagDecl *Tag, Scope *TagScope) {
if (!Context.getLangOpts().CPlusPlus)
return;
if (isa<CXXRecordDecl>(Tag->getParent())) {
// If this tag is the direct child of a class, number it if
// it is anonymous.
if (!Tag->getName().empty() || Tag->getTypedefNameForAnonDecl())
return;
MangleNumberingContext &MCtx =
Context.getManglingNumberContext(Tag->getParent());
Context.setManglingNumber(
Tag, MCtx.getManglingNumber(
Tag, getMSManglingNumber(getLangOpts(), TagScope)));
return;
}
// If this tag isn't a direct child of a class, number it if it is local.
Decl *ManglingContextDecl;
if (MangleNumberingContext *MCtx = getCurrentMangleNumberContext(
Tag->getDeclContext(), ManglingContextDecl)) {
Context.setManglingNumber(
Tag, MCtx->getManglingNumber(
Tag, getMSManglingNumber(getLangOpts(), TagScope)));
}
}
void Sema::setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD) {
if (TagFromDeclSpec->isInvalidDecl())
return;
// Do nothing if the tag already has a name for linkage purposes.
if (TagFromDeclSpec->hasNameForLinkage())
return;
// A well-formed anonymous tag must always be a TUK_Definition.
assert(TagFromDeclSpec->isThisDeclarationADefinition());
// The type must match the tag exactly; no qualifiers allowed.
if (!Context.hasSameType(NewTD->getUnderlyingType(),
Context.getTagDeclType(TagFromDeclSpec))) {
if (getLangOpts().CPlusPlus)
Context.addTypedefNameForUnnamedTagDecl(TagFromDeclSpec, NewTD);
return;
}
// If we've already computed linkage for the anonymous tag, then
// adding a typedef name for the anonymous decl can change that
// linkage, which might be a serious problem. Diagnose this as
// unsupported and ignore the typedef name. TODO: we should
// pursue this as a language defect and establish a formal rule
// for how to handle it.
if (TagFromDeclSpec->hasLinkageBeenComputed()) {
Diag(NewTD->getLocation(), diag::err_typedef_changes_linkage);
SourceLocation tagLoc = TagFromDeclSpec->getInnerLocStart();
tagLoc = getLocForEndOfToken(tagLoc);
llvm::SmallString<40> textToInsert;
textToInsert += ' ';
textToInsert += NewTD->getIdentifier()->getName();
Diag(tagLoc, diag::note_typedef_changes_linkage)
<< FixItHint::CreateInsertion(tagLoc, textToInsert);
return;
}
// Otherwise, set this as the anon-decl typedef for the tag.
TagFromDeclSpec->setTypedefNameForAnonDecl(NewTD);
}
static unsigned GetDiagnosticTypeSpecifierID(DeclSpec::TST T) {
switch (T) {
case DeclSpec::TST_class:
return 0;
case DeclSpec::TST_struct:
return 1;
case DeclSpec::TST_interface:
return 2;
case DeclSpec::TST_union:
return 3;
case DeclSpec::TST_enum:
return 4;
default:
llvm_unreachable("unexpected type specifier");
}
}
/// ParsedFreeStandingDeclSpec - This method is invoked when a declspec with
/// no declarator (e.g. "struct foo;") is parsed. It also accepts template
/// parameters to cope with template friend declarations.
Decl *
Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord) {
Decl *TagD = nullptr;
TagDecl *Tag = nullptr;
if (DS.getTypeSpecType() == DeclSpec::TST_class ||
DS.getTypeSpecType() == DeclSpec::TST_struct ||
DS.getTypeSpecType() == DeclSpec::TST_interface ||
DS.getTypeSpecType() == DeclSpec::TST_union ||
DS.getTypeSpecType() == DeclSpec::TST_enum) {
TagD = DS.getRepAsDecl();
if (!TagD) // We probably had an error
return nullptr;
// Note that the above type specs guarantee that the
// type rep is a Decl, whereas in many of the others
// it's a Type.
if (isa<TagDecl>(TagD))
Tag = cast<TagDecl>(TagD);
else if (ClassTemplateDecl *CTD = dyn_cast<ClassTemplateDecl>(TagD))
Tag = CTD->getTemplatedDecl();
}
if (Tag) {
handleTagNumbering(Tag, S);
Tag->setFreeStanding();
if (Tag->isInvalidDecl())
return Tag;
}
if (unsigned TypeQuals = DS.getTypeQualifiers()) {
// Enforce C99 6.7.3p2: "Types other than pointer types derived from object
// or incomplete types shall not be restrict-qualified."
if (TypeQuals & DeclSpec::TQ_restrict)
Diag(DS.getRestrictSpecLoc(),
diag::err_typecheck_invalid_restrict_not_pointer_noarg)
<< DS.getSourceRange();
}
if (DS.isInlineSpecified())
Diag(DS.getInlineSpecLoc(), diag::err_inline_non_function)
<< getLangOpts().CPlusPlus17;
if (DS.hasConstexprSpecifier()) {
// C++0x [dcl.constexpr]p1: constexpr can only be applied to declarations
// and definitions of functions and variables.
// C++2a [dcl.constexpr]p1: The consteval specifier shall be applied only to
// the declaration of a function or function template
bool IsConsteval = DS.getConstexprSpecifier() == CSK_consteval;
if (Tag)
Diag(DS.getConstexprSpecLoc(), diag::err_constexpr_tag)
<< GetDiagnosticTypeSpecifierID(DS.getTypeSpecType()) << IsConsteval;
else
Diag(DS.getConstexprSpecLoc(), diag::err_constexpr_wrong_decl_kind)
<< IsConsteval;
// Don't emit warnings after this error.
return TagD;
}
DiagnoseFunctionSpecifiers(DS);
if (DS.isFriendSpecified()) {
// If we're dealing with a decl but not a TagDecl, assume that
// whatever routines created it handled the friendship aspect.
if (TagD && !Tag)
return nullptr;
return ActOnFriendTypeDecl(S, DS, TemplateParams);
}
const CXXScopeSpec &SS = DS.getTypeSpecScope();
bool IsExplicitSpecialization =
!TemplateParams.empty() && TemplateParams.back()->size() == 0;
if (Tag && SS.isNotEmpty() && !Tag->isCompleteDefinition() &&
!IsExplicitInstantiation && !IsExplicitSpecialization &&
!isa<ClassTemplatePartialSpecializationDecl>(Tag)) {
// Per C++ [dcl.type.elab]p1, a class declaration cannot have a
// nested-name-specifier unless it is an explicit instantiation
// or an explicit specialization.
//
// FIXME: We allow class template partial specializations here too, per the
// obvious intent of DR1819.
//
// Per C++ [dcl.enum]p1, an opaque-enum-declaration can't either.
Diag(SS.getBeginLoc(), diag::err_standalone_class_nested_name_specifier)
<< GetDiagnosticTypeSpecifierID(DS.getTypeSpecType()) << SS.getRange();
return nullptr;
}
// Track whether this decl-specifier declares anything.
bool DeclaresAnything = true;
// Handle anonymous struct definitions.
if (RecordDecl *Record = dyn_cast_or_null<RecordDecl>(Tag)) {
if (!Record->getDeclName() && Record->isCompleteDefinition() &&
DS.getStorageClassSpec() != DeclSpec::SCS_typedef) {
if (getLangOpts().CPlusPlus ||
Record->getDeclContext()->isRecord()) {
// If CurContext is a DeclContext that can contain statements,
// RecursiveASTVisitor won't visit the decls that
// BuildAnonymousStructOrUnion() will put into CurContext.
// Also store them here so that they can be part of the
// DeclStmt that gets created in this case.
// FIXME: Also return the IndirectFieldDecls created by
// BuildAnonymousStructOrUnion, for the same reason?
if (CurContext->isFunctionOrMethod())
AnonRecord = Record;
return BuildAnonymousStructOrUnion(S, DS, AS, Record,
Context.getPrintingPolicy());
}
DeclaresAnything = false;
}
}
// C11 6.7.2.1p2:
// A struct-declaration that does not declare an anonymous structure or
// anonymous union shall contain a struct-declarator-list.
//
// This rule also existed in C89 and C99; the grammar for struct-declaration
// did not permit a struct-declaration without a struct-declarator-list.
if (!getLangOpts().CPlusPlus && CurContext->isRecord() &&
DS.getStorageClassSpec() == DeclSpec::SCS_unspecified) {
// Check for Microsoft C extension: anonymous struct/union member.
// Handle 2 kinds of anonymous struct/union:
// struct STRUCT;
// union UNION;
// and
// STRUCT_TYPE; <- where STRUCT_TYPE is a typedef struct.
// UNION_TYPE; <- where UNION_TYPE is a typedef union.
if ((Tag && Tag->getDeclName()) ||
DS.getTypeSpecType() == DeclSpec::TST_typename) {
RecordDecl *Record = nullptr;
if (Tag)
Record = dyn_cast<RecordDecl>(Tag);
else if (const RecordType *RT =
DS.getRepAsType().get()->getAsStructureType())
Record = RT->getDecl();
else if (const RecordType *UT = DS.getRepAsType().get()->getAsUnionType())
Record = UT->getDecl();
if (Record && getLangOpts().MicrosoftExt) {
Diag(DS.getBeginLoc(), diag::ext_ms_anonymous_record)
<< Record->isUnion() << DS.getSourceRange();
return BuildMicrosoftCAnonymousStruct(S, DS, Record);
}
DeclaresAnything = false;
}
}
// Skip all the checks below if we have a type error.
if (DS.getTypeSpecType() == DeclSpec::TST_error ||
(TagD && TagD->isInvalidDecl()))
return TagD;
if (getLangOpts().CPlusPlus &&
DS.getStorageClassSpec() != DeclSpec::SCS_typedef)
if (EnumDecl *Enum = dyn_cast_or_null<EnumDecl>(Tag))
if (Enum->enumerator_begin() == Enum->enumerator_end() &&
!Enum->getIdentifier() && !Enum->isInvalidDecl())
DeclaresAnything = false;
if (!DS.isMissingDeclaratorOk()) {
// Customize diagnostic for a typedef missing a name.
if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef)
Diag(DS.getBeginLoc(), diag::ext_typedef_without_a_name)
<< DS.getSourceRange();
else
DeclaresAnything = false;
}
if (DS.isModulePrivateSpecified() &&
Tag && Tag->getDeclContext()->isFunctionOrMethod())
Diag(DS.getModulePrivateSpecLoc(), diag::err_module_private_local_class)
<< Tag->getTagKind()
<< FixItHint::CreateRemoval(DS.getModulePrivateSpecLoc());
ActOnDocumentableDecl(TagD);
// C 6.7/2:
// A declaration [...] shall declare at least a declarator [...], a tag,
// or the members of an enumeration.
// C++ [dcl.dcl]p3:
// [If there are no declarators], and except for the declaration of an
// unnamed bit-field, the decl-specifier-seq shall introduce one or more
// names into the program, or shall redeclare a name introduced by a
// previous declaration.
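// e.g. (illustrative): a bare 'int;' declares nothing and is diagnosed
// here, while 'struct S;' declares a tag and so does not reach this point.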
if (!DeclaresAnything) {
// In C, we allow this as a (popular) extension / bug. Don't bother
// producing further diagnostics for redundant qualifiers after this.
Diag(DS.getBeginLoc(), diag::ext_no_declarators) << DS.getSourceRange();
return TagD;
}
// C++ [dcl.stc]p1:
// If a storage-class-specifier appears in a decl-specifier-seq, [...] the
// init-declarator-list of the declaration shall not be empty.
// C++ [dcl.fct.spec]p1:
// If a cv-qualifier appears in a decl-specifier-seq, the
// init-declarator-list of the declaration shall not be empty.
//
// Spurious qualifiers here appear to be valid in C.
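// e.g. (illustrative): in 'const struct S { int i; };' the 'const' applies
// to no declarator; it gets warn_standalone_specifier in C and
// ext_standalone_specifier in C++ (see DiagID below).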
unsigned DiagID = diag::warn_standalone_specifier;
if (getLangOpts().CPlusPlus)
DiagID = diag::ext_standalone_specifier;
// Note that a linkage-specification sets a storage class, but
// 'extern "C" struct foo;' is actually valid and not theoretically
// useless.
if (DeclSpec::SCS SCS = DS.getStorageClassSpec()) {
if (SCS == DeclSpec::SCS_mutable)
// Since mutable is not a viable storage class specifier in C, there is
// no reason to treat it as an extension. Instead, diagnose as an error.
Diag(DS.getStorageClassSpecLoc(), diag::err_mutable_nonmember);
else if (!DS.isExternInLinkageSpec() && SCS != DeclSpec::SCS_typedef)
Diag(DS.getStorageClassSpecLoc(), DiagID)
<< DeclSpec::getSpecifierName(SCS);
}
if (DeclSpec::TSCS TSCS = DS.getThreadStorageClassSpec())
Diag(DS.getThreadStorageClassSpecLoc(), DiagID)
<< DeclSpec::getSpecifierName(TSCS);
if (DS.getTypeQualifiers()) {
if (DS.getTypeQualifiers() & DeclSpec::TQ_const)
Diag(DS.getConstSpecLoc(), DiagID) << "const";
if (DS.getTypeQualifiers() & DeclSpec::TQ_volatile)
Diag(DS.getConstSpecLoc(), DiagID) << "volatile";
// Restrict is covered above.
if (DS.getTypeQualifiers() & DeclSpec::TQ_atomic)
Diag(DS.getAtomicSpecLoc(), DiagID) << "_Atomic";
if (DS.getTypeQualifiers() & DeclSpec::TQ_unaligned)
Diag(DS.getUnalignedSpecLoc(), DiagID) << "__unaligned";
}
// Warn about ignored type attributes, for example:
// __attribute__((aligned)) struct A;
// Attributes should be placed after tag to apply to type declaration.
if (!DS.getAttributes().empty()) {
DeclSpec::TST TypeSpecType = DS.getTypeSpecType();
if (TypeSpecType == DeclSpec::TST_class ||
TypeSpecType == DeclSpec::TST_struct ||
TypeSpecType == DeclSpec::TST_interface ||
TypeSpecType == DeclSpec::TST_union ||
TypeSpecType == DeclSpec::TST_enum) {
for (const ParsedAttr &AL : DS.getAttributes())
Diag(AL.getLoc(), diag::warn_declspec_attribute_ignored)
<< AL.getName() << GetDiagnosticTypeSpecifierID(TypeSpecType);
}
}
return TagD;
}
/// We are trying to inject an anonymous member into the given scope;
/// check if there's an existing declaration that can't be overloaded.
///
/// \return true if this is a forbidden redeclaration
static bool CheckAnonMemberRedeclaration(Sema &SemaRef,
Scope *S,
DeclContext *Owner,
DeclarationName Name,
SourceLocation NameLoc,
bool IsUnion) {
LookupResult R(SemaRef, Name, NameLoc, Sema::LookupMemberName,
Sema::ForVisibleRedeclaration);
if (!SemaRef.LookupName(R, S)) return false;
// Pick a representative declaration.
NamedDecl *PrevDecl = R.getRepresentativeDecl()->getUnderlyingDecl();
assert(PrevDecl && "Expected a non-null Decl");
if (!SemaRef.isDeclInScope(PrevDecl, Owner, S))
return false;
SemaRef.Diag(NameLoc, diag::err_anonymous_record_member_redecl)
<< IsUnion << Name;
SemaRef.Diag(PrevDecl->getLocation(), diag::note_previous_declaration);
return true;
}
/// InjectAnonymousStructOrUnionMembers - Inject the members of the
/// anonymous struct or union AnonRecord into the owning context Owner
/// and scope S. This routine will be invoked just after we realize
/// that an unnamed union or struct is actually an anonymous union or
/// struct, e.g.,
///
/// @code
/// union {
/// int i;
/// float f;
/// }; // InjectAnonymousStructOrUnionMembers called here to inject i and
/// // f into the surrounding scope.
/// @endcode
///
/// This routine is recursive, injecting the names of nested anonymous
/// structs/unions into the owning context and scope as well.
static bool
InjectAnonymousStructOrUnionMembers(Sema &SemaRef, Scope *S, DeclContext *Owner,
RecordDecl *AnonRecord, AccessSpecifier AS,
SmallVectorImpl<NamedDecl *> &Chaining) {
bool Invalid = false;
// Look at every FieldDecl and IndirectFieldDecl with a name.
for (auto *D : AnonRecord->decls()) {
if ((isa<FieldDecl>(D) || isa<IndirectFieldDecl>(D)) &&
cast<NamedDecl>(D)->getDeclName()) {
ValueDecl *VD = cast<ValueDecl>(D);
if (CheckAnonMemberRedeclaration(SemaRef, S, Owner, VD->getDeclName(),
VD->getLocation(),
AnonRecord->isUnion())) {
// C++ [class.union]p2:
// The names of the members of an anonymous union shall be
// distinct from the names of any other entity in the
// scope in which the anonymous union is declared.
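// e.g. (illustrative):
//   int x;
//   union { int x; };  // error: member conflicts with the enclosing 'x'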
Invalid = true;
} else {
// C++ [class.union]p2:
// For the purpose of name lookup, after the anonymous union
// definition, the members of the anonymous union are
// considered to have been defined in the scope in which the
// anonymous union is declared.
unsigned OldChainingSize = Chaining.size();
if (IndirectFieldDecl *IF = dyn_cast<IndirectFieldDecl>(VD))
Chaining.append(IF->chain_begin(), IF->chain_end());
else
Chaining.push_back(VD);
assert(Chaining.size() >= 2);
NamedDecl **NamedChain =
new (SemaRef.Context)NamedDecl*[Chaining.size()];
for (unsigned i = 0; i < Chaining.size(); i++)
NamedChain[i] = Chaining[i];
IndirectFieldDecl *IndirectField = IndirectFieldDecl::Create(
SemaRef.Context, Owner, VD->getLocation(), VD->getIdentifier(),
VD->getType(), {NamedChain, Chaining.size()});
for (const auto *Attr : VD->attrs())
IndirectField->addAttr(Attr->clone(SemaRef.Context));
IndirectField->setAccess(AS);
IndirectField->setImplicit();
SemaRef.PushOnScopeChains(IndirectField, S);
// That includes picking up the appropriate access specifier.
if (AS != AS_none) IndirectField->setAccess(AS);
Chaining.resize(OldChainingSize);
}
}
}
return Invalid;
}
/// StorageClassSpecToVarDeclStorageClass - Maps a DeclSpec::SCS to
/// a VarDecl::StorageClass. Any error reporting is up to the caller:
/// illegal input values are mapped to SC_None.
static StorageClass
StorageClassSpecToVarDeclStorageClass(const DeclSpec &DS) {
DeclSpec::SCS StorageClassSpec = DS.getStorageClassSpec();
assert(StorageClassSpec != DeclSpec::SCS_typedef &&
"Parser allowed 'typedef' as storage class VarDecl.");
switch (StorageClassSpec) {
case DeclSpec::SCS_unspecified: return SC_None;
case DeclSpec::SCS_extern:
if (DS.isExternInLinkageSpec())
return SC_None;
return SC_Extern;
case DeclSpec::SCS_static: return SC_Static;
case DeclSpec::SCS_auto: return SC_Auto;
case DeclSpec::SCS_register: return SC_Register;
case DeclSpec::SCS_private_extern: return SC_PrivateExtern;
// Illegal SCSs map to None: error reporting is up to the caller.
case DeclSpec::SCS_mutable: // Fall through.
case DeclSpec::SCS_typedef: return SC_None;
}
llvm_unreachable("unknown storage class specifier");
}
static SourceLocation findDefaultInitializer(const CXXRecordDecl *Record) {
assert(Record->hasInClassInitializer());
for (const auto *I : Record->decls()) {
const auto *FD = dyn_cast<FieldDecl>(I);
if (const auto *IFD = dyn_cast<IndirectFieldDecl>(I))
FD = IFD->getAnonField();
if (FD && FD->hasInClassInitializer())
return FD->getLocation();
}
llvm_unreachable("couldn't find in-class initializer");
}
static void checkDuplicateDefaultInit(Sema &S, CXXRecordDecl *Parent,
SourceLocation DefaultInitLoc) {
if (!Parent->isUnion() || !Parent->hasInClassInitializer())
return;
S.Diag(DefaultInitLoc, diag::err_multiple_mem_union_initialization);
S.Diag(findDefaultInitializer(Parent), diag::note_previous_initializer) << 0;
}
static void checkDuplicateDefaultInit(Sema &S, CXXRecordDecl *Parent,
CXXRecordDecl *AnonUnion) {
if (!Parent->isUnion() || !Parent->hasInClassInitializer())
return;
checkDuplicateDefaultInit(S, Parent, findDefaultInitializer(AnonUnion));
}
/// BuildAnonymousStructOrUnion - Handle the declaration of an
/// anonymous structure or union. Anonymous unions are a C++ feature
/// (C++ [class.union]) and a C11 feature; anonymous structures
/// are a C11 feature and GNU C++ extension.
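///
/// An illustrative case (hypothetical code):
/// @code
/// union U {
///   struct { int x, y; };  // anonymous struct: C11, GNU extension in C++
///   int val;
/// };
/// @endcode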
Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy) {
DeclContext *Owner = Record->getDeclContext();
// Diagnose whether this anonymous struct/union is an extension.
if (Record->isUnion() && !getLangOpts().CPlusPlus && !getLangOpts().C11)
Diag(Record->getLocation(), diag::ext_anonymous_union);
else if (!Record->isUnion() && getLangOpts().CPlusPlus)
Diag(Record->getLocation(), diag::ext_gnu_anonymous_struct);
else if (!Record->isUnion() && !getLangOpts().C11)
Diag(Record->getLocation(), diag::ext_c11_anonymous_struct);
// C and C++ require different kinds of checks for anonymous
// structs/unions.
bool Invalid = false;
if (getLangOpts().CPlusPlus) {
const char *PrevSpec = nullptr;
unsigned DiagID;
if (Record->isUnion()) {
// C++ [class.union]p6:
// C++17 [class.union.anon]p2:
// Anonymous unions declared in a named namespace or in the
// global namespace shall be declared static.
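// e.g. (illustrative):
//   namespace N { union { int i; }; }         // error: must be static
//   namespace N { static union { int i; }; }  // OK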
DeclContext *OwnerScope = Owner->getRedeclContext();
if (DS.getStorageClassSpec() != DeclSpec::SCS_static &&
(OwnerScope->isTranslationUnit() ||
(OwnerScope->isNamespace() &&
!cast<NamespaceDecl>(OwnerScope)->isAnonymousNamespace()))) {
Diag(Record->getLocation(), diag::err_anonymous_union_not_static)
<< FixItHint::CreateInsertion(Record->getLocation(), "static ");
// Recover by adding 'static'.
DS.SetStorageClassSpec(*this, DeclSpec::SCS_static, SourceLocation(),
PrevSpec, DiagID, Policy);
}
// C++ [class.union]p6:
// A storage class is not allowed in a declaration of an
// anonymous union in a class scope.
else if (DS.getStorageClassSpec() != DeclSpec::SCS_unspecified &&
isa<RecordDecl>(Owner)) {
Diag(DS.getStorageClassSpecLoc(),
diag::err_anonymous_union_with_storage_spec)
<< FixItHint::CreateRemoval(DS.getStorageClassSpecLoc());
// Recover by removing the storage specifier.
DS.SetStorageClassSpec(*this, DeclSpec::SCS_unspecified,
SourceLocation(),
PrevSpec, DiagID, Context.getPrintingPolicy());
}
}
// Ignore const/volatile/restrict qualifiers.
if (DS.getTypeQualifiers()) {
if (DS.getTypeQualifiers() & DeclSpec::TQ_const)
Diag(DS.getConstSpecLoc(), diag::ext_anonymous_struct_union_qualified)
<< Record->isUnion() << "const"
<< FixItHint::CreateRemoval(DS.getConstSpecLoc());
if (DS.getTypeQualifiers() & DeclSpec::TQ_volatile)
Diag(DS.getVolatileSpecLoc(),
diag::ext_anonymous_struct_union_qualified)
<< Record->isUnion() << "volatile"
<< FixItHint::CreateRemoval(DS.getVolatileSpecLoc());
if (DS.getTypeQualifiers() & DeclSpec::TQ_restrict)
Diag(DS.getRestrictSpecLoc(),
diag::ext_anonymous_struct_union_qualified)
<< Record->isUnion() << "restrict"
<< FixItHint::CreateRemoval(DS.getRestrictSpecLoc());
if (DS.getTypeQualifiers() & DeclSpec::TQ_atomic)
Diag(DS.getAtomicSpecLoc(),
diag::ext_anonymous_struct_union_qualified)
<< Record->isUnion() << "_Atomic"
<< FixItHint::CreateRemoval(DS.getAtomicSpecLoc());
if (DS.getTypeQualifiers() & DeclSpec::TQ_unaligned)
Diag(DS.getUnalignedSpecLoc(),
diag::ext_anonymous_struct_union_qualified)
<< Record->isUnion() << "__unaligned"
<< FixItHint::CreateRemoval(DS.getUnalignedSpecLoc());
DS.ClearTypeQualifiers();
}
// C++ [class.union]p2:
// The member-specification of an anonymous union shall only
// define non-static data members. [Note: nested types and
// functions cannot be declared within an anonymous union. ]
for (auto *Mem : Record->decls()) {
if (auto *FD = dyn_cast<FieldDecl>(Mem)) {
// C++ [class.union]p3:
// An anonymous union shall not have private or protected
// members (clause 11).
assert(FD->getAccess() != AS_none);
if (FD->getAccess() != AS_public) {
Diag(FD->getLocation(), diag::err_anonymous_record_nonpublic_member)
<< Record->isUnion() << (FD->getAccess() == AS_protected);
Invalid = true;
}
// C++ [class.union]p1
// An object of a class with a non-trivial constructor, a non-trivial
// copy constructor, a non-trivial destructor, or a non-trivial copy
// assignment operator cannot be a member of a union, nor can an
// array of such objects.
if (CheckNontrivialField(FD))
Invalid = true;
} else if (Mem->isImplicit()) {
// Any implicit members are fine.
} else if (isa<TagDecl>(Mem) && Mem->getDeclContext() != Record) {
// This is a type that showed up in an
// elaborated-type-specifier inside the anonymous struct or
// union, but which actually declares a type outside of the
// anonymous struct or union. It's okay.
} else if (auto *MemRecord = dyn_cast<RecordDecl>(Mem)) {
if (!MemRecord->isAnonymousStructOrUnion() &&
MemRecord->getDeclName()) {
// Visual C++ allows type definition in anonymous struct or union.
if (getLangOpts().MicrosoftExt)
Diag(MemRecord->getLocation(), diag::ext_anonymous_record_with_type)
<< Record->isUnion();
else {
// This is a nested type declaration.
Diag(MemRecord->getLocation(), diag::err_anonymous_record_with_type)
<< Record->isUnion();
Invalid = true;
}
} else {
// This is an anonymous type definition within another anonymous type.
// This is a popular extension, provided by Plan9, MSVC and GCC, but
// not part of standard C++.
Diag(MemRecord->getLocation(),
diag::ext_anonymous_record_with_anonymous_type)
<< Record->isUnion();
}
} else if (isa<AccessSpecDecl>(Mem)) {
// Any access specifier is fine.
} else if (isa<StaticAssertDecl>(Mem)) {
// In C++1z, static_assert declarations are also fine.
} else {
// We have something that isn't a non-static data
// member. Complain about it.
unsigned DK = diag::err_anonymous_record_bad_member;
if (isa<TypeDecl>(Mem))
DK = diag::err_anonymous_record_with_type;
else if (isa<FunctionDecl>(Mem))
DK = diag::err_anonymous_record_with_function;
else if (isa<VarDecl>(Mem))
DK = diag::err_anonymous_record_with_static;
// Visual C++ allows type definition in anonymous struct or union.
if (getLangOpts().MicrosoftExt &&
DK == diag::err_anonymous_record_with_type)
Diag(Mem->getLocation(), diag::ext_anonymous_record_with_type)
<< Record->isUnion();
else {
Diag(Mem->getLocation(), DK) << Record->isUnion();
Invalid = true;
}
}
}
// C++11 [class.union]p8 (DR1460):
// At most one variant member of a union may have a
// brace-or-equal-initializer.
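// e.g. (illustrative):
//   union U {
//     int a = 0;
//     union { int b = 1; };  // error: second variant member initializer
//   };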
if (cast<CXXRecordDecl>(Record)->hasInClassInitializer() &&
Owner->isRecord())
checkDuplicateDefaultInit(*this, cast<CXXRecordDecl>(Owner),
cast<CXXRecordDecl>(Record));
}
if (!Record->isUnion() && !Owner->isRecord()) {
Diag(Record->getLocation(), diag::err_anonymous_struct_not_member)
<< getLangOpts().CPlusPlus;
Invalid = true;
}
// C++ [dcl.dcl]p3:
// [If there are no declarators], and except for the declaration of an
// unnamed bit-field, the decl-specifier-seq shall introduce one or more
// names into the program
// C++ [class.mem]p2:
// each such member-declaration shall either declare at least one member
// name of the class or declare at least one unnamed bit-field
//
// For C this is an error even for a named struct, and is diagnosed elsewhere.
if (getLangOpts().CPlusPlus && Record->field_empty())
Diag(DS.getBeginLoc(), diag::ext_no_declarators) << DS.getSourceRange();
// Mock up a declarator.
Declarator Dc(DS, DeclaratorContext::MemberContext);
TypeSourceInfo *TInfo = GetTypeForDeclarator(Dc, S);
assert(TInfo && "couldn't build declarator info for anonymous struct/union");
// Create a declaration for this anonymous struct/union.
NamedDecl *Anon = nullptr;
if (RecordDecl *OwningClass = dyn_cast<RecordDecl>(Owner)) {
Anon = FieldDecl::Create(
Context, OwningClass, DS.getBeginLoc(), Record->getLocation(),
/*IdentifierInfo=*/nullptr, Context.getTypeDeclType(Record), TInfo,
/*BitWidth=*/nullptr, /*Mutable=*/false,
/*InitStyle=*/ICIS_NoInit);
Anon->setAccess(AS);
if (getLangOpts().CPlusPlus)
FieldCollector->Add(cast<FieldDecl>(Anon));
} else {
DeclSpec::SCS SCSpec = DS.getStorageClassSpec();
StorageClass SC = StorageClassSpecToVarDeclStorageClass(DS);
if (SCSpec == DeclSpec::SCS_mutable) {
// mutable can only appear on non-static class members, so it's always
// an error here
Diag(Record->getLocation(), diag::err_mutable_nonmember);
Invalid = true;
SC = SC_None;
}
Anon = VarDecl::Create(Context, Owner, DS.getBeginLoc(),
Record->getLocation(), /*IdentifierInfo=*/nullptr,
Context.getTypeDeclType(Record), TInfo, SC);
// Default-initialize the implicit variable. This initialization will be
// trivial in almost all cases, except if a union member has an in-class
// initializer:
// union { int n = 0; };
ActOnUninitializedDecl(Anon);
}
Anon->setImplicit();
// Mark this as an anonymous struct/union type.
Record->setAnonymousStructOrUnion(true);
// Add the anonymous struct/union object to the current
// context. We'll be referencing this object when we refer to one of
// its members.
Owner->addDecl(Anon);
// Inject the members of the anonymous struct/union into the owning
// context and into the identifier resolver chain for name lookup
// purposes.
SmallVector<NamedDecl*, 2> Chain;
Chain.push_back(Anon);
if (InjectAnonymousStructOrUnionMembers(*this, S, Owner, Record, AS, Chain))
Invalid = true;
if (VarDecl *NewVD = dyn_cast<VarDecl>(Anon)) {
if (getLangOpts().CPlusPlus && NewVD->isStaticLocal()) {
Decl *ManglingContextDecl;
if (MangleNumberingContext *MCtx = getCurrentMangleNumberContext(
NewVD->getDeclContext(), ManglingContextDecl)) {
Context.setManglingNumber(
NewVD, MCtx->getManglingNumber(
NewVD, getMSManglingNumber(getLangOpts(), S)));
Context.setStaticLocalNumber(NewVD, MCtx->getStaticLocalNumber(NewVD));
}
}
}
if (Invalid)
Anon->setInvalidDecl();
return Anon;
}
/// BuildMicrosoftCAnonymousStruct - Handle the declaration of a
/// Microsoft C anonymous structure.
/// Ref: http://msdn.microsoft.com/en-us/library/z2cx9y4f.aspx
/// Example:
///
/// struct A { int a; };
/// struct B { struct A; int b; };
///
/// void foo() {
/// B var;
/// var.a = 3;
/// }
///
Decl *Sema::BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record) {
assert(Record && "expected a record!");
// Mock up a declarator.
Declarator Dc(DS, DeclaratorContext::TypeNameContext);
TypeSourceInfo *TInfo = GetTypeForDeclarator(Dc, S);
assert(TInfo && "couldn't build declarator info for anonymous struct");
auto *ParentDecl = cast<RecordDecl>(CurContext);
QualType RecTy = Context.getTypeDeclType(Record);
// Create a declaration for this anonymous struct.
NamedDecl *Anon =
FieldDecl::Create(Context, ParentDecl, DS.getBeginLoc(), DS.getBeginLoc(),
/*IdentifierInfo=*/nullptr, RecTy, TInfo,
/*BitWidth=*/nullptr, /*Mutable=*/false,
/*InitStyle=*/ICIS_NoInit);
Anon->setImplicit();
// Add the anonymous struct object to the current context.
CurContext->addDecl(Anon);
// Inject the members of the anonymous struct into the current
// context and into the identifier resolver chain for name lookup
// purposes.
SmallVector<NamedDecl*, 2> Chain;
Chain.push_back(Anon);
RecordDecl *RecordDef = Record->getDefinition();
if (RequireCompleteType(Anon->getLocation(), RecTy,
diag::err_field_incomplete) ||
InjectAnonymousStructOrUnionMembers(*this, S, CurContext, RecordDef,
AS_none, Chain)) {
Anon->setInvalidDecl();
ParentDecl->setInvalidDecl();
}
return Anon;
}
/// GetNameForDeclarator - Determine the full declaration name for the
/// given Declarator.
DeclarationNameInfo Sema::GetNameForDeclarator(Declarator &D) {
return GetNameFromUnqualifiedId(D.getName());
}
/// Retrieves the declaration name from a parsed unqualified-id.
DeclarationNameInfo
Sema::GetNameFromUnqualifiedId(const UnqualifiedId &Name) {
DeclarationNameInfo NameInfo;
NameInfo.setLoc(Name.StartLocation);
switch (Name.getKind()) {
case UnqualifiedIdKind::IK_ImplicitSelfParam:
case UnqualifiedIdKind::IK_Identifier:
NameInfo.setName(Name.Identifier);
return NameInfo;
case UnqualifiedIdKind::IK_DeductionGuideName: {
// C++ [temp.deduct.guide]p3:
// The simple-template-id shall name a class template specialization.
// The template-name shall be the same identifier as the template-name
// of the simple-template-id.
// These together intend to imply that the template-name shall name a
// class template.
// FIXME: template<typename T> struct X {};
// template<typename T> using Y = X<T>;
// Y(int) -> Y<int>;
// satisfies these rules but does not name a class template.
TemplateName TN = Name.TemplateName.get().get();
auto *Template = TN.getAsTemplateDecl();
if (!Template || !isa<ClassTemplateDecl>(Template)) {
Diag(Name.StartLocation,
diag::err_deduction_guide_name_not_class_template)
<< (int)getTemplateNameKindForDiagnostics(TN) << TN;
if (Template)
Diag(Template->getLocation(), diag::note_template_decl_here);
return DeclarationNameInfo();
}
NameInfo.setName(
Context.DeclarationNames.getCXXDeductionGuideName(Template));
return NameInfo;
}
case UnqualifiedIdKind::IK_OperatorFunctionId:
NameInfo.setName(Context.DeclarationNames.getCXXOperatorName(
Name.OperatorFunctionId.Operator));
NameInfo.getInfo().CXXOperatorName.BeginOpNameLoc
= Name.OperatorFunctionId.SymbolLocations[0];
NameInfo.getInfo().CXXOperatorName.EndOpNameLoc
= Name.EndLocation.getRawEncoding();
return NameInfo;
case UnqualifiedIdKind::IK_LiteralOperatorId:
NameInfo.setName(Context.DeclarationNames.getCXXLiteralOperatorName(
Name.Identifier));
NameInfo.setCXXLiteralOperatorNameLoc(Name.EndLocation);
return NameInfo;
case UnqualifiedIdKind::IK_ConversionFunctionId: {
TypeSourceInfo *TInfo;
QualType Ty = GetTypeFromParser(Name.ConversionFunctionId, &TInfo);
if (Ty.isNull())
return DeclarationNameInfo();
NameInfo.setName(Context.DeclarationNames.getCXXConversionFunctionName(
Context.getCanonicalType(Ty)));
NameInfo.setNamedTypeInfo(TInfo);
return NameInfo;
}
case UnqualifiedIdKind::IK_ConstructorName: {
TypeSourceInfo *TInfo;
QualType Ty = GetTypeFromParser(Name.ConstructorName, &TInfo);
if (Ty.isNull())
return DeclarationNameInfo();
NameInfo.setName(Context.DeclarationNames.getCXXConstructorName(
Context.getCanonicalType(Ty)));
NameInfo.setNamedTypeInfo(TInfo);
return NameInfo;
}
case UnqualifiedIdKind::IK_ConstructorTemplateId: {
// In well-formed code, we can only have a constructor
// template-id that refers to the current context, so go there
// to find the actual type being constructed.
CXXRecordDecl *CurClass = dyn_cast<CXXRecordDecl>(CurContext);
if (!CurClass || CurClass->getIdentifier() != Name.TemplateId->Name)
return DeclarationNameInfo();
// Determine the type of the class being constructed.
QualType CurClassType = Context.getTypeDeclType(CurClass);
// FIXME: Check two things: that the template-id names the same type as
// CurClassType, and that the template-id does not occur when the name
// was qualified.
NameInfo.setName(Context.DeclarationNames.getCXXConstructorName(
Context.getCanonicalType(CurClassType)));
// FIXME: should we retrieve TypeSourceInfo?
NameInfo.setNamedTypeInfo(nullptr);
return NameInfo;
}
case UnqualifiedIdKind::IK_DestructorName: {
TypeSourceInfo *TInfo;
QualType Ty = GetTypeFromParser(Name.DestructorName, &TInfo);
if (Ty.isNull())
return DeclarationNameInfo();
NameInfo.setName(Context.DeclarationNames.getCXXDestructorName(
Context.getCanonicalType(Ty)));
NameInfo.setNamedTypeInfo(TInfo);
return NameInfo;
}
case UnqualifiedIdKind::IK_TemplateId: {
TemplateName TName = Name.TemplateId->Template.get();
SourceLocation TNameLoc = Name.TemplateId->TemplateNameLoc;
return Context.getNameForTemplate(TName, TNameLoc);
}
} // switch (Name.getKind())
llvm_unreachable("Unknown name kind");
}
static QualType getCoreType(QualType Ty) {
do {
if (Ty->isPointerType() || Ty->isReferenceType())
Ty = Ty->getPointeeType();
else if (Ty->isArrayType())
Ty = Ty->castAsArrayTypeUnsafe()->getElementType();
else
return Ty.withoutLocalFastQualifiers();
} while (true);
}
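// For example, for a parameter declared as 'const Foo *arr[10]' (Foo being
// any class type), getCoreType peels the array and pointer layers and drops
// the local fast qualifiers, yielding the core type 'Foo'.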
/// hasSimilarParameters - Determine whether the C++ functions Declaration
/// and Definition have "nearly" matching parameters. This heuristic is
/// used to improve diagnostics in the case where an out-of-line function
/// definition doesn't match any declaration within the class or namespace.
/// Also sets Params to the list of indices to the parameters that differ
/// between the declaration and the definition. If hasSimilarParameters
/// returns true and Params is empty, then all of the parameters match.
static bool hasSimilarParameters(ASTContext &Context,
FunctionDecl *Declaration,
FunctionDecl *Definition,
SmallVectorImpl<unsigned> &Params) {
Params.clear();
if (Declaration->param_size() != Definition->param_size())
return false;
for (unsigned Idx = 0; Idx < Declaration->param_size(); ++Idx) {
QualType DeclParamTy = Declaration->getParamDecl(Idx)->getType();
QualType DefParamTy = Definition->getParamDecl(Idx)->getType();
// The parameter types are identical
if (Context.hasSameUnqualifiedType(DefParamTy, DeclParamTy))
continue;
QualType DeclParamBaseTy = getCoreType(DeclParamTy);
QualType DefParamBaseTy = getCoreType(DefParamTy);
const IdentifierInfo *DeclTyName = DeclParamBaseTy.getBaseTypeIdentifier();
const IdentifierInfo *DefTyName = DefParamBaseTy.getBaseTypeIdentifier();
if (Context.hasSameUnqualifiedType(DeclParamBaseTy, DefParamBaseTy) ||
(DeclTyName && DeclTyName == DefTyName))
Params.push_back(Idx);
else // The two parameters aren't even close
return false;
}
return true;
}
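// For example, given
//   class C { void f(int *p); };
//   void C::f(const int *p) { }
// the parameter types are not identical, but both have core type 'int', so
// the definition is considered "similar" to the declaration and Params
// receives the index 0.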
/// RebuildDeclaratorInCurrentInstantiation - Checks whether the given
/// declarator needs to be rebuilt in the current instantiation, and
/// rebuilds it in place if so; returns true on error.
/// Any bits of declarator which appear before the name are valid for
/// consideration here. That's specifically the type in the decl spec
/// and the base type in any member-pointer chunks.
static bool RebuildDeclaratorInCurrentInstantiation(Sema &S, Declarator &D,
DeclarationName Name) {
// The types we specifically need to rebuild are:
// - typenames, typeofs, and decltypes
// - types which will become injected class names
// Of course, we also need to rebuild any type referencing such a
// type. It's safest to just say "dependent", but we call out a
// few cases here.
DeclSpec &DS = D.getMutableDeclSpec();
switch (DS.getTypeSpecType()) {
case DeclSpec::TST_typename:
case DeclSpec::TST_typeofType:
case DeclSpec::TST_underlyingType:
case DeclSpec::TST_atomic: {
// Grab the type from the parser.
TypeSourceInfo *TSI = nullptr;
QualType T = S.GetTypeFromParser(DS.getRepAsType(), &TSI);
if (T.isNull() || !T->isDependentType()) break;
// Make sure there's a type source info. This isn't really much
// of a waste; most dependent types should have type source info
// attached already.
if (!TSI)
TSI = S.Context.getTrivialTypeSourceInfo(T, DS.getTypeSpecTypeLoc());
// Rebuild the type in the current instantiation.
TSI = S.RebuildTypeInCurrentInstantiation(TSI, D.getIdentifierLoc(), Name);
if (!TSI) return true;
// Store the new type back in the decl spec.
ParsedType LocType = S.CreateParsedType(TSI->getType(), TSI);
DS.UpdateTypeRep(LocType);
break;
}
case DeclSpec::TST_decltype:
case DeclSpec::TST_typeofExpr: {
Expr *E = DS.getRepAsExpr();
ExprResult Result = S.RebuildExprInCurrentInstantiation(E);
if (Result.isInvalid()) return true;
DS.UpdateExprRep(Result.get());
break;
}
default:
// Nothing to do for these decl specs.
break;
}
// It doesn't matter what order we do this in.
for (unsigned I = 0, E = D.getNumTypeObjects(); I != E; ++I) {
DeclaratorChunk &Chunk = D.getTypeObject(I);
// The only type information in the declarator which can come
// before the declaration name is the base type of a member
// pointer.
if (Chunk.Kind != DeclaratorChunk::MemberPointer)
continue;
// Rebuild the scope specifier in-place.
CXXScopeSpec &SS = Chunk.Mem.Scope();
if (S.RebuildNestedNameSpecifierInCurrentInstantiation(SS))
return true;
}
return false;
}
Decl *Sema::ActOnDeclarator(Scope *S, Declarator &D) {
D.setFunctionDefinitionKind(FDK_Declaration);
Decl *Dcl = HandleDeclarator(S, D, MultiTemplateParamsArg());
if (OriginalLexicalContext && OriginalLexicalContext->isObjCContainer() &&
Dcl && Dcl->getDeclContext()->isFileContext())
Dcl->setTopLevelDeclInObjCContainer();
if (getLangOpts().OpenCL)
setCurrentOpenCLExtensionForDecl(Dcl);
return Dcl;
}
/// DiagnoseClassNameShadow - Implement C++ [class.mem]p13:
/// If T is the name of a class, then each of the following shall have a
/// name different from T:
/// - every static data member of class T;
/// - every member function of class T;
/// - every member of class T that is itself a type.
/// \returns true if the declaration name violates these rules.
bool Sema::DiagnoseClassNameShadow(DeclContext *DC,
DeclarationNameInfo NameInfo) {
DeclarationName Name = NameInfo.getName();
CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(DC);
while (Record && Record->isAnonymousStructOrUnion())
Record = dyn_cast<CXXRecordDecl>(Record->getParent());
if (Record && Record->getIdentifier() && Record->getDeclName() == Name) {
Diag(NameInfo.getLoc(), diag::err_member_name_of_class) << Name;
return true;
}
return false;
}
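// For example, this rejects
//   struct X { static int X; };
// because the static data member has the same name as its class
// (C++ [class.mem]p13).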
/// Diagnose a declaration whose declarator-id has the given
/// nested-name-specifier.
///
/// \param SS The nested-name-specifier of the declarator-id.
///
/// \param DC The declaration context to which the nested-name-specifier
/// resolves.
///
/// \param Name The name of the entity being declared.
///
/// \param Loc The location of the name of the entity being declared.
///
/// \param IsTemplateId Whether the name is a (simple-)template-id, and thus
/// we're declaring an explicit / partial specialization / instantiation.
///
/// \returns true if we cannot safely recover from this error, false otherwise.
bool Sema::diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name,
SourceLocation Loc, bool IsTemplateId) {
DeclContext *Cur = CurContext;
while (isa<LinkageSpecDecl>(Cur) || isa<CapturedDecl>(Cur))
Cur = Cur->getParent();
// If the user provided a superfluous scope specifier that refers back to the
// class in which the entity is already declared, diagnose and ignore it.
//
// class X {
// void X::f();
// };
//
// Note that it was once ill-formed to give redundant qualification in all
// contexts, but that rule was removed by DR482.
if (Cur->Equals(DC)) {
if (Cur->isRecord()) {
Diag(Loc, LangOpts.MicrosoftExt ? diag::warn_member_extra_qualification
: diag::err_member_extra_qualification)
<< Name << FixItHint::CreateRemoval(SS.getRange());
SS.clear();
} else {
Diag(Loc, diag::warn_namespace_member_extra_qualification) << Name;
}
return false;
}
// Check whether the qualifying scope encloses the scope of the original
// declaration. For a template-id, we perform the checks in
// CheckTemplateSpecializationScope.
if (!Cur->Encloses(DC) && !IsTemplateId) {
if (Cur->isRecord())
Diag(Loc, diag::err_member_qualification)
<< Name << SS.getRange();
else if (isa<TranslationUnitDecl>(DC))
Diag(Loc, diag::err_invalid_declarator_global_scope)
<< Name << SS.getRange();
else if (isa<FunctionDecl>(Cur))
Diag(Loc, diag::err_invalid_declarator_in_function)
<< Name << SS.getRange();
else if (isa<BlockDecl>(Cur))
Diag(Loc, diag::err_invalid_declarator_in_block)
<< Name << SS.getRange();
else
Diag(Loc, diag::err_invalid_declarator_scope)
<< Name << cast<NamedDecl>(Cur) << cast<NamedDecl>(DC) << SS.getRange();
return true;
}
if (Cur->isRecord()) {
// Cannot qualify members within a class.
Diag(Loc, diag::err_member_qualification)
<< Name << SS.getRange();
SS.clear();
// C++ constructors and destructors with incorrect scopes can break
// our AST invariants by having the wrong underlying types. If
// that's the case, then drop this declaration entirely.
if ((Name.getNameKind() == DeclarationName::CXXConstructorName ||
Name.getNameKind() == DeclarationName::CXXDestructorName) &&
!Context.hasSameType(Name.getCXXNameType(),
Context.getTypeDeclType(cast<CXXRecordDecl>(Cur))))
return true;
return false;
}
// C++11 [dcl.meaning]p1:
// [...] "The nested-name-specifier of the qualified declarator-id shall
// not begin with a decltype-specifier"
NestedNameSpecifierLoc SpecLoc(SS.getScopeRep(), SS.location_data());
while (SpecLoc.getPrefix())
SpecLoc = SpecLoc.getPrefix();
if (dyn_cast_or_null<DecltypeType>(
SpecLoc.getNestedNameSpecifier()->getAsType()))
Diag(Loc, diag::err_decltype_in_declarator)
<< SpecLoc.getTypeLoc().getSourceRange();
return false;
}
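// For example, the decltype check above rejects
//   struct A { static int n; };
//   A a;
//   int decltype(a)::n = 0; // nested-name-specifier begins with decltype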
NamedDecl *Sema::HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists) {
// TODO: consider using NameInfo for diagnostic.
DeclarationNameInfo NameInfo = GetNameForDeclarator(D);
DeclarationName Name = NameInfo.getName();
// All of these full declarators require an identifier. If it doesn't have
// one, the ParsedFreeStandingDeclSpec action should be used.
if (D.isDecompositionDeclarator()) {
return ActOnDecompositionDeclarator(S, D, TemplateParamLists);
} else if (!Name) {
if (!D.isInvalidType()) // Reject this if we think it is valid.
Diag(D.getDeclSpec().getBeginLoc(), diag::err_declarator_need_ident)
<< D.getDeclSpec().getSourceRange() << D.getSourceRange();
return nullptr;
} else if (DiagnoseUnexpandedParameterPack(NameInfo, UPPC_DeclarationType))
return nullptr;
// The scope passed in may not be a decl scope. Zip up the scope tree until
// we find one that is.
while ((S->getFlags() & Scope::DeclScope) == 0 ||
(S->getFlags() & Scope::TemplateParamScope) != 0)
S = S->getParent();
DeclContext *DC = CurContext;
if (D.getCXXScopeSpec().isInvalid())
D.setInvalidType();
else if (D.getCXXScopeSpec().isSet()) {
if (DiagnoseUnexpandedParameterPack(D.getCXXScopeSpec(),
UPPC_DeclarationQualifier))
return nullptr;
bool EnteringContext = !D.getDeclSpec().isFriendSpecified();
DC = computeDeclContext(D.getCXXScopeSpec(), EnteringContext);
if (!DC || isa<EnumDecl>(DC)) {
// If we could not compute the declaration context, it's because the
// declaration context is dependent but does not refer to a class,
// class template, or class template partial specialization. Complain
// and return early, to avoid the coming semantic disaster.
Diag(D.getIdentifierLoc(),
diag::err_template_qualified_declarator_no_match)
<< D.getCXXScopeSpec().getScopeRep()
<< D.getCXXScopeSpec().getRange();
return nullptr;
}
bool IsDependentContext = DC->isDependentContext();
if (!IsDependentContext &&
RequireCompleteDeclContext(D.getCXXScopeSpec(), DC))
return nullptr;
// If a class is incomplete, do not parse entities inside it.
if (isa<CXXRecordDecl>(DC) && !cast<CXXRecordDecl>(DC)->hasDefinition()) {
Diag(D.getIdentifierLoc(),
diag::err_member_def_undefined_record)
<< Name << DC << D.getCXXScopeSpec().getRange();
return nullptr;
}
if (!D.getDeclSpec().isFriendSpecified()) {
if (diagnoseQualifiedDeclaration(
D.getCXXScopeSpec(), DC, Name, D.getIdentifierLoc(),
D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId)) {
if (DC->isRecord())
return nullptr;
D.setInvalidType();
}
}
// Check whether we need to rebuild the type of the given
// declaration in the current instantiation.
if (EnteringContext && IsDependentContext &&
TemplateParamLists.size() != 0) {
ContextRAII SavedContext(*this, DC);
if (RebuildDeclaratorInCurrentInstantiation(*this, D, Name))
D.setInvalidType();
}
}
TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
QualType R = TInfo->getType();
if (DiagnoseUnexpandedParameterPack(D.getIdentifierLoc(), TInfo,
UPPC_DeclarationType))
D.setInvalidType();
LookupResult Previous(*this, NameInfo, LookupOrdinaryName,
forRedeclarationInCurContext());
// See if this is a redefinition of a variable in the same scope.
if (!D.getCXXScopeSpec().isSet()) {
bool IsLinkageLookup = false;
bool CreateBuiltins = false;
// If the declaration we're planning to build will be a function
// or object with linkage, then look for another declaration with
// linkage (C99 6.2.2p4-5 and C++ [basic.link]p6).
//
// If the declaration we're planning to build will be declared with
// external linkage in the translation unit, create any builtin with
// the same name.
if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef)
/* Do nothing */;
else if (CurContext->isFunctionOrMethod() &&
(D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_extern ||
R->isFunctionType())) {
IsLinkageLookup = true;
CreateBuiltins =
CurContext->getEnclosingNamespaceContext()->isTranslationUnit();
} else if (CurContext->getRedeclContext()->isTranslationUnit() &&
D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_static)
CreateBuiltins = true;
if (IsLinkageLookup) {
Previous.clear(LookupRedeclarationWithLinkage);
Previous.setRedeclarationKind(ForExternalRedeclaration);
}
LookupName(Previous, S, CreateBuiltins);
} else { // Something like "int foo::x;"
LookupQualifiedName(Previous, DC);
// C++ [dcl.meaning]p1:
// When the declarator-id is qualified, the declaration shall refer to a
// previously declared member of the class or namespace to which the
// qualifier refers (or, in the case of a namespace, of an element of the
// inline namespace set of that namespace (7.3.1)) or to a specialization
// thereof; [...]
//
// Note that we already checked the context above, and that we do not have
// enough information to make sure that Previous contains the declaration
// we want to match. For example, given:
//
// class X {
// void f();
// void f(float);
// };
//
// void X::f(int) { } // ill-formed
//
// In this case, Previous will point to the overload set
// containing the two f's declared in X, but neither of them
// matches.
// C++ [dcl.meaning]p1:
// [...] the member shall not merely have been introduced by a
// using-declaration in the scope of the class or namespace nominated by
// the nested-name-specifier of the declarator-id.
RemoveUsingDecls(Previous);
}
if (Previous.isSingleResult() &&
Previous.getFoundDecl()->isTemplateParameter()) {
// Maybe we will complain about the shadowed template parameter.
if (!D.isInvalidType())
DiagnoseTemplateParameterShadow(D.getIdentifierLoc(),
Previous.getFoundDecl());
// Just pretend that we didn't see the previous declaration.
Previous.clear();
}
if (!R->isFunctionType() && DiagnoseClassNameShadow(DC, NameInfo))
// Forget that the previous declaration is the injected-class-name.
Previous.clear();
// In C++, the previous declaration we find might be a tag type
// (class or enum). In this case, the new declaration will hide the
// tag type. Note that this applies to functions, function templates, and
// variables, but not to typedefs (C++ [dcl.typedef]p4) or variable templates.
if (Previous.isSingleTagDecl() &&
D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_typedef &&
(TemplateParamLists.size() == 0 || R->isFunctionType()))
Previous.clear();
// Check that there are no default arguments other than in the parameters
// of a function declaration (C++ only).
if (getLangOpts().CPlusPlus)
CheckExtraCXXDefaultArguments(D);
NamedDecl *New;
bool AddToScope = true;
if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef) {
if (TemplateParamLists.size()) {
Diag(D.getIdentifierLoc(), diag::err_template_typedef);
return nullptr;
}
New = ActOnTypedefDeclarator(S, D, DC, TInfo, Previous);
} else if (R->isFunctionType()) {
New = ActOnFunctionDeclarator(S, D, DC, TInfo, Previous,
TemplateParamLists,
AddToScope);
} else {
New = ActOnVariableDeclarator(S, D, DC, TInfo, Previous, TemplateParamLists,
AddToScope);
}
if (!New)
return nullptr;
// If this has an identifier and is not a function template specialization,
// add it to the scope stack.
if (New->getDeclName() && AddToScope)
PushOnScopeChains(New, S);
if (isInOpenMPDeclareTargetContext())
checkDeclIsAllowedInOpenMPTarget(nullptr, New);
return New;
}
/// Helper method to turn variable array types into constant array
/// types in certain situations which would otherwise be errors (for
/// GCC compatibility).
static QualType TryToFixInvalidVariablyModifiedType(QualType T,
ASTContext &Context,
bool &SizeIsNegative,
llvm::APSInt &Oversized) {
// This method tries to turn a variable array into a constant
// array even when the size isn't an ICE. This is necessary
// for compatibility with code that depends on gcc's buggy
// constant expression folding, like struct {char x[(int)(char*)2];}
SizeIsNegative = false;
Oversized = 0;
if (T->isDependentType())
return QualType();
QualifierCollector Qs;
const Type *Ty = Qs.strip(T);
if (const PointerType* PTy = dyn_cast<PointerType>(Ty)) {
QualType Pointee = PTy->getPointeeType();
QualType FixedType =
TryToFixInvalidVariablyModifiedType(Pointee, Context, SizeIsNegative,
Oversized);
if (FixedType.isNull()) return FixedType;
FixedType = Context.getPointerType(FixedType);
return Qs.apply(Context, FixedType);
}
if (const ParenType* PTy = dyn_cast<ParenType>(Ty)) {
QualType Inner = PTy->getInnerType();
QualType FixedType =
TryToFixInvalidVariablyModifiedType(Inner, Context, SizeIsNegative,
Oversized);
if (FixedType.isNull()) return FixedType;
FixedType = Context.getParenType(FixedType);
return Qs.apply(Context, FixedType);
}
const VariableArrayType* VLATy = dyn_cast<VariableArrayType>(T);
if (!VLATy)
return QualType();
// FIXME: We should probably handle this case
if (VLATy->getElementType()->isVariablyModifiedType())
return QualType();
Expr::EvalResult Result;
if (!VLATy->getSizeExpr() ||
!VLATy->getSizeExpr()->EvaluateAsInt(Result, Context))
return QualType();
llvm::APSInt Res = Result.Val.getInt();
// Check whether the array size is negative.
if (Res.isSigned() && Res.isNegative()) {
SizeIsNegative = true;
return QualType();
}
// Check whether the array is too large to be addressed.
unsigned ActiveSizeBits
= ConstantArrayType::getNumAddressingBits(Context, VLATy->getElementType(),
Res);
if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context)) {
Oversized = Res;
return QualType();
}
return Context.getConstantArrayType(VLATy->getElementType(),
Res, ArrayType::Normal, 0);
}
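// For example, the size expression in
//   struct S { char x[(int)(char *)2]; };
// is not an ICE, but it evaluates to 2, so the variable array type
// 'char [(int)(char *)2]' is fixed up to the constant array type 'char [2]'.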
static void
FixInvalidVariablyModifiedTypeLoc(TypeLoc SrcTL, TypeLoc DstTL) {
SrcTL = SrcTL.getUnqualifiedLoc();
DstTL = DstTL.getUnqualifiedLoc();
if (PointerTypeLoc SrcPTL = SrcTL.getAs<PointerTypeLoc>()) {
PointerTypeLoc DstPTL = DstTL.castAs<PointerTypeLoc>();
FixInvalidVariablyModifiedTypeLoc(SrcPTL.getPointeeLoc(),
DstPTL.getPointeeLoc());
DstPTL.setStarLoc(SrcPTL.getStarLoc());
return;
}
if (ParenTypeLoc SrcPTL = SrcTL.getAs<ParenTypeLoc>()) {
ParenTypeLoc DstPTL = DstTL.castAs<ParenTypeLoc>();
FixInvalidVariablyModifiedTypeLoc(SrcPTL.getInnerLoc(),
DstPTL.getInnerLoc());
DstPTL.setLParenLoc(SrcPTL.getLParenLoc());
DstPTL.setRParenLoc(SrcPTL.getRParenLoc());
return;
}
ArrayTypeLoc SrcATL = SrcTL.castAs<ArrayTypeLoc>();
ArrayTypeLoc DstATL = DstTL.castAs<ArrayTypeLoc>();
TypeLoc SrcElemTL = SrcATL.getElementLoc();
TypeLoc DstElemTL = DstATL.getElementLoc();
DstElemTL.initializeFullCopy(SrcElemTL);
DstATL.setLBracketLoc(SrcATL.getLBracketLoc());
DstATL.setSizeExpr(SrcATL.getSizeExpr());
DstATL.setRBracketLoc(SrcATL.getRBracketLoc());
}
/// Helper method to turn variable array types into constant array
/// types in certain situations which would otherwise be errors (for
/// GCC compatibility).
static TypeSourceInfo*
TryToFixInvalidVariablyModifiedTypeSourceInfo(TypeSourceInfo *TInfo,
ASTContext &Context,
bool &SizeIsNegative,
llvm::APSInt &Oversized) {
QualType FixedTy
= TryToFixInvalidVariablyModifiedType(TInfo->getType(), Context,
SizeIsNegative, Oversized);
if (FixedTy.isNull())
return nullptr;
TypeSourceInfo *FixedTInfo = Context.getTrivialTypeSourceInfo(FixedTy);
FixInvalidVariablyModifiedTypeLoc(TInfo->getTypeLoc(),
FixedTInfo->getTypeLoc());
return FixedTInfo;
}
/// Register the given locally-scoped extern "C" declaration so
/// that it can be found later for redeclarations. We include any extern "C"
/// declaration that is not visible in the translation unit here, not just
/// function-scope declarations.
void
Sema::RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S) {
if (!getLangOpts().CPlusPlus &&
ND->getLexicalDeclContext()->getRedeclContext()->isTranslationUnit())
// Don't need to track declarations in the TU in C.
return;
// Note that we have a locally-scoped external with this name.
Context.getExternCContextDecl()->makeDeclVisibleInContext(ND);
}
NamedDecl *Sema::findLocallyScopedExternCDecl(DeclarationName Name) {
// FIXME: We can have multiple results via __attribute__((overloadable)).
auto Result = Context.getExternCContextDecl()->lookup(Name);
return Result.empty() ? nullptr : *Result.begin();
}
/// Diagnose function specifiers on a declaration of an identifier that
/// does not identify a function.
void Sema::DiagnoseFunctionSpecifiers(const DeclSpec &DS) {
// FIXME: We should probably indicate the identifier in question to avoid
// confusion for constructs like "virtual int a(), b;"
if (DS.isVirtualSpecified())
Diag(DS.getVirtualSpecLoc(),
diag::err_virtual_non_function);
if (DS.hasExplicitSpecifier())
Diag(DS.getExplicitSpecLoc(),
diag::err_explicit_non_function);
if (DS.isNoreturnSpecified())
Diag(DS.getNoreturnSpecLoc(),
diag::err_noreturn_non_function);
}
NamedDecl*
Sema::ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo, LookupResult &Previous) {
// Typedef declarators cannot be qualified (C++ [dcl.meaning]p1).
if (D.getCXXScopeSpec().isSet()) {
Diag(D.getIdentifierLoc(), diag::err_qualified_typedef_declarator)
<< D.getCXXScopeSpec().getRange();
D.setInvalidType();
// Pretend we didn't see the scope specifier.
DC = CurContext;
Previous.clear();
}
DiagnoseFunctionSpecifiers(D.getDeclSpec());
if (D.getDeclSpec().isInlineSpecified())
Diag(D.getDeclSpec().getInlineSpecLoc(), diag::err_inline_non_function)
<< getLangOpts().CPlusPlus17;
if (D.getDeclSpec().hasConstexprSpecifier())
Diag(D.getDeclSpec().getConstexprSpecLoc(), diag::err_invalid_constexpr)
<< 1 << (D.getDeclSpec().getConstexprSpecifier() == CSK_consteval);
if (D.getName().Kind != UnqualifiedIdKind::IK_Identifier) {
if (D.getName().Kind == UnqualifiedIdKind::IK_DeductionGuideName)
Diag(D.getName().StartLocation,
diag::err_deduction_guide_invalid_specifier)
<< "typedef";
else
Diag(D.getName().StartLocation, diag::err_typedef_not_identifier)
<< D.getName().getSourceRange();
return nullptr;
}
TypedefDecl *NewTD = ParseTypedefDecl(S, D, TInfo->getType(), TInfo);
if (!NewTD) return nullptr;
// Handle attributes prior to checking for duplicates in MergeVarDecl
ProcessDeclAttributes(S, NewTD, D);
CheckTypedefForVariablyModifiedType(S, NewTD);
bool Redeclaration = D.isRedeclaration();
NamedDecl *ND = ActOnTypedefNameDecl(S, DC, NewTD, Previous, Redeclaration);
D.setRedeclaration(Redeclaration);
return ND;
}
void
Sema::CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *NewTD) {
// C99 6.7.7p2: If a typedef name specifies a variably modified type
// then it shall have block scope.
// Note that variably modified types must be fixed before merging the decl so
// that redeclarations will match.
TypeSourceInfo *TInfo = NewTD->getTypeSourceInfo();
QualType T = TInfo->getType();
if (T->isVariablyModifiedType()) {
setFunctionHasBranchProtectedScope();
if (S->getFnParent() == nullptr) {
bool SizeIsNegative;
llvm::APSInt Oversized;
TypeSourceInfo *FixedTInfo =
TryToFixInvalidVariablyModifiedTypeSourceInfo(TInfo, Context,
SizeIsNegative,
Oversized);
if (FixedTInfo) {
Diag(NewTD->getLocation(), diag::warn_illegal_constant_array_size);
NewTD->setTypeSourceInfo(FixedTInfo);
} else {
if (SizeIsNegative)
Diag(NewTD->getLocation(), diag::err_typecheck_negative_array_size);
else if (T->isVariableArrayType())
Diag(NewTD->getLocation(), diag::err_vla_decl_in_file_scope);
else if (Oversized.getBoolValue())
Diag(NewTD->getLocation(), diag::err_array_too_large)
<< Oversized.toString(10);
else
Diag(NewTD->getLocation(), diag::err_vm_decl_in_file_scope);
NewTD->setInvalidDecl();
}
}
}
}
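// For example, at file scope in C:
//   int n = 4;
//   typedef int A[n];               // error: VLA declared in file scope
//   typedef char B[(int)(char *)2]; // warning: size folds to 2, and the
//                                   // type is fixed up to 'char [2]'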
/// ActOnTypedefNameDecl - Perform semantic checking for a declaration which
/// declares a typedef-name, either using the 'typedef' type specifier or via
/// a C++0x [dcl.typedef]p2 alias-declaration: 'using T = A;'.
NamedDecl*
Sema::ActOnTypedefNameDecl(Scope *S, DeclContext *DC, TypedefNameDecl *NewTD,
LookupResult &Previous, bool &Redeclaration) {
// Find the shadowed declaration before filtering for scope.
NamedDecl *ShadowedDecl = getShadowedDeclaration(NewTD, Previous);
// Merge the decl with the existing one if appropriate. If the decl is
// in an outer scope, it isn't the same thing.
FilterLookupForScope(Previous, DC, S, /*ConsiderLinkage*/false,
/*AllowInlineNamespace*/false);
filterNonConflictingPreviousTypedefDecls(*this, NewTD, Previous);
if (!Previous.empty()) {
Redeclaration = true;
MergeTypedefNameDecl(S, NewTD, Previous);
}
if (ShadowedDecl && !Redeclaration)
CheckShadow(NewTD, ShadowedDecl, Previous);
// If this is the C FILE type, notify the AST context.
if (IdentifierInfo *II = NewTD->getIdentifier())
if (!NewTD->isInvalidDecl() &&
NewTD->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
if (II->isStr("FILE"))
Context.setFILEDecl(NewTD);
else if (II->isStr("jmp_buf"))
Context.setjmp_bufDecl(NewTD);
else if (II->isStr("sigjmp_buf"))
Context.setsigjmp_bufDecl(NewTD);
else if (II->isStr("ucontext_t"))
Context.setucontext_tDecl(NewTD);
}
return NewTD;
}
/// Determines whether the given declaration is an out-of-scope
/// previous declaration.
///
/// This routine should be invoked when name lookup has found a
/// previous declaration (PrevDecl) that is not in the scope where a
/// new declaration by the same name is being introduced. If the new
/// declaration occurs in a local scope, previous declarations with
/// linkage may still be considered previous declarations (C99
/// 6.2.2p4-5, C++ [basic.link]p6).
///
/// \param PrevDecl the previous declaration found by name
/// lookup
///
/// \param DC the context in which the new declaration is being
/// declared.
///
/// \returns true if PrevDecl is an out-of-scope previous declaration
/// for a new declaration with the same name.
static bool
isOutOfScopePreviousDeclaration(NamedDecl *PrevDecl, DeclContext *DC,
ASTContext &Context) {
if (!PrevDecl)
return false;
if (!PrevDecl->hasLinkage())
return false;
if (Context.getLangOpts().CPlusPlus) {
// C++ [basic.link]p6:
// If there is a visible declaration of an entity with linkage
// having the same name and type, ignoring entities declared
// outside the innermost enclosing namespace scope, the block
// scope declaration declares that same entity and receives the
// linkage of the previous declaration.
DeclContext *OuterContext = DC->getRedeclContext();
if (!OuterContext->isFunctionOrMethod())
// This rule only applies to block-scope declarations.
return false;
DeclContext *PrevOuterContext = PrevDecl->getDeclContext();
if (PrevOuterContext->isRecord())
// We found a member function: ignore it.
return false;
// Find the innermost enclosing namespace for the new and
// previous declarations.
OuterContext = OuterContext->getEnclosingNamespaceContext();
PrevOuterContext = PrevOuterContext->getEnclosingNamespaceContext();
// The previous declaration is in a different namespace, so it
// isn't the same function.
if (!OuterContext->Equals(PrevOuterContext))
return false;
}
return true;
}
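// For example:
//   int n;            // #1: file scope, external linkage
//   void g() {
//     extern int n;   // block scope; #1 is an out-of-scope previous
//   }                 // declaration of the same entity (C++ [basic.link]p6)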
static void SetNestedNameSpecifier(Sema &S, DeclaratorDecl *DD, Declarator &D) {
CXXScopeSpec &SS = D.getCXXScopeSpec();
if (!SS.isSet()) return;
DD->setQualifierInfo(SS.getWithLocInContext(S.Context));
}
bool Sema::inferObjCARCLifetime(ValueDecl *decl) {
QualType type = decl->getType();
Qualifiers::ObjCLifetime lifetime = type.getObjCLifetime();
if (lifetime == Qualifiers::OCL_Autoreleasing) {
// Various kinds of declaration aren't allowed to be __autoreleasing.
unsigned kind = -1U;
if (VarDecl *var = dyn_cast<VarDecl>(decl)) {
if (var->hasAttr<BlocksAttr>())
kind = 0; // __block
else if (!var->hasLocalStorage())
kind = 1; // global
} else if (isa<ObjCIvarDecl>(decl)) {
kind = 3; // ivar
} else if (isa<FieldDecl>(decl)) {
kind = 2; // field
}
if (kind != -1U) {
Diag(decl->getLocation(), diag::err_arc_autoreleasing_var)
<< kind;
}
} else if (lifetime == Qualifiers::OCL_None) {
// Try to infer lifetime.
if (!type->isObjCLifetimeType())
return false;
lifetime = type->getObjCARCImplicitLifetime();
type = Context.getLifetimeQualifiedType(type, lifetime);
decl->setType(type);
}
if (VarDecl *var = dyn_cast<VarDecl>(decl)) {
// Thread-local variables cannot have lifetime.
if (lifetime && lifetime != Qualifiers::OCL_ExplicitNone &&
var->getTLSKind()) {
Diag(var->getLocation(), diag::err_arc_thread_ownership)
<< var->getType();
return true;
}
}
return false;
}
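// For example, under ARC:
//   __autoreleasing id g; // error: a global cannot be __autoreleasing
//   id h;                 // no explicit lifetime; inferred as '__strong id'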
static void checkAttributesAfterMerging(Sema &S, NamedDecl &ND) {
// Ensure that an auto decl is deduced otherwise the checks below might cache
// the wrong linkage.
assert(S.ParsingInitForAutoVars.count(&ND) == 0);
// 'weak' only applies to declarations with external linkage.
if (WeakAttr *Attr = ND.getAttr<WeakAttr>()) {
if (!ND.isExternallyVisible()) {
S.Diag(Attr->getLocation(), diag::err_attribute_weak_static);
ND.dropAttr<WeakAttr>();
}
}
if (WeakRefAttr *Attr = ND.getAttr<WeakRefAttr>()) {
if (ND.isExternallyVisible()) {
S.Diag(Attr->getLocation(), diag::err_attribute_weakref_not_static);
ND.dropAttr<WeakRefAttr>();
ND.dropAttr<AliasAttr>();
}
}
if (auto *VD = dyn_cast<VarDecl>(&ND)) {
if (VD->hasInit()) {
if (const auto *Attr = VD->getAttr<AliasAttr>()) {
assert(VD->isThisDeclarationADefinition() &&
!VD->isExternallyVisible() && "Broken AliasAttr handled late!");
S.Diag(Attr->getLocation(), diag::err_alias_is_definition) << VD << 0;
VD->dropAttr<AliasAttr>();
}
}
}
// 'selectany' only applies to externally visible variable declarations.
// It does not apply to functions.
if (SelectAnyAttr *Attr = ND.getAttr<SelectAnyAttr>()) {
if (isa<FunctionDecl>(ND) || !ND.isExternallyVisible()) {
S.Diag(Attr->getLocation(),
diag::err_attribute_selectany_non_extern_data);
ND.dropAttr<SelectAnyAttr>();
}
}
if (const InheritableAttr *Attr = getDLLAttr(&ND)) {
auto *VD = dyn_cast<VarDecl>(&ND);
bool IsAnonymousNS = false;
bool IsMicrosoft = S.Context.getTargetInfo().getCXXABI().isMicrosoft();
if (VD) {
const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(VD->getDeclContext());
while (NS && !IsAnonymousNS) {
IsAnonymousNS = NS->isAnonymousNamespace();
NS = dyn_cast<NamespaceDecl>(NS->getParent());
}
}
// dll attributes require external linkage. Static locals may have external
// linkage but still cannot be explicitly imported or exported.
// In Microsoft mode, a variable defined in anonymous namespace must have
// external linkage in order to be exported.
bool AnonNSInMicrosoftMode = IsAnonymousNS && IsMicrosoft;
if ((ND.isExternallyVisible() && AnonNSInMicrosoftMode) ||
(!AnonNSInMicrosoftMode &&
(!ND.isExternallyVisible() || (VD && VD->isStaticLocal())))) {
S.Diag(ND.getLocation(), diag::err_attribute_dll_not_extern)
<< &ND << Attr;
ND.setInvalidDecl();
}
}
// Virtual functions cannot be marked as 'notail'.
if (auto *Attr = ND.getAttr<NotTailCalledAttr>())
if (auto *MD = dyn_cast<CXXMethodDecl>(&ND))
if (MD->isVirtual()) {
S.Diag(ND.getLocation(),
diag::err_invalid_attribute_on_virtual_function)
<< Attr;
ND.dropAttr<NotTailCalledAttr>();
}
// Check the attributes on the function type, if any.
if (const auto *FD = dyn_cast<FunctionDecl>(&ND)) {
// Don't declare this variable in the second operand of the for-statement;
// GCC miscompiles that by ending its lifetime before evaluating the
// third operand. See gcc.gnu.org/PR86769.
AttributedTypeLoc ATL;
for (TypeLoc TL = FD->getTypeSourceInfo()->getTypeLoc();
(ATL = TL.getAsAdjusted<AttributedTypeLoc>());
TL = ATL.getModifiedLoc()) {
// The [[lifetimebound]] attribute can be applied to the implicit object
// parameter of a non-static member function (other than a ctor or dtor)
// by applying it to the function type.
if (const auto *A = ATL.getAttrAs<LifetimeBoundAttr>()) {
const auto *MD = dyn_cast<CXXMethodDecl>(FD);
if (!MD || MD->isStatic()) {
S.Diag(A->getLocation(), diag::err_lifetimebound_no_object_param)
<< !MD << A->getRange();
} else if (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)) {
S.Diag(A->getLocation(), diag::err_lifetimebound_ctor_dtor)
<< isa<CXXDestructorDecl>(MD) << A->getRange();
}
}
}
}
}
static void checkDLLAttributeRedeclaration(Sema &S, NamedDecl *OldDecl,
NamedDecl *NewDecl,
bool IsSpecialization,
bool IsDefinition) {
if (OldDecl->isInvalidDecl() || NewDecl->isInvalidDecl())
return;
bool IsTemplate = false;
if (TemplateDecl *OldTD = dyn_cast<TemplateDecl>(OldDecl)) {
OldDecl = OldTD->getTemplatedDecl();
IsTemplate = true;
if (!IsSpecialization)
IsDefinition = false;
}
if (TemplateDecl *NewTD = dyn_cast<TemplateDecl>(NewDecl)) {
NewDecl = NewTD->getTemplatedDecl();
IsTemplate = true;
}
if (!OldDecl || !NewDecl)
return;
const DLLImportAttr *OldImportAttr = OldDecl->getAttr<DLLImportAttr>();
const DLLExportAttr *OldExportAttr = OldDecl->getAttr<DLLExportAttr>();
const DLLImportAttr *NewImportAttr = NewDecl->getAttr<DLLImportAttr>();
const DLLExportAttr *NewExportAttr = NewDecl->getAttr<DLLExportAttr>();
// dllimport and dllexport are inheritable attributes so we have to exclude
// inherited attribute instances.
bool HasNewAttr = (NewImportAttr && !NewImportAttr->isInherited()) ||
(NewExportAttr && !NewExportAttr->isInherited());
// A redeclaration is not allowed to add a dllimport or dllexport attribute,
// the only exception being explicit specializations.
// Implicitly generated declarations are also excluded for now because there
// is no other way to switch these to use dllimport or dllexport.
bool AddsAttr = !(OldImportAttr || OldExportAttr) && HasNewAttr;
if (AddsAttr && !IsSpecialization && !OldDecl->isImplicit()) {
// Allow with a warning for free functions and global variables.
bool JustWarn = false;
if (!OldDecl->isCXXClassMember()) {
auto *VD = dyn_cast<VarDecl>(OldDecl);
if (VD && !VD->getDescribedVarTemplate())
JustWarn = true;
auto *FD = dyn_cast<FunctionDecl>(OldDecl);
if (FD && FD->getTemplatedKind() == FunctionDecl::TK_NonTemplate)
JustWarn = true;
}
// We cannot change a declaration that's been used because IR has already
// been emitted. Dllimported functions will still work though (modulo
// address equality) as they can use the thunk.
if (OldDecl->isUsed())
if (!isa<FunctionDecl>(OldDecl) || !NewImportAttr)
JustWarn = false;
unsigned DiagID = JustWarn ? diag::warn_attribute_dll_redeclaration
: diag::err_attribute_dll_redeclaration;
S.Diag(NewDecl->getLocation(), DiagID)
<< NewDecl
<< (NewImportAttr ? (const Attr *)NewImportAttr : NewExportAttr);
S.Diag(OldDecl->getLocation(), diag::note_previous_declaration);
if (!JustWarn) {
NewDecl->setInvalidDecl();
return;
}
}
// A redeclaration is not allowed to drop a dllimport attribute, the only
// exceptions being inline function definitions (except for function
// templates), local extern declarations, qualified friend declarations, and
// the special MSVC extension: in the last case, the declaration is treated
// as if it were marked dllexport.
bool IsInline = false, IsStaticDataMember = false, IsQualifiedFriend = false;
bool IsMicrosoft = S.Context.getTargetInfo().getCXXABI().isMicrosoft();
if (const auto *VD = dyn_cast<VarDecl>(NewDecl)) {
// Ignore static data because out-of-line definitions are diagnosed
// separately.
IsStaticDataMember = VD->isStaticDataMember();
IsDefinition = VD->isThisDeclarationADefinition(S.Context) !=
VarDecl::DeclarationOnly;
} else if (const auto *FD = dyn_cast<FunctionDecl>(NewDecl)) {
IsInline = FD->isInlined();
IsQualifiedFriend = FD->getQualifier() &&
FD->getFriendObjectKind() == Decl::FOK_Declared;
}
if (OldImportAttr && !HasNewAttr &&
(!IsInline || (IsMicrosoft && IsTemplate)) && !IsStaticDataMember &&
!NewDecl->isLocalExternDecl() && !IsQualifiedFriend) {
if (IsMicrosoft && IsDefinition) {
S.Diag(NewDecl->getLocation(),
diag::warn_redeclaration_without_import_attribute)
<< NewDecl;
S.Diag(OldDecl->getLocation(), diag::note_previous_declaration);
NewDecl->dropAttr<DLLImportAttr>();
NewDecl->addAttr(::new (S.Context) DLLExportAttr(
NewImportAttr->getRange(), S.Context,
NewImportAttr->getSpellingListIndex()));
} else {
S.Diag(NewDecl->getLocation(),
diag::warn_redeclaration_without_attribute_prev_attribute_ignored)
<< NewDecl << OldImportAttr;
S.Diag(OldDecl->getLocation(), diag::note_previous_declaration);
S.Diag(OldImportAttr->getLocation(), diag::note_previous_attribute);
OldDecl->dropAttr<DLLImportAttr>();
NewDecl->dropAttr<DLLImportAttr>();
}
} else if (IsInline && OldImportAttr && !IsMicrosoft) {
// In MinGW, seeing a function declared inline drops the dllimport
// attribute.
OldDecl->dropAttr<DLLImportAttr>();
NewDecl->dropAttr<DLLImportAttr>();
S.Diag(NewDecl->getLocation(),
diag::warn_dllimport_dropped_from_inline_function)
<< NewDecl << OldImportAttr;
}
// A specialization of a class template member function is processed here
// since it's a redeclaration. If the parent class is dllexport, the
// specialization inherits that attribute. This doesn't happen automatically
// since the parent class isn't instantiated until later.
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(NewDecl)) {
if (MD->getTemplatedKind() == FunctionDecl::TK_MemberSpecialization &&
!NewImportAttr && !NewExportAttr) {
if (const DLLExportAttr *ParentExportAttr =
MD->getParent()->getAttr<DLLExportAttr>()) {
DLLExportAttr *NewAttr = ParentExportAttr->clone(S.Context);
NewAttr->setInherited(true);
NewDecl->addAttr(NewAttr);
}
}
}
}
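// For example:
//   void f();
//   __declspec(dllimport) void f(); // adds dllimport on a redeclaration:
//                                   // a warning here (free function), but
//                                   // an error for a class member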
/// Given that we are within the definition of the given function,
/// will that definition behave like C99's 'inline', where the
/// definition is discarded except for optimization purposes?
static bool isFunctionDefinitionDiscarded(Sema &S, FunctionDecl *FD) {
// Try to avoid calling GetGVALinkageForFunction.
// All cases of this require the 'inline' keyword.
if (!FD->isInlined()) return false;
// This is only possible in C++ with the gnu_inline attribute.
if (S.getLangOpts().CPlusPlus && !FD->hasAttr<GNUInlineAttr>())
return false;
// Okay, go ahead and call the relatively-more-expensive function.
return S.Context.GetGVALinkageForFunction(FD) == GVA_AvailableExternally;
}
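// For example, in C99 a translation unit containing only
//   inline int twice(int x) { return x + x; }
// provides an inline definition but no external definition (C99 6.7.4p7),
// so the body is discarded except for optimization purposes.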
/// Determine whether a variable is extern "C" prior to attaching
/// an initializer. We can't just call isExternC() here, because that
/// will also compute and cache whether the declaration is externally
/// visible, which might change when we attach the initializer.
///
/// This can only be used if the declaration is known to not be a
/// redeclaration of an internal linkage declaration.
///
/// For instance:
///
/// auto x = []{};
///
/// Attaching the initializer here makes this declaration not externally
/// visible, because its type has internal linkage.
///
/// FIXME: This is a hack.
template<typename T>
static bool isIncompleteDeclExternC(Sema &S, const T *D) {
if (S.getLangOpts().CPlusPlus) {
// In C++, the overloadable attribute negates the effects of extern "C".
if (!D->isInExternCContext() || D->template hasAttr<OverloadableAttr>())
return false;
// So do CUDA's host/device attributes.
if (S.getLangOpts().CUDA && (D->template hasAttr<CUDADeviceAttr>() ||
D->template hasAttr<CUDAHostAttr>()))
return false;
}
return D->isExternC();
}
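// For example:
//   extern "C" { auto x = []{}; }
// Before the initializer is attached, x appears to be an extern "C"
// declaration; attaching it gives x a closure type with no linkage, so the
// declaration is no longer externally visible.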
static bool shouldConsiderLinkage(const VarDecl *VD) {
const DeclContext *DC = VD->getDeclContext()->getRedeclContext();
if (DC->isFunctionOrMethod() || isa<OMPDeclareReductionDecl>(DC) ||
isa<OMPDeclareMapperDecl>(DC))
return VD->hasExternalStorage();
if (DC->isFileContext())
return true;
if (DC->isRecord())
return false;
llvm_unreachable("Unexpected context");
}
static bool shouldConsiderLinkage(const FunctionDecl *FD) {
const DeclContext *DC = FD->getDeclContext()->getRedeclContext();
if (DC->isFileContext() || DC->isFunctionOrMethod() ||
isa<OMPDeclareReductionDecl>(DC) || isa<OMPDeclareMapperDecl>(DC))
return true;
if (DC->isRecord())
return false;
llvm_unreachable("Unexpected context");
}
static bool hasParsedAttr(Scope *S, const Declarator &PD,
ParsedAttr::Kind Kind) {
// Check decl attributes on the DeclSpec.
if (PD.getDeclSpec().getAttributes().hasAttribute(Kind))
return true;
// Walk the declarator structure, checking decl attributes that appeared in a
// type position but apply to the decl itself.
for (unsigned I = 0, E = PD.getNumTypeObjects(); I != E; ++I) {
if (PD.getTypeObject(I).getAttrs().hasAttribute(Kind))
return true;
}
// Finally, check attributes on the decl itself.
return PD.getAttributes().hasAttribute(Kind);
}
/// Adjust the \c DeclContext for a function or variable that might be a
/// function-local external declaration.
bool Sema::adjustContextForLocalExternDecl(DeclContext *&DC) {
if (!DC->isFunctionOrMethod())
return false;
// If this is a local extern function or variable declared within a function
// template, don't add it into the enclosing namespace scope until it is
// instantiated; it might have a dependent type right now.
if (DC->isDependentContext())
return true;
// C++11 [basic.link]p7:
// When a block scope declaration of an entity with linkage is not found to
// refer to some other declaration, then that entity is a member of the
// innermost enclosing namespace.
//
// Per C++11 [namespace.def]p6, the innermost enclosing namespace is a
// semantically-enclosing namespace, not a lexically-enclosing one.
while (!DC->isFileContext() && !isa<LinkageSpecDecl>(DC))
DC = DC->getParent();
return true;
}
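// For example:
//   namespace N { void g() { extern int m; } }
// If lookup finds no prior 'm', the declaration is a member of the
// semantically-enclosing namespace N, so DC is adjusted from g to N.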
/// Returns true if given declaration has external C language linkage.
static bool isDeclExternC(const Decl *D) {
if (const auto *FD = dyn_cast<FunctionDecl>(D))
return FD->isExternC();
if (const auto *VD = dyn_cast<VarDecl>(D))
return VD->isExternC();
llvm_unreachable("Unknown type of decl!");
}
NamedDecl *Sema::ActOnVariableDeclarator(
Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo,
LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope, ArrayRef<BindingDecl *> Bindings) {
QualType R = TInfo->getType();
DeclarationName Name = GetNameForDeclarator(D).getName();
IdentifierInfo *II = Name.getAsIdentifierInfo();
if (D.isDecompositionDeclarator()) {
// Take the name of the first declarator as our name for diagnostic
// purposes.
auto &Decomp = D.getDecompositionDeclarator();
if (!Decomp.bindings().empty()) {
II = Decomp.bindings()[0].Name;
Name = II;
}
} else if (!II) {
Diag(D.getIdentifierLoc(), diag::err_bad_variable_name) << Name;
return nullptr;
}
if (getLangOpts().OpenCL) {
// OpenCL v2.0 s6.9.b - Image type can only be used as a function argument.
// OpenCL v2.0 s6.13.16.1 - Pipe type can only be used as a function
// argument.
if (R->isImageType() || R->isPipeType()) {
Diag(D.getIdentifierLoc(),
diag::err_opencl_type_can_only_be_used_as_function_parameter)
<< R;
D.setInvalidType();
return nullptr;
}
// OpenCL v1.2 s6.9.r:
// The event type cannot be used to declare a program scope variable.
// OpenCL v2.0 s6.9.q:
// The clk_event_t and reserve_id_t types cannot be declared in program scope.
if (S->getParent() == nullptr) {
if (R->isReserveIDT() || R->isClkEventT() || R->isEventT()) {
Diag(D.getIdentifierLoc(),
diag::err_invalid_type_for_program_scope_var) << R;
D.setInvalidType();
return nullptr;
}
}
// OpenCL v1.0 s6.8.a.3: Pointers to functions are not allowed.
QualType NR = R;
while (NR->isPointerType()) {
if (NR->isFunctionPointerType()) {
Diag(D.getIdentifierLoc(), diag::err_opencl_function_pointer);
D.setInvalidType();
break;
}
NR = NR->getPointeeType();
}
if (!getOpenCLOptions().isEnabled("cl_khr_fp16")) {
// OpenCL v1.2 s6.1.1.1: reject declaring variables of the half and
// half array type (unless the cl_khr_fp16 extension is enabled).
if (Context.getBaseElementType(R)->isHalfType()) {
Diag(D.getIdentifierLoc(), diag::err_opencl_half_declaration) << R;
D.setInvalidType();
}
}
if (R->isSamplerT()) {
// OpenCL v1.2 s6.9.b p4:
// The sampler type cannot be used with the __local and __global address
// space qualifiers.
if (R.getAddressSpace() == LangAS::opencl_local ||
R.getAddressSpace() == LangAS::opencl_global) {
Diag(D.getIdentifierLoc(), diag::err_wrong_sampler_addressspace);
}
// OpenCL v1.2 s6.12.14.1:
// A global sampler must be declared with either the constant address
// space qualifier or with the const qualifier.
if (DC->isTranslationUnit() &&
!(R.getAddressSpace() == LangAS::opencl_constant ||
R.isConstQualified())) {
Diag(D.getIdentifierLoc(), diag::err_opencl_nonconst_global_sampler);
D.setInvalidType();
}
}
// OpenCL v1.2 s6.9.r:
// The event type cannot be used with the __local, __constant and __global
// address space qualifiers.
if (R->isEventT()) {
if (R.getAddressSpace() != LangAS::opencl_private) {
Diag(D.getBeginLoc(), diag::err_event_t_addr_space_qual);
D.setInvalidType();
}
}
// C++ for OpenCL does not allow the thread_local storage qualifier.
// OpenCL C does not support thread_local either, so we also reject
// all other thread storage class specifiers.
DeclSpec::TSCS TSC = D.getDeclSpec().getThreadStorageClassSpec();
if (TSC != TSCS_unspecified) {
bool IsCXX = getLangOpts().OpenCLCPlusPlus;
Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(),
diag::err_opencl_unknown_type_specifier)
<< IsCXX << getLangOpts().getOpenCLVersionTuple().getAsString()
<< DeclSpec::getSpecifierName(TSC) << 1;
D.setInvalidType();
return nullptr;
}
}
DeclSpec::SCS SCSpec = D.getDeclSpec().getStorageClassSpec();
StorageClass SC = StorageClassSpecToVarDeclStorageClass(D.getDeclSpec());
// dllimport globals without explicit storage class are treated as extern. We
// have to change the storage class this early to get the right DeclContext.
if (SC == SC_None && !DC->isRecord() &&
hasParsedAttr(S, D, ParsedAttr::AT_DLLImport) &&
!hasParsedAttr(S, D, ParsedAttr::AT_DLLExport))
SC = SC_Extern;
DeclContext *OriginalDC = DC;
bool IsLocalExternDecl = SC == SC_Extern &&
adjustContextForLocalExternDecl(DC);
if (SCSpec == DeclSpec::SCS_mutable) {
// mutable can only appear on non-static class members, so it's always
// an error here
Diag(D.getIdentifierLoc(), diag::err_mutable_nonmember);
D.setInvalidType();
SC = SC_None;
}
if (getLangOpts().CPlusPlus11 && SCSpec == DeclSpec::SCS_register &&
!D.getAsmLabel() && !getSourceManager().isInSystemMacro(
D.getDeclSpec().getStorageClassSpecLoc())) {
// In C++11, the 'register' storage class specifier is deprecated.
// Suppress the warning in system macros, it's used in macros in some
// popular C system headers, such as in glibc's htonl() macro.
Diag(D.getDeclSpec().getStorageClassSpecLoc(),
getLangOpts().CPlusPlus17 ? diag::ext_register_storage_class
: diag::warn_deprecated_register)
<< FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc());
}
DiagnoseFunctionSpecifiers(D.getDeclSpec());
if (!DC->isRecord() && S->getFnParent() == nullptr) {
// C99 6.9p2: The storage-class specifiers auto and register shall not
// appear in the declaration specifiers in an external declaration.
// Global Register+Asm is a GNU extension we support.
if (SC == SC_Auto || (SC == SC_Register && !D.getAsmLabel())) {
Diag(D.getIdentifierLoc(), diag::err_typecheck_sclass_fscope);
D.setInvalidType();
}
}
bool IsMemberSpecialization = false;
bool IsVariableTemplateSpecialization = false;
bool IsPartialSpecialization = false;
bool IsVariableTemplate = false;
VarDecl *NewVD = nullptr;
VarTemplateDecl *NewTemplate = nullptr;
TemplateParameterList *TemplateParams = nullptr;
if (!getLangOpts().CPlusPlus) {
NewVD = VarDecl::Create(Context, DC, D.getBeginLoc(), D.getIdentifierLoc(),
II, R, TInfo, SC);
if (R->getContainedDeducedType())
ParsingInitForAutoVars.insert(NewVD);
if (D.isInvalidType())
NewVD->setInvalidDecl();
if (NewVD->getType().hasNonTrivialToPrimitiveDestructCUnion() &&
NewVD->hasLocalStorage())
checkNonTrivialCUnion(NewVD->getType(), NewVD->getLocation(),
NTCUC_AutoVar, NTCUK_Destruct);
} else {
bool Invalid = false;
if (DC->isRecord() && !CurContext->isRecord()) {
// This is an out-of-line definition of a static data member.
switch (SC) {
case SC_None:
break;
case SC_Static:
Diag(D.getDeclSpec().getStorageClassSpecLoc(),
diag::err_static_out_of_line)
<< FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc());
break;
case SC_Auto:
case SC_Register:
case SC_Extern:
// [dcl.stc] p2: The auto or register specifiers shall be applied only
// to names of variables declared in a block or to function parameters.
// [dcl.stc] p6: The extern specifier cannot be used in the declaration
// of class members
Diag(D.getDeclSpec().getStorageClassSpecLoc(),
diag::err_storage_class_for_static_member)
<< FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc());
break;
case SC_PrivateExtern:
llvm_unreachable("C storage class in c++!");
}
}
if (SC == SC_Static && CurContext->isRecord()) {
if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(DC)) {
if (RD->isLocalClass())
Diag(D.getIdentifierLoc(),
diag::err_static_data_member_not_allowed_in_local_class)
<< Name << RD->getDeclName();
// C++98 [class.union]p1: If a union contains a static data member,
// the program is ill-formed. C++11 drops this restriction.
if (RD->isUnion())
Diag(D.getIdentifierLoc(),
getLangOpts().CPlusPlus11
? diag::warn_cxx98_compat_static_data_member_in_union
: diag::ext_static_data_member_in_union) << Name;
// We conservatively disallow static data members in anonymous structs.
else if (!RD->getDeclName())
Diag(D.getIdentifierLoc(),
diag::err_static_data_member_not_allowed_in_anon_struct)
<< Name << RD->isUnion();
}
}
// Match up the template parameter lists with the scope specifier, then
// determine whether we have a template or a template specialization.
TemplateParams = MatchTemplateParametersToScopeSpecifier(
D.getDeclSpec().getBeginLoc(), D.getIdentifierLoc(),
D.getCXXScopeSpec(),
D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId
? D.getName().TemplateId
: nullptr,
TemplateParamLists,
/*never a friend*/ false, IsMemberSpecialization, Invalid);
if (TemplateParams) {
if (!TemplateParams->size() &&
D.getName().getKind() != UnqualifiedIdKind::IK_TemplateId) {
// There is an extraneous 'template<>' for this variable. Complain
// about it, but allow the declaration of the variable.
Diag(TemplateParams->getTemplateLoc(),
diag::err_template_variable_noparams)
<< II
<< SourceRange(TemplateParams->getTemplateLoc(),
TemplateParams->getRAngleLoc());
TemplateParams = nullptr;
} else {
if (D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId) {
// This is an explicit specialization or a partial specialization.
// FIXME: Check that we can declare a specialization here.
IsVariableTemplateSpecialization = true;
IsPartialSpecialization = TemplateParams->size() > 0;
} else { // if (TemplateParams->size() > 0)
// This is a template declaration.
IsVariableTemplate = true;
// Check that we can declare a template here.
if (CheckTemplateDeclScope(S, TemplateParams))
return nullptr;
// Only C++1y supports variable templates (N3651).
Diag(D.getIdentifierLoc(),
getLangOpts().CPlusPlus14
? diag::warn_cxx11_compat_variable_template
: diag::ext_variable_template);
}
}
} else {
assert((Invalid ||
D.getName().getKind() != UnqualifiedIdKind::IK_TemplateId) &&
"should have a 'template<>' for this decl");
}
if (IsVariableTemplateSpecialization) {
SourceLocation TemplateKWLoc =
TemplateParamLists.size() > 0
? TemplateParamLists[0]->getTemplateLoc()
: SourceLocation();
DeclResult Res = ActOnVarTemplateSpecialization(
S, D, TInfo, TemplateKWLoc, TemplateParams, SC,
IsPartialSpecialization);
if (Res.isInvalid())
return nullptr;
NewVD = cast<VarDecl>(Res.get());
AddToScope = false;
} else if (D.isDecompositionDeclarator()) {
NewVD = DecompositionDecl::Create(Context, DC, D.getBeginLoc(),
D.getIdentifierLoc(), R, TInfo, SC,
Bindings);
} else
NewVD = VarDecl::Create(Context, DC, D.getBeginLoc(),
D.getIdentifierLoc(), II, R, TInfo, SC);
// If this is supposed to be a variable template, create it as such.
if (IsVariableTemplate) {
NewTemplate =
VarTemplateDecl::Create(Context, DC, D.getIdentifierLoc(), Name,
TemplateParams, NewVD);
NewVD->setDescribedVarTemplate(NewTemplate);
}
// If this decl has an auto type in need of deduction, make a note of the
// Decl so we can diagnose uses of it in its own initializer.
if (R->getContainedDeducedType())
ParsingInitForAutoVars.insert(NewVD);
if (D.isInvalidType() || Invalid) {
NewVD->setInvalidDecl();
if (NewTemplate)
NewTemplate->setInvalidDecl();
}
SetNestedNameSpecifier(*this, NewVD, D);
// If we have any template parameter lists that don't directly belong to
// the variable (matching the scope specifier), store them.
unsigned VDTemplateParamLists = TemplateParams ? 1 : 0;
if (TemplateParamLists.size() > VDTemplateParamLists)
NewVD->setTemplateParameterListsInfo(
Context, TemplateParamLists.drop_back(VDTemplateParamLists));
if (D.getDeclSpec().hasConstexprSpecifier()) {
NewVD->setConstexpr(true);
// C++1z [dcl.spec.constexpr]p1:
// A static data member declared with the constexpr specifier is
// implicitly an inline variable.
if (NewVD->isStaticDataMember() && getLangOpts().CPlusPlus17)
NewVD->setImplicitlyInline();
if (D.getDeclSpec().getConstexprSpecifier() == CSK_consteval)
Diag(D.getDeclSpec().getConstexprSpecLoc(),
diag::err_constexpr_wrong_decl_kind)
<< /*consteval*/ 1;
}
}
if (D.getDeclSpec().isInlineSpecified()) {
if (!getLangOpts().CPlusPlus) {
Diag(D.getDeclSpec().getInlineSpecLoc(), diag::err_inline_non_function)
<< 0;
} else if (CurContext->isFunctionOrMethod()) {
// 'inline' is not allowed on block scope variable declaration.
Diag(D.getDeclSpec().getInlineSpecLoc(),
diag::err_inline_declaration_block_scope) << Name
<< FixItHint::CreateRemoval(D.getDeclSpec().getInlineSpecLoc());
} else {
Diag(D.getDeclSpec().getInlineSpecLoc(),
getLangOpts().CPlusPlus17 ? diag::warn_cxx14_compat_inline_variable
: diag::ext_inline_variable);
NewVD->setInlineSpecified();
}
}
// Set the lexical context. If the declarator has a C++ scope specifier, the
// lexical context will be different from the semantic context.
NewVD->setLexicalDeclContext(CurContext);
if (NewTemplate)
NewTemplate->setLexicalDeclContext(CurContext);
if (IsLocalExternDecl) {
if (D.isDecompositionDeclarator())
for (auto *B : Bindings)
B->setLocalExternDecl();
else
NewVD->setLocalExternDecl();
}
bool EmitTLSUnsupportedError = false;
if (DeclSpec::TSCS TSCS = D.getDeclSpec().getThreadStorageClassSpec()) {
// C++11 [dcl.stc]p4:
// When thread_local is applied to a variable of block scope the
// storage-class-specifier static is implied if it does not appear
// explicitly.
// Core issue: 'static' is not implied if the variable is declared
// 'extern'.
if (NewVD->hasLocalStorage() &&
(SCSpec != DeclSpec::SCS_unspecified ||
TSCS != DeclSpec::TSCS_thread_local ||
!DC->isFunctionOrMethod()))
Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(),
diag::err_thread_non_global)
<< DeclSpec::getSpecifierName(TSCS);
else if (!Context.getTargetInfo().isTLSSupported()) {
if (getLangOpts().CUDA || getLangOpts().OpenMPIsDevice) {
// Postpone error emission until we've collected attributes required to
// figure out whether it's a host or device variable and whether the
// error should be ignored.
EmitTLSUnsupportedError = true;
// We still need to mark the variable as TLS so it shows up in AST with
// proper storage class for other tools to use even if we're not going
// to emit any code for it.
NewVD->setTSCSpec(TSCS);
} else
Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(),
diag::err_thread_unsupported);
} else
NewVD->setTSCSpec(TSCS);
}
// C99 6.7.4p3
// An inline definition of a function with external linkage shall
// not contain a definition of a modifiable object with static or
// thread storage duration...
// We only apply this when the function is required to be defined
// elsewhere, i.e. when the function is not 'extern inline'. Note
// that a local variable with thread storage duration still has to
// be marked 'static'. Also note that it's possible to get these
// semantics in C++ using __attribute__((gnu_inline)).
if (SC == SC_Static && S->getFnParent() != nullptr &&
!NewVD->getType().isConstQualified()) {
FunctionDecl *CurFD = getCurFunctionDecl();
if (CurFD && isFunctionDefinitionDiscarded(*this, CurFD)) {
Diag(D.getDeclSpec().getStorageClassSpecLoc(),
diag::warn_static_local_in_extern_inline);
MaybeSuggestAddingStaticToDecl(CurFD);
}
}
if (D.getDeclSpec().isModulePrivateSpecified()) {
if (IsVariableTemplateSpecialization)
Diag(NewVD->getLocation(), diag::err_module_private_specialization)
<< (IsPartialSpecialization ? 1 : 0)
<< FixItHint::CreateRemoval(
D.getDeclSpec().getModulePrivateSpecLoc());
else if (IsMemberSpecialization)
Diag(NewVD->getLocation(), diag::err_module_private_specialization)
<< 2
<< FixItHint::CreateRemoval(D.getDeclSpec().getModulePrivateSpecLoc());
else if (NewVD->hasLocalStorage())
Diag(NewVD->getLocation(), diag::err_module_private_local)
<< 0 << NewVD->getDeclName()
<< SourceRange(D.getDeclSpec().getModulePrivateSpecLoc())
<< FixItHint::CreateRemoval(D.getDeclSpec().getModulePrivateSpecLoc());
else {
NewVD->setModulePrivate();
if (NewTemplate)
NewTemplate->setModulePrivate();
for (auto *B : Bindings)
B->setModulePrivate();
}
}
// Handle attributes prior to checking for duplicates in MergeVarDecl
ProcessDeclAttributes(S, NewVD, D);
if (getLangOpts().CUDA || getLangOpts().OpenMPIsDevice) {
if (EmitTLSUnsupportedError &&
((getLangOpts().CUDA && DeclAttrsMatchCUDAMode(getLangOpts(), NewVD)) ||
(getLangOpts().OpenMPIsDevice &&
NewVD->hasAttr<OMPDeclareTargetDeclAttr>())))
Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(),
diag::err_thread_unsupported);
// CUDA B.2.5: "__shared__ and __constant__ variables have implied static
// storage [duration]."
if (SC == SC_None && S->getFnParent() != nullptr &&
(NewVD->hasAttr<CUDASharedAttr>() ||
NewVD->hasAttr<CUDAConstantAttr>())) {
NewVD->setStorageClass(SC_Static);
}
}
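// Illustrative CUDA example (an assumption, not from this source):
//   __global__ void k() { __shared__ int tile[256]; }
// 'tile' is promoted to SC_Static here because __shared__ (and
// __constant__) imply static storage duration.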
// Ensure that dllimport globals without explicit storage class are treated as
// extern. The storage class is set above using parsed attributes. Now we can
// check the VarDecl itself.
assert(!NewVD->hasAttr<DLLImportAttr>() ||
NewVD->getAttr<DLLImportAttr>()->isInherited() ||
NewVD->isStaticDataMember() || NewVD->getStorageClass() != SC_None);
// In auto-retain/release, infer strong retention for variables of
// retainable type.
if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(NewVD))
NewVD->setInvalidDecl();
// Handle GNU asm-label extension (encoded as an attribute).
if (Expr *E = (Expr*)D.getAsmLabel()) {
// The parser guarantees this is a string.
StringLiteral *SE = cast<StringLiteral>(E);
StringRef Label = SE->getString();
if (S->getFnParent() != nullptr) {
switch (SC) {
case SC_None:
case SC_Auto:
Diag(E->getExprLoc(), diag::warn_asm_label_on_auto_decl) << Label;
break;
case SC_Register:
// Local named register
if (!Context.getTargetInfo().isValidGCCRegisterName(Label) &&
DeclAttrsMatchCUDAMode(getLangOpts(), getCurFunctionDecl()))
Diag(E->getExprLoc(), diag::err_asm_unknown_register_name) << Label;
break;
case SC_Static:
case SC_Extern:
case SC_PrivateExtern:
break;
}
} else if (SC == SC_Register) {
// Global named register
if (DeclAttrsMatchCUDAMode(getLangOpts(), NewVD)) {
const auto &TI = Context.getTargetInfo();
bool HasSizeMismatch;
if (!TI.isValidGCCRegisterName(Label))
Diag(E->getExprLoc(), diag::err_asm_unknown_register_name) << Label;
else if (!TI.validateGlobalRegisterVariable(Label,
Context.getTypeSize(R),
HasSizeMismatch))
Diag(E->getExprLoc(), diag::err_asm_invalid_global_var_reg) << Label;
else if (HasSizeMismatch)
Diag(E->getExprLoc(), diag::err_asm_register_size_mismatch) << Label;
}
if (!R->isIntegralType(Context) && !R->isPointerType()) {
Diag(D.getBeginLoc(), diag::err_asm_bad_register_type);
NewVD->setInvalidDecl(true);
}
}
NewVD->addAttr(::new (Context) AsmLabelAttr(SE->getStrTokenLoc(0),
Context, Label, 0));
} else if (!ExtnameUndeclaredIdentifiers.empty()) {
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*>::iterator I =
ExtnameUndeclaredIdentifiers.find(NewVD->getIdentifier());
if (I != ExtnameUndeclaredIdentifiers.end()) {
if (isDeclExternC(NewVD)) {
NewVD->addAttr(I->second);
ExtnameUndeclaredIdentifiers.erase(I);
} else
Diag(NewVD->getLocation(), diag::warn_redefine_extname_not_applied)
<< /*Variable*/1 << NewVD;
}
}
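// Illustrative uses of the asm-label extension handled above (not from
// the original source):
//   int counter asm("the_counter");  // renames the emitted symbol
//   register int gp asm("r13");      // global named-register variable,
//                                    // checked against the target's GCC
//                                    // register names and the type's size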
// Find the shadowed declaration before filtering for scope.
NamedDecl *ShadowedDecl = D.getCXXScopeSpec().isEmpty()
? getShadowedDeclaration(NewVD, Previous)
: nullptr;
// Don't consider existing declarations that are in a different
// scope and are out-of-semantic-context declarations (if the new
// declaration has linkage).
FilterLookupForScope(Previous, OriginalDC, S, shouldConsiderLinkage(NewVD),
D.getCXXScopeSpec().isNotEmpty() ||
IsMemberSpecialization ||
IsVariableTemplateSpecialization);
// Check whether the previous declaration is in the same block scope. This
// affects whether we merge types with it, per C++11 [dcl.array]p3.
if (getLangOpts().CPlusPlus &&
NewVD->isLocalVarDecl() && NewVD->hasExternalStorage())
NewVD->setPreviousDeclInSameBlockScope(
Previous.isSingleResult() && !Previous.isShadowed() &&
isDeclInScope(Previous.getFoundDecl(), OriginalDC, S, false));
if (!getLangOpts().CPlusPlus) {
D.setRedeclaration(CheckVariableDeclaration(NewVD, Previous));
} else {
// If this is an explicit specialization of a static data member, check it.
if (IsMemberSpecialization && !NewVD->isInvalidDecl() &&
CheckMemberSpecialization(NewVD, Previous))
NewVD->setInvalidDecl();
// Merge the decl with the existing one if appropriate.
if (!Previous.empty()) {
if (Previous.isSingleResult() &&
isa<FieldDecl>(Previous.getFoundDecl()) &&
D.getCXXScopeSpec().isSet()) {
// The user tried to define a non-static data member
// out-of-line (C++ [dcl.meaning]p1).
Diag(NewVD->getLocation(), diag::err_nonstatic_member_out_of_line)
<< D.getCXXScopeSpec().getRange();
Previous.clear();
NewVD->setInvalidDecl();
}
} else if (D.getCXXScopeSpec().isSet()) {
// No previous declaration in the qualifying scope.
Diag(D.getIdentifierLoc(), diag::err_no_member)
<< Name << computeDeclContext(D.getCXXScopeSpec(), true)
<< D.getCXXScopeSpec().getRange();
NewVD->setInvalidDecl();
}
if (!IsVariableTemplateSpecialization)
D.setRedeclaration(CheckVariableDeclaration(NewVD, Previous));
if (NewTemplate) {
VarTemplateDecl *PrevVarTemplate =
NewVD->getPreviousDecl()
? NewVD->getPreviousDecl()->getDescribedVarTemplate()
: nullptr;
// Check the template parameter list of this declaration, possibly
// merging in the template parameter list from the previous variable
// template declaration.
if (CheckTemplateParameterList(
TemplateParams,
PrevVarTemplate ? PrevVarTemplate->getTemplateParameters()
: nullptr,
(D.getCXXScopeSpec().isSet() && DC && DC->isRecord() &&
DC->isDependentContext())
? TPC_ClassTemplateMember
: TPC_VarTemplate))
NewVD->setInvalidDecl();
// If we are providing an explicit specialization of a static variable
// template, make a note of that.
if (PrevVarTemplate &&
PrevVarTemplate->getInstantiatedFromMemberTemplate())
PrevVarTemplate->setMemberSpecialization();
}
}
// Diagnose shadowed variables iff this isn't a redeclaration.
if (ShadowedDecl && !D.isRedeclaration())
CheckShadow(NewVD, ShadowedDecl, Previous);
ProcessPragmaWeak(S, NewVD);
// If this is the first declaration of an extern C variable, update
// the map of such variables.
if (NewVD->isFirstDecl() && !NewVD->isInvalidDecl() &&
isIncompleteDeclExternC(*this, NewVD))
RegisterLocallyScopedExternCDecl(NewVD, S);
if (getLangOpts().CPlusPlus && NewVD->isStaticLocal()) {
Decl *ManglingContextDecl;
if (MangleNumberingContext *MCtx = getCurrentMangleNumberContext(
NewVD->getDeclContext(), ManglingContextDecl)) {
Context.setManglingNumber(
NewVD, MCtx->getManglingNumber(
NewVD, getMSManglingNumber(getLangOpts(), S)));
Context.setStaticLocalNumber(NewVD, MCtx->getStaticLocalNumber(NewVD));
}
}
// Special handling of variable named 'main'.
if (Name.getAsIdentifierInfo() && Name.getAsIdentifierInfo()->isStr("main") &&
NewVD->getDeclContext()->getRedeclContext()->isTranslationUnit() &&
!getLangOpts().Freestanding && !NewVD->getDescribedVarTemplate()) {
// C++ [basic.start.main]p3
// A program that declares a variable main at global scope is ill-formed.
if (getLangOpts().CPlusPlus)
Diag(D.getBeginLoc(), diag::err_main_global_variable);
// In C, an external-linkage variable named main results in undefined
// behavior.
else if (NewVD->hasExternalFormalLinkage())
Diag(D.getBeginLoc(), diag::warn_main_redefined);
}
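// For illustration (not from this source):
//   int main;         // C++: error per [basic.start.main]p3
//   extern int main;  // C: warning; relying on it is undefined behavior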
if (D.isRedeclaration() && !Previous.empty()) {
NamedDecl *Prev = Previous.getRepresentativeDecl();
checkDLLAttributeRedeclaration(*this, Prev, NewVD, IsMemberSpecialization,
D.isFunctionDefinition());
}
if (NewTemplate) {
if (NewVD->isInvalidDecl())
NewTemplate->setInvalidDecl();
ActOnDocumentableDecl(NewTemplate);
return NewTemplate;
}
if (IsMemberSpecialization && !NewVD->isInvalidDecl())
CompleteMemberSpecialization(NewVD, Previous);
return NewVD;
}
/// Enum describing the %select options in diag::warn_decl_shadow.
enum ShadowedDeclKind {
SDK_Local,
SDK_Global,
SDK_StaticMember,
SDK_Field,
SDK_Typedef,
SDK_Using
};
/// Determine what kind of declaration we're shadowing.
static ShadowedDeclKind computeShadowedDeclKind(const NamedDecl *ShadowedDecl,
const DeclContext *OldDC) {
if (isa<TypeAliasDecl>(ShadowedDecl))
return SDK_Using;
else if (isa<TypedefDecl>(ShadowedDecl))
return SDK_Typedef;
else if (isa<RecordDecl>(OldDC))
return isa<FieldDecl>(ShadowedDecl) ? SDK_Field : SDK_StaticMember;
return OldDC->isFileContext() ? SDK_Global : SDK_Local;
}
/// Return the location of the capture if the given lambda captures the given
/// variable \p VD, or an invalid source location otherwise.
static SourceLocation getCaptureLocation(const LambdaScopeInfo *LSI,
const VarDecl *VD) {
for (const Capture &Capture : LSI->Captures) {
if (Capture.isVariableCapture() && Capture.getVariable() == VD)
return Capture.getLocation();
}
return SourceLocation();
}
static bool shouldWarnIfShadowedDecl(const DiagnosticsEngine &Diags,
const LookupResult &R) {
// Only diagnose if we're shadowing an unambiguous field or variable.
if (R.getResultKind() != LookupResult::Found)
return false;
// Return false if warning is ignored.
return !Diags.isIgnored(diag::warn_decl_shadow, R.getNameLoc());
}
/// Return the declaration shadowed by the given variable \p D, or null
/// if it doesn't shadow any declaration or shadowing warnings are disabled.
NamedDecl *Sema::getShadowedDeclaration(const VarDecl *D,
const LookupResult &R) {
if (!shouldWarnIfShadowedDecl(Diags, R))
return nullptr;
// Don't diagnose declarations at file scope.
if (D->hasGlobalStorage())
return nullptr;
NamedDecl *ShadowedDecl = R.getFoundDecl();
return isa<VarDecl>(ShadowedDecl) || isa<FieldDecl>(ShadowedDecl)
? ShadowedDecl
: nullptr;
}
/// Return the declaration shadowed by the given typedef \p D, or null
/// if it doesn't shadow any declaration or shadowing warnings are disabled.
NamedDecl *Sema::getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R) {
// Don't warn if typedef declaration is part of a class
if (D->getDeclContext()->isRecord())
return nullptr;
if (!shouldWarnIfShadowedDecl(Diags, R))
return nullptr;
NamedDecl *ShadowedDecl = R.getFoundDecl();
return isa<TypedefNameDecl>(ShadowedDecl) ? ShadowedDecl : nullptr;
}
/// Diagnose variable or built-in function shadowing. Implements
/// -Wshadow.
///
/// This method is called whenever a VarDecl is added to a "useful"
/// scope.
///
/// \param ShadowedDecl the declaration that is shadowed by the given variable
/// \param R the lookup of the name
///
void Sema::CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R) {
DeclContext *NewDC = D->getDeclContext();
if (FieldDecl *FD = dyn_cast<FieldDecl>(ShadowedDecl)) {
// Fields are not shadowed by variables in C++ static methods.
if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(NewDC))
if (MD->isStatic())
return;
// Fields shadowed by constructor parameters are a special case. Usually
// the constructor initializes the field with the parameter.
if (isa<CXXConstructorDecl>(NewDC))
if (const auto PVD = dyn_cast<ParmVarDecl>(D)) {
// Remember that this was shadowed so we can either warn about its
// modification or its existence depending on warning settings.
ShadowingDecls.insert({PVD->getCanonicalDecl(), FD});
return;
}
}
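// Typical pattern this special-cases (illustrative):
//   struct A { int n; A(int n) : n(n) {} };
// The parameter 'n' shadows the field 'n'; we stay quiet here and warn
// later only if the parameter is modified (see
// CheckShadowingDeclModification below).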
if (VarDecl *shadowedVar = dyn_cast<VarDecl>(ShadowedDecl))
if (shadowedVar->isExternC()) {
// For shadowing external vars, make sure that we point to the global
// declaration, not a locally scoped extern declaration.
for (auto I : shadowedVar->redecls())
if (I->isFileVarDecl()) {
ShadowedDecl = I;
break;
}
}
DeclContext *OldDC = ShadowedDecl->getDeclContext()->getRedeclContext();
unsigned WarningDiag = diag::warn_decl_shadow;
SourceLocation CaptureLoc;
if (isa<VarDecl>(D) && isa<VarDecl>(ShadowedDecl) && NewDC &&
isa<CXXMethodDecl>(NewDC)) {
if (const auto *RD = dyn_cast<CXXRecordDecl>(NewDC->getParent())) {
if (RD->isLambda() && OldDC->Encloses(NewDC->getLexicalParent())) {
if (RD->getLambdaCaptureDefault() == LCD_None) {
// Try to avoid warnings for lambdas with an explicit capture list.
const auto *LSI = cast<LambdaScopeInfo>(getCurFunction());
// Warn only when the lambda captures the shadowed decl explicitly.
CaptureLoc = getCaptureLocation(LSI, cast<VarDecl>(ShadowedDecl));
if (CaptureLoc.isInvalid())
WarningDiag = diag::warn_decl_shadow_uncaptured_local;
} else {
// Remember that this was shadowed so we can avoid the warning if the
// shadowed decl isn't captured and the warning settings allow it.
cast<LambdaScopeInfo>(getCurFunction())
->ShadowingDecls.push_back(
{cast<VarDecl>(D), cast<VarDecl>(ShadowedDecl)});
return;
}
}
if (cast<VarDecl>(ShadowedDecl)->hasLocalStorage()) {
// A variable can't shadow a local variable in an enclosing scope if
// they are separated by a non-capturing declaration context.
for (DeclContext *ParentDC = NewDC;
ParentDC && !ParentDC->Equals(OldDC);
ParentDC = getLambdaAwareParentOfDeclContext(ParentDC)) {
// Only block literals, captured statements, and lambda expressions
// can capture; other scopes don't.
if (!isa<BlockDecl>(ParentDC) && !isa<CapturedDecl>(ParentDC) &&
!isLambdaCallOperator(ParentDC)) {
return;
}
}
}
}
}
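// Sketch of the lambda cases handled above (illustrative only):
//   int v = 0;
//   auto a = [v]() { int v = 1; };  // warns: 'v' is explicitly captured
//   auto b = []() { int v = 1; };   // milder 'uncaptured local' variant
//   auto c = [=]() { int v = 1; };  // deferred until we know whether the
//                                   // shadowed 'v' is actually captured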
// Only warn about certain kinds of shadowing for class members.
if (NewDC && NewDC->isRecord()) {
// In particular, don't warn about shadowing non-class members.
if (!OldDC->isRecord())
return;
// TODO: should we warn about static data members shadowing
// static data members from base classes?
// TODO: don't diagnose for inaccessible shadowed members.
// This is hard to do perfectly because we might friend the
// shadowing context, but that's just a false negative.
}
DeclarationName Name = R.getLookupName();
// Emit warning and note.
if (getSourceManager().isInSystemMacro(R.getNameLoc()))
return;
ShadowedDeclKind Kind = computeShadowedDeclKind(ShadowedDecl, OldDC);
Diag(R.getNameLoc(), WarningDiag) << Name << Kind << OldDC;
if (!CaptureLoc.isInvalid())
Diag(CaptureLoc, diag::note_var_explicitly_captured_here)
<< Name << /*explicitly*/ 1;
Diag(ShadowedDecl->getLocation(), diag::note_previous_declaration);
}
/// Diagnose shadowing for variables shadowed in the lambda record \p LambdaRD
/// when these variables are captured by the lambda.
void Sema::DiagnoseShadowingLambdaDecls(const LambdaScopeInfo *LSI) {
for (const auto &Shadow : LSI->ShadowingDecls) {
const VarDecl *ShadowedDecl = Shadow.ShadowedDecl;
// Try to avoid the warning when the shadowed decl isn't captured.
SourceLocation CaptureLoc = getCaptureLocation(LSI, ShadowedDecl);
const DeclContext *OldDC = ShadowedDecl->getDeclContext();
Diag(Shadow.VD->getLocation(), CaptureLoc.isInvalid()
? diag::warn_decl_shadow_uncaptured_local
: diag::warn_decl_shadow)
<< Shadow.VD->getDeclName()
<< computeShadowedDeclKind(ShadowedDecl, OldDC) << OldDC;
if (!CaptureLoc.isInvalid())
Diag(CaptureLoc, diag::note_var_explicitly_captured_here)
<< Shadow.VD->getDeclName() << /*explicitly*/ 0;
Diag(ShadowedDecl->getLocation(), diag::note_previous_declaration);
}
}
/// Check -Wshadow without the advantage of a previous lookup.
void Sema::CheckShadow(Scope *S, VarDecl *D) {
if (Diags.isIgnored(diag::warn_decl_shadow, D->getLocation()))
return;
LookupResult R(*this, D->getDeclName(), D->getLocation(),
Sema::LookupOrdinaryName, Sema::ForVisibleRedeclaration);
LookupName(R, S);
if (NamedDecl *ShadowedDecl = getShadowedDeclaration(D, R))
CheckShadow(D, ShadowedDecl, R);
}
/// Check if 'E', which is an expression that is about to be modified, refers
/// to a constructor parameter that shadows a field.
void Sema::CheckShadowingDeclModification(Expr *E, SourceLocation Loc) {
// Quickly ignore expressions that can't be shadowing ctor parameters.
if (!getLangOpts().CPlusPlus || ShadowingDecls.empty())
return;
E = E->IgnoreParenImpCasts();
auto *DRE = dyn_cast<DeclRefExpr>(E);
if (!DRE)
return;
const NamedDecl *D = cast<NamedDecl>(DRE->getDecl()->getCanonicalDecl());
auto I = ShadowingDecls.find(D);
if (I == ShadowingDecls.end())
return;
const NamedDecl *ShadowedDecl = I->second;
const DeclContext *OldDC = ShadowedDecl->getDeclContext();
Diag(Loc, diag::warn_modifying_shadowing_decl) << D << OldDC;
Diag(D->getLocation(), diag::note_var_declared_here) << D;
Diag(ShadowedDecl->getLocation(), diag::note_previous_declaration);
// Avoid issuing multiple warnings about the same decl.
ShadowingDecls.erase(I);
}
/// Check for conflict between this global or extern "C" declaration and
/// previous global or extern "C" declarations. This is only used in C++.
template<typename T>
static bool checkGlobalOrExternCConflict(
Sema &S, const T *ND, bool IsGlobal, LookupResult &Previous) {
assert(S.getLangOpts().CPlusPlus && "only C++ has extern \"C\"");
NamedDecl *Prev = S.findLocallyScopedExternCDecl(ND->getDeclName());
if (!Prev && IsGlobal && !isIncompleteDeclExternC(S, ND)) {
// The common case: this global doesn't conflict with any extern "C"
// declaration.
return false;
}
if (Prev) {
if (!IsGlobal || isIncompleteDeclExternC(S, ND)) {
// Both the old and new declarations have C language linkage. This is a
// redeclaration.
Previous.clear();
Previous.addDecl(Prev);
return true;
}
// This is a global, non-extern "C" declaration, and there is a previous
// non-global extern "C" declaration. Diagnose if this is a variable
// declaration.
if (!isa<VarDecl>(ND))
return false;
} else {
// The declaration is extern "C". Check for any declaration in the
// translation unit which might conflict.
if (IsGlobal) {
// We have already performed the lookup into the translation unit.
IsGlobal = false;
for (LookupResult::iterator I = Previous.begin(), E = Previous.end();
I != E; ++I) {
if (isa<VarDecl>(*I)) {
Prev = *I;
break;
}
}
} else {
DeclContext::lookup_result R =
S.Context.getTranslationUnitDecl()->lookup(ND->getDeclName());
for (DeclContext::lookup_result::iterator I = R.begin(), E = R.end();
I != E; ++I) {
if (isa<VarDecl>(*I)) {
Prev = *I;
break;
}
// FIXME: If we have any other entity with this name in global scope,
// the declaration is ill-formed, but that is a defect: it breaks the
// 'stat' hack, for instance. Only variables can have mangled name
// clashes with extern "C" declarations, so only they deserve a
// diagnostic.
}
}
if (!Prev)
return false;
}
// Use the first declaration's location to ensure we point at something which
// is lexically inside an extern "C" linkage-spec.
assert(Prev && "should have found a previous declaration to diagnose");
if (FunctionDecl *FD = dyn_cast<FunctionDecl>(Prev))
Prev = FD->getFirstDecl();
else
Prev = cast<VarDecl>(Prev)->getFirstDecl();
S.Diag(ND->getLocation(), diag::err_extern_c_global_conflict)
<< IsGlobal << ND;
S.Diag(Prev->getLocation(), diag::note_extern_c_global_conflict)
<< IsGlobal;
return false;
}
/// Apply special rules for handling extern "C" declarations. Returns \c true
/// if we have found that this is a redeclaration of some prior entity.
///
/// Per C++ [dcl.link]p6:
/// Two declarations [for a function or variable] with C language linkage
/// with the same name that appear in different scopes refer to the same
/// [entity]. An entity with C language linkage shall not be declared with
/// the same name as an entity in global scope.
template<typename T>
static bool checkForConflictWithNonVisibleExternC(Sema &S, const T *ND,
LookupResult &Previous) {
if (!S.getLangOpts().CPlusPlus) {
// In C, when declaring a global variable, look for a corresponding 'extern'
// variable declared in function scope. We don't need this in C++, because
// we find local extern decls in the surrounding file-scope DeclContext.
if (ND->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
if (NamedDecl *Prev = S.findLocallyScopedExternCDecl(ND->getDeclName())) {
Previous.clear();
Previous.addDecl(Prev);
return true;
}
}
return false;
}
// A declaration in the translation unit can conflict with an extern "C"
// declaration.
if (ND->getDeclContext()->getRedeclContext()->isTranslationUnit())
return checkGlobalOrExternCConflict(S, ND, /*IsGlobal*/true, Previous);
// An extern "C" declaration can conflict with a declaration in the
// translation unit or can be a redeclaration of an extern "C" declaration
// in another scope.
if (isIncompleteDeclExternC(S,ND))
return checkGlobalOrExternCConflict(S, ND, /*IsGlobal*/false, Previous);
// Neither global nor extern "C": nothing to do.
return false;
}
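// Illustration of the [dcl.link]p6 rules enforced here (not from the
// original source):
//   extern "C" void f() { extern int n; }  // 'n' has C language linkage
//   double n;                              // error: conflicts with that 'n'
//   extern "C" int m;
//   namespace N { extern "C" int m; }      // OK: redeclares the same ::m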
void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
// If the decl is already known invalid, don't check it.
if (NewVD->isInvalidDecl())
return;
QualType T = NewVD->getType();
// Defer checking an 'auto' type until its initializer is attached.
if (T->isUndeducedType())
return;
if (NewVD->hasAttrs())
CheckAlignasUnderalignment(NewVD);
if (T->isObjCObjectType()) {
Diag(NewVD->getLocation(), diag::err_statically_allocated_object)
<< FixItHint::CreateInsertion(NewVD->getLocation(), "*");
T = Context.getObjCObjectPointerType(T);
NewVD->setType(T);
}
// Emit an error if an address space was applied to decl with local storage.
// This includes arrays of objects with address space qualifiers, but not
// automatic variables that point to other address spaces.
// ISO/IEC TR 18037 S5.1.2
if (!getLangOpts().OpenCL && NewVD->hasLocalStorage() &&
T.getAddressSpace() != LangAS::Default) {
Diag(NewVD->getLocation(), diag::err_as_qualified_auto_decl) << 0;
NewVD->setInvalidDecl();
return;
}
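// Illustrative (assuming the Embedded-C address_space extension):
//   void f() { __attribute__((address_space(1))) int x; }   // error
//   void g() { __attribute__((address_space(1))) int *p; }  // OK: only
//                                                           // the pointee
//                                                           // is qualified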
// OpenCL v1.2 s6.8 - The static qualifier is valid only in program
// scope.
if (getLangOpts().OpenCLVersion == 120 &&
!getOpenCLOptions().isEnabled("cl_clang_storage_class_specifiers") &&
NewVD->isStaticLocal()) {
Diag(NewVD->getLocation(), diag::err_static_function_scope);
NewVD->setInvalidDecl();
return;
}
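// OpenCL 1.2 sketch (illustrative only):
//   kernel void k() { static int n; }  // error unless the
//   cl_clang_storage_class_specifiers extension is enabled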
if (getLangOpts().OpenCL) {
// OpenCL v2.0 s6.12.5 - The __block storage type is not supported.
if (NewVD->hasAttr<BlocksAttr>()) {
Diag(NewVD->getLocation(), diag::err_opencl_block_storage_type);
return;
}
if (T->isBlockPointerType()) {
// OpenCL v2.0 s6.12.5 - Any block declaration must be const qualified and
// can't use 'extern' storage class.
if (!T.isConstQualified()) {
Diag(NewVD->getLocation(), diag::err_opencl_invalid_block_declaration)
<< 0 /*const*/;
NewVD->setInvalidDecl();
return;
}
if (NewVD->hasExternalStorage()) {
Diag(NewVD->getLocation(), diag::err_opencl_extern_block_declaration);
NewVD->setInvalidDecl();
return;
}
}
// OpenCL C v1.2 s6.5 - All program scope variables must be declared in the
// __constant address space.
// OpenCL C v2.0 s6.5.1 - Variables defined at program scope and static
// variables inside a function can also be declared in the global
// address space.
// C++ for OpenCL inherits rule from OpenCL C v2.0.
// FIXME: Adding local AS in C++ for OpenCL might make sense.
if (NewVD->isFileVarDecl() || NewVD->isStaticLocal() ||
NewVD->hasExternalStorage()) {
if (!T->isSamplerT() &&
!(T.getAddressSpace() == LangAS::opencl_constant ||
(T.getAddressSpace() == LangAS::opencl_global &&
(getLangOpts().OpenCLVersion == 200 ||
getLangOpts().OpenCLCPlusPlus)))) {
int Scope = NewVD->isStaticLocal() | NewVD->hasExternalStorage() << 1;
if (getLangOpts().OpenCLVersion == 200 || getLangOpts().OpenCLCPlusPlus)
Diag(NewVD->getLocation(), diag::err_opencl_global_invalid_addr_space)
<< Scope << "global or constant";
else
Diag(NewVD->getLocation(), diag::err_opencl_global_invalid_addr_space)
<< Scope << "constant";
NewVD->setInvalidDecl();
return;
}
} else {
if (T.getAddressSpace() == LangAS::opencl_global) {
Diag(NewVD->getLocation(), diag::err_opencl_function_variable)
<< 1 /*is any function*/ << "global";
NewVD->setInvalidDecl();
return;
}
if (T.getAddressSpace() == LangAS::opencl_constant ||
T.getAddressSpace() == LangAS::opencl_local) {
FunctionDecl *FD = getCurFunctionDecl();
// OpenCL v1.1 s6.5.2 and s6.5.3: no local or constant variables
// in functions.
if (FD && !FD->hasAttr<OpenCLKernelAttr>()) {
if (T.getAddressSpace() == LangAS::opencl_constant)
Diag(NewVD->getLocation(), diag::err_opencl_function_variable)
<< 0 /*non-kernel only*/ << "constant";
else
Diag(NewVD->getLocation(), diag::err_opencl_function_variable)
<< 0 /*non-kernel only*/ << "local";
NewVD->setInvalidDecl();
return;
}
// OpenCL v2.0 s6.5.2 and s6.5.3: local and constant variables must be
// in the outermost scope of a kernel function.
if (FD && FD->hasAttr<OpenCLKernelAttr>()) {
if (!getCurScope()->isFunctionScope()) {
if (T.getAddressSpace() == LangAS::opencl_constant)
Diag(NewVD->getLocation(), diag::err_opencl_addrspace_scope)
<< "constant";
else
Diag(NewVD->getLocation(), diag::err_opencl_addrspace_scope)
<< "local";
NewVD->setInvalidDecl();
return;
}
}
} else if (T.getAddressSpace() != LangAS::opencl_private &&
// If we are parsing a template we didn't deduce an addr
// space yet.
T.getAddressSpace() != LangAS::Default) {
// Do not allow other address spaces on automatic variable.
Diag(NewVD->getLocation(), diag::err_as_qualified_auto_decl) << 1;
NewVD->setInvalidDecl();
return;
}
}
}
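// Rough OpenCL sketch of the address-space rules above (illustrative,
// assuming OpenCL 2.0 unless noted):
//   constant int c = 314;           // OK at program scope
//   global int g;                   // OK in 2.0 / C++ for OpenCL only
//   kernel void k() {
//     local int tile[64];           // OK: outermost scope of a kernel
//     { local int t; }              // error: not the outermost scope
//   }
//   void helper() { local int t; }  // error: non-kernel function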
if (NewVD->hasLocalStorage() && T.isObjCGCWeak()
&& !NewVD->hasAttr<BlocksAttr>()) {
if (getLangOpts().getGC() != LangOptions::NonGC)
Diag(NewVD->getLocation(), diag::warn_gc_attribute_weak_on_local);
else {
assert(!getLangOpts().ObjCAutoRefCount);
Diag(NewVD->getLocation(), diag::warn_attribute_weak_on_local);
}
}
bool isVM = T->isVariablyModifiedType();
if (isVM || NewVD->hasAttr<CleanupAttr>() ||
NewVD->hasAttr<BlocksAttr>())
setFunctionHasBranchProtectedScope();
if ((isVM && NewVD->hasLinkage()) ||
(T->isVariableArrayType() && NewVD->hasGlobalStorage())) {
bool SizeIsNegative;
llvm::APSInt Oversized;
TypeSourceInfo *FixedTInfo = TryToFixInvalidVariablyModifiedTypeSourceInfo(
NewVD->getTypeSourceInfo(), Context, SizeIsNegative, Oversized);
QualType FixedT;
if (FixedTInfo && T == NewVD->getTypeSourceInfo()->getType())
FixedT = FixedTInfo->getType();
else if (FixedTInfo) {
// Type and type-as-written are canonically different. We need to fix up
// both types separately.
FixedT = TryToFixInvalidVariablyModifiedType(T, Context, SizeIsNegative,
Oversized);
}
if ((!FixedTInfo || FixedT.isNull()) && T->isVariableArrayType()) {
const VariableArrayType *VAT = Context.getAsVariableArrayType(T);
// FIXME: This won't give the correct result for
// int a[10][n];
SourceRange SizeRange = VAT->getSizeExpr()->getSourceRange();
if (NewVD->isFileVarDecl())
Diag(NewVD->getLocation(), diag::err_vla_decl_in_file_scope)
<< SizeRange;
else if (NewVD->isStaticLocal())
Diag(NewVD->getLocation(), diag::err_vla_decl_has_static_storage)
<< SizeRange;
else
Diag(NewVD->getLocation(), diag::err_vla_decl_has_extern_linkage)
<< SizeRange;
NewVD->setInvalidDecl();
return;
}
if (!FixedTInfo) {
if (NewVD->isFileVarDecl())
Diag(NewVD->getLocation(), diag::err_vm_decl_in_file_scope);
else
Diag(NewVD->getLocation(), diag::err_vm_decl_has_extern_linkage);
NewVD->setInvalidDecl();
return;
}
Diag(NewVD->getLocation(), diag::warn_illegal_constant_array_size);
NewVD->setType(FixedT);
NewVD->setTypeSourceInfo(FixedTInfo);
}
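// Illustrative cases for the VLA/VM checks above (not from this source):
//   void f(int n) {
//     static int a[n];  // error: VLA with static storage duration
//     extern int b[n];  // error: VLA with linkage
//   }
// When the size actually folds to a constant, the type is repaired and
// only warn_illegal_constant_array_size is emitted.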
if (T->isVoidType()) {
// C++98 [dcl.stc]p5: The extern specifier can be applied only to the names
// of objects and functions.
if (NewVD->isThisDeclarationADefinition() || getLangOpts().CPlusPlus) {
Diag(NewVD->getLocation(), diag::err_typecheck_decl_incomplete_type)
<< T;
NewVD->setInvalidDecl();
return;
}
}
if (!NewVD->hasLocalStorage() && NewVD->hasAttr<BlocksAttr>()) {
Diag(NewVD->getLocation(), diag::err_block_on_nonlocal);
NewVD->setInvalidDecl();
return;
}
if (isVM && NewVD->hasAttr<BlocksAttr>()) {
Diag(NewVD->getLocation(), diag::err_block_on_vm);
NewVD->setInvalidDecl();
return;
}
if (NewVD->isConstexpr() && !T->isDependentType() &&
RequireLiteralType(NewVD->getLocation(), T,
diag::err_constexpr_var_non_literal)) {
NewVD->setInvalidDecl();
return;
}
}
/// Perform semantic checking on a newly-created variable
/// declaration.
///
/// This routine performs all of the type-checking required for a
/// variable declaration once it has been built. It is used both to
/// check variables after they have been parsed and their declarators
/// have been translated into a declaration, and to check variables
/// that have been instantiated from a template.
///
/// Sets NewVD->isInvalidDecl() if an error was encountered.
///
/// Returns true if the variable declaration is a redeclaration.
bool Sema::CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous) {
CheckVariableDeclarationType(NewVD);
// If the decl is already known invalid, don't check it.
if (NewVD->isInvalidDecl())
return false;
// If we did not find anything by this name, look for a non-visible
// extern "C" declaration with the same name.
if (Previous.empty() &&
checkForConflictWithNonVisibleExternC(*this, NewVD, Previous))
Previous.setShadowed();
if (!Previous.empty()) {
MergeVarDecl(NewVD, Previous);
return true;
}
return false;
}
namespace {
struct FindOverriddenMethod {
Sema *S;
CXXMethodDecl *Method;
/// Member lookup function that determines whether a given C++
/// method overrides a method in a base class, to be used with
/// CXXRecordDecl::lookupInBases().
bool operator()(const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
RecordDecl *BaseRecord =
Specifier->getType()->getAs<RecordType>()->getDecl();
DeclarationName Name = Method->getDeclName();
// FIXME: Do we care about other names here too?
if (Name.getNameKind() == DeclarationName::CXXDestructorName) {
// We really want to find the base class destructor here.
QualType T = S->Context.getTypeDeclType(BaseRecord);
CanQualType CT = S->Context.getCanonicalType(T);
Name = S->Context.DeclarationNames.getCXXDestructorName(CT);
}
for (Path.Decls = BaseRecord->lookup(Name); !Path.Decls.empty();
Path.Decls = Path.Decls.slice(1)) {
NamedDecl *D = Path.Decls.front();
if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
if (MD->isVirtual() && !S->IsOverload(Method, MD, false))
return true;
}
}
return false;
}
};
enum OverrideErrorKind { OEK_All, OEK_NonDeleted, OEK_Deleted };
} // end anonymous namespace
/// Report an error regarding overriding, along with any relevant
/// overridden methods.
///
/// \param DiagID the primary error to report.
/// \param MD the overriding method.
/// \param OEK which overrides to include as notes.
static void ReportOverrides(Sema& S, unsigned DiagID, const CXXMethodDecl *MD,
OverrideErrorKind OEK = OEK_All) {
S.Diag(MD->getLocation(), DiagID) << MD->getDeclName();
for (const CXXMethodDecl *O : MD->overridden_methods()) {
// This check (& the OEK parameter) could be replaced by a predicate, but
// without lambdas that would be overkill. This is still nicer than writing
// out the diag loop 3 times.
if ((OEK == OEK_All) ||
(OEK == OEK_NonDeleted && !O->isDeleted()) ||
(OEK == OEK_Deleted && O->isDeleted()))
S.Diag(O->getLocation(), diag::note_overridden_virtual_function);
}
}
/// AddOverriddenMethods - See if a method overrides any in the base classes,
/// and if so, check that it's a valid override and remember it.
bool Sema::AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) {
// Look for methods in base classes that this method might override.
CXXBasePaths Paths;
FindOverriddenMethod FOM;
FOM.Method = MD;
FOM.S = this;
bool hasDeletedOverridenMethods = false;
bool hasNonDeletedOverridenMethods = false;
bool AddedAny = false;
if (DC->lookupInBases(FOM, Paths)) {
for (auto *I : Paths.found_decls()) {
if (CXXMethodDecl *OldMD = dyn_cast<CXXMethodDecl>(I)) {
MD->addOverriddenMethod(OldMD->getCanonicalDecl());
if (!CheckOverridingFunctionReturnType(MD, OldMD) &&
!CheckOverridingFunctionAttributes(MD, OldMD) &&
!CheckOverridingFunctionExceptionSpec(MD, OldMD) &&
!CheckIfOverriddenFunctionIsMarkedFinal(MD, OldMD)) {
hasDeletedOverridenMethods |= OldMD->isDeleted();
hasNonDeletedOverridenMethods |= !OldMD->isDeleted();
AddedAny = true;
}
}
}
}
if (hasDeletedOverridenMethods && !MD->isDeleted()) {
ReportOverrides(*this, diag::err_non_deleted_override, MD, OEK_Deleted);
}
if (hasNonDeletedOverridenMethods && MD->isDeleted()) {
ReportOverrides(*this, diag::err_deleted_override, MD, OEK_NonDeleted);
}
return AddedAny;
}
namespace {
// Struct for holding all of the extra arguments needed by
// DiagnoseInvalidRedeclaration to call Sema::ActOnFunctionDeclarator.
struct ActOnFDArgs {
Scope *S;
Declarator &D;
MultiTemplateParamsArg TemplateParamLists;
bool AddToScope;
};
} // end anonymous namespace
namespace {
// Callback to only accept typo corrections that have a non-zero edit distance.
// Also only accept corrections that have the same parent decl.
class DifferentNameValidatorCCC final : public CorrectionCandidateCallback {
public:
DifferentNameValidatorCCC(ASTContext &Context, FunctionDecl *TypoFD,
CXXRecordDecl *Parent)
: Context(Context), OriginalFD(TypoFD),
ExpectedParent(Parent ? Parent->getCanonicalDecl() : nullptr) {}
bool ValidateCandidate(const TypoCorrection &candidate) override {
if (candidate.getEditDistance() == 0)
return false;
SmallVector<unsigned, 1> MismatchedParams;
for (TypoCorrection::const_decl_iterator CDecl = candidate.begin(),
CDeclEnd = candidate.end();
CDecl != CDeclEnd; ++CDecl) {
FunctionDecl *FD = dyn_cast<FunctionDecl>(*CDecl);
if (FD && !FD->hasBody() &&
hasSimilarParameters(Context, FD, OriginalFD, MismatchedParams)) {
if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
CXXRecordDecl *Parent = MD->getParent();
if (Parent && Parent->getCanonicalDecl() == ExpectedParent)
return true;
} else if (!ExpectedParent) {
return true;
}
}
}
return false;
}
std::unique_ptr<CorrectionCandidateCallback> clone() override {
return llvm::make_unique<DifferentNameValidatorCCC>(*this);
}
private:
ASTContext &Context;
FunctionDecl *OriginalFD;
CXXRecordDecl *ExpectedParent;
};
} // end anonymous namespace
void Sema::MarkTypoCorrectedFunctionDefinition(const NamedDecl *F) {
TypoCorrectedFunctionDefinitions.insert(F);
}
/// Generate diagnostics for an invalid function redeclaration.
///
/// This routine handles generating the diagnostic messages for an invalid
/// function redeclaration, including finding possible similar declarations
/// or performing typo correction if there are no previous declarations with
/// the same name.
///
/// Returns a NamedDecl iff typo correction was performed and substituting in
/// the new declaration name does not cause new errors.
static NamedDecl *DiagnoseInvalidRedeclaration(
Sema &SemaRef, LookupResult &Previous, FunctionDecl *NewFD,
ActOnFDArgs &ExtraArgs, bool IsLocalFriend, Scope *S) {
DeclarationName Name = NewFD->getDeclName();
DeclContext *NewDC = NewFD->getDeclContext();
SmallVector<unsigned, 1> MismatchedParams;
SmallVector<std::pair<FunctionDecl *, unsigned>, 1> NearMatches;
TypoCorrection Correction;
bool IsDefinition = ExtraArgs.D.isFunctionDefinition();
unsigned DiagMsg =
IsLocalFriend ? diag::err_no_matching_local_friend :
NewFD->getFriendObjectKind() ? diag::err_qualified_friend_no_match :
diag::err_member_decl_does_not_match;
LookupResult Prev(SemaRef, Name, NewFD->getLocation(),
IsLocalFriend ? Sema::LookupLocalFriendName
: Sema::LookupOrdinaryName,
Sema::ForVisibleRedeclaration);
NewFD->setInvalidDecl();
if (IsLocalFriend)
SemaRef.LookupName(Prev, S);
else
SemaRef.LookupQualifiedName(Prev, NewDC);
assert(!Prev.isAmbiguous() &&
"Cannot have an ambiguity in previous-declaration lookup");
CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(NewFD);
DifferentNameValidatorCCC CCC(SemaRef.Context, NewFD,
MD ? MD->getParent() : nullptr);
if (!Prev.empty()) {
for (LookupResult::iterator Func = Prev.begin(), FuncEnd = Prev.end();
Func != FuncEnd; ++Func) {
FunctionDecl *FD = dyn_cast<FunctionDecl>(*Func);
if (FD &&
hasSimilarParameters(SemaRef.Context, FD, NewFD, MismatchedParams)) {
// Add 1 to the index so that 0 can mean the mismatch didn't
// involve a parameter
unsigned ParamNum =
MismatchedParams.empty() ? 0 : MismatchedParams.front() + 1;
NearMatches.push_back(std::make_pair(FD, ParamNum));
}
}
// If the qualified name lookup yielded nothing, try typo correction
} else if ((Correction = SemaRef.CorrectTypo(
Prev.getLookupNameInfo(), Prev.getLookupKind(), S,
&ExtraArgs.D.getCXXScopeSpec(), CCC, Sema::CTK_ErrorRecovery,
IsLocalFriend ? nullptr : NewDC))) {
// Set up everything for the call to ActOnFunctionDeclarator
ExtraArgs.D.SetIdentifier(Correction.getCorrectionAsIdentifierInfo(),
ExtraArgs.D.getIdentifierLoc());
Previous.clear();
Previous.setLookupName(Correction.getCorrection());
for (TypoCorrection::decl_iterator CDecl = Correction.begin(),
CDeclEnd = Correction.end();
CDecl != CDeclEnd; ++CDecl) {
FunctionDecl *FD = dyn_cast<FunctionDecl>(*CDecl);
if (FD && !FD->hasBody() &&
hasSimilarParameters(SemaRef.Context, FD, NewFD, MismatchedParams)) {
Previous.addDecl(FD);
}
}
bool wasRedeclaration = ExtraArgs.D.isRedeclaration();
NamedDecl *Result;
// Retry building the function declaration with the new previous
// declarations, and with errors suppressed.
{
// Trap errors.
Sema::SFINAETrap Trap(SemaRef);
// TODO: Refactor ActOnFunctionDeclarator so that we can call only the
// pieces needed to verify the typo-corrected C++ declaration and hopefully
// eliminate the need for the parameter pack ExtraArgs.
Result = SemaRef.ActOnFunctionDeclarator(
ExtraArgs.S, ExtraArgs.D,
Correction.getCorrectionDecl()->getDeclContext(),
NewFD->getTypeSourceInfo(), Previous, ExtraArgs.TemplateParamLists,
ExtraArgs.AddToScope);
if (Trap.hasErrorOccurred())
Result = nullptr;
}
if (Result) {
// Determine which correction we picked.
Decl *Canonical = Result->getCanonicalDecl();
for (LookupResult::iterator I = Previous.begin(), E = Previous.end();
I != E; ++I)
if ((*I)->getCanonicalDecl() == Canonical)
Correction.setCorrectionDecl(*I);
// Let Sema know about the correction.
SemaRef.MarkTypoCorrectedFunctionDefinition(Result);
SemaRef.diagnoseTypo(
Correction,
SemaRef.PDiag(IsLocalFriend
? diag::err_no_matching_local_friend_suggest
: diag::err_member_decl_does_not_match_suggest)
<< Name << NewDC << IsDefinition);
return Result;
}
// Pretend the typo correction never occurred
ExtraArgs.D.SetIdentifier(Name.getAsIdentifierInfo(),
ExtraArgs.D.getIdentifierLoc());
ExtraArgs.D.setRedeclaration(wasRedeclaration);
Previous.clear();
Previous.setLookupName(Name);
}
SemaRef.Diag(NewFD->getLocation(), DiagMsg)
<< Name << NewDC << IsDefinition << NewFD->getLocation();
bool NewFDisConst = false;
if (CXXMethodDecl *NewMD = dyn_cast<CXXMethodDecl>(NewFD))
NewFDisConst = NewMD->isConst();
for (SmallVectorImpl<std::pair<FunctionDecl *, unsigned> >::iterator
NearMatch = NearMatches.begin(), NearMatchEnd = NearMatches.end();
NearMatch != NearMatchEnd; ++NearMatch) {
FunctionDecl *FD = NearMatch->first;
CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
bool FDisConst = MD && MD->isConst();
bool IsMember = MD || !IsLocalFriend;
// FIXME: These notes are poorly worded for the local friend case.
if (unsigned Idx = NearMatch->second) {
ParmVarDecl *FDParam = FD->getParamDecl(Idx-1);
SourceLocation Loc = FDParam->getTypeSpecStartLoc();
if (Loc.isInvalid()) Loc = FD->getLocation();
SemaRef.Diag(Loc, IsMember ? diag::note_member_def_close_param_match
: diag::note_local_decl_close_param_match)
<< Idx << FDParam->getType()
<< NewFD->getParamDecl(Idx - 1)->getType();
} else if (FDisConst != NewFDisConst) {
SemaRef.Diag(FD->getLocation(), diag::note_member_def_close_const_match)
<< NewFDisConst << FD->getSourceRange().getEnd();
} else
SemaRef.Diag(FD->getLocation(),
IsMember ? diag::note_member_def_close_match
: diag::note_local_decl_close_match);
}
return nullptr;
}
static StorageClass getFunctionStorageClass(Sema &SemaRef, Declarator &D) {
switch (D.getDeclSpec().getStorageClassSpec()) {
default: llvm_unreachable("Unknown storage class!");
case DeclSpec::SCS_auto:
case DeclSpec::SCS_register:
case DeclSpec::SCS_mutable:
SemaRef.Diag(D.getDeclSpec().getStorageClassSpecLoc(),
diag::err_typecheck_sclass_func);
D.getMutableDeclSpec().ClearStorageClassSpecs();
D.setInvalidType();
break;
case DeclSpec::SCS_unspecified: break;
case DeclSpec::SCS_extern:
if (D.getDeclSpec().isExternInLinkageSpec())
return SC_None;
return SC_Extern;
case DeclSpec::SCS_static: {
if (SemaRef.CurContext->getRedeclContext()->isFunctionOrMethod()) {
// C99 6.7.1p5:
// The declaration of an identifier for a function that has
// block scope shall have no explicit storage-class specifier
// other than extern
// See also (C++ [dcl.stc]p4).
SemaRef.Diag(D.getDeclSpec().getStorageClassSpecLoc(),
diag::err_static_block_func);
break;
} else
return SC_Static;
}
case DeclSpec::SCS_private_extern: return SC_PrivateExtern;
}
// No explicit storage class was returned above; default to SC_None.
return SC_None;
}
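// For illustration (not from the original source):
//   void f() { static void g(); }  // error: block-scope function
//                                  // declarations may only use 'extern'
//   register void h();             // error: invalid storage class for a
//                                  // function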
static FunctionDecl* CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
DeclContext *DC, QualType &R,
TypeSourceInfo *TInfo,
StorageClass SC,
bool &IsVirtualOkay) {
DeclarationNameInfo NameInfo = SemaRef.GetNameForDeclarator(D);
DeclarationName Name = NameInfo.getName();
FunctionDecl *NewFD = nullptr;
bool isInline = D.getDeclSpec().isInlineSpecified();
if (!SemaRef.getLangOpts().CPlusPlus) {
// Determine whether the function was written with a
// prototype. This is true when:
// - there is a prototype in the declarator, or
// - the type R of the function is some kind of typedef or other non-
// attributed reference to a type name (which eventually refers to a
// function type).
bool HasPrototype =
(D.isFunctionDeclarator() && D.getFunctionTypeInfo().hasPrototype) ||
(!R->getAsAdjusted<FunctionType>() && R->isFunctionProtoType());
NewFD = FunctionDecl::Create(SemaRef.Context, DC, D.getBeginLoc(), NameInfo,
R, TInfo, SC, isInline, HasPrototype,
CSK_unspecified);
if (D.isInvalidType())
NewFD->setInvalidDecl();
return NewFD;
}
ExplicitSpecifier ExplicitSpecifier = D.getDeclSpec().getExplicitSpecifier();
ConstexprSpecKind ConstexprKind = D.getDeclSpec().getConstexprSpecifier();
// Check that the return type is not an abstract class type.
// For record types, this is done by the AbstractClassUsageDiagnoser once
// the class has been completely parsed.
if (!DC->isRecord() &&
SemaRef.RequireNonAbstractType(
D.getIdentifierLoc(), R->getAs<FunctionType>()->getReturnType(),
diag::err_abstract_type_in_decl, SemaRef.AbstractReturnType))
D.setInvalidType();
if (Name.getNameKind() == DeclarationName::CXXConstructorName) {
// This is a C++ constructor declaration.
assert(DC->isRecord() &&
"Constructors can only be declared in a member context");
R = SemaRef.CheckConstructorDeclarator(D, R, SC);
return CXXConstructorDecl::Create(
SemaRef.Context, cast<CXXRecordDecl>(DC), D.getBeginLoc(), NameInfo, R,
TInfo, ExplicitSpecifier, isInline,
/*isImplicitlyDeclared=*/false, ConstexprKind);
} else if (Name.getNameKind() == DeclarationName::CXXDestructorName) {
// This is a C++ destructor declaration.
if (DC->isRecord()) {
R = SemaRef.CheckDestructorDeclarator(D, R, SC);
CXXRecordDecl *Record = cast<CXXRecordDecl>(DC);
CXXDestructorDecl *NewDD =
CXXDestructorDecl::Create(SemaRef.Context, Record, D.getBeginLoc(),
NameInfo, R, TInfo, isInline,
/*isImplicitlyDeclared=*/false);
// If the destructor needs an implicit exception specification, set it
// now. FIXME: It'd be nice to be able to create the right type to start
// with, but the type needs to reference the destructor declaration.
if (SemaRef.getLangOpts().CPlusPlus11)
SemaRef.AdjustDestructorExceptionSpec(NewDD);
IsVirtualOkay = true;
return NewDD;
} else {
SemaRef.Diag(D.getIdentifierLoc(), diag::err_destructor_not_member);
D.setInvalidType();
// Create a FunctionDecl to satisfy the function definition parsing
// code path.
return FunctionDecl::Create(SemaRef.Context, DC, D.getBeginLoc(),
D.getIdentifierLoc(), Name, R, TInfo, SC,
isInline,
/*hasPrototype=*/true, ConstexprKind);
}
} else if (Name.getNameKind() == DeclarationName::CXXConversionFunctionName) {
if (!DC->isRecord()) {
SemaRef.Diag(D.getIdentifierLoc(),
diag::err_conv_function_not_member);
return nullptr;
}
SemaRef.CheckConversionDeclarator(D, R, SC);
IsVirtualOkay = true;
return CXXConversionDecl::Create(
SemaRef.Context, cast<CXXRecordDecl>(DC), D.getBeginLoc(), NameInfo, R,
TInfo, isInline, ExplicitSpecifier, ConstexprKind, SourceLocation());
} else if (Name.getNameKind() == DeclarationName::CXXDeductionGuideName) {
SemaRef.CheckDeductionGuideDeclarator(D, R, SC);
return CXXDeductionGuideDecl::Create(SemaRef.Context, DC, D.getBeginLoc(),
ExplicitSpecifier, NameInfo, R, TInfo,
D.getEndLoc());
} else if (DC->isRecord()) {
// If the name of the function is the same as the name of the record,
// then this must be an invalid constructor that has a return type.
// (The parser checks for a return type and makes the declarator a
// constructor if it has no return type).
if (Name.getAsIdentifierInfo() &&
Name.getAsIdentifierInfo() == cast<CXXRecordDecl>(DC)->getIdentifier()){
SemaRef.Diag(D.getIdentifierLoc(), diag::err_constructor_return_type)
<< SourceRange(D.getDeclSpec().getTypeSpecTypeLoc())
<< SourceRange(D.getIdentifierLoc());
return nullptr;
}
// This is a C++ method declaration.
CXXMethodDecl *Ret = CXXMethodDecl::Create(
SemaRef.Context, cast<CXXRecordDecl>(DC), D.getBeginLoc(), NameInfo, R,
TInfo, SC, isInline, ConstexprKind, SourceLocation());
IsVirtualOkay = !Ret->isStatic();
return Ret;
} else {
bool isFriend =
SemaRef.getLangOpts().CPlusPlus && D.getDeclSpec().isFriendSpecified();
if (!isFriend && SemaRef.CurContext->isRecord())
return nullptr;
// Determine whether the function was written with a
// prototype. This is true when:
// - we're in C++ (where every function has a prototype),
return FunctionDecl::Create(SemaRef.Context, DC, D.getBeginLoc(), NameInfo,
R, TInfo, SC, isInline, true /*HasPrototype*/,
ConstexprKind);
}
}
enum OpenCLParamType {
ValidKernelParam,
PtrPtrKernelParam,
PtrKernelParam,
InvalidAddrSpacePtrKernelParam,
InvalidKernelParam,
RecordKernelParam
};
static bool isOpenCLSizeDependentType(ASTContext &C, QualType Ty) {
// Size dependent types are just typedefs to normal integer types
// (e.g. unsigned long), so we cannot distinguish them from other integer
// typedefs except by name.
StringRef SizeTypeNames[] = {"size_t", "intptr_t", "uintptr_t", "ptrdiff_t"};
// Remove typedefs one by one until we reach a typedef
// for a size dependent type.
QualType DesugaredTy = Ty;
do {
ArrayRef<StringRef> Names(SizeTypeNames);
auto Match = llvm::find(Names, DesugaredTy.getAsString());
if (Names.end() != Match)
return true;
Ty = DesugaredTy;
DesugaredTy = Ty.getSingleStepDesugaredType(C);
} while (DesugaredTy != Ty);
return false;
}
static OpenCLParamType getOpenCLKernelParameterType(Sema &S, QualType PT) {
if (PT->isPointerType()) {
QualType PointeeType = PT->getPointeeType();
if (PointeeType->isPointerType())
return PtrPtrKernelParam;
if (PointeeType.getAddressSpace() == LangAS::opencl_generic ||
PointeeType.getAddressSpace() == LangAS::opencl_private ||
PointeeType.getAddressSpace() == LangAS::Default)
return InvalidAddrSpacePtrKernelParam;
return PtrKernelParam;
}
// OpenCL v1.2 s6.9.k:
// Arguments to kernel functions in a program cannot be declared with the
// built-in scalar types bool, half, size_t, ptrdiff_t, intptr_t, and
// uintptr_t or a struct and/or union that contain fields declared to be one
// of these built-in scalar types.
if (isOpenCLSizeDependentType(S.getASTContext(), PT))
return InvalidKernelParam;
if (PT->isImageType())
return PtrKernelParam;
if (PT->isBooleanType() || PT->isEventT() || PT->isReserveIDT())
return InvalidKernelParam;
// OpenCL extension spec v1.2 s9.5:
// This extension adds support for half scalar and vector types as built-in
// types that can be used for arithmetic operations, conversions etc.
if (!S.getOpenCLOptions().isEnabled("cl_khr_fp16") && PT->isHalfType())
return InvalidKernelParam;
if (PT->isRecordType())
return RecordKernelParam;
// Look into an array argument to check if it has a forbidden type.
if (PT->isArrayType()) {
const Type *UnderlyingTy = PT->getPointeeOrArrayElementType();
// Recurse to check the underlying element type of the array. Since
// getPointeeOrArrayElementType returns the innermost type, which is not
// an array, this recursive call happens at most once.
return getOpenCLKernelParameterType(S, QualType(UnderlyingTy, 0));
}
return ValidKernelParam;
}
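// Classification sketch (illustrative only):
//   kernel void k1(global int *p);    // PtrKernelParam: OK
//   kernel void k2(global int **pp);  // PtrPtrKernelParam: rejected
//   kernel void k3(int *p);           // InvalidAddrSpacePtrKernelParam
//   kernel void k4(size_t n);         // InvalidKernelParam (size-dependent)
//   kernel void k5(bool b);           // InvalidKernelParam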
static void checkIsValidOpenCLKernelParameter(
Sema &S,
Declarator &D,
ParmVarDecl *Param,
llvm::SmallPtrSetImpl<const Type *> &ValidTypes) {
QualType PT = Param->getType();
// Cache the valid types we encounter to avoid rechecking structs that are
// used again
if (ValidTypes.count(PT.getTypePtr()))
return;
switch (getOpenCLKernelParameterType(S, PT)) {
case PtrPtrKernelParam:
// OpenCL v1.2 s6.9.a:
// A kernel function argument cannot be declared as a
// pointer to a pointer type.
S.Diag(Param->getLocation(), diag::err_opencl_ptrptr_kernel_param);
D.setInvalidType();
return;
case InvalidAddrSpacePtrKernelParam:
// OpenCL v1.0 s6.5:
// __kernel function arguments declared to be a pointer of a type can point
// to one of the following address spaces only : __global, __local or
// __constant.
S.Diag(Param->getLocation(), diag::err_kernel_arg_address_space);
D.setInvalidType();
return;
// OpenCL v1.2 s6.9.k:
// Arguments to kernel functions in a program cannot be declared with the
// built-in scalar types bool, half, size_t, ptrdiff_t, intptr_t, and
// uintptr_t or a struct and/or union that contain fields declared to be
// one of these built-in scalar types.
case InvalidKernelParam:
// OpenCL v1.2 s6.8 n:
// A kernel function argument cannot be declared
// of event_t type.
// Do not diagnose half type since it is diagnosed as invalid argument
// type for any function elsewhere.
if (!PT->isHalfType()) {
S.Diag(Param->getLocation(), diag::err_bad_kernel_param_type) << PT;
// Explain what typedefs are involved.
const TypedefType *Typedef = nullptr;
while ((Typedef = PT->getAs<TypedefType>())) {
SourceLocation Loc = Typedef->getDecl()->getLocation();
// SourceLocation may be invalid for a built-in type.
if (Loc.isValid())
S.Diag(Loc, diag::note_entity_declared_at) << PT;
PT = Typedef->desugar();
}
}
D.setInvalidType();
return;
case PtrKernelParam:
case ValidKernelParam:
ValidTypes.insert(PT.getTypePtr());
return;
case RecordKernelParam:
break;
}
// Track nested structs we will inspect
SmallVector<const Decl *, 4> VisitStack;
// Track where we are in the nested structs. Items will migrate from
// VisitStack to HistoryStack as we do the DFS for a bad field.
SmallVector<const FieldDecl *, 4> HistoryStack;
HistoryStack.push_back(nullptr);
// At this point we already handled everything except of a RecordType or
// an ArrayType of a RecordType.
assert((PT->isArrayType() || PT->isRecordType()) && "Unexpected type.");
const RecordType *RecTy =
PT->getPointeeOrArrayElementType()->getAs<RecordType>();
const RecordDecl *OrigRecDecl = RecTy->getDecl();
VisitStack.push_back(RecTy->getDecl());
assert(VisitStack.back() && "First decl null?");
do {
const Decl *Next = VisitStack.pop_back_val();
if (!Next) {
assert(!HistoryStack.empty());
// Found a marker, we have gone up a level
if (const FieldDecl *Hist = HistoryStack.pop_back_val())
ValidTypes.insert(Hist->getType().getTypePtr());
continue;
}
// Adds everything except the original parameter declaration (which is not a
// field itself) to the history stack.
const RecordDecl *RD;
if (const FieldDecl *Field = dyn_cast<FieldDecl>(Next)) {
HistoryStack.push_back(Field);
QualType FieldTy = Field->getType();
// Other field types (known to be valid or invalid) are handled while we
// walk around RecordDecl::fields().
assert((FieldTy->isArrayType() || FieldTy->isRecordType()) &&
"Unexpected type.");
const Type *FieldRecTy = FieldTy->getPointeeOrArrayElementType();
RD = FieldRecTy->castAs<RecordType>()->getDecl();
} else {
RD = cast<RecordDecl>(Next);
}
// Add a null marker so we know when we've gone back up a level
VisitStack.push_back(nullptr);
for (const auto *FD : RD->fields()) {
QualType QT = FD->getType();
if (ValidTypes.count(QT.getTypePtr()))
continue;
OpenCLParamType ParamType = getOpenCLKernelParameterType(S, QT);
if (ParamType == ValidKernelParam)
continue;
if (ParamType == RecordKernelParam) {
VisitStack.push_back(FD);
continue;
}
// OpenCL v1.2 s6.9.p:
// Arguments to kernel functions that are declared to be a struct or union
// do not allow OpenCL objects to be passed as elements of the struct or
// union.
if (ParamType == PtrKernelParam || ParamType == PtrPtrKernelParam ||
ParamType == InvalidAddrSpacePtrKernelParam) {
S.Diag(Param->getLocation(),
diag::err_record_with_pointers_kernel_param)
<< PT->isUnionType()
<< PT;
} else {
S.Diag(Param->getLocation(), diag::err_bad_kernel_param_type) << PT;
}
S.Diag(OrigRecDecl->getLocation(), diag::note_within_field_of_type)
<< OrigRecDecl->getDeclName();
// We have an error, now let's go back up through history and show where
// the offending field came from
for (ArrayRef<const FieldDecl *>::const_iterator
I = HistoryStack.begin() + 1,
E = HistoryStack.end();
I != E; ++I) {
const FieldDecl *OuterField = *I;
S.Diag(OuterField->getLocation(), diag::note_within_field_of_type)
<< OuterField->getType();
}
S.Diag(FD->getLocation(), diag::note_illegal_field_declared_here)
<< QT->isPointerType()
<< QT;
D.setInvalidType();
return;
}
} while (!VisitStack.empty());
}
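// Example of the nested-struct walk above (illustrative):
//   struct Inner { global int *p; };
//   struct Outer { struct Inner i; };
//   kernel void k(struct Outer o);  // error: pointer field reached via
//                                   // Outer -> Inner -> p; the history
//                                   // stack produces a note per field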
/// Find the DeclContext in which a tag is implicitly declared if we see an
/// elaborated type specifier in the specified context, and lookup finds
/// nothing.
static DeclContext *getTagInjectionContext(DeclContext *DC) {
while (!DC->isFileContext() && !DC->isFunctionOrMethod())
DC = DC->getParent();
return DC;
}
/// Find the Scope in which a tag is implicitly declared if we see an
/// elaborated type specifier in the specified context, and lookup finds
/// nothing.
static Scope *getTagInjectionScope(Scope *S, const LangOptions &LangOpts) {
while (S->isClassScope() ||
(LangOpts.CPlusPlus &&
S->isFunctionPrototypeScope()) ||
((S->getFlags() & Scope::DeclScope) == 0) ||
(S->getEntity() && S->getEntity()->isTransparentContext()))
S = S->getParent();
return S;
}
NamedDecl*
Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo, LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope) {
QualType R = TInfo->getType();
assert(R->isFunctionType());
// TODO: consider using NameInfo for diagnostic.
DeclarationNameInfo NameInfo = GetNameForDeclarator(D);
DeclarationName Name = NameInfo.getName();
StorageClass SC = getFunctionStorageClass(*this, D);
if (DeclSpec::TSCS TSCS = D.getDeclSpec().getThreadStorageClassSpec())
Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(),
diag::err_invalid_thread)
<< DeclSpec::getSpecifierName(TSCS);
if (D.isFirstDeclarationOfMember())
adjustMemberFunctionCC(R, D.isStaticMember(), D.isCtorOrDtor(),
D.getIdentifierLoc());
bool isFriend = false;
FunctionTemplateDecl *FunctionTemplate = nullptr;
bool isMemberSpecialization = false;
bool isFunctionTemplateSpecialization = false;
bool isDependentClassScopeExplicitSpecialization = false;
bool HasExplicitTemplateArgs = false;
TemplateArgumentListInfo TemplateArgs;
bool isVirtualOkay = false;
DeclContext *OriginalDC = DC;
bool IsLocalExternDecl = adjustContextForLocalExternDecl(DC);
FunctionDecl *NewFD = CreateNewFunctionDecl(*this, D, DC, R, TInfo, SC,
isVirtualOkay);
if (!NewFD) return nullptr;
if (OriginalLexicalContext && OriginalLexicalContext->isObjCContainer())
NewFD->setTopLevelDeclInObjCContainer();
// Set the lexical context. If this is a function-scope declaration, or has a
// C++ scope specifier, or is the object of a friend declaration, the lexical
// context will be different from the semantic context.
NewFD->setLexicalDeclContext(CurContext);
if (IsLocalExternDecl)
NewFD->setLocalExternDecl();
if (getLangOpts().CPlusPlus) {
bool isInline = D.getDeclSpec().isInlineSpecified();
bool isVirtual = D.getDeclSpec().isVirtualSpecified();
bool hasExplicit = D.getDeclSpec().hasExplicitSpecifier();
ConstexprSpecKind ConstexprKind = D.getDeclSpec().getConstexprSpecifier();
isFriend = D.getDeclSpec().isFriendSpecified();
if (isFriend && !isInline && D.isFunctionDefinition()) {
// C++ [class.friend]p5
// A function can be defined in a friend declaration of a
// class . . . . Such a function is implicitly inline.
NewFD->setImplicitlyInline();
}
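// Illustration of [class.friend]p5 (not from this source):
//   struct S { friend void f() {} };  // 'f' is implicitly inline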
// If this is a method defined in an __interface, and is not a constructor
// or an overloaded operator, then set the pure flag (isVirtual will already
// return true).
if (const CXXRecordDecl *Parent =
dyn_cast<CXXRecordDecl>(NewFD->getDeclContext())) {
if (Parent->isInterface() && cast<CXXMethodDecl>(NewFD)->isUserProvided())
NewFD->setPure(true);
// C++ [class.union]p2
// A union can have member functions, but not virtual functions.
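// For instance:
//   union U { virtual void f(); }; // error: virtual function in a union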
if (isVirtual && Parent->isUnion())
Diag(D.getDeclSpec().getVirtualSpecLoc(), diag::err_virtual_in_union);
}
SetNestedNameSpecifier(*this, NewFD, D);
isMemberSpecialization = false;
isFunctionTemplateSpecialization = false;
if (D.isInvalidType())
NewFD->setInvalidDecl();
// Match up the template parameter lists with the scope specifier, then
// determine whether we have a template or a template specialization.
bool Invalid = false;
if (TemplateParameterList *TemplateParams =
MatchTemplateParametersToScopeSpecifier(
D.getDeclSpec().getBeginLoc(), D.getIdentifierLoc(),
D.getCXXScopeSpec(),
D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId
? D.getName().TemplateId
: nullptr,
TemplateParamLists, isFriend, isMemberSpecialization,
Invalid)) {
if (TemplateParams->size() > 0) {
// This is a function template
// Check that we can declare a template here.
if (CheckTemplateDeclScope(S, TemplateParams))
NewFD->setInvalidDecl();
// A destructor cannot be a template.
if (Name.getNameKind() == DeclarationName::CXXDestructorName) {
Diag(NewFD->getLocation(), diag::err_destructor_template);
NewFD->setInvalidDecl();
}
// If we're adding a template to a dependent context, we may need to
// rebuild some of the types used within the template parameter list,
// now that we know what the current instantiation is.
if (DC->isDependentContext()) {
ContextRAII SavedContext(*this, DC);
if (RebuildTemplateParamsInCurrentInstantiation(TemplateParams))
Invalid = true;
}
FunctionTemplate = FunctionTemplateDecl::Create(Context, DC,
NewFD->getLocation(),
Name, TemplateParams,
NewFD);
FunctionTemplate->setLexicalDeclContext(CurContext);
NewFD->setDescribedFunctionTemplate(FunctionTemplate);
// For source fidelity, store the other template param lists.
if (TemplateParamLists.size() > 1) {
NewFD->setTemplateParameterListsInfo(Context,
TemplateParamLists.drop_back(1));
}
} else {
// This is a function template specialization.
isFunctionTemplateSpecialization = true;
// For source fidelity, store all the template param lists.
if (TemplateParamLists.size() > 0)
NewFD->setTemplateParameterListsInfo(Context, TemplateParamLists);
// C++0x [temp.expl.spec]p20 forbids "template<> friend void foo(int);".
if (isFriend) {
// We want to remove the "template<>", found here.
SourceRange RemoveRange = TemplateParams->getSourceRange();
// If we remove the template<> and the name is not a
// template-id, we're actually silently creating a problem:
// the friend declaration will refer to an untemplated decl,
// and clearly the user wants a template specialization. So
// we need to insert '<>' after the name.
SourceLocation InsertLoc;
if (D.getName().getKind() != UnqualifiedIdKind::IK_TemplateId) {
InsertLoc = D.getName().getSourceRange().getEnd();
InsertLoc = getLocForEndOfToken(InsertLoc);
}
Diag(D.getIdentifierLoc(), diag::err_template_spec_decl_friend)
<< Name << RemoveRange
<< FixItHint::CreateRemoval(RemoveRange)
<< FixItHint::CreateInsertion(InsertLoc, "<>");
}
}
} else {
// All template param lists were matched against the scope specifier:
// this is NOT (an explicit specialization of) a template.
if (TemplateParamLists.size() > 0)
// For source fidelity, store all the template param lists.
NewFD->setTemplateParameterListsInfo(Context, TemplateParamLists);
}
if (Invalid) {
NewFD->setInvalidDecl();
if (FunctionTemplate)
FunctionTemplate->setInvalidDecl();
}
// C++ [dcl.fct.spec]p5:
// The virtual specifier shall only be used in declarations of
// nonstatic class member functions that appear within a
// member-specification of a class declaration; see 10.3.
//
if (isVirtual && !NewFD->isInvalidDecl()) {
if (!isVirtualOkay) {
Diag(D.getDeclSpec().getVirtualSpecLoc(),
diag::err_virtual_non_function);
} else if (!CurContext->isRecord()) {
// 'virtual' was specified outside of the class.
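// For instance:
//   struct X { virtual void f(); };
//   virtual void X::f() {} // error: 'virtual' outside the class definition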
Diag(D.getDeclSpec().getVirtualSpecLoc(),
diag::err_virtual_out_of_class)
<< FixItHint::CreateRemoval(D.getDeclSpec().getVirtualSpecLoc());
} else if (NewFD->getDescribedFunctionTemplate()) {
// C++ [temp.mem]p3:
// A member function template shall not be virtual.
Diag(D.getDeclSpec().getVirtualSpecLoc(),
diag::err_virtual_member_function_template)
<< FixItHint::CreateRemoval(D.getDeclSpec().getVirtualSpecLoc());
} else {
// Okay: Add virtual to the method.
NewFD->setVirtualAsWritten(true);
}
if (getLangOpts().CPlusPlus14 &&
NewFD->getReturnType()->isUndeducedType())
Diag(D.getDeclSpec().getVirtualSpecLoc(), diag::err_auto_fn_virtual);
}
if (getLangOpts().CPlusPlus14 &&
(NewFD->isDependentContext() ||
(isFriend && CurContext->isDependentContext())) &&
NewFD->getReturnType()->isUndeducedType()) {
// If the function template is referenced directly (for instance, as a
// member of the current instantiation), pretend it has a dependent type.
// This is not really justified by the standard, but is the only sane
// thing to do.
// FIXME: For a friend function, we have not marked the function as being
// a friend yet, so 'isDependentContext' on the FD doesn't work.
const FunctionProtoType *FPT =
NewFD->getType()->castAs<FunctionProtoType>();
QualType Result =
SubstAutoType(FPT->getReturnType(), Context.DependentTy);
NewFD->setType(Context.getFunctionType(Result, FPT->getParamTypes(),
FPT->getExtProtoInfo()));
}
// C++ [dcl.fct.spec]p3:
// The inline specifier shall not appear on a block scope function
// declaration.
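// For instance:
//   void g() { inline void f(); } // error: 'inline' on a block scope decl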
if (isInline && !NewFD->isInvalidDecl()) {
if (CurContext->isFunctionOrMethod()) {
// 'inline' is not allowed on block scope function declaration.
Diag(D.getDeclSpec().getInlineSpecLoc(),
diag::err_inline_declaration_block_scope) << Name
<< FixItHint::CreateRemoval(D.getDeclSpec().getInlineSpecLoc());
}
}
// C++ [dcl.fct.spec]p6:
// The explicit specifier shall be used only in the declaration of a
// constructor or conversion function within its class definition;
// see 12.3.1 and 12.3.2.
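// For instance:
//   struct A { explicit A(int); }; // OK
//   explicit A::A(int) {}          // error: 'explicit' outside the class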
if (hasExplicit && !NewFD->isInvalidDecl() &&
!isa<CXXDeductionGuideDecl>(NewFD)) {
if (!CurContext->isRecord()) {
// 'explicit' was specified outside of the class.
Diag(D.getDeclSpec().getExplicitSpecLoc(),
diag::err_explicit_out_of_class)
<< FixItHint::CreateRemoval(D.getDeclSpec().getExplicitSpecRange());
} else if (!isa<CXXConstructorDecl>(NewFD) &&
!isa<CXXConversionDecl>(NewFD)) {
// 'explicit' was specified on a function that wasn't a constructor
// or conversion function.
Diag(D.getDeclSpec().getExplicitSpecLoc(),
diag::err_explicit_non_ctor_or_conv_function)
<< FixItHint::CreateRemoval(D.getDeclSpec().getExplicitSpecRange());
}
}
if (ConstexprKind != CSK_unspecified) {
// C++11 [dcl.constexpr]p2: constexpr functions and constexpr constructors
// are implicitly inline.
NewFD->setImplicitlyInline();
// C++11 [dcl.constexpr]p3: functions declared constexpr are required either
// to be constructors or to return a literal type. Therefore,
// destructors cannot be declared constexpr.
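// For instance (prior to C++20's constexpr destructors):
//   struct A { constexpr ~A(); }; // error: destructor cannot be constexpr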
if (isa<CXXDestructorDecl>(NewFD))
Diag(D.getDeclSpec().getConstexprSpecLoc(), diag::err_constexpr_dtor)
<< (ConstexprKind == CSK_consteval);
}
// If __module_private__ was specified, mark the function accordingly.
if (D.getDeclSpec().isModulePrivateSpecified()) {
if (isFunctionTemplateSpecialization) {
SourceLocation ModulePrivateLoc
= D.getDeclSpec().getModulePrivateSpecLoc();
Diag(ModulePrivateLoc, diag::err_module_private_specialization)
<< 0
<< FixItHint::CreateRemoval(ModulePrivateLoc);
} else {
NewFD->setModulePrivate();
if (FunctionTemplate)
FunctionTemplate->setModulePrivate();
}
}
if (isFriend) {
if (FunctionTemplate) {
FunctionTemplate->setObjectOfFriendDecl();
FunctionTemplate->setAccess(AS_public);
}
NewFD->setObjectOfFriendDecl();
NewFD->setAccess(AS_public);
}
// If a function is defined as defaulted or deleted, mark it as such now.
// FIXME: Does this ever happen? ActOnStartOfFunctionDef forces the function
// definition kind to FDK_Definition.
switch (D.getFunctionDefinitionKind()) {
case FDK_Declaration:
case FDK_Definition:
break;
case FDK_Defaulted:
NewFD->setDefaulted();
break;
case FDK_Deleted:
NewFD->setDeletedAsWritten();
break;
}
if (isa<CXXMethodDecl>(NewFD) && DC == CurContext &&
D.isFunctionDefinition()) {
// C++ [class.mfct]p2:
// A member function may be defined (8.4) in its class definition, in
// which case it is an inline member function (7.1.2)
NewFD->setImplicitlyInline();
}
if (SC == SC_Static && isa<CXXMethodDecl>(NewFD) &&
!CurContext->isRecord()) {
// C++ [class.static]p1:
// A data or function member of a class may be declared static
// in a class definition, in which case it is a static member of
// the class.
// Complain about the 'static' specifier if it's on an out-of-line
// member function definition.
// MSVC permits the use of a 'static' storage specifier on an out-of-line
// member function template declaration and class member template
// declaration (MSVC versions before 2015); warn about this.
Diag(D.getDeclSpec().getStorageClassSpecLoc(),
((!getLangOpts().isCompatibleWithMSVC(LangOptions::MSVC2015) &&
cast<CXXRecordDecl>(DC)->getDescribedClassTemplate()) ||
(getLangOpts().MSVCCompat && NewFD->getDescribedFunctionTemplate()))
? diag::ext_static_out_of_line : diag::err_static_out_of_line)
<< FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc());
}
// C++11 [except.spec]p15:
// A deallocation function with no exception-specification is treated
// as if it were specified with noexcept(true).
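// For instance, the declaration
//   void operator delete(void *);
// is treated as if it had been written
//   void operator delete(void *) noexcept;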
const FunctionProtoType *FPT = R->getAs<FunctionProtoType>();
if ((Name.getCXXOverloadedOperator() == OO_Delete ||
Name.getCXXOverloadedOperator() == OO_Array_Delete) &&
getLangOpts().CPlusPlus11 && FPT && !FPT->hasExceptionSpec())
NewFD->setType(Context.getFunctionType(
FPT->getReturnType(), FPT->getParamTypes(),
FPT->getExtProtoInfo().withExceptionSpec(EST_BasicNoexcept)));
}
// Filter out previous declarations that don't match the scope.
FilterLookupForScope(Previous, OriginalDC, S, shouldConsiderLinkage(NewFD),
D.getCXXScopeSpec().isNotEmpty() ||
isMemberSpecialization ||
isFunctionTemplateSpecialization);
// Handle GNU asm-label extension (encoded as an attribute).
if (Expr *E = (Expr*) D.getAsmLabel()) {
// The parser guarantees this is a string.
StringLiteral *SE = cast<StringLiteral>(E);
NewFD->addAttr(::new (Context) AsmLabelAttr(SE->getStrTokenLoc(0), Context,
SE->getString(), 0));
} else if (!ExtnameUndeclaredIdentifiers.empty()) {
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*>::iterator I =
ExtnameUndeclaredIdentifiers.find(NewFD->getIdentifier());
if (I != ExtnameUndeclaredIdentifiers.end()) {
if (isDeclExternC(NewFD)) {
NewFD->addAttr(I->second);
ExtnameUndeclaredIdentifiers.erase(I);
} else
Diag(NewFD->getLocation(), diag::warn_redefine_extname_not_applied)
<< /*Variable*/0 << NewFD;
}
}
// Copy the parameter declarations from the declarator D to the function
// declaration NewFD, if they are available. First scavenge them into Params.
SmallVector<ParmVarDecl*, 16> Params;
unsigned FTIIdx;
if (D.isFunctionDeclarator(FTIIdx)) {
DeclaratorChunk::FunctionTypeInfo &FTI = D.getTypeObject(FTIIdx).Fun;
// Check for C99 6.7.5.3p10 - foo(void) is a non-varargs
// function that takes no arguments, not a function that takes a
// single void argument.
// We let through "const void" here because Sema::GetTypeForDeclarator
// already checks for that case.
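// For instance, in C:
//   void f(void); // a function with no parameters, not one 'void' parameter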
if (FTIHasNonVoidParameters(FTI) && FTI.Params[0].Param) {
for (unsigned i = 0, e = FTI.NumParams; i != e; ++i) {
ParmVarDecl *Param = cast<ParmVarDecl>(FTI.Params[i].Param);
assert(Param->getDeclContext() != NewFD && "Was set before?");
Param->setDeclContext(NewFD);
Params.push_back(Param);
if (Param->isInvalidDecl())
NewFD->setInvalidDecl();
}
}
if (!getLangOpts().CPlusPlus) {
// In C, find all the tag declarations from the prototype and move them
// into the function DeclContext. Remove them from the surrounding tag
// injection context of the function, which is typically but not always
// the TU.
DeclContext *PrototypeTagContext =
getTagInjectionContext(NewFD->getLexicalDeclContext());
for (NamedDecl *NonParmDecl : FTI.getDeclsInPrototype()) {
auto *TD = dyn_cast<TagDecl>(NonParmDecl);
// We don't want to reparent enumerators. Look at their parent enum
// instead.
if (!TD) {
if (auto *ECD = dyn_cast<EnumConstantDecl>(NonParmDecl))
TD = cast<EnumDecl>(ECD->getDeclContext());
}
if (!TD)
continue;
DeclContext *TagDC = TD->getLexicalDeclContext();
if (!TagDC->containsDecl(TD))
continue;
TagDC->removeDecl(TD);
TD->setDeclContext(NewFD);
NewFD->addDecl(TD);
// Preserve the lexical DeclContext if it is not the surrounding tag
// injection context of the FD. In this example, the semantic context of
// E will be f and the lexical context will be S, while both the
// semantic and lexical contexts of S will be f:
// void f(struct S { enum E { a } f; } s);
if (TagDC != PrototypeTagContext)
TD->setLexicalDeclContext(TagDC);
}
}
} else if (const FunctionProtoType *FT = R->getAs<FunctionProtoType>()) {
// When we're declaring a function with a typedef, typeof, etc. as in the
// following example, we'll need to synthesize (unnamed)
// parameters for use in the declaration.
//
// @code
// typedef void fn(int);
// fn f;
// @endcode
// Synthesize a parameter for each argument type.
for (const auto &AI : FT->param_types()) {
ParmVarDecl *Param =
BuildParmVarDeclForTypedef(NewFD, D.getIdentifierLoc(), AI);
Param->setScopeInfo(0, Params.size());
Params.push_back(Param);
}
} else {
assert(R->isFunctionNoProtoType() && NewFD->getNumParams() == 0 &&
"Should not need args for typedef of non-prototype fn");
}
// Finally, we know we have the right number of parameters, install them.
NewFD->setParams(Params);
if (D.getDeclSpec().isNoreturnSpecified())
NewFD->addAttr(
::new(Context) C11NoReturnAttr(D.getDeclSpec().getNoreturnSpecLoc(),
Context, 0));
// Functions returning a variably modified type violate C99 6.7.5.2p2
// because all functions have linkage.
if (!NewFD->isInvalidDecl() &&
NewFD->getReturnType()->isVariablyModifiedType()) {
Diag(NewFD->getLocation(), diag::err_vm_func_decl);
NewFD->setInvalidDecl();
}
// Apply an implicit SectionAttr if '#pragma clang section text' is active
if (PragmaClangTextSection.Valid && D.isFunctionDefinition() &&
!NewFD->hasAttr<SectionAttr>()) {
NewFD->addAttr(PragmaClangTextSectionAttr::CreateImplicit(Context,
PragmaClangTextSection.SectionName,
PragmaClangTextSection.PragmaLocation));
}
// Apply an implicit SectionAttr if #pragma code_seg is active.
if (CodeSegStack.CurrentValue && D.isFunctionDefinition() &&
!NewFD->hasAttr<SectionAttr>()) {
NewFD->addAttr(
SectionAttr::CreateImplicit(Context, SectionAttr::Declspec_allocate,
CodeSegStack.CurrentValue->getString(),
CodeSegStack.CurrentPragmaLocation));
if (UnifySection(CodeSegStack.CurrentValue->getString(),
ASTContext::PSF_Implicit | ASTContext::PSF_Execute |
ASTContext::PSF_Read,
NewFD))
NewFD->dropAttr<SectionAttr>();
}
// Apply an implicit CodeSegAttr from class declspec or
// apply an implicit SectionAttr from #pragma code_seg if active.
if (!NewFD->hasAttr<CodeSegAttr>()) {
if (Attr *SAttr = getImplicitCodeSegOrSectionAttrForFunction(NewFD,
D.isFunctionDefinition())) {
NewFD->addAttr(SAttr);
}
}
// Handle attributes.
ProcessDeclAttributes(S, NewFD, D);
if (getLangOpts().OpenCL) {
// OpenCL v1.1 s6.5: Using an address space qualifier in a function return
// type declaration will generate a compilation error.
LangAS AddressSpace = NewFD->getReturnType().getAddressSpace();
if (AddressSpace != LangAS::Default) {
Diag(NewFD->getLocation(),
diag::err_opencl_return_value_with_address_space);
NewFD->setInvalidDecl();
}
}
if (!getLangOpts().CPlusPlus) {
// Perform semantic checking on the function declaration.
if (!NewFD->isInvalidDecl() && NewFD->isMain())
CheckMain(NewFD, D.getDeclSpec());
if (!NewFD->isInvalidDecl() && NewFD->isMSVCRTEntryPoint())
CheckMSVCRTEntryPoint(NewFD);
if (!NewFD->isInvalidDecl())
D.setRedeclaration(CheckFunctionDeclaration(S, NewFD, Previous,
isMemberSpecialization));
else if (!Previous.empty())
// Recover gracefully from an invalid redeclaration.
D.setRedeclaration(true);
assert((NewFD->isInvalidDecl() || !D.isRedeclaration() ||
Previous.getResultKind() != LookupResult::FoundOverloaded) &&
"previous declaration set still overloaded");
// Diagnose no-prototype function declarations with calling conventions that
// don't support variadic calls. Only do this in C and do it after merging
// possibly prototyped redeclarations.
const FunctionType *FT = NewFD->getType()->castAs<FunctionType>();
if (isa<FunctionNoProtoType>(FT) && !D.isFunctionDefinition()) {
CallingConv CC = FT->getExtInfo().getCC();
if (!supportsVariadicCall(CC)) {
// Windows system headers sometimes accidentally use stdcall without
// (void) parameters, so we relax this to a warning.
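// For instance, on x86 in C:
//   void __stdcall f();  // warning: K&R-style stdcall declaration
//   void __fastcall g(); // error: fastcall requires a prototype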
int DiagID =
CC == CC_X86StdCall ? diag::warn_cconv_knr : diag::err_cconv_knr;
Diag(NewFD->getLocation(), DiagID)
<< FunctionType::getNameForCallConv(CC);
}
}
if (NewFD->getReturnType().hasNonTrivialToPrimitiveDestructCUnion() ||
NewFD->getReturnType().hasNonTrivialToPrimitiveCopyCUnion())
checkNonTrivialCUnion(NewFD->getReturnType(),
NewFD->getReturnTypeSourceRange().getBegin(),
NTCUC_FunctionReturn, NTCUK_Destruct|NTCUK_Copy);
} else {
// C++11 [replacement.functions]p3:
// The program's definitions shall not be specified as inline.
//
// N.B. We diagnose declarations instead of definitions per LWG issue 2340.
//
// Suppress the diagnostic if the function is __attribute__((used)), since
// that forces an external definition to be emitted.
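// For instance:
//   inline void *operator new(std::size_t); // warned, unless marked 'used'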
if (D.getDeclSpec().isInlineSpecified() &&
NewFD->isReplaceableGlobalAllocationFunction() &&
!NewFD->hasAttr<UsedAttr>())
Diag(D.getDeclSpec().getInlineSpecLoc(),
diag::ext_operator_new_delete_declared_inline)
<< NewFD->getDeclName();
// If the declarator is a template-id, translate the parser's template
// argument list into our AST format.
if (D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId) {
TemplateIdAnnotation *TemplateId = D.getName().TemplateId;
TemplateArgs.setLAngleLoc(TemplateId->LAngleLoc);
TemplateArgs.setRAngleLoc(TemplateId->RAngleLoc);
ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
TemplateId->NumArgs);
translateTemplateArguments(TemplateArgsPtr,
TemplateArgs);
HasExplicitTemplateArgs = true;
if (NewFD->isInvalidDecl()) {
HasExplicitTemplateArgs = false;
} else if (FunctionTemplate) {
// Function template with explicit template arguments.
Diag(D.getIdentifierLoc(), diag::err_function_template_partial_spec)
<< SourceRange(TemplateId->LAngleLoc, TemplateId->RAngleLoc);
HasExplicitTemplateArgs = false;
} else {
assert((isFunctionTemplateSpecialization ||
D.getDeclSpec().isFriendSpecified()) &&
"should have a 'template<>' for this decl");
// "friend void foo<>(int);" is an implicit specialization decl.
isFunctionTemplateSpecialization = true;
}
} else if (isFriend && isFunctionTemplateSpecialization) {
// This combination is only possible in a recovery case; the user
// wrote something like:
// template <> friend void foo(int);
// which we're recovering from as if the user had written:
// friend void foo<>(int);
// Go ahead and fake up a template id.
HasExplicitTemplateArgs = true;
TemplateArgs.setLAngleLoc(D.getIdentifierLoc());
TemplateArgs.setRAngleLoc(D.getIdentifierLoc());
}
// We do not add HD attributes to specializations here because
// they may have different constexpr-ness compared to their
// templates and, after maybeAddCUDAHostDeviceAttrs() is applied,
// may end up with different effective targets. Instead, a
// specialization inherits its target attributes from its template
// in the CheckFunctionTemplateSpecialization() call below.
if (getLangOpts().CUDA && !isFunctionTemplateSpecialization)
maybeAddCUDAHostDeviceAttrs(NewFD, Previous);
// If it's a friend (and only if it's a friend), it's possible
// that either the specialized function type or the specialized
// template is dependent, and therefore matching will fail. In
// this case, don't check the specialization yet.
bool InstantiationDependent = false;
if (isFunctionTemplateSpecialization && isFriend &&
(NewFD->getType()->isDependentType() || DC->isDependentContext() ||
TemplateSpecializationType::anyDependentTemplateArguments(
TemplateArgs,
InstantiationDependent))) {
assert(HasExplicitTemplateArgs &&
"friend function specialization without template args");
if (CheckDependentFunctionTemplateSpecialization(NewFD, TemplateArgs,
Previous))
NewFD->setInvalidDecl();
} else if (isFunctionTemplateSpecialization) {
if (CurContext->isDependentContext() && CurContext->isRecord()
&& !isFriend) {
isDependentClassScopeExplicitSpecialization = true;
} else if (!NewFD->isInvalidDecl() &&
CheckFunctionTemplateSpecialization(
NewFD, (HasExplicitTemplateArgs ? &TemplateArgs : nullptr),
Previous))
NewFD->setInvalidDecl();
// C++ [dcl.stc]p1:
// A storage-class-specifier shall not be specified in an explicit
// specialization (14.7.3)
FunctionTemplateSpecializationInfo *Info =
NewFD->getTemplateSpecializationInfo();
if (Info && SC != SC_None) {
if (SC != Info->getTemplate()->getTemplatedDecl()->getStorageClass())
Diag(NewFD->getLocation(),
diag::err_explicit_specialization_inconsistent_storage_class)
<< SC
<< FixItHint::CreateRemoval(
D.getDeclSpec().getStorageClassSpecLoc());
else
Diag(NewFD->getLocation(),
diag::ext_explicit_specialization_storage_class)
<< FixItHint::CreateRemoval(
D.getDeclSpec().getStorageClassSpecLoc());
}
} else if (isMemberSpecialization && isa<CXXMethodDecl>(NewFD)) {
if (CheckMemberSpecialization(NewFD, Previous))
NewFD->setInvalidDecl();
}
// Perform semantic checking on the function declaration.
if (!isDependentClassScopeExplicitSpecialization) {
if (!NewFD->isInvalidDecl() && NewFD->isMain())
CheckMain(NewFD, D.getDeclSpec());
if (!NewFD->isInvalidDecl() && NewFD->isMSVCRTEntryPoint())
CheckMSVCRTEntryPoint(NewFD);
if (!NewFD->isInvalidDecl())
D.setRedeclaration(CheckFunctionDeclaration(S, NewFD, Previous,
isMemberSpecialization));
else if (!Previous.empty())
// Recover gracefully from an invalid redeclaration.
D.setRedeclaration(true);
}
assert((NewFD->isInvalidDecl() || !D.isRedeclaration() ||
Previous.getResultKind() != LookupResult::FoundOverloaded) &&
"previous declaration set still overloaded");
NamedDecl *PrincipalDecl = (FunctionTemplate
? cast<NamedDecl>(FunctionTemplate)
: NewFD);
if (isFriend && NewFD->getPreviousDecl()) {
AccessSpecifier Access = AS_public;
if (!NewFD->isInvalidDecl())
Access = NewFD->getPreviousDecl()->getAccess();
NewFD->setAccess(Access);
if (FunctionTemplate) FunctionTemplate->setAccess(Access);
}
if (NewFD->isOverloadedOperator() && !DC->isRecord() &&
PrincipalDecl->isInIdentifierNamespace(Decl::IDNS_Ordinary))
PrincipalDecl->setNonMemberOperator();
// If we have a function template, check the template parameter
// list. This will check and merge default template arguments.
if (FunctionTemplate) {
FunctionTemplateDecl *PrevTemplate =
FunctionTemplate->getPreviousDecl();
CheckTemplateParameterList(FunctionTemplate->getTemplateParameters(),
PrevTemplate ? PrevTemplate->getTemplateParameters()
: nullptr,
D.getDeclSpec().isFriendSpecified()
? (D.isFunctionDefinition()
? TPC_FriendFunctionTemplateDefinition
: TPC_FriendFunctionTemplate)
: (D.getCXXScopeSpec().isSet() &&
DC && DC->isRecord() &&
DC->isDependentContext())
? TPC_ClassTemplateMember
: TPC_FunctionTemplate);
}
if (NewFD->isInvalidDecl()) {
// Ignore all the rest of this.
} else if (!D.isRedeclaration()) {
struct ActOnFDArgs ExtraArgs = { S, D, TemplateParamLists,
AddToScope };
// Fake up an access specifier if it's supposed to be a class member.
if (isa<CXXRecordDecl>(NewFD->getDeclContext()))
NewFD->setAccess(AS_public);
// Qualified decls generally require a previous declaration.
if (D.getCXXScopeSpec().isSet()) {
// ...with the major exception of templated-scope or
// dependent-scope friend declarations.
// TODO: we currently also suppress this check in dependent
// contexts because (1) the parameter depth will be off when
// matching friend templates and (2) we might actually be
// selecting a friend based on a dependent factor. But there
// are situations where these conditions don't apply and we
// can actually do this check immediately.
//
// Unless the scope is dependent, it's always an error if qualified
// redeclaration lookup found nothing at all. Diagnose that now;
// nothing will diagnose that error later.
if (isFriend &&
(D.getCXXScopeSpec().getScopeRep()->isDependent() ||
(!Previous.empty() && CurContext->isDependentContext()))) {
// ignore these
} else {
// The user tried to provide an out-of-line definition for a
// function that is a member of a class or namespace, but there
// was no such member function declared (C++ [class.mfct]p2,
// C++ [namespace.memdef]p2). For example:
//
// class X {
// void f() const;
// };
//
// void X::f() { } // ill-formed
//
// Complain about this problem, and attempt to suggest close
// matches (e.g., those that differ only in cv-qualifiers and
// whether the parameter types are references).
if (NamedDecl *Result = DiagnoseInvalidRedeclaration(
*this, Previous, NewFD, ExtraArgs, false, nullptr)) {
AddToScope = ExtraArgs.AddToScope;
return Result;
}
}
// Unqualified local friend declarations are required to resolve
// to something.
} else if (isFriend && cast<CXXRecordDecl>(CurContext)->isLocalClass()) {
if (NamedDecl *Result = DiagnoseInvalidRedeclaration(
*this, Previous, NewFD, ExtraArgs, true, S)) {
AddToScope = ExtraArgs.AddToScope;
return Result;
}
}
} else if (!D.isFunctionDefinition() &&
isa<CXXMethodDecl>(NewFD) && NewFD->isOutOfLine() &&
!isFriend && !isFunctionTemplateSpecialization &&
!isMemberSpecialization) {
// An out-of-line member function declaration must also be a
// definition (C++ [class.mfct]p2).
// Note that this is not the case for explicit specializations of
// function templates or member functions of class templates, per
// C++ [temp.expl.spec]p2. We also allow these declarations as an
// extension for compatibility with old SWIG code which likes to
// generate them.
Diag(NewFD->getLocation(), diag::ext_out_of_line_declaration)
<< D.getCXXScopeSpec().getRange();
}
}
ProcessPragmaWeak(S, NewFD);
checkAttributesAfterMerging(*this, *NewFD);
AddKnownFunctionAttributes(NewFD);
if (NewFD->hasAttr<OverloadableAttr>() &&
!NewFD->getType()->getAs<FunctionProtoType>()) {
Diag(NewFD->getLocation(),
diag::err_attribute_overloadable_no_prototype)
<< NewFD;
// Turn this into a variadic function with no parameters.
const FunctionType *FT = NewFD->getType()->getAs<FunctionType>();
FunctionProtoType::ExtProtoInfo EPI(
Context.getDefaultCallingConvention(true, false));
EPI.Variadic = true;
EPI.ExtInfo = FT->getExtInfo();
QualType R = Context.getFunctionType(FT->getReturnType(), None, EPI);
NewFD->setType(R);
}
// If there's a #pragma GCC visibility in scope, and this isn't a class
// member, set the visibility of this function.
if (!DC->isRecord() && NewFD->isExternallyVisible())
AddPushedVisibilityAttribute(NewFD);
// If there's a #pragma clang arc_cf_code_audited in scope, consider
// marking the function.
AddCFAuditedAttribute(NewFD);
// If this is a function definition, check if we have to apply optnone due to
// a pragma.
if (D.isFunctionDefinition())
AddRangeBasedOptnone(NewFD);
// If this is the first declaration of an extern C variable, update
// the map of such variables.
if (NewFD->isFirstDecl() && !NewFD->isInvalidDecl() &&
isIncompleteDeclExternC(*this, NewFD))
RegisterLocallyScopedExternCDecl(NewFD, S);
// Set this FunctionDecl's range up to the right paren.
NewFD->setRangeEnd(D.getSourceRange().getEnd());
if (D.isRedeclaration() && !Previous.empty()) {
NamedDecl *Prev = Previous.getRepresentativeDecl();
checkDLLAttributeRedeclaration(*this, Prev, NewFD,
isMemberSpecialization ||
isFunctionTemplateSpecialization,
D.isFunctionDefinition());
}
if (getLangOpts().CUDA) {
IdentifierInfo *II = NewFD->getIdentifier();
if (II && II->isStr(getCudaConfigureFuncName()) &&
!NewFD->isInvalidDecl() &&
NewFD->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
if (!R->getAs<FunctionType>()->getReturnType()->isScalarType())
Diag(NewFD->getLocation(), diag::err_config_scalar_return)
<< getCudaConfigureFuncName();
Context.setcudaConfigureCallDecl(NewFD);
}
// Variadic functions, other than a *declaration* of printf, are not allowed
// in device-side CUDA code, unless someone passed
// -fcuda-allow-variadic-functions.
if (!getLangOpts().CUDAAllowVariadicFunctions && NewFD->isVariadic() &&
(NewFD->hasAttr<CUDADeviceAttr>() ||
NewFD->hasAttr<CUDAGlobalAttr>()) &&
!(II && II->isStr("printf") && NewFD->isExternC() &&
!D.isFunctionDefinition())) {
Diag(NewFD->getLocation(), diag::err_variadic_device_fn);
}
}
MarkUnusedFileScopedDecl(NewFD);
if (getLangOpts().OpenCL && NewFD->hasAttr<OpenCLKernelAttr>()) {
// OpenCL v1.2 s6.8 static is invalid for kernel functions.
if ((getLangOpts().OpenCLVersion >= 120)
&& (SC == SC_Static)) {
Diag(D.getIdentifierLoc(), diag::err_static_kernel);
D.setInvalidType();
}
// OpenCL v1.2, s6.9 -- Kernels can only have return type void.
if (!NewFD->getReturnType()->isVoidType()) {
SourceRange RTRange = NewFD->getReturnTypeSourceRange();
Diag(D.getIdentifierLoc(), diag::err_expected_kernel_void_return_type)
<< (RTRange.isValid() ? FixItHint::CreateReplacement(RTRange, "void")
: FixItHint());
D.setInvalidType();
}
llvm::SmallPtrSet<const Type *, 16> ValidTypes;
for (auto Param : NewFD->parameters())
checkIsValidOpenCLKernelParameter(*this, D, Param, ValidTypes);
if (getLangOpts().OpenCLCPlusPlus) {
if (DC->isRecord()) {
Diag(D.getIdentifierLoc(), diag::err_method_kernel);
D.setInvalidType();
}
if (FunctionTemplate) {
Diag(D.getIdentifierLoc(), diag::err_template_kernel);
D.setInvalidType();
}
}
}
if (getLangOpts().CPlusPlus) {
if (FunctionTemplate) {
if (NewFD->isInvalidDecl())
FunctionTemplate->setInvalidDecl();
return FunctionTemplate;
}
if (isMemberSpecialization && !NewFD->isInvalidDecl())
CompleteMemberSpecialization(NewFD, Previous);
}
for (const ParmVarDecl *Param : NewFD->parameters()) {
QualType PT = Param->getType();
// OpenCL 2.0 pipe restrictions forbid pipe packet types from being
// non-value types.
if (getLangOpts().OpenCLVersion >= 200 || getLangOpts().OpenCLCPlusPlus) {
if (const PipeType *PipeTy = PT->getAs<PipeType>()) {
QualType ElemTy = PipeTy->getElementType();
if (ElemTy->isReferenceType() || ElemTy->isPointerType()) {
Diag(Param->getTypeSpecStartLoc(), diag::err_reference_pipe_type);
D.setInvalidType();
}
}
}
}
// Here we have a function template explicit specialization at class scope.
// The actual specialization will be postponed to template instantiation
// time via the ClassScopeFunctionSpecializationDecl node.
if (isDependentClassScopeExplicitSpecialization) {
ClassScopeFunctionSpecializationDecl *NewSpec =
ClassScopeFunctionSpecializationDecl::Create(
Context, CurContext, NewFD->getLocation(),
cast<CXXMethodDecl>(NewFD),
HasExplicitTemplateArgs, TemplateArgs);
CurContext->addDecl(NewSpec);
AddToScope = false;
}
// Diagnose availability attributes. Availability cannot be used on functions
// that are run during load/unload.
if (const auto *attr = NewFD->getAttr<AvailabilityAttr>()) {
if (NewFD->hasAttr<ConstructorAttr>()) {
Diag(attr->getLocation(), diag::warn_availability_on_static_initializer)
<< 1;
NewFD->dropAttr<AvailabilityAttr>();
}
if (NewFD->hasAttr<DestructorAttr>()) {
Diag(attr->getLocation(), diag::warn_availability_on_static_initializer)
<< 2;
NewFD->dropAttr<AvailabilityAttr>();
}
}
return NewFD;
}
/// Return a CodeSegAttr from a containing class. The Microsoft docs say
/// when __declspec(code_seg) "is applied to a class, all member functions of
/// the class and nested classes -- this includes compiler-generated special
/// member functions -- are put in the specified segment."
/// The actual behavior is a little more complicated. The Microsoft compiler
/// won't check outer classes if there is an active value from #pragma code_seg.
/// The CodeSeg is always applied from the direct parent but only from outer
/// classes when the #pragma code_seg stack is empty. See:
/// https://reviews.llvm.org/D22931; the Microsoft feedback page it cites is
/// no longer available, since MS has removed the page.
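/// For instance:
///   struct __declspec(code_seg("seg1")) S { void f() {} };
/// places S::f (and any compiler-generated special members) in segment
/// "seg1".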
static Attr *getImplicitCodeSegAttrFromClass(Sema &S, const FunctionDecl *FD) {
const auto *Method = dyn_cast<CXXMethodDecl>(FD);
if (!Method)
return nullptr;
const CXXRecordDecl *Parent = Method->getParent();
if (const auto *SAttr = Parent->getAttr<CodeSegAttr>()) {
Attr *NewAttr = SAttr->clone(S.getASTContext());
NewAttr->setImplicit(true);
return NewAttr;
}
// The Microsoft compiler won't check outer classes for the CodeSeg
// when the #pragma code_seg stack is active.
if (S.CodeSegStack.CurrentValue)
return nullptr;
while ((Parent = dyn_cast<CXXRecordDecl>(Parent->getParent()))) {
if (const auto *SAttr = Parent->getAttr<CodeSegAttr>()) {
Attr *NewAttr = SAttr->clone(S.getASTContext());
NewAttr->setImplicit(true);
return NewAttr;
}
}
return nullptr;
}
/// Returns an implicit CodeSegAttr if a __declspec(code_seg) is found on a
/// containing class. Otherwise it will return an implicit SectionAttr if the
/// function is a definition and there is an active value on CodeSegStack
/// (from the current #pragma code_seg value).
///
/// \param FD Function being declared.
/// \param IsDefinition Whether it is a definition or just a declaration.
/// \returns A CodeSegAttr or SectionAttr to apply to the function or
/// nullptr if no attribute should be added.
Attr *Sema::getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition) {
if (Attr *A = getImplicitCodeSegAttrFromClass(*this, FD))
return A;
if (!FD->hasAttr<SectionAttr>() && IsDefinition &&
CodeSegStack.CurrentValue) {
return SectionAttr::CreateImplicit(getASTContext(),
SectionAttr::Declspec_allocate,
CodeSegStack.CurrentValue->getString(),
CodeSegStack.CurrentPragmaLocation);
}
return nullptr;
}
/// Determines if we can perform a correct type check for \p NewD as a
/// redeclaration of \p OldD. If not, we can generally still perform a
/// best-effort check.
///
/// \param NewD The new declaration.
/// \param OldD The old declaration.
/// \param NewT The portion of the type of the new declaration to check.
/// \param OldT The portion of the type of the old declaration to check.
bool Sema::canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT) {
if (!NewD->getLexicalDeclContext()->isDependentContext())
return true;
// For dependently-typed local extern declarations and friends, we can't
// perform a correct type check in general until instantiation:
//
// int f();
// template<typename T> void g() { T f(); }
//
// (valid if g() is only instantiated with T = int).
if (NewT->isDependentType() &&
(NewD->isLocalExternDecl() || NewD->getFriendObjectKind()))
return false;
// Similarly, if the previous declaration was a dependent local extern
// declaration, we don't really know its type yet.
if (OldT->isDependentType() && OldD->isLocalExternDecl())
return false;
return true;
}
/// Checks if the new declaration declared in dependent context must be
/// put in the same redeclaration chain as the specified declaration.
///
/// \param D Declaration that is checked.
/// \param PrevDecl Previous declaration found with proper lookup method for the
/// same declaration name.
/// \returns True if D must be added to the redeclaration chain which PrevDecl
/// belongs to.
///
bool Sema::shouldLinkDependentDeclWithPrevious(Decl *D, Decl *PrevDecl) {
if (!D->getLexicalDeclContext()->isDependentContext())
return true;
// Don't chain dependent friend function definitions until instantiation, to
// permit cases like
//
// void func();
// template<typename T> class C1 { friend void func() {} };
// template<typename T> class C2 { friend void func() {} };
//
// ... which is valid if only one of C1 and C2 is ever instantiated.
//
// FIXME: This need only apply to function definitions. For now, we proxy
// this by checking for a file-scope function. We do not want this to apply
// to friend declarations nominating member functions, because that gets in
// the way of access checks.
if (D->getFriendObjectKind() && D->getDeclContext()->isFileContext())
return false;
auto *VD = dyn_cast<ValueDecl>(D);
auto *PrevVD = dyn_cast<ValueDecl>(PrevDecl);
return !VD || !PrevVD ||
canFullyTypeCheckRedeclaration(VD, PrevVD, VD->getType(),
PrevVD->getType());
}
/// Check the target attribute of the function for MultiVersion
/// validity.
///
/// Returns true if there was an error, false otherwise.
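/// For instance, when multiversioning:
///   __attribute__((target("avx2")))   void f(); // OK: known feature
///   __attribute__((target("no-sse"))) void f(); // error: negated feature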
static bool CheckMultiVersionValue(Sema &S, const FunctionDecl *FD) {
const auto *TA = FD->getAttr<TargetAttr>();
assert(TA && "MultiVersion Candidate requires a target attribute");
TargetAttr::ParsedTargetAttr ParseInfo = TA->parse();
const TargetInfo &TargetInfo = S.Context.getTargetInfo();
enum ErrType { Feature = 0, Architecture = 1 };
if (!ParseInfo.Architecture.empty() &&
!TargetInfo.validateCpuIs(ParseInfo.Architecture)) {
S.Diag(FD->getLocation(), diag::err_bad_multiversion_option)
<< Architecture << ParseInfo.Architecture;
return true;
}
for (const auto &Feat : ParseInfo.Features) {
auto BareFeat = StringRef{Feat}.substr(1);
if (Feat[0] == '-') {
S.Diag(FD->getLocation(), diag::err_bad_multiversion_option)
<< Feature << ("no-" + BareFeat).str();
return true;
}
if (!TargetInfo.validateCpuSupports(BareFeat) ||
!TargetInfo.isValidFeatureName(BareFeat)) {
S.Diag(FD->getLocation(), diag::err_bad_multiversion_option)
<< Feature << BareFeat;
return true;
}
}
return false;
}
static bool HasNonMultiVersionAttributes(const FunctionDecl *FD,
MultiVersionKind MVType) {
for (const Attr *A : FD->attrs()) {
switch (A->getKind()) {
case attr::CPUDispatch:
case attr::CPUSpecific:
if (MVType != MultiVersionKind::CPUDispatch &&
MVType != MultiVersionKind::CPUSpecific)
return true;
break;
case attr::Target:
if (MVType != MultiVersionKind::Target)
return true;
break;
default:
return true;
}
}
return false;
}
static bool CheckMultiVersionAdditionalRules(Sema &S, const FunctionDecl *OldFD,
const FunctionDecl *NewFD,
bool CausesMV,
MultiVersionKind MVType) {
enum DoesntSupport {
FuncTemplates = 0,
VirtFuncs = 1,
DeducedReturn = 2,
Constructors = 3,
Destructors = 4,
DeletedFuncs = 5,
DefaultedFuncs = 6,
ConstexprFuncs = 7,
ConstevalFuncs = 8,
};
enum Different {
CallingConv = 0,
ReturnType = 1,
ConstexprSpec = 2,
InlineSpec = 3,
StorageClass = 4,
Linkage = 5
};
bool IsCPUSpecificCPUDispatchMVType =
MVType == MultiVersionKind::CPUDispatch ||
MVType == MultiVersionKind::CPUSpecific;
if (OldFD && !OldFD->getType()->getAs<FunctionProtoType>()) {
S.Diag(OldFD->getLocation(), diag::err_multiversion_noproto);
S.Diag(NewFD->getLocation(), diag::note_multiversioning_caused_here);
return true;
}
if (!NewFD->getType()->getAs<FunctionProtoType>())
return S.Diag(NewFD->getLocation(), diag::err_multiversion_noproto);
if (!S.getASTContext().getTargetInfo().supportsMultiVersioning()) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_not_supported);
if (OldFD)
S.Diag(OldFD->getLocation(), diag::note_previous_declaration);
return true;
}
// For now, disallow all other attributes. These should be opt-in, but
// an analysis of all of them is a future FIXME.
if (CausesMV && OldFD && HasNonMultiVersionAttributes(OldFD, MVType)) {
S.Diag(OldFD->getLocation(), diag::err_multiversion_no_other_attrs)
<< IsCPUSpecificCPUDispatchMVType;
S.Diag(NewFD->getLocation(), diag::note_multiversioning_caused_here);
return true;
}
if (HasNonMultiVersionAttributes(NewFD, MVType))
return S.Diag(NewFD->getLocation(), diag::err_multiversion_no_other_attrs)
<< IsCPUSpecificCPUDispatchMVType;
if (NewFD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
return S.Diag(NewFD->getLocation(), diag::err_multiversion_doesnt_support)
<< IsCPUSpecificCPUDispatchMVType << FuncTemplates;
if (const auto *NewCXXFD = dyn_cast<CXXMethodDecl>(NewFD)) {
if (NewCXXFD->isVirtual())
return S.Diag(NewCXXFD->getLocation(),
diag::err_multiversion_doesnt_support)
<< IsCPUSpecificCPUDispatchMVType << VirtFuncs;
if (const auto *NewCXXCtor = dyn_cast<CXXConstructorDecl>(NewFD))
return S.Diag(NewCXXCtor->getLocation(),
diag::err_multiversion_doesnt_support)
<< IsCPUSpecificCPUDispatchMVType << Constructors;
if (const auto *NewCXXDtor = dyn_cast<CXXDestructorDecl>(NewFD))
return S.Diag(NewCXXDtor->getLocation(),
diag::err_multiversion_doesnt_support)
<< IsCPUSpecificCPUDispatchMVType << Destructors;
}
if (NewFD->isDeleted())
return S.Diag(NewFD->getLocation(), diag::err_multiversion_doesnt_support)
<< IsCPUSpecificCPUDispatchMVType << DeletedFuncs;
if (NewFD->isDefaulted())
return S.Diag(NewFD->getLocation(), diag::err_multiversion_doesnt_support)
<< IsCPUSpecificCPUDispatchMVType << DefaultedFuncs;
if (NewFD->isConstexpr() && (MVType == MultiVersionKind::CPUDispatch ||
MVType == MultiVersionKind::CPUSpecific))
return S.Diag(NewFD->getLocation(), diag::err_multiversion_doesnt_support)
<< IsCPUSpecificCPUDispatchMVType
<< (NewFD->isConsteval() ? ConstevalFuncs : ConstexprFuncs);
QualType NewQType = S.getASTContext().getCanonicalType(NewFD->getType());
const auto *NewType = cast<FunctionType>(NewQType);
QualType NewReturnType = NewType->getReturnType();
if (NewReturnType->isUndeducedType())
return S.Diag(NewFD->getLocation(), diag::err_multiversion_doesnt_support)
<< IsCPUSpecificCPUDispatchMVType << DeducedReturn;
// Only allow transition to MultiVersion if it hasn't been used.
if (OldFD && CausesMV && OldFD->isUsed(false))
return S.Diag(NewFD->getLocation(), diag::err_multiversion_after_used);
// Ensure the return type is identical.
if (OldFD) {
QualType OldQType = S.getASTContext().getCanonicalType(OldFD->getType());
const auto *OldType = cast<FunctionType>(OldQType);
FunctionType::ExtInfo OldTypeInfo = OldType->getExtInfo();
FunctionType::ExtInfo NewTypeInfo = NewType->getExtInfo();
if (OldTypeInfo.getCC() != NewTypeInfo.getCC())
return S.Diag(NewFD->getLocation(), diag::err_multiversion_diff)
<< CallingConv;
QualType OldReturnType = OldType->getReturnType();
if (OldReturnType != NewReturnType)
return S.Diag(NewFD->getLocation(), diag::err_multiversion_diff)
<< ReturnType;
if (OldFD->getConstexprKind() != NewFD->getConstexprKind())
return S.Diag(NewFD->getLocation(), diag::err_multiversion_diff)
<< ConstexprSpec;
if (OldFD->isInlineSpecified() != NewFD->isInlineSpecified())
return S.Diag(NewFD->getLocation(), diag::err_multiversion_diff)
<< InlineSpec;
if (OldFD->getStorageClass() != NewFD->getStorageClass())
return S.Diag(NewFD->getLocation(), diag::err_multiversion_diff)
<< StorageClass;
if (OldFD->isExternC() != NewFD->isExternC())
return S.Diag(NewFD->getLocation(), diag::err_multiversion_diff)
<< Linkage;
if (S.CheckEquivalentExceptionSpec(
OldFD->getType()->getAs<FunctionProtoType>(), OldFD->getLocation(),
NewFD->getType()->getAs<FunctionProtoType>(), NewFD->getLocation()))
return true;
}
return false;
}
/// Check the validity of a multiversion function declaration that is the
/// first of its kind. Also marks the function itself as multiversioned.
///
/// This sets NewFD->isInvalidDecl() to true if there was an error.
///
/// Returns true if there was an error, false otherwise.
static bool CheckMultiVersionFirstFunction(Sema &S, FunctionDecl *FD,
MultiVersionKind MVType,
const TargetAttr *TA) {
assert(MVType != MultiVersionKind::None &&
"Function lacks multiversion attribute");
// Target only causes MV if it is default; otherwise this is a normal
// function.
if (MVType == MultiVersionKind::Target && !TA->isDefaultVersion())
return false;
if (MVType == MultiVersionKind::Target && CheckMultiVersionValue(S, FD)) {
FD->setInvalidDecl();
return true;
}
if (CheckMultiVersionAdditionalRules(S, nullptr, FD, true, MVType)) {
FD->setInvalidDecl();
return true;
}
FD->setIsMultiVersion();
return false;
}
static bool PreviousDeclsHaveMultiVersionAttribute(const FunctionDecl *FD) {
for (const Decl *D = FD->getPreviousDecl(); D; D = D->getPreviousDecl()) {
if (D->getAsFunction()->getMultiVersionKind() != MultiVersionKind::None)
return true;
}
return false;
}
static bool CheckTargetCausesMultiVersioning(
Sema &S, FunctionDecl *OldFD, FunctionDecl *NewFD, const TargetAttr *NewTA,
bool &Redeclaration, NamedDecl *&OldDecl, bool &MergeTypeWithPrevious,
LookupResult &Previous) {
const auto *OldTA = OldFD->getAttr<TargetAttr>();
TargetAttr::ParsedTargetAttr NewParsed = NewTA->parse();
// Sort order doesn't matter; it just needs to be consistent.
llvm::sort(NewParsed.Features);
// If the old decl is NOT MultiVersioned yet, and we don't cause that
// to change, this is a simple redeclaration.
if (!NewTA->isDefaultVersion() &&
(!OldTA || OldTA->getFeaturesStr() == NewTA->getFeaturesStr()))
return false;
// Otherwise, this decl causes MultiVersioning.
if (!S.getASTContext().getTargetInfo().supportsMultiVersioning()) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_not_supported);
S.Diag(OldFD->getLocation(), diag::note_previous_declaration);
NewFD->setInvalidDecl();
return true;
}
if (CheckMultiVersionAdditionalRules(S, OldFD, NewFD, true,
MultiVersionKind::Target)) {
NewFD->setInvalidDecl();
return true;
}
if (CheckMultiVersionValue(S, NewFD)) {
NewFD->setInvalidDecl();
return true;
}
// If this is 'default', permit the forward declaration.
if (!OldFD->isMultiVersion() && !OldTA && NewTA->isDefaultVersion()) {
Redeclaration = true;
OldDecl = OldFD;
OldFD->setIsMultiVersion();
NewFD->setIsMultiVersion();
return false;
}
if (CheckMultiVersionValue(S, OldFD)) {
S.Diag(NewFD->getLocation(), diag::note_multiversioning_caused_here);
NewFD->setInvalidDecl();
return true;
}
TargetAttr::ParsedTargetAttr OldParsed =
OldTA->parse(std::less<std::string>());
if (OldParsed == NewParsed) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_duplicate);
S.Diag(OldFD->getLocation(), diag::note_previous_declaration);
NewFD->setInvalidDecl();
return true;
}
for (const auto *FD : OldFD->redecls()) {
const auto *CurTA = FD->getAttr<TargetAttr>();
// We allow forward declarations before ANY multiversioning attributes, but
// nothing after the fact.
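// For instance:
//   int f();                                    // OK: precedes any versions
//   __attribute__((target("default"))) int f(); // OK: starts multiversioning
//   int f();                                    // error: attribute required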
if (PreviousDeclsHaveMultiVersionAttribute(FD) &&
(!CurTA || CurTA->isInherited())) {
S.Diag(FD->getLocation(), diag::err_multiversion_required_in_redecl)
<< 0;
S.Diag(NewFD->getLocation(), diag::note_multiversioning_caused_here);
NewFD->setInvalidDecl();
return true;
}
}
OldFD->setIsMultiVersion();
NewFD->setIsMultiVersion();
Redeclaration = false;
MergeTypeWithPrevious = false;
OldDecl = nullptr;
Previous.clear();
return false;
}
/// Check the validity of a new function declaration being added to an existing
/// multiversioned declaration collection.
static bool CheckMultiVersionAdditionalDecl(
Sema &S, FunctionDecl *OldFD, FunctionDecl *NewFD,
MultiVersionKind NewMVType, const TargetAttr *NewTA,
const CPUDispatchAttr *NewCPUDisp, const CPUSpecificAttr *NewCPUSpec,
bool &Redeclaration, NamedDecl *&OldDecl, bool &MergeTypeWithPrevious,
LookupResult &Previous) {
MultiVersionKind OldMVType = OldFD->getMultiVersionKind();
// Disallow mixing of multiversioning types.
if ((OldMVType == MultiVersionKind::Target &&
NewMVType != MultiVersionKind::Target) ||
(NewMVType == MultiVersionKind::Target &&
OldMVType != MultiVersionKind::Target)) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_types_mixed);
S.Diag(OldFD->getLocation(), diag::note_previous_declaration);
NewFD->setInvalidDecl();
return true;
}
TargetAttr::ParsedTargetAttr NewParsed;
if (NewTA) {
NewParsed = NewTA->parse();
llvm::sort(NewParsed.Features);
}
bool UseMemberUsingDeclRules =
S.CurContext->isRecord() && !NewFD->getFriendObjectKind();
// Next, check ALL non-overloads to see if this is a redeclaration of a
// previous member of the MultiVersion set.
for (NamedDecl *ND : Previous) {
FunctionDecl *CurFD = ND->getAsFunction();
if (!CurFD)
continue;
if (S.IsOverload(NewFD, CurFD, UseMemberUsingDeclRules))
continue;
if (NewMVType == MultiVersionKind::Target) {
const auto *CurTA = CurFD->getAttr<TargetAttr>();
if (CurTA->getFeaturesStr() == NewTA->getFeaturesStr()) {
NewFD->setIsMultiVersion();
Redeclaration = true;
OldDecl = ND;
return false;
}
TargetAttr::ParsedTargetAttr CurParsed =
CurTA->parse(std::less<std::string>());
if (CurParsed == NewParsed) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_duplicate);
S.Diag(CurFD->getLocation(), diag::note_previous_declaration);
NewFD->setInvalidDecl();
return true;
}
} else {
const auto *CurCPUSpec = CurFD->getAttr<CPUSpecificAttr>();
const auto *CurCPUDisp = CurFD->getAttr<CPUDispatchAttr>();
// Handle CPUDispatch/CPUSpecific versions.
// Only one CPUDispatch function is allowed; this will make it go through
// the redeclaration errors.
if (NewMVType == MultiVersionKind::CPUDispatch &&
CurFD->hasAttr<CPUDispatchAttr>()) {
if (CurCPUDisp->cpus_size() == NewCPUDisp->cpus_size() &&
std::equal(
CurCPUDisp->cpus_begin(), CurCPUDisp->cpus_end(),
NewCPUDisp->cpus_begin(),
[](const IdentifierInfo *Cur, const IdentifierInfo *New) {
return Cur->getName() == New->getName();
})) {
NewFD->setIsMultiVersion();
Redeclaration = true;
OldDecl = ND;
return false;
}
// If the declarations don't match, this is an error condition.
S.Diag(NewFD->getLocation(), diag::err_cpu_dispatch_mismatch);
S.Diag(CurFD->getLocation(), diag::note_previous_declaration);
NewFD->setInvalidDecl();
return true;
}
if (NewMVType == MultiVersionKind::CPUSpecific && CurCPUSpec) {
if (CurCPUSpec->cpus_size() == NewCPUSpec->cpus_size() &&
std::equal(
CurCPUSpec->cpus_begin(), CurCPUSpec->cpus_end(),
NewCPUSpec->cpus_begin(),
[](const IdentifierInfo *Cur, const IdentifierInfo *New) {
return Cur->getName() == New->getName();
})) {
NewFD->setIsMultiVersion();
Redeclaration = true;
OldDecl = ND;
return false;
}
// Only 1 version of CPUSpecific is allowed for each CPU.
for (const IdentifierInfo *CurII : CurCPUSpec->cpus()) {
for (const IdentifierInfo *NewII : NewCPUSpec->cpus()) {
if (CurII == NewII) {
S.Diag(NewFD->getLocation(), diag::err_cpu_specific_multiple_defs)
<< NewII;
S.Diag(CurFD->getLocation(), diag::note_previous_declaration);
NewFD->setInvalidDecl();
return true;
}
}
}
}
// If the two decls aren't the same MVType, there is no possible error
// condition.
}
}
// Otherwise, this is simply a non-redeclaration case. Checking the 'value'
// is only necessary in the Target case, since the CPUSpecific/Dispatch cases
// are handled in the attribute-adding step.
if (NewMVType == MultiVersionKind::Target &&
CheckMultiVersionValue(S, NewFD)) {
NewFD->setInvalidDecl();
return true;
}
if (CheckMultiVersionAdditionalRules(S, OldFD, NewFD,
!OldFD->isMultiVersion(), NewMVType)) {
NewFD->setInvalidDecl();
return true;
}
// Permit forward declarations in the case where these two are compatible.
if (!OldFD->isMultiVersion()) {
OldFD->setIsMultiVersion();
NewFD->setIsMultiVersion();
Redeclaration = true;
OldDecl = OldFD;
return false;
}
NewFD->setIsMultiVersion();
Redeclaration = false;
MergeTypeWithPrevious = false;
OldDecl = nullptr;
Previous.clear();
return false;
}
/// Check the validity of a multiversion function declaration.
/// Also marks the function itself as multiversioned.
///
/// This sets NewFD->isInvalidDecl() to true if there was an error.
///
/// Returns true if there was an error, false otherwise.
static bool CheckMultiVersionFunction(Sema &S, FunctionDecl *NewFD,
bool &Redeclaration, NamedDecl *&OldDecl,
bool &MergeTypeWithPrevious,
LookupResult &Previous) {
const auto *NewTA = NewFD->getAttr<TargetAttr>();
const auto *NewCPUDisp = NewFD->getAttr<CPUDispatchAttr>();
const auto *NewCPUSpec = NewFD->getAttr<CPUSpecificAttr>();
// Mixing Multiversioning types is prohibited.
if ((NewTA && NewCPUDisp) || (NewTA && NewCPUSpec) ||
(NewCPUDisp && NewCPUSpec)) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_types_mixed);
NewFD->setInvalidDecl();
return true;
}
MultiVersionKind MVType = NewFD->getMultiVersionKind();
// Main isn't allowed to become a multiversion function; however, 'main' IS
// permitted to be marked with the 'target' optimization hint.
if (NewFD->isMain()) {
if ((MVType == MultiVersionKind::Target && NewTA->isDefaultVersion()) ||
MVType == MultiVersionKind::CPUDispatch ||
MVType == MultiVersionKind::CPUSpecific) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_not_allowed_on_main);
NewFD->setInvalidDecl();
return true;
}
return false;
}
if (!OldDecl || !OldDecl->getAsFunction() ||
OldDecl->getDeclContext()->getRedeclContext() !=
NewFD->getDeclContext()->getRedeclContext()) {
// If there's no previous declaration, AND this isn't attempting to cause
// multiversioning, this isn't an error condition.
if (MVType == MultiVersionKind::None)
return false;
return CheckMultiVersionFirstFunction(S, NewFD, MVType, NewTA);
}
FunctionDecl *OldFD = OldDecl->getAsFunction();
if (!OldFD->isMultiVersion() && MVType == MultiVersionKind::None)
return false;
if (OldFD->isMultiVersion() && MVType == MultiVersionKind::None) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_required_in_redecl)
<< (OldFD->getMultiVersionKind() != MultiVersionKind::Target);
NewFD->setInvalidDecl();
return true;
}
// Handle the case where the target attribute potentially causes
// multiversioning.
if (!OldFD->isMultiVersion() && MVType == MultiVersionKind::Target)
return CheckTargetCausesMultiVersioning(S, OldFD, NewFD, NewTA,
Redeclaration, OldDecl,
MergeTypeWithPrevious, Previous);
// At this point, we have a multiversion function decl (in OldFD) AND an
// appropriate attribute in the current function decl. Check that these are
// still compatible with previous declarations.
return CheckMultiVersionAdditionalDecl(
S, OldFD, NewFD, MVType, NewTA, NewCPUDisp, NewCPUSpec, Redeclaration,
OldDecl, MergeTypeWithPrevious, Previous);
}
/// Perform semantic checking of a new function declaration.
///
/// Performs semantic analysis of the new function declaration
/// NewFD. This routine performs all semantic checking that does not
/// require the actual declarator involved in the declaration, and is
/// used both for the declaration of functions as they are parsed
/// (called via ActOnDeclarator) and for the declaration of functions
/// that have been instantiated via C++ template instantiation (called
/// via InstantiateDecl).
///
/// \param IsMemberSpecialization whether this new function declaration is
/// a member specialization (that replaces any definition provided by the
/// previous declaration).
///
/// This sets NewFD->isInvalidDecl() to true if there was an error.
///
/// \returns true if the function declaration is a redeclaration.
bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
LookupResult &Previous,
bool IsMemberSpecialization) {
assert(!NewFD->getReturnType()->isVariablyModifiedType() &&
"Variably modified return types are not handled here");
// Determine whether the type of this function should be merged with
// a previous visible declaration. This never happens for functions in C++,
// and always happens in C if the previous declaration was visible.
bool MergeTypeWithPrevious = !getLangOpts().CPlusPlus &&
!Previous.isShadowed();
bool Redeclaration = false;
NamedDecl *OldDecl = nullptr;
bool MayNeedOverloadableChecks = false;
// Merge or overload the declaration with an existing declaration of
// the same name, if appropriate.
if (!Previous.empty()) {
// Determine whether NewFD is an overload of PrevDecl or
// a declaration that requires merging. If it's an overload,
// there's no more work to do here; we'll just add the new
// function to the scope.
if (!AllowOverloadingOfFunction(Previous, Context, NewFD)) {
NamedDecl *Candidate = Previous.getRepresentativeDecl();
if (shouldLinkPossiblyHiddenDecl(Candidate, NewFD)) {
Redeclaration = true;
OldDecl = Candidate;
}
} else {
MayNeedOverloadableChecks = true;
switch (CheckOverload(S, NewFD, Previous, OldDecl,
/*NewIsUsingDecl*/ false)) {
case Ovl_Match:
Redeclaration = true;
break;
case Ovl_NonFunction:
Redeclaration = true;
break;
case Ovl_Overload:
Redeclaration = false;
break;
}
}
}
// Check for a previous extern "C" declaration with this name.
if (!Redeclaration &&
checkForConflictWithNonVisibleExternC(*this, NewFD, Previous)) {
if (!Previous.empty()) {
// This is an extern "C" declaration with the same name as a previous
// declaration, and thus redeclares that entity...
Redeclaration = true;
OldDecl = Previous.getFoundDecl();
MergeTypeWithPrevious = false;
// ... except in the presence of __attribute__((overloadable)).
if (OldDecl->hasAttr<OverloadableAttr>() ||
NewFD->hasAttr<OverloadableAttr>()) {
if (IsOverload(NewFD, cast<FunctionDecl>(OldDecl), false)) {
MayNeedOverloadableChecks = true;
Redeclaration = false;
OldDecl = nullptr;
}
}
}
}
if (CheckMultiVersionFunction(*this, NewFD, Redeclaration, OldDecl,
MergeTypeWithPrevious, Previous))
return Redeclaration;
// C++11 [dcl.constexpr]p8:
// A constexpr specifier for a non-static member function that is not
// a constructor declares that member function to be const.
//
// This needs to be delayed until we know whether this is an out-of-line
// definition of a static member function.
//
// This rule is not present in C++1y, so we produce a backwards
// compatibility warning whenever it happens in C++11.
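// For illustration (a sketch, not from this file):
//   struct S { constexpr int get(); };
//   // C++11: 'get' is implicitly 'constexpr int get() const;'.
//   // C++14: 'get' stays non-const, hence the compatibility warning below.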
CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(NewFD);
if (!getLangOpts().CPlusPlus14 && MD && MD->isConstexpr() &&
!MD->isStatic() && !isa<CXXConstructorDecl>(MD) &&
!MD->getMethodQualifiers().hasConst()) {
CXXMethodDecl *OldMD = nullptr;
if (OldDecl)
OldMD = dyn_cast_or_null<CXXMethodDecl>(OldDecl->getAsFunction());
if (!OldMD || !OldMD->isStatic()) {
const FunctionProtoType *FPT =
MD->getType()->castAs<FunctionProtoType>();
FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
EPI.TypeQuals.addConst();
MD->setType(Context.getFunctionType(FPT->getReturnType(),
FPT->getParamTypes(), EPI));
// Warn that we did this, if we're not performing template instantiation.
// In that case, we'll have warned already when the template was defined.
if (!inTemplateInstantiation()) {
SourceLocation AddConstLoc;
if (FunctionTypeLoc FTL = MD->getTypeSourceInfo()->getTypeLoc()
.IgnoreParens().getAs<FunctionTypeLoc>())
AddConstLoc = getLocForEndOfToken(FTL.getRParenLoc());
Diag(MD->getLocation(), diag::warn_cxx14_compat_constexpr_not_const)
<< FixItHint::CreateInsertion(AddConstLoc, " const");
}
}
}
if (Redeclaration) {
// NewFD and OldDecl represent declarations that need to be
// merged.
if (MergeFunctionDecl(NewFD, OldDecl, S, MergeTypeWithPrevious)) {
NewFD->setInvalidDecl();
return Redeclaration;
}
Previous.clear();
Previous.addDecl(OldDecl);
if (FunctionTemplateDecl *OldTemplateDecl =
dyn_cast<FunctionTemplateDecl>(OldDecl)) {
auto *OldFD = OldTemplateDecl->getTemplatedDecl();
FunctionTemplateDecl *NewTemplateDecl
= NewFD->getDescribedFunctionTemplate();
assert(NewTemplateDecl && "Template/non-template mismatch");
// The call to MergeFunctionDecl above may have created some state in
// NewTemplateDecl that needs to be merged with OldTemplateDecl before we
// can add it as a redeclaration.
NewTemplateDecl->mergePrevDecl(OldTemplateDecl);
NewFD->setPreviousDeclaration(OldFD);
adjustDeclContextForDeclaratorDecl(NewFD, OldFD);
if (NewFD->isCXXClassMember()) {
NewFD->setAccess(OldTemplateDecl->getAccess());
NewTemplateDecl->setAccess(OldTemplateDecl->getAccess());
}
// If this is an explicit specialization of a member that is a function
// template, mark it as a member specialization.
if (IsMemberSpecialization &&
NewTemplateDecl->getInstantiatedFromMemberTemplate()) {
NewTemplateDecl->setMemberSpecialization();
assert(OldTemplateDecl->isMemberSpecialization());
// Explicit specializations of a member template do not inherit deleted
// status from the parent member template that they are specializing.
if (OldFD->isDeleted()) {
// FIXME: This assert will not hold in the presence of modules.
assert(OldFD->getCanonicalDecl() == OldFD);
// FIXME: We need an update record for this AST mutation.
OldFD->setDeletedAsWritten(false);
}
}
} else {
if (shouldLinkDependentDeclWithPrevious(NewFD, OldDecl)) {
auto *OldFD = cast<FunctionDecl>(OldDecl);
// This needs to happen first so that 'inline' propagates.
NewFD->setPreviousDeclaration(OldFD);
adjustDeclContextForDeclaratorDecl(NewFD, OldFD);
if (NewFD->isCXXClassMember())
NewFD->setAccess(OldFD->getAccess());
}
}
} else if (!getLangOpts().CPlusPlus && MayNeedOverloadableChecks &&
!NewFD->getAttr<OverloadableAttr>()) {
assert((Previous.empty() ||
llvm::any_of(Previous,
[](const NamedDecl *ND) {
return ND->hasAttr<OverloadableAttr>();
})) &&
"Non-redecls shouldn't happen without overloadable present");
auto OtherUnmarkedIter = llvm::find_if(Previous, [](const NamedDecl *ND) {
const auto *FD = dyn_cast<FunctionDecl>(ND);
return FD && !FD->hasAttr<OverloadableAttr>();
});
if (OtherUnmarkedIter != Previous.end()) {
Diag(NewFD->getLocation(),
diag::err_attribute_overloadable_multiple_unmarked_overloads);
Diag((*OtherUnmarkedIter)->getLocation(),
diag::note_attribute_overloadable_prev_overload)
<< false;
NewFD->addAttr(OverloadableAttr::CreateImplicit(Context));
}
}
// Semantic checking for this function declaration (in isolation).
if (getLangOpts().CPlusPlus) {
// C++-specific checks.
if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(NewFD)) {
CheckConstructor(Constructor);
} else if (CXXDestructorDecl *Destructor =
dyn_cast<CXXDestructorDecl>(NewFD)) {
CXXRecordDecl *Record = Destructor->getParent();
QualType ClassType = Context.getTypeDeclType(Record);
// FIXME: Shouldn't we be able to perform this check even when the class
// type is dependent? Both gcc and edg can handle that.
if (!ClassType->isDependentType()) {
DeclarationName Name
= Context.DeclarationNames.getCXXDestructorName(
Context.getCanonicalType(ClassType));
if (NewFD->getDeclName() != Name) {
Diag(NewFD->getLocation(), diag::err_destructor_name);
NewFD->setInvalidDecl();
return Redeclaration;
}
}
} else if (CXXConversionDecl *Conversion
= dyn_cast<CXXConversionDecl>(NewFD)) {
ActOnConversionDeclarator(Conversion);
} else if (auto *Guide = dyn_cast<CXXDeductionGuideDecl>(NewFD)) {
if (auto *TD = Guide->getDescribedFunctionTemplate())
CheckDeductionGuideTemplate(TD);
// A deduction guide is not on the list of entities that can be
// explicitly specialized.
if (Guide->getTemplateSpecializationKind() == TSK_ExplicitSpecialization)
Diag(Guide->getBeginLoc(), diag::err_deduction_guide_specialized)
<< /*explicit specialization*/ 1;
}
// Find any virtual functions that this function overrides.
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(NewFD)) {
if (!Method->isFunctionTemplateSpecialization() &&
!Method->getDescribedFunctionTemplate() &&
Method->isCanonicalDecl()) {
if (AddOverriddenMethods(Method->getParent(), Method)) {
// If the function was marked as "static", we have a problem.
if (NewFD->getStorageClass() == SC_Static) {
ReportOverrides(*this, diag::err_static_overrides_virtual, Method);
}
}
}
if (Method->isStatic())
checkThisInStaticMemberFunctionType(Method);
}
// Extra checking for C++ overloaded operators (C++ [over.oper]).
if (NewFD->isOverloadedOperator() &&
CheckOverloadedOperatorDeclaration(NewFD)) {
NewFD->setInvalidDecl();
return Redeclaration;
}
// Extra checking for C++0x literal operators (C++0x [over.literal]).
if (NewFD->getLiteralIdentifier() &&
CheckLiteralOperatorDeclaration(NewFD)) {
NewFD->setInvalidDecl();
return Redeclaration;
}
// In C++, check default arguments now that we have merged decls, unless
// the lexical context is the class, in which case this is done during
// delayed parsing anyway.
if (!CurContext->isRecord())
CheckCXXDefaultArguments(NewFD);
// If this function declares a builtin function, check the type of this
// declaration against the expected type for the builtin.
if (unsigned BuiltinID = NewFD->getBuiltinID()) {
ASTContext::GetBuiltinTypeError Error;
LookupPredefedObjCSuperType(*this, S, NewFD->getIdentifier());
QualType T = Context.GetBuiltinType(BuiltinID, Error);
// If the type of the builtin differs only in its exception
// specification, that's OK.
// FIXME: If the types do differ in this way, it would be better to
// retain the 'noexcept' form of the type.
if (!T.isNull() &&
!Context.hasSameFunctionTypeIgnoringExceptionSpec(T,
NewFD->getType()))
// The type of this function differs from the type of the builtin,
// so forget about the builtin entirely.
Context.BuiltinInfo.forgetBuiltin(BuiltinID, Context.Idents);
}
// If this function is declared as being extern "C", then check to see if
// the function returns a UDT (class, struct, or union type) that is not C
// compatible, and if it does, warn the user.
// But, issue any diagnostic on the first declaration only.
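// Illustration (hypothetical declaration):
//   extern "C" std::string f();  // warns: returns a non-C-compatible UDT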
if (Previous.empty() && NewFD->isExternC()) {
QualType R = NewFD->getReturnType();
if (R->isIncompleteType() && !R->isVoidType())
Diag(NewFD->getLocation(), diag::warn_return_value_udt_incomplete)
<< NewFD << R;
else if (!R.isPODType(Context) && !R->isVoidType() &&
!R->isObjCObjectPointerType())
Diag(NewFD->getLocation(), diag::warn_return_value_udt) << NewFD << R;
}
// C++1z [dcl.fct]p6:
// [...] whether the function has a non-throwing exception-specification
// [is] part of the function type
//
// This results in an ABI break between C++14 and C++17 for functions whose
// declared type includes an exception-specification in a parameter or
// return type. (Exception specifications on the function itself are OK in
// most cases, and exception specifications are not permitted in most other
// contexts where they could make it into a mangling.)
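// Illustration (hypothetical declaration):
//   void f(void (*p)() noexcept);
//   // In C++17 the noexcept is part of p's type and thus of f's mangled
//   // signature, so pre-C++17 modes warn for compatibility.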
if (!getLangOpts().CPlusPlus17 && !NewFD->getPrimaryTemplate()) {
auto HasNoexcept = [&](QualType T) -> bool {
// Strip off declarator chunks that could be between us and a function
// type. We don't need to look far; exception specifications are very
// restricted prior to C++17.
if (auto *RT = T->getAs<ReferenceType>())
T = RT->getPointeeType();
else if (T->isAnyPointerType())
T = T->getPointeeType();
else if (auto *MPT = T->getAs<MemberPointerType>())
T = MPT->getPointeeType();
if (auto *FPT = T->getAs<FunctionProtoType>())
if (FPT->isNothrow())
return true;
return false;
};
auto *FPT = NewFD->getType()->castAs<FunctionProtoType>();
bool AnyNoexcept = HasNoexcept(FPT->getReturnType());
for (QualType T : FPT->param_types())
AnyNoexcept |= HasNoexcept(T);
if (AnyNoexcept)
Diag(NewFD->getLocation(),
diag::warn_cxx17_compat_exception_spec_in_signature)
<< NewFD;
}
if (!Redeclaration && LangOpts.CUDA)
checkCUDATargetOverload(NewFD, Previous);
}
return Redeclaration;
}
void Sema::CheckMain(FunctionDecl* FD, const DeclSpec& DS) {
// C++11 [basic.start.main]p3:
// A program that [...] declares main to be inline, static or
// constexpr is ill-formed.
// C11 6.7.4p4: In a hosted environment, no function specifier(s) shall
// appear in a declaration of main.
// static main is not an error under C99, but we should warn about it.
// We accept _Noreturn main as an extension.
if (FD->getStorageClass() == SC_Static)
Diag(DS.getStorageClassSpecLoc(), getLangOpts().CPlusPlus
? diag::err_static_main : diag::warn_static_main)
<< FixItHint::CreateRemoval(DS.getStorageClassSpecLoc());
if (FD->isInlineSpecified())
Diag(DS.getInlineSpecLoc(), diag::err_inline_main)
<< FixItHint::CreateRemoval(DS.getInlineSpecLoc());
if (DS.isNoreturnSpecified()) {
SourceLocation NoreturnLoc = DS.getNoreturnSpecLoc();
SourceRange NoreturnRange(NoreturnLoc, getLocForEndOfToken(NoreturnLoc));
Diag(NoreturnLoc, diag::ext_noreturn_main);
Diag(NoreturnLoc, diag::note_main_remove_noreturn)
<< FixItHint::CreateRemoval(NoreturnRange);
}
if (FD->isConstexpr()) {
Diag(DS.getConstexprSpecLoc(), diag::err_constexpr_main)
<< FD->isConsteval()
<< FixItHint::CreateRemoval(DS.getConstexprSpecLoc());
FD->setConstexprKind(CSK_unspecified);
}
if (getLangOpts().OpenCL) {
Diag(FD->getLocation(), diag::err_opencl_no_main)
<< FD->hasAttr<OpenCLKernelAttr>();
FD->setInvalidDecl();
return;
}
QualType T = FD->getType();
assert(T->isFunctionType() && "function decl is not of function type");
const FunctionType* FT = T->castAs<FunctionType>();
// Set default calling convention for main()
if (FT->getCallConv() != CC_C) {
FT = Context.adjustFunctionType(FT, FT->getExtInfo().withCallingConv(CC_C));
FD->setType(QualType(FT, 0));
T = Context.getCanonicalType(FD->getType());
}
if (getLangOpts().GNUMode && !getLangOpts().CPlusPlus) {
// In C with GNU extensions we allow main() to have non-integer return
// type, but we should warn about the extension, and we disable the
// implicit-return-zero rule.
// GCC in C mode accepts qualified 'int'.
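// Illustration (hypothetical, GNU C mode only):
//   void main(void) {}  // accepted with a warning; no implicit 'return 0'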
if (Context.hasSameUnqualifiedType(FT->getReturnType(), Context.IntTy))
FD->setHasImplicitReturnZero(true);
else {
Diag(FD->getTypeSpecStartLoc(), diag::ext_main_returns_nonint);
SourceRange RTRange = FD->getReturnTypeSourceRange();
if (RTRange.isValid())
Diag(RTRange.getBegin(), diag::note_main_change_return_type)
<< FixItHint::CreateReplacement(RTRange, "int");
}
} else {
// In C and C++, main magically returns 0 if you fall off the end;
// set the flag which tells us that.
// This is C++ [basic.start.main]p5 and C99 5.1.2.2.3.
// All the standards say that main() should return 'int'.
if (Context.hasSameType(FT->getReturnType(), Context.IntTy))
FD->setHasImplicitReturnZero(true);
else {
// Otherwise, this is just a flat-out error.
SourceRange RTRange = FD->getReturnTypeSourceRange();
Diag(FD->getTypeSpecStartLoc(), diag::err_main_returns_nonint)
<< (RTRange.isValid() ? FixItHint::CreateReplacement(RTRange, "int")
: FixItHint());
FD->setInvalidDecl(true);
}
}
// Treat protoless main() as nullary.
if (isa<FunctionNoProtoType>(FT)) return;
const FunctionProtoType* FTP = cast<const FunctionProtoType>(FT);
unsigned nparams = FTP->getNumParams();
assert(FD->getNumParams() == nparams);
bool HasExtraParameters = (nparams > 3);
if (FTP->isVariadic()) {
Diag(FD->getLocation(), diag::ext_variadic_main);
// FIXME: if we had information about the location of the ellipsis, we
// could add a FixIt hint to remove it as a parameter.
}
// Darwin passes an undocumented fourth argument of type char**. If
// other platforms start sprouting these, the logic below will start
// getting shifty.
if (nparams == 4 && Context.getTargetInfo().getTriple().isOSDarwin())
HasExtraParameters = false;
if (HasExtraParameters) {
Diag(FD->getLocation(), diag::err_main_surplus_args) << nparams;
FD->setInvalidDecl(true);
nparams = 3;
}
// FIXME: a lot of the following diagnostics would be improved
// if we had some location information about types.
QualType CharPP =
Context.getPointerType(Context.getPointerType(Context.CharTy));
QualType Expected[] = { Context.IntTy, CharPP, CharPP, CharPP };
for (unsigned i = 0; i < nparams; ++i) {
QualType AT = FTP->getParamType(i);
bool mismatch = true;
if (Context.hasSameUnqualifiedType(AT, Expected[i]))
mismatch = false;
else if (Expected[i] == CharPP) {
// As an extension, the following forms are okay:
// char const **
// char const * const *
// char * const *
QualifierCollector qs;
const PointerType* PT;
if ((PT = qs.strip(AT)->getAs<PointerType>()) &&
(PT = qs.strip(PT->getPointeeType())->getAs<PointerType>()) &&
Context.hasSameType(QualType(qs.strip(PT->getPointeeType()), 0),
Context.CharTy)) {
qs.removeConst();
mismatch = !qs.empty();
}
}
if (mismatch) {
Diag(FD->getLocation(), diag::err_main_arg_wrong) << i << Expected[i];
// TODO: suggest replacing given type with expected type
FD->setInvalidDecl(true);
}
}
if (nparams == 1 && !FD->isInvalidDecl()) {
Diag(FD->getLocation(), diag::warn_main_one_arg);
}
if (!FD->isInvalidDecl() && FD->getDescribedFunctionTemplate()) {
Diag(FD->getLocation(), diag::err_mainlike_template_decl) << FD;
FD->setInvalidDecl();
}
}
void Sema::CheckMSVCRTEntryPoint(FunctionDecl *FD) {
QualType T = FD->getType();
assert(T->isFunctionType() && "function decl is not of function type");
const FunctionType *FT = T->castAs<FunctionType>();
// Set an implicit return of 'zero' if the function can return some integral,
// enumeration, pointer or nullptr type.
if (FT->getReturnType()->isIntegralOrEnumerationType() ||
FT->getReturnType()->isAnyPointerType() ||
FT->getReturnType()->isNullPtrType())
// DllMain is exempt because a return value of zero means it failed.
if (FD->getName() != "DllMain")
FD->setHasImplicitReturnZero(true);
if (!FD->isInvalidDecl() && FD->getDescribedFunctionTemplate()) {
Diag(FD->getLocation(), diag::err_mainlike_template_decl) << FD;
FD->setInvalidDecl();
}
}
bool Sema::CheckForConstantInitializer(Expr *Init, QualType DclT) {
// FIXME: Need strict checking. In C89, we need to check for
// any assignments, increments, decrements, function calls, or
// commas outside of a sizeof. In C99, it's the same list,
// except that the aforementioned are allowed in unevaluated
// expressions. Everything else falls under the
// "may accept other forms of constant expressions" exception.
// (We never end up here for C++, so the constant expression
// rules there don't matter.)
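// Illustration (hypothetical, C only):
//   static int g = f();  // diagnosed: the initializer is not constant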
const Expr *Culprit;
if (Init->isConstantInitializer(Context, false, &Culprit))
return false;
Diag(Culprit->getExprLoc(), diag::err_init_element_not_constant)
<< Culprit->getSourceRange();
return true;
}
namespace {
// Visits an initialization expression to see if OrigDecl is evaluated in
// its own initialization and emits a warning if it is.
class SelfReferenceChecker
: public EvaluatedExprVisitor<SelfReferenceChecker> {
Sema &S;
Decl *OrigDecl;
bool isRecordType;
bool isPODType;
bool isReferenceType;
bool isInitList;
llvm::SmallVector<unsigned, 4> InitFieldIndex;
public:
typedef EvaluatedExprVisitor<SelfReferenceChecker> Inherited;
SelfReferenceChecker(Sema &S, Decl *OrigDecl) : Inherited(S.Context),
S(S), OrigDecl(OrigDecl) {
isPODType = false;
isRecordType = false;
isReferenceType = false;
isInitList = false;
if (ValueDecl *VD = dyn_cast<ValueDecl>(OrigDecl)) {
isPODType = VD->getType().isPODType(S.Context);
isRecordType = VD->getType()->isRecordType();
isReferenceType = VD->getType()->isReferenceType();
}
}
// For most expressions, just call the visitor. For initializer lists,
// track the index of the field being initialized, since fields are
// initialized in order, allowing use of previously initialized fields.
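// Illustration (a sketch):
//   struct P { int x, y; };
//   P p = { 1, p.x };  // ok: field 0 ('x') is initialized before use
//   P q = { q.y, 2 };  // warns: field 1 ('y') is not yet initialized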
void CheckExpr(Expr *E) {
InitListExpr *InitList = dyn_cast<InitListExpr>(E);
if (!InitList) {
Visit(E);
return;
}
// Track and increment the index here.
isInitList = true;
InitFieldIndex.push_back(0);
for (auto Child : InitList->children()) {
CheckExpr(cast<Expr>(Child));
++InitFieldIndex.back();
}
InitFieldIndex.pop_back();
}
// Returns true if MemberExpr is checked and no further checking is needed.
// Returns false if additional checking is required.
bool CheckInitListMemberExpr(MemberExpr *E, bool CheckReference) {
llvm::SmallVector<FieldDecl*, 4> Fields;
Expr *Base = E;
bool ReferenceField = false;
// Get the field members used.
while (MemberExpr *ME = dyn_cast<MemberExpr>(Base)) {
FieldDecl *FD = dyn_cast<FieldDecl>(ME->getMemberDecl());
if (!FD)
return false;
Fields.push_back(FD);
if (FD->getType()->isReferenceType())
ReferenceField = true;
Base = ME->getBase()->IgnoreParenImpCasts();
}
// Keep checking only if the base Decl is the same.
DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base);
if (!DRE || DRE->getDecl() != OrigDecl)
return false;
// A reference field can be bound to an uninitialized field.
if (CheckReference && !ReferenceField)
return true;
// Convert FieldDecls to their index number.
llvm::SmallVector<unsigned, 4> UsedFieldIndex;
for (const FieldDecl *I : llvm::reverse(Fields))
UsedFieldIndex.push_back(I->getFieldIndex());
// See if a warning is needed by checking the first difference in index
// numbers. If the field being used has a lower index than the field
// being initialized, the use is safe.
for (auto UsedIter = UsedFieldIndex.begin(),
UsedEnd = UsedFieldIndex.end(),
OrigIter = InitFieldIndex.begin(),
OrigEnd = InitFieldIndex.end();
UsedIter != UsedEnd && OrigIter != OrigEnd; ++UsedIter, ++OrigIter) {
if (*UsedIter < *OrigIter)
return true;
if (*UsedIter > *OrigIter)
break;
}
// TODO: Add a different warning which will print the field names.
HandleDeclRefExpr(DRE);
return true;
}
// For most expressions, the cast is directly above the DeclRefExpr.
// For conditional operators, the cast can be outside the conditional
// operator if both expressions are DeclRefExpr's.
void HandleValue(Expr *E) {
E = E->IgnoreParens();
if (DeclRefExpr* DRE = dyn_cast<DeclRefExpr>(E)) {
HandleDeclRefExpr(DRE);
return;
}
if (ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
Visit(CO->getCond());
HandleValue(CO->getTrueExpr());
HandleValue(CO->getFalseExpr());
return;
}
if (BinaryConditionalOperator *BCO =
dyn_cast<BinaryConditionalOperator>(E)) {
Visit(BCO->getCond());
HandleValue(BCO->getFalseExpr());
return;
}
if (OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(E)) {
HandleValue(OVE->getSourceExpr());
return;
}
if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
if (BO->getOpcode() == BO_Comma) {
Visit(BO->getLHS());
HandleValue(BO->getRHS());
return;
}
}
if (isa<MemberExpr>(E)) {
if (isInitList) {
if (CheckInitListMemberExpr(cast<MemberExpr>(E),
false /*CheckReference*/))
return;
}
Expr *Base = E->IgnoreParenImpCasts();
while (MemberExpr *ME = dyn_cast<MemberExpr>(Base)) {
// Check for static member variables and don't warn on them.
if (!isa<FieldDecl>(ME->getMemberDecl()))
return;
Base = ME->getBase()->IgnoreParenImpCasts();
}
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base))
HandleDeclRefExpr(DRE);
return;
}
Visit(E);
}
// Reference types not handled in HandleValue are handled here since all
// uses of references are bad, not just r-value uses.
void VisitDeclRefExpr(DeclRefExpr *E) {
if (isReferenceType)
HandleDeclRefExpr(E);
}
void VisitImplicitCastExpr(ImplicitCastExpr *E) {
if (E->getCastKind() == CK_LValueToRValue) {
HandleValue(E->getSubExpr());
return;
}
Inherited::VisitImplicitCastExpr(E);
}
void VisitMemberExpr(MemberExpr *E) {
if (isInitList) {
if (CheckInitListMemberExpr(E, true /*CheckReference*/))
return;
}
// Don't warn on arrays since they can be treated as pointers.
if (E->getType()->canDecayToPointerType()) return;
// Warn when a non-static method call is followed by non-static member
// field accesses, which are followed by a DeclRefExpr.
CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(E->getMemberDecl());
bool Warn = (MD && !MD->isStatic());
Expr *Base = E->getBase()->IgnoreParenImpCasts();
while (MemberExpr *ME = dyn_cast<MemberExpr>(Base)) {
if (!isa<FieldDecl>(ME->getMemberDecl()))
Warn = false;
Base = ME->getBase()->IgnoreParenImpCasts();
}
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
if (Warn)
HandleDeclRefExpr(DRE);
return;
}
// The base of a MemberExpr is neither a MemberExpr nor a DeclRefExpr.
// Visit that expression.
Visit(Base);
}
void VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
Expr *Callee = E->getCallee();
if (isa<UnresolvedLookupExpr>(Callee))
return Inherited::VisitCXXOperatorCallExpr(E);
Visit(Callee);
for (auto Arg: E->arguments())
HandleValue(Arg->IgnoreParenImpCasts());
}
void VisitUnaryOperator(UnaryOperator *E) {
// For a POD record type, taking the address of its own members is well-defined.
if (E->getOpcode() == UO_AddrOf && isRecordType &&
isa<MemberExpr>(E->getSubExpr()->IgnoreParens())) {
if (!isPODType)
HandleValue(E->getSubExpr());
return;
}
if (E->isIncrementDecrementOp()) {
HandleValue(E->getSubExpr());
return;
}
Inherited::VisitUnaryOperator(E);
}
void VisitObjCMessageExpr(ObjCMessageExpr *E) {}
void VisitCXXConstructExpr(CXXConstructExpr *E) {
if (E->getConstructor()->isCopyConstructor()) {
Expr *ArgExpr = E->getArg(0);
if (InitListExpr *ILE = dyn_cast<InitListExpr>(ArgExpr))
if (ILE->getNumInits() == 1)
ArgExpr = ILE->getInit(0);
if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgExpr))
if (ICE->getCastKind() == CK_NoOp)
ArgExpr = ICE->getSubExpr();
HandleValue(ArgExpr);
return;
}
Inherited::VisitCXXConstructExpr(E);
}
void VisitCallExpr(CallExpr *E) {
// Treat std::move as a use.
if (E->isCallToStdMove()) {
HandleValue(E->getArg(0));
return;
}
Inherited::VisitCallExpr(E);
}
void VisitBinaryOperator(BinaryOperator *E) {
if (E->isCompoundAssignmentOp()) {
HandleValue(E->getLHS());
Visit(E->getRHS());
return;
}
Inherited::VisitBinaryOperator(E);
}
// A custom visitor for BinaryConditionalOperator is needed because the
// regular visitor would check the condition and true expression separately,
// but both point to the same place, giving duplicate diagnostics.
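// e.g. 'int x = x ?: 0;' (a GNU extension): the condition and the "true"
// expression are the same node, so visiting both would diagnose twice.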
void VisitBinaryConditionalOperator(BinaryConditionalOperator *E) {
Visit(E->getCond());
Visit(E->getFalseExpr());
}
void HandleDeclRefExpr(DeclRefExpr *DRE) {
Decl* ReferenceDecl = DRE->getDecl();
if (OrigDecl != ReferenceDecl) return;
unsigned diag;
if (isReferenceType) {
diag = diag::warn_uninit_self_reference_in_reference_init;
} else if (cast<VarDecl>(OrigDecl)->isStaticLocal()) {
diag = diag::warn_static_self_reference_in_init;
} else if (isa<TranslationUnitDecl>(OrigDecl->getDeclContext()) ||
isa<NamespaceDecl>(OrigDecl->getDeclContext()) ||
DRE->getDecl()->getType()->isRecordType()) {
diag = diag::warn_uninit_self_reference_in_init;
} else {
// Local variables will be handled by the CFG analysis.
return;
}
S.DiagRuntimeBehavior(DRE->getBeginLoc(), DRE,
S.PDiag(diag)
<< DRE->getDecl() << OrigDecl->getLocation()
<< DRE->getSourceRange());
}
};
/// CheckSelfReference - Warns if OrigDecl is used in expression E.
static void CheckSelfReference(Sema &S, Decl* OrigDecl, Expr *E,
bool DirectInit) {
// Parameter arguments are occasionally constructed from themselves,
// for instance, in recursive functions. Skip them.
if (isa<ParmVarDecl>(OrigDecl))
return;
E = E->IgnoreParens();
// Skip checking 'T a = a' where T is not a record or reference type,
// since such deliberate self-initialization is an idiom for silencing
// uninitialized warnings.
if (!DirectInit && !cast<VarDecl>(OrigDecl)->getType()->isRecordType())
if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E))
if (ICE->getCastKind() == CK_LValueToRValue)
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr()))
if (DRE->getDecl() == OrigDecl)
return;
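// e.g. 'int x = x;' takes the early return above, while 'S s = s;' for a
// record type S is still checked.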
SelfReferenceChecker(S, OrigDecl).CheckExpr(E);
}
} // end anonymous namespace
namespace {
// Simple wrapper to add the name of a variable or (if no variable is
// available) a DeclarationName into a diagnostic.
struct VarDeclOrName {
VarDecl *VDecl;
DeclarationName Name;
friend const Sema::SemaDiagnosticBuilder &
operator<<(const Sema::SemaDiagnosticBuilder &Diag, VarDeclOrName VN) {
return VN.VDecl ? Diag << VN.VDecl : Diag << VN.Name;
}
};
} // end anonymous namespace
QualType Sema::deduceVarTypeFromInitializer(VarDecl *VDecl,
DeclarationName Name, QualType Type,
TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init) {
bool IsInitCapture = !VDecl;
assert((!VDecl || !VDecl->isInitCapture()) &&
"init captures are expected to be deduced prior to initialization");
VarDeclOrName VN{VDecl, Name};
DeducedType *Deduced = Type->getContainedDeducedType();
assert(Deduced && "deduceVarTypeFromInitializer for non-deduced type");
// C++11 [dcl.spec.auto]p3
if (!Init) {
assert(VDecl && "no init for init capture deduction?");
// Except for class template argument deduction, and then only for an
// initializing declaration, i.e. not static at class scope or extern.
if (!isa<DeducedTemplateSpecializationType>(Deduced) ||
VDecl->hasExternalStorage() ||
VDecl->isStaticDataMember()) {
Diag(VDecl->getLocation(), diag::err_auto_var_requires_init)
<< VDecl->getDeclName() << Type;
return QualType();
}
}
ArrayRef<Expr*> DeduceInits;
if (Init)
DeduceInits = Init;
if (DirectInit) {
if (auto *PL = dyn_cast_or_null<ParenListExpr>(Init))
DeduceInits = PL->exprs();
}
if (isa<DeducedTemplateSpecializationType>(Deduced)) {
assert(VDecl && "non-auto type for init capture deduction?");
InitializedEntity Entity = InitializedEntity::InitializeVariable(VDecl);
InitializationKind Kind = InitializationKind::CreateForInit(
VDecl->getLocation(), DirectInit, Init);
// FIXME: Initialization should not be taking a mutable list of inits.
SmallVector<Expr*, 8> InitsCopy(DeduceInits.begin(), DeduceInits.end());
return DeduceTemplateSpecializationFromInitializer(TSI, Entity, Kind,
InitsCopy);
}
if (DirectInit) {
if (auto *IL = dyn_cast<InitListExpr>(Init))
DeduceInits = IL->inits();
}
// Deduction only works if we have exactly one source expression.
if (DeduceInits.empty()) {
// It isn't possible to write this directly, but it is possible to
// end up in this situation with "auto x(some_pack...);"
Diag(Init->getBeginLoc(), IsInitCapture
? diag::err_init_capture_no_expression
: diag::err_auto_var_init_no_expression)
<< VN << Type << Range;
return QualType();
}
if (DeduceInits.size() > 1) {
Diag(DeduceInits[1]->getBeginLoc(),
IsInitCapture ? diag::err_init_capture_multiple_expressions
: diag::err_auto_var_init_multiple_expressions)
<< VN << Type << Range;
return QualType();
}
Expr *DeduceInit = DeduceInits[0];
if (DirectInit && isa<InitListExpr>(DeduceInit)) {
Diag(Init->getBeginLoc(), IsInitCapture
? diag::err_init_capture_paren_braces
: diag::err_auto_var_init_paren_braces)
<< isa<InitListExpr>(Init) << VN << Type << Range;
return QualType();
}
// Expressions default to 'id' when we're in a debugger.
bool DefaultedAnyToId = false;
if (getLangOpts().DebuggerCastResultToId &&
Init->getType() == Context.UnknownAnyTy && !IsInitCapture) {
ExprResult Result = forceUnknownAnyToType(Init, Context.getObjCIdType());
if (Result.isInvalid()) {
return QualType();
}
Init = Result.get();
DefaultedAnyToId = true;
}
// C++ [dcl.decomp]p1:
// If the assignment-expression [...] has array type A and no ref-qualifier
// is present, e has type cv A
if (VDecl && isa<DecompositionDecl>(VDecl) &&
Context.hasSameUnqualifiedType(Type, Context.getAutoDeductType()) &&
DeduceInit->getType()->isConstantArrayType())
return Context.getQualifiedType(DeduceInit->getType(),
Type.getQualifiers());
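// Illustration (a sketch):
//   int arr[3] = {1, 2, 3};
//   auto [a, b, c] = arr;  // the invented variable has type int[3]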
QualType DeducedType;
if (DeduceAutoType(TSI, DeduceInit, DeducedType) == DAR_Failed) {
if (!IsInitCapture)
DiagnoseAutoDeductionFailure(VDecl, DeduceInit);
else if (isa<InitListExpr>(Init))
Diag(Range.getBegin(),
diag::err_init_capture_deduction_failure_from_init_list)
<< VN
<< (DeduceInit->getType().isNull() ? TSI->getType()
: DeduceInit->getType())
<< DeduceInit->getSourceRange();
else
Diag(Range.getBegin(), diag::err_init_capture_deduction_failure)
<< VN << TSI->getType()
<< (DeduceInit->getType().isNull() ? TSI->getType()
: DeduceInit->getType())
<< DeduceInit->getSourceRange();
}
// Warn if we deduced 'id'. 'auto' usually implies type-safety, but using
// 'id' instead of a specific object type prevents most of our usual
// checks.
// We only want to warn outside of template instantiations, though:
// inside a template, the 'id' could have come from a parameter.
if (!inTemplateInstantiation() && !DefaultedAnyToId && !IsInitCapture &&
!DeducedType.isNull() && DeducedType->isObjCIdType()) {
SourceLocation Loc = TSI->getTypeLoc().getBeginLoc();
Diag(Loc, diag::warn_auto_var_is_id) << VN << Range;
}
return DeducedType;
}
bool Sema::DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init) {
QualType DeducedType = deduceVarTypeFromInitializer(
VDecl, VDecl->getDeclName(), VDecl->getType(), VDecl->getTypeSourceInfo(),
VDecl->getSourceRange(), DirectInit, Init);
if (DeducedType.isNull()) {
VDecl->setInvalidDecl();
return true;
}
VDecl->setType(DeducedType);
assert(VDecl->isLinkageValid());
// In ARC, infer lifetime.
if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(VDecl))
VDecl->setInvalidDecl();
// If this is a redeclaration, check that the type we just deduced matches
// the previously declared type.
if (VarDecl *Old = VDecl->getPreviousDecl()) {
// We never need to merge the type, because we cannot form an incomplete
// array of auto, nor deduce such a type.
MergeVarDeclTypes(VDecl, Old, /*MergeTypeWithPrevious*/ false);
}
// Check the deduced type is valid for a variable declaration.
CheckVariableDeclarationType(VDecl);
return VDecl->isInvalidDecl();
}
void Sema::checkNonTrivialCUnionInInitializer(const Expr *Init,
SourceLocation Loc) {
if (auto *CE = dyn_cast<ConstantExpr>(Init))
Init = CE->getSubExpr();
QualType InitType = Init->getType();
assert((InitType.hasNonTrivialToPrimitiveDefaultInitializeCUnion() ||
InitType.hasNonTrivialToPrimitiveCopyCUnion()) &&
"shouldn't be called if type doesn't have a non-trivial C struct");
if (auto *ILE = dyn_cast<InitListExpr>(Init)) {
for (auto I : ILE->inits()) {
if (!I->getType().hasNonTrivialToPrimitiveDefaultInitializeCUnion() &&
!I->getType().hasNonTrivialToPrimitiveCopyCUnion())
continue;
SourceLocation SL = I->getExprLoc();
checkNonTrivialCUnionInInitializer(I, SL.isValid() ? SL : Loc);
}
return;
}
if (isa<ImplicitValueInitExpr>(Init)) {
if (InitType.hasNonTrivialToPrimitiveDefaultInitializeCUnion())
checkNonTrivialCUnion(InitType, Loc, NTCUC_DefaultInitializedObject,
NTCUK_Init);
} else {
// Assume all other explicit initializers involve copying some existing
// object.
// TODO: ignore any explicit initializers where we can guarantee
// copy-elision.
if (InitType.hasNonTrivialToPrimitiveCopyCUnion())
checkNonTrivialCUnion(InitType, Loc, NTCUC_CopyInit, NTCUK_Copy);
}
}
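// Illustration of a non-trivial C union under ARC (hypothetical):
//   union U { __strong id obj; int i; };
// Default-initializing, copying, or destroying an object of type U is
// non-trivial, so such uses are diagnosed by the visitors below.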
namespace {
struct DiagNonTrivalCUnionDefaultInitializeVisitor
: DefaultInitializedTypeVisitor<DiagNonTrivalCUnionDefaultInitializeVisitor,
void> {
using Super =
DefaultInitializedTypeVisitor<DiagNonTrivalCUnionDefaultInitializeVisitor,
void>;
DiagNonTrivalCUnionDefaultInitializeVisitor(
QualType OrigTy, SourceLocation OrigLoc,
Sema::NonTrivialCUnionContext UseContext, Sema &S)
: OrigTy(OrigTy), OrigLoc(OrigLoc), UseContext(UseContext), S(S) {}
void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType QT,
const FieldDecl *FD, bool InNonTrivialUnion) {
if (const auto *AT = S.Context.getAsArrayType(QT))
return this->asDerived().visit(S.Context.getBaseElementType(AT), FD,
InNonTrivialUnion);
return Super::visitWithKind(PDIK, QT, FD, InNonTrivialUnion);
}
void visitARCStrong(QualType QT, const FieldDecl *FD,
bool InNonTrivialUnion) {
if (InNonTrivialUnion)
S.Diag(FD->getLocation(), diag::note_non_trivial_c_union)
<< 1 << 0 << QT << FD->getName();
}
void visitARCWeak(QualType QT, const FieldDecl *FD, bool InNonTrivialUnion) {
if (InNonTrivialUnion)
S.Diag(FD->getLocation(), diag::note_non_trivial_c_union)
<< 1 << 0 << QT << FD->getName();
}
void visitStruct(QualType QT, const FieldDecl *FD, bool InNonTrivialUnion) {
const RecordDecl *RD = QT->castAs<RecordType>()->getDecl();
if (RD->isUnion()) {
if (OrigLoc.isValid()) {
bool IsUnion = false;
if (auto *OrigRD = OrigTy->getAsRecordDecl())
IsUnion = OrigRD->isUnion();
S.Diag(OrigLoc, diag::err_non_trivial_c_union_in_invalid_context)
<< 0 << OrigTy << IsUnion << UseContext;
// Reset OrigLoc so that this diagnostic is emitted only once.
OrigLoc = SourceLocation();
}
InNonTrivialUnion = true;
}
if (InNonTrivialUnion)
S.Diag(RD->getLocation(), diag::note_non_trivial_c_union)
<< 0 << 0 << QT.getUnqualifiedType() << "";
for (const FieldDecl *FD : RD->fields())
asDerived().visit(FD->getType(), FD, InNonTrivialUnion);
}
void visitTrivial(QualType QT, const FieldDecl *FD, bool InNonTrivialUnion) {}
// The non-trivial C union type or the struct/union type that contains a
// non-trivial C union.
QualType OrigTy;
SourceLocation OrigLoc;
Sema::NonTrivialCUnionContext UseContext;
Sema &S;
};
struct DiagNonTrivalCUnionDestructedTypeVisitor
: DestructedTypeVisitor<DiagNonTrivalCUnionDestructedTypeVisitor, void> {
using Super =
DestructedTypeVisitor<DiagNonTrivalCUnionDestructedTypeVisitor, void>;
DiagNonTrivalCUnionDestructedTypeVisitor(
QualType OrigTy, SourceLocation OrigLoc,
Sema::NonTrivialCUnionContext UseContext, Sema &S)
: OrigTy(OrigTy), OrigLoc(OrigLoc), UseContext(UseContext), S(S) {}
void visitWithKind(QualType::DestructionKind DK, QualType QT,
const FieldDecl *FD, bool InNonTrivialUnion) {
if (const auto *AT = S.Context.getAsArrayType(QT))
return this->asDerived().visit(S.Context.getBaseElementType(AT), FD,
InNonTrivialUnion);
return Super::visitWithKind(DK, QT, FD, InNonTrivialUnion);
}
void visitARCStrong(QualType QT, const FieldDecl *FD,
bool InNonTrivialUnion) {
if (InNonTrivialUnion)
S.Diag(FD->getLocation(), diag::note_non_trivial_c_union)
<< 1 << 1 << QT << FD->getName();
}
void visitARCWeak(QualType QT, const FieldDecl *FD, bool InNonTrivialUnion) {
if (InNonTrivialUnion)
S.Diag(FD->getLocation(), diag::note_non_trivial_c_union)
<< 1 << 1 << QT << FD->getName();
}
void visitStruct(QualType QT, const FieldDecl *FD, bool InNonTrivialUnion) {
const RecordDecl *RD = QT->castAs<RecordType>()->getDecl();
if (RD->isUnion()) {
if (OrigLoc.isValid()) {
bool IsUnion = false;
if (auto *OrigRD = OrigTy->getAsRecordDecl())
IsUnion = OrigRD->isUnion();
S.Diag(OrigLoc, diag::err_non_trivial_c_union_in_invalid_context)
<< 1 << OrigTy << IsUnion << UseContext;
// Reset OrigLoc so that this diagnostic is emitted only once.
OrigLoc = SourceLocation();
}
InNonTrivialUnion = true;
}
if (InNonTrivialUnion)
S.Diag(RD->getLocation(), diag::note_non_trivial_c_union)
<< 0 << 1 << QT.getUnqualifiedType() << "";
for (const FieldDecl *FD : RD->fields())
asDerived().visit(FD->getType(), FD, InNonTrivialUnion);
}
void visitTrivial(QualType QT, const FieldDecl *FD, bool InNonTrivialUnion) {}
void visitCXXDestructor(QualType QT, const FieldDecl *FD,
bool InNonTrivialUnion) {}
// The non-trivial C union type or the struct/union type that contains a
// non-trivial C union.
QualType OrigTy;
SourceLocation OrigLoc;
Sema::NonTrivialCUnionContext UseContext;
Sema &S;
};
struct DiagNonTrivalCUnionCopyVisitor
: CopiedTypeVisitor<DiagNonTrivalCUnionCopyVisitor, false, void> {
using Super = CopiedTypeVisitor<DiagNonTrivalCUnionCopyVisitor, false, void>;
DiagNonTrivalCUnionCopyVisitor(QualType OrigTy, SourceLocation OrigLoc,
Sema::NonTrivialCUnionContext UseContext,
Sema &S)
: OrigTy(OrigTy), OrigLoc(OrigLoc), UseContext(UseContext), S(S) {}
void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType QT,
const FieldDecl *FD, bool InNonTrivialUnion) {
if (const auto *AT = S.Context.getAsArrayType(QT))
return this->asDerived().visit(S.Context.getBaseElementType(AT), FD,
InNonTrivialUnion);
return Super::visitWithKind(PCK, QT, FD, InNonTrivialUnion);
}
void visitARCStrong(QualType QT, const FieldDecl *FD,
bool InNonTrivialUnion) {
if (InNonTrivialUnion)
S.Diag(FD->getLocation(), diag::note_non_trivial_c_union)
<< 1 << 2 << QT << FD->getName();
}
void visitARCWeak(QualType QT, const FieldDecl *FD, bool InNonTrivialUnion) {
if (InNonTrivialUnion)
S.Diag(FD->getLocation(), diag::note_non_trivial_c_union)
<< 1 << 2 << QT << FD->getName();
}
void visitStruct(QualType QT, const FieldDecl *FD, bool InNonTrivialUnion) {
const RecordDecl *RD = QT->castAs<RecordType>()->getDecl();
if (RD->isUnion()) {
if (OrigLoc.isValid()) {
bool IsUnion = false;
if (auto *OrigRD = OrigTy->getAsRecordDecl())
IsUnion = OrigRD->isUnion();
S.Diag(OrigLoc, diag::err_non_trivial_c_union_in_invalid_context)
<< 2 << OrigTy << IsUnion << UseContext;
// Reset OrigLoc so that this diagnostic is emitted only once.
OrigLoc = SourceLocation();
}
InNonTrivialUnion = true;
}
if (InNonTrivialUnion)
S.Diag(RD->getLocation(), diag::note_non_trivial_c_union)
<< 0 << 2 << QT.getUnqualifiedType() << "";
for (const FieldDecl *FD : RD->fields())
asDerived().visit(FD->getType(), FD, InNonTrivialUnion);
}
void preVisit(QualType::PrimitiveCopyKind PCK, QualType QT,
const FieldDecl *FD, bool InNonTrivialUnion) {}
void visitTrivial(QualType QT, const FieldDecl *FD, bool InNonTrivialUnion) {}
void visitVolatileTrivial(QualType QT, const FieldDecl *FD,
bool InNonTrivialUnion) {}
// The non-trivial C union type or the struct/union type that contains a
// non-trivial C union.
QualType OrigTy;
SourceLocation OrigLoc;
Sema::NonTrivialCUnionContext UseContext;
Sema &S;
};
} // namespace
void Sema::checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind) {
assert((QT.hasNonTrivialToPrimitiveDefaultInitializeCUnion() ||
QT.hasNonTrivialToPrimitiveDestructCUnion() ||
QT.hasNonTrivialToPrimitiveCopyCUnion()) &&
"shouldn't be called if type doesn't have a non-trivial C union");
if ((NonTrivialKind & NTCUK_Init) &&
QT.hasNonTrivialToPrimitiveDefaultInitializeCUnion())
DiagNonTrivalCUnionDefaultInitializeVisitor(QT, Loc, UseContext, *this)
.visit(QT, nullptr, false);
if ((NonTrivialKind & NTCUK_Destruct) &&
QT.hasNonTrivialToPrimitiveDestructCUnion())
DiagNonTrivalCUnionDestructedTypeVisitor(QT, Loc, UseContext, *this)
.visit(QT, nullptr, false);
if ((NonTrivialKind & NTCUK_Copy) && QT.hasNonTrivialToPrimitiveCopyCUnion())
DiagNonTrivalCUnionCopyVisitor(QT, Loc, UseContext, *this)
.visit(QT, nullptr, false);
}
/// AddInitializerToDecl - Adds the initializer Init to the
/// declaration dcl. If DirectInit is true, this is C++ direct
/// initialization rather than copy initialization.
void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
// If there is no declaration, there was an error parsing it. Just ignore
// the initializer.
if (!RealDecl || RealDecl->isInvalidDecl()) {
CorrectDelayedTyposInExpr(Init, dyn_cast_or_null<VarDecl>(RealDecl));
return;
}
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(RealDecl)) {
// Pure-specifiers are handled in ActOnPureSpecifier.
Diag(Method->getLocation(), diag::err_member_function_initialization)
<< Method->getDeclName() << Init->getSourceRange();
Method->setInvalidDecl();
return;
}
VarDecl *VDecl = dyn_cast<VarDecl>(RealDecl);
if (!VDecl) {
assert(!isa<FieldDecl>(RealDecl) && "field init shouldn't get here");
Diag(RealDecl->getLocation(), diag::err_illegal_initializer);
RealDecl->setInvalidDecl();
return;
}
// C++11 [decl.spec.auto]p6. Deduce the type which 'auto' stands in for.
if (VDecl->getType()->isUndeducedType()) {
// Attempt typo correction early so that the type of the init expression can
// be deduced based on the chosen correction if the original init contains a
// TypoExpr.
ExprResult Res = CorrectDelayedTyposInExpr(Init, VDecl);
if (!Res.isUsable()) {
RealDecl->setInvalidDecl();
return;
}
Init = Res.get();
if (DeduceVariableDeclarationType(VDecl, DirectInit, Init))
return;
}
// dllimport cannot be used on variable definitions.
if (VDecl->hasAttr<DLLImportAttr>() && !VDecl->isStaticDataMember()) {
Diag(VDecl->getLocation(), diag::err_attribute_dllimport_data_definition);
VDecl->setInvalidDecl();
return;
}
if (VDecl->isLocalVarDecl() && VDecl->hasExternalStorage()) {
// C99 6.7.8p5. C++ has no such restriction, but that is a defect.
Diag(VDecl->getLocation(), diag::err_block_extern_cant_init);
VDecl->setInvalidDecl();
return;
}
if (!VDecl->getType()->isDependentType()) {
// A definition must end up with a complete type; the one exception is
// that an array type may be completed by the initializer. Note that
// later code assumes this restriction.
QualType BaseDeclType = VDecl->getType();
if (const ArrayType *Array = Context.getAsIncompleteArrayType(BaseDeclType))
BaseDeclType = Array->getElementType();
if (RequireCompleteType(VDecl->getLocation(), BaseDeclType,
diag::err_typecheck_decl_incomplete_type)) {
RealDecl->setInvalidDecl();
return;
}
// The variable cannot have an abstract class type.
if (RequireNonAbstractType(VDecl->getLocation(), VDecl->getType(),
diag::err_abstract_type_in_decl,
AbstractVariableType))
VDecl->setInvalidDecl();
}
// If adding the initializer will turn this declaration into a definition,
// and we already have a definition for this variable, diagnose or otherwise
// handle the situation.
VarDecl *Def;
if ((Def = VDecl->getDefinition()) && Def != VDecl &&
(!VDecl->isStaticDataMember() || VDecl->isOutOfLine()) &&
!VDecl->isThisDeclarationADemotedDefinition() &&
checkVarDeclRedefinition(Def, VDecl))
return;
if (getLangOpts().CPlusPlus) {
// C++ [class.static.data]p4
// If a static data member is of const integral or const
// enumeration type, its declaration in the class definition can
// specify a constant-initializer which shall be an integral
// constant expression (5.19). In that case, the member can appear
// in integral constant expressions. The member shall still be
// defined in a namespace scope if it is used in the program and the
// namespace scope definition shall not contain an initializer.
//
// We already performed a redefinition check above, but for static
// data members we also need to check whether there was an in-class
// declaration with an initializer.
if (VDecl->isStaticDataMember() && VDecl->getCanonicalDecl()->hasInit()) {
Diag(Init->getExprLoc(), diag::err_static_data_member_reinitialization)
<< VDecl->getDeclName();
Diag(VDecl->getCanonicalDecl()->getInit()->getExprLoc(),
diag::note_previous_initializer)
<< 0;
return;
}
if (VDecl->hasLocalStorage())
setFunctionHasBranchProtectedScope();
if (DiagnoseUnexpandedParameterPack(Init, UPPC_Initializer)) {
VDecl->setInvalidDecl();
return;
}
}
// OpenCL 1.1 6.5.2: "Variables allocated in the __local address space inside
// a kernel function cannot be initialized."
if (VDecl->getType().getAddressSpace() == LangAS::opencl_local) {
Diag(VDecl->getLocation(), diag::err_local_cant_init);
VDecl->setInvalidDecl();
return;
}
// Get the decl's type and save a reference for later, since
// CheckInitializerTypes may change it.
QualType DclT = VDecl->getType(), SavT = DclT;
// Expressions default to 'id' when we're in a debugger
// and we are assigning it to a variable of Objective-C pointer type.
if (getLangOpts().DebuggerCastResultToId && DclT->isObjCObjectPointerType() &&
Init->getType() == Context.UnknownAnyTy) {
ExprResult Result = forceUnknownAnyToType(Init, Context.getObjCIdType());
if (Result.isInvalid()) {
VDecl->setInvalidDecl();
return;
}
Init = Result.get();
}
// Perform the initialization.
ParenListExpr *CXXDirectInit = dyn_cast<ParenListExpr>(Init);
if (!VDecl->isInvalidDecl()) {
InitializedEntity Entity = InitializedEntity::InitializeVariable(VDecl);
InitializationKind Kind = InitializationKind::CreateForInit(
VDecl->getLocation(), DirectInit, Init);
MultiExprArg Args = Init;
if (CXXDirectInit)
Args = MultiExprArg(CXXDirectInit->getExprs(),
CXXDirectInit->getNumExprs());
// Try to correct any TypoExprs in the initialization arguments.
for (size_t Idx = 0; Idx < Args.size(); ++Idx) {
ExprResult Res = CorrectDelayedTyposInExpr(
Args[Idx], VDecl, [this, Entity, Kind](Expr *E) {
InitializationSequence Init(*this, Entity, Kind, MultiExprArg(E));
return Init.Failed() ? ExprError() : E;
});
if (Res.isInvalid()) {
VDecl->setInvalidDecl();
} else if (Res.get() != Args[Idx]) {
Args[Idx] = Res.get();
}
}
if (VDecl->isInvalidDecl())
return;
InitializationSequence InitSeq(*this, Entity, Kind, Args,
/*TopLevelOfInitList=*/false,
/*TreatUnavailableAsInvalid=*/false);
ExprResult Result = InitSeq.Perform(*this, Entity, Kind, Args, &DclT);
if (Result.isInvalid()) {
VDecl->setInvalidDecl();
return;
}
Init = Result.getAs<Expr>();
}
// Check for self-references within variable initializers.
// Variables declared within a function/method body (except for references)
// are handled by a dataflow analysis.
// This is undefined behavior in C++, but valid in C.
if (getLangOpts().CPlusPlus) {
if (!VDecl->hasLocalStorage() || VDecl->getType()->isRecordType() ||
VDecl->getType()->isReferenceType()) {
CheckSelfReference(*this, RealDecl, Init, DirectInit);
}
}
// If the type changed, it means we had an incomplete type that was
// completed by the initializer. For example:
// int ary[] = { 1, 3, 5 };
// "ary" transitions from an IncompleteArrayType to a ConstantArrayType.
if (!VDecl->isInvalidDecl() && (DclT != SavT))
VDecl->setType(DclT);
if (!VDecl->isInvalidDecl()) {
checkUnsafeAssigns(VDecl->getLocation(), VDecl->getType(), Init);
if (VDecl->hasAttr<BlocksAttr>())
checkRetainCycles(VDecl, Init);
// It is safe to assign a weak reference into a strong variable.
// Although this code can still have problems:
// id x = self.weakProp;
// id y = self.weakProp;
// we do not warn, to avoid warning spuriously when 'x' and 'y' are on separate
// paths through the function. This should be revisited if
// -Wrepeated-use-of-weak is made flow-sensitive.
if (FunctionScopeInfo *FSI = getCurFunction())
if ((VDecl->getType().getObjCLifetime() == Qualifiers::OCL_Strong ||
VDecl->getType().isNonWeakInMRRWithObjCWeak(Context)) &&
!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak,
Init->getBeginLoc()))
FSI->markSafeWeakUse(Init);
}
// The initialization is usually a full-expression.
//
// FIXME: If this is a braced initialization of an aggregate, it is not
// an expression, and each individual field initializer is a separate
// full-expression. For instance, in:
//
// struct Temp { ~Temp(); };
// struct S { S(Temp); };
// struct T { S a, b; } t = { Temp(), Temp() };
//
// we should destroy the first Temp before constructing the second.
ExprResult Result =
ActOnFinishFullExpr(Init, VDecl->getLocation(),
/*DiscardedValue*/ false, VDecl->isConstexpr());
if (Result.isInvalid()) {
VDecl->setInvalidDecl();
return;
}
Init = Result.get();
// Attach the initializer to the decl.
VDecl->setInit(Init);
if (VDecl->isLocalVarDecl()) {
// Don't check the initializer if the declaration is malformed.
if (VDecl->isInvalidDecl()) {
// do nothing
// OpenCL v1.2 s6.5.3: __constant locals must be constant-initialized.
// This is true even in C++ for OpenCL.
} else if (VDecl->getType().getAddressSpace() == LangAS::opencl_constant) {
CheckForConstantInitializer(Init, DclT);
// Otherwise, C++ does not restrict the initializer.
} else if (getLangOpts().CPlusPlus) {
// do nothing
// C99 6.7.8p4: All the expressions in an initializer for an object that has
// static storage duration shall be constant expressions or string literals.
} else if (VDecl->getStorageClass() == SC_Static) {
CheckForConstantInitializer(Init, DclT);
// C89 is stricter than C99 for aggregate initializers.
// C89 6.5.7p3: All the expressions [...] in an initializer list
// for an object that has aggregate or union type shall be
// constant expressions.
} else if (!getLangOpts().C99 && VDecl->getType()->isAggregateType() &&
isa<InitListExpr>(Init)) {
const Expr *Culprit;
if (!Init->isConstantInitializer(Context, false, &Culprit)) {
Diag(Culprit->getExprLoc(),
diag::ext_aggregate_init_not_constant)
<< Culprit->getSourceRange();
}
}
if (auto *E = dyn_cast<ExprWithCleanups>(Init))
if (auto *BE = dyn_cast<BlockExpr>(E->getSubExpr()->IgnoreParens()))
if (VDecl->hasLocalStorage())
BE->getBlockDecl()->setCanAvoidCopyToHeap();
} else if (VDecl->isStaticDataMember() && !VDecl->isInline() &&
VDecl->getLexicalDeclContext()->isRecord()) {
// This is an in-class initialization for a static data member, e.g.,
//
// struct S {
// static const int value = 17;
// };
// C++ [class.mem]p4:
// A member-declarator can contain a constant-initializer only
// if it declares a static member (9.4) of const integral or
// const enumeration type, see 9.4.2.
//
// C++11 [class.static.data]p3:
// If a non-volatile non-inline const static data member is of integral
// or enumeration type, its declaration in the class definition can
// specify a brace-or-equal-initializer in which every initializer-clause
// that is an assignment-expression is a constant expression. A static
// data member of literal type can be declared in the class definition
// with the constexpr specifier; if so, its declaration shall specify a
// brace-or-equal-initializer in which every initializer-clause that is
// an assignment-expression is a constant expression.
// Do nothing on dependent types.
if (DclT->isDependentType()) {
// Allow any 'static constexpr' members, whether or not they are of literal
// type. We separately check that every constexpr variable is of literal
// type.
} else if (VDecl->isConstexpr()) {
// Require constness.
} else if (!DclT.isConstQualified()) {
Diag(VDecl->getLocation(), diag::err_in_class_initializer_non_const)
<< Init->getSourceRange();
VDecl->setInvalidDecl();
// We allow integer constant expressions in all cases.
} else if (DclT->isIntegralOrEnumerationType()) {
// Check whether the expression is a constant expression.
SourceLocation Loc;
if (getLangOpts().CPlusPlus11 && DclT.isVolatileQualified())
// In C++11, a non-constexpr const static data member with an
// in-class initializer cannot be volatile.
Diag(VDecl->getLocation(), diag::err_in_class_initializer_volatile);
else if (Init->isValueDependent())
; // Nothing to check.
else if (Init->isIntegerConstantExpr(Context, &Loc))
; // Ok, it's an ICE!
else if (Init->getType()->isScopedEnumeralType() &&
Init->isCXX11ConstantExpr(Context))
; // Ok, it is a scoped-enum constant expression.
else if (Init->isEvaluatable(Context)) {
// If we can constant fold the initializer through heroics, accept it,
// but report this as a use of an extension for -pedantic.
Diag(Loc, diag::ext_in_class_initializer_non_constant)
<< Init->getSourceRange();
} else {
// Otherwise, this is some crazy unknown case. Report the issue at the
// location provided by the failed isIntegerConstantExpr check.
Diag(Loc, diag::err_in_class_initializer_non_constant)
<< Init->getSourceRange();
VDecl->setInvalidDecl();
}
// We allow foldable floating-point constants as an extension.
} else if (DclT->isFloatingType()) { // also permits complex, which is ok
// In C++98, this is a GNU extension. In C++11, it is not, but we support
// it anyway and provide a fixit to add the 'constexpr'.
if (getLangOpts().CPlusPlus11) {
Diag(VDecl->getLocation(),
diag::ext_in_class_initializer_float_type_cxx11)
<< DclT << Init->getSourceRange();
Diag(VDecl->getBeginLoc(),
diag::note_in_class_initializer_float_type_cxx11)
<< FixItHint::CreateInsertion(VDecl->getBeginLoc(), "constexpr ");
} else {
Diag(VDecl->getLocation(), diag::ext_in_class_initializer_float_type)
<< DclT << Init->getSourceRange();
if (!Init->isValueDependent() && !Init->isEvaluatable(Context)) {
Diag(Init->getExprLoc(), diag::err_in_class_initializer_non_constant)
<< Init->getSourceRange();
VDecl->setInvalidDecl();
}
}
// Suggest adding 'constexpr' in C++11 for literal types.
} else if (getLangOpts().CPlusPlus11 && DclT->isLiteralType(Context)) {
Diag(VDecl->getLocation(), diag::err_in_class_initializer_literal_type)
<< DclT << Init->getSourceRange()
<< FixItHint::CreateInsertion(VDecl->getBeginLoc(), "constexpr ");
VDecl->setConstexpr(true);
} else {
Diag(VDecl->getLocation(), diag::err_in_class_initializer_bad_type)
<< DclT << Init->getSourceRange();
VDecl->setInvalidDecl();
}
} else if (VDecl->isFileVarDecl()) {
// In C, extern is typically used to avoid tentative definitions when
// declaring variables in headers, but adding an initializer makes it a
// definition. This is somewhat confusing, so GCC and Clang both warn on it.
// In C++, extern is often used to give implicitly static const variables
// external linkage, so don't warn in that case. If selectany is present,
// this might be header code intended for C and C++ inclusion, so apply the
// C++ rules.
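// e.g. in C (hypothetical): 'extern int x = 1;' triggers warn_extern_init.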
if (VDecl->getStorageClass() == SC_Extern &&
((!getLangOpts().CPlusPlus && !VDecl->hasAttr<SelectAnyAttr>()) ||
!Context.getBaseElementType(VDecl->getType()).isConstQualified()) &&
!(getLangOpts().CPlusPlus && VDecl->isExternC()) &&
!isTemplateInstantiation(VDecl->getTemplateSpecializationKind()))
Diag(VDecl->getLocation(), diag::warn_extern_init);
// In Microsoft C++ mode, a const variable defined in namespace scope has
// external linkage by default if the variable is declared with
// __declspec(dllexport).
if (Context.getTargetInfo().getCXXABI().isMicrosoft() &&
getLangOpts().CPlusPlus && VDecl->getType().isConstQualified() &&
VDecl->hasAttr<DLLExportAttr>() && VDecl->getDefinition())
VDecl->setStorageClass(SC_Extern);
// C99 6.7.8p4. All file scoped initializers need to be constant.
if (!getLangOpts().CPlusPlus && !VDecl->isInvalidDecl())
CheckForConstantInitializer(Init, DclT);
}
QualType InitType = Init->getType();
if (!InitType.isNull() &&
(InitType.hasNonTrivialToPrimitiveDefaultInitializeCUnion() ||
InitType.hasNonTrivialToPrimitiveCopyCUnion()))
checkNonTrivialCUnionInInitializer(Init, Init->getExprLoc());
// We will represent direct-initialization similarly to copy-initialization:
// int x(1); -as-> int x = 1;
// ClassType x(a,b,c); -as-> ClassType x = ClassType(a,b,c);
//
// Clients that want to distinguish between the two forms, can check for
// direct initializer using VarDecl::getInitStyle().
// A major benefit is that clients that don't particularly care which
// exact form it was (like CodeGen) can handle both cases without
// special-case code.
// C++ 8.5p11:
// The form of initialization (using parentheses or '=') is generally
// insignificant, but does matter when the entity being initialized has a
// class type.
if (CXXDirectInit) {
assert(DirectInit && "Call-style initializer must be direct init.");
VDecl->setInitStyle(VarDecl::CallInit);
} else if (DirectInit) {
// This must be list-initialization. No other way is direct-initialization.
VDecl->setInitStyle(VarDecl::ListInit);
}
CheckCompleteVariableDeclaration(VDecl);
}
/// ActOnInitializerError - Given that there was an error parsing an
/// initializer for the given declaration, try to return to some form
/// of sanity.
void Sema::ActOnInitializerError(Decl *D) {
// Our main concern here is re-establishing invariants like "a
// variable's type is either dependent or complete".
if (!D || D->isInvalidDecl()) return;
VarDecl *VD = dyn_cast<VarDecl>(D);
if (!VD) return;
// Bindings are not usable if we can't make sense of the initializer.
if (auto *DD = dyn_cast<DecompositionDecl>(D))
for (auto *BD : DD->bindings())
BD->setInvalidDecl();
// Auto types are meaningless if we can't make sense of the initializer.
if (ParsingInitForAutoVars.count(D)) {
D->setInvalidDecl();
return;
}
QualType Ty = VD->getType();
if (Ty->isDependentType()) return;
// Require a complete type.
if (RequireCompleteType(VD->getLocation(),
Context.getBaseElementType(Ty),
diag::err_typecheck_decl_incomplete_type)) {
VD->setInvalidDecl();
return;
}
// Require a non-abstract type.
if (RequireNonAbstractType(VD->getLocation(), Ty,
diag::err_abstract_type_in_decl,
AbstractVariableType)) {
VD->setInvalidDecl();
return;
}
// Don't bother complaining about constructors or destructors,
// though.
}
void Sema::ActOnUninitializedDecl(Decl *RealDecl) {
// If there is no declaration, there was an error parsing it. Just ignore it.
if (!RealDecl)
return;
if (VarDecl *Var = dyn_cast<VarDecl>(RealDecl)) {
QualType Type = Var->getType();
// C++1z [dcl.dcl]p1 grammar implies that an initializer is mandatory.
if (isa<DecompositionDecl>(RealDecl)) {
Diag(Var->getLocation(), diag::err_decomp_decl_requires_init) << Var;
Var->setInvalidDecl();
return;
}
if (Type->isUndeducedType() &&
DeduceVariableDeclarationType(Var, false, nullptr))
return;
// C++11 [class.static.data]p3: A static data member can be declared with
// the constexpr specifier; if so, its declaration shall specify
// a brace-or-equal-initializer.
// C++11 [dcl.constexpr]p1: The constexpr specifier shall be applied only to
// the definition of a variable [...] or the declaration of a static data
// member.
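// For example (illustrative):
//   struct S { static constexpr int N; };  // error before C++17: no initializer
//   extern constexpr int X;                // error: not a definition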
if (Var->isConstexpr() && !Var->isThisDeclarationADefinition() &&
!Var->isThisDeclarationADemotedDefinition()) {
if (Var->isStaticDataMember()) {
// C++1z removes the relevant rule; the in-class declaration is always
// a definition there.
if (!getLangOpts().CPlusPlus17) {
Diag(Var->getLocation(),
diag::err_constexpr_static_mem_var_requires_init)
<< Var->getDeclName();
Var->setInvalidDecl();
return;
}
} else {
Diag(Var->getLocation(), diag::err_invalid_constexpr_var_decl);
Var->setInvalidDecl();
return;
}
}
// OpenCL v1.1 s6.5.3: variables declared in the constant address space must
// be initialized.
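// For example (illustrative): '__constant int C;' is an error, while
// '__constant int C = 0;' and 'extern __constant int E;' are accepted.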
if (!Var->isInvalidDecl() &&
Var->getType().getAddressSpace() == LangAS::opencl_constant &&
Var->getStorageClass() != SC_Extern && !Var->getInit()) {
Diag(Var->getLocation(), diag::err_opencl_constant_no_init);
Var->setInvalidDecl();
return;
}
VarDecl::DefinitionKind DefKind = Var->isThisDeclarationADefinition();
if (!Var->isInvalidDecl() && DefKind != VarDecl::DeclarationOnly &&
Var->getType().hasNonTrivialToPrimitiveDefaultInitializeCUnion())
checkNonTrivialCUnion(Var->getType(), Var->getLocation(),
NTCUC_DefaultInitializedObject, NTCUK_Init);
switch (DefKind) {
case VarDecl::Definition:
if (!Var->isStaticDataMember() || !Var->getAnyInitializer())
break;
// We have an out-of-line definition of a static data member
// that has an in-class initializer, so we type-check this like
// a declaration.
//
LLVM_FALLTHROUGH;
case VarDecl::DeclarationOnly:
// It's only a declaration.
// Block scope. C99 6.7p7: If an identifier for an object is
// declared with no linkage (C99 6.2.2p6), the type for the
// object shall be complete.
if (!Type->isDependentType() && Var->isLocalVarDecl() &&
!Var->hasLinkage() && !Var->isInvalidDecl() &&
RequireCompleteType(Var->getLocation(), Type,
diag::err_typecheck_decl_incomplete_type))
Var->setInvalidDecl();
// Make sure that the type is not abstract.
if (!Type->isDependentType() && !Var->isInvalidDecl() &&
RequireNonAbstractType(Var->getLocation(), Type,
diag::err_abstract_type_in_decl,
AbstractVariableType))
Var->setInvalidDecl();
if (!Type->isDependentType() && !Var->isInvalidDecl() &&
Var->getStorageClass() == SC_PrivateExtern) {
Diag(Var->getLocation(), diag::warn_private_extern);
Diag(Var->getLocation(), diag::note_private_extern);
}
return;
case VarDecl::TentativeDefinition:
// File scope. C99 6.9.2p2: A declaration of an identifier for an
// object that has file scope without an initializer, and without a
// storage-class specifier or with the storage-class specifier "static",
// constitutes a tentative definition. Note: A tentative definition with
// external linkage is valid (C99 6.2.2p5).
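// For example (illustrative):
//   int n;      // tentative definition
//   int n = 3;  // definition; both declarations refer to the same object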
if (!Var->isInvalidDecl()) {
if (const IncompleteArrayType *ArrayT
= Context.getAsIncompleteArrayType(Type)) {
if (RequireCompleteType(Var->getLocation(),
ArrayT->getElementType(),
diag::err_illegal_decl_array_incomplete_type))
Var->setInvalidDecl();
} else if (Var->getStorageClass() == SC_Static) {
// C99 6.9.2p3: If the declaration of an identifier for an object is
// a tentative definition and has internal linkage (C99 6.2.2p3), the
// declared type shall not be an incomplete type.
// NOTE: code such as the following
// static struct s;
// struct s { int a; };
// is accepted by gcc. Hence here we issue a warning instead of
// an error and we do not invalidate the static declaration.
// NOTE: to avoid multiple warnings, only check the first declaration.
if (Var->isFirstDecl())
RequireCompleteType(Var->getLocation(), Type,
diag::ext_typecheck_decl_incomplete_type);
}
}
// Record the tentative definition; we're done.
if (!Var->isInvalidDecl())
TentativeDefinitions.push_back(Var);
return;
}
// Provide a specific diagnostic for uninitialized variable
// definitions with incomplete array type.
if (Type->isIncompleteArrayType()) {
Diag(Var->getLocation(),
diag::err_typecheck_incomplete_array_needs_initializer);
Var->setInvalidDecl();
return;
}
// Provide a specific diagnostic for uninitialized variable
// definitions with reference type.
if (Type->isReferenceType()) {
Diag(Var->getLocation(), diag::err_reference_var_requires_init)
<< Var->getDeclName()
<< SourceRange(Var->getLocation(), Var->getLocation());
Var->setInvalidDecl();
return;
}
// Do not attempt to type-check the default initializer for a
// variable with dependent type.
if (Type->isDependentType())
return;
if (Var->isInvalidDecl())
return;
if (!Var->hasAttr<AliasAttr>()) {
if (RequireCompleteType(Var->getLocation(),
Context.getBaseElementType(Type),
diag::err_typecheck_decl_incomplete_type)) {
Var->setInvalidDecl();
return;
}
} else {
return;
}
// The variable can not have an abstract class type.
if (RequireNonAbstractType(Var->getLocation(), Type,
diag::err_abstract_type_in_decl,
AbstractVariableType)) {
Var->setInvalidDecl();
return;
}
// Check for jumps past the implicit initializer. C++0x
// clarifies that this applies to a "variable with automatic
// storage duration", not a "local variable".
// C++11 [stmt.dcl]p3
// A program that jumps from a point where a variable with automatic
// storage duration is not in scope to a point where it is in scope is
// ill-formed unless the variable has scalar type, class type with a
// trivial default constructor and a trivial destructor, a cv-qualified
// version of one of these types, or an array of one of the preceding
// types and is declared without an initializer.
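// For example (illustrative, where 'NonTrivial' has a non-trivial ctor/dtor):
//   goto later;    // ill-formed: jumps into the scope of 'nt'
//   NonTrivial nt;
//   later: ;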
if (getLangOpts().CPlusPlus && Var->hasLocalStorage()) {
if (const RecordType *Record
= Context.getBaseElementType(Type)->getAs<RecordType>()) {
CXXRecordDecl *CXXRecord = cast<CXXRecordDecl>(Record->getDecl());
// Mark the function (if we're in one) for further checking even if the
// looser rules of C++11 do not require such checks, so that we can
// diagnose incompatibilities with C++98.
if (!CXXRecord->isPOD())
setFunctionHasBranchProtectedScope();
}
}
// In OpenCL, we can't initialize objects in the __local address space,
// even implicitly, so don't synthesize an implicit initializer.
if (getLangOpts().OpenCL &&
Var->getType().getAddressSpace() == LangAS::opencl_local)
return;
// C++03 [dcl.init]p9:
// If no initializer is specified for an object, and the
// object is of (possibly cv-qualified) non-POD class type (or
// array thereof), the object shall be default-initialized; if
// the object is of const-qualified type, the underlying class
// type shall have a user-declared default
// constructor. Otherwise, if no initializer is specified for
// a non-static object, the object and its subobjects, if
// any, have an indeterminate initial value; if the object
// or any of its subobjects are of const-qualified type, the
// program is ill-formed.
// C++0x [dcl.init]p11:
// If no initializer is specified for an object, the object is
// default-initialized; [...].
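// For example (illustrative):
//   const int c;        // ill-formed: const object left uninitialized
//   struct S { S(); };
//   const S s;          // OK: user-declared default constructor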
InitializedEntity Entity = InitializedEntity::InitializeVariable(Var);
InitializationKind Kind
= InitializationKind::CreateDefault(Var->getLocation());
InitializationSequence InitSeq(*this, Entity, Kind, None);
ExprResult Init = InitSeq.Perform(*this, Entity, Kind, None);
if (Init.isInvalid())
Var->setInvalidDecl();
else if (Init.get()) {
Var->setInit(MaybeCreateExprWithCleanups(Init.get()));
// This is important for template substitution.
Var->setInitStyle(VarDecl::CallInit);
}
CheckCompleteVariableDeclaration(Var);
}
}
void Sema::ActOnCXXForRangeDecl(Decl *D) {
// If there is no declaration, there was an error parsing it. Ignore it.
if (!D)
return;
VarDecl *VD = dyn_cast<VarDecl>(D);
if (!VD) {
Diag(D->getLocation(), diag::err_for_range_decl_must_be_var);
D->setInvalidDecl();
return;
}
VD->setCXXForRangeDecl(true);
// for-range-declaration cannot be given a storage class specifier.
int Error = -1;
switch (VD->getStorageClass()) {
case SC_None:
break;
case SC_Extern:
Error = 0;
break;
case SC_Static:
Error = 1;
break;
case SC_PrivateExtern:
Error = 2;
break;
case SC_Auto:
Error = 3;
break;
case SC_Register:
Error = 4;
break;
}
if (Error != -1) {
Diag(VD->getOuterLocStart(), diag::err_for_range_storage_class)
<< VD->getDeclName() << Error;
D->setInvalidDecl();
}
}
StmtResult
Sema::ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd) {
// C++1y [stmt.iter]p1:
// A range-based for statement of the form
// for ( for-range-identifier : for-range-initializer ) statement
// is equivalent to
// for ( auto&& for-range-identifier : for-range-initializer ) statement
DeclSpec DS(Attrs.getPool().getFactory());
const char *PrevSpec;
unsigned DiagID;
DS.SetTypeSpecType(DeclSpec::TST_auto, IdentLoc, PrevSpec, DiagID,
getPrintingPolicy());
Declarator D(DS, DeclaratorContext::ForContext);
D.SetIdentifier(Ident, IdentLoc);
D.takeAttributes(Attrs, AttrEnd);
D.AddTypeInfo(DeclaratorChunk::getReference(0, IdentLoc, /*lvalue*/ false),
IdentLoc);
Decl *Var = ActOnDeclarator(S, D);
cast<VarDecl>(Var)->setCXXForRangeDecl(true);
FinalizeDeclaration(Var);
return ActOnDeclStmt(FinalizeDeclaratorGroup(S, DS, Var), IdentLoc,
AttrEnd.isValid() ? AttrEnd : IdentLoc);
}
void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
if (var->isInvalidDecl()) return;
if (getLangOpts().OpenCL) {
// OpenCL v2.0 s6.12.5 - Every block variable declaration must have an
// initialiser
if (var->getTypeSourceInfo()->getType()->isBlockPointerType() &&
!var->hasInit()) {
Diag(var->getLocation(), diag::err_opencl_invalid_block_declaration)
<< 1 /*Init*/;
var->setInvalidDecl();
return;
}
}
// In Objective-C, don't allow jumps past the implicit initialization of a
// local retaining variable.
if (getLangOpts().ObjC &&
var->hasLocalStorage()) {
switch (var->getType().getObjCLifetime()) {
case Qualifiers::OCL_None:
case Qualifiers::OCL_ExplicitNone:
case Qualifiers::OCL_Autoreleasing:
break;
case Qualifiers::OCL_Weak:
case Qualifiers::OCL_Strong:
setFunctionHasBranchProtectedScope();
break;
}
}
if (var->hasLocalStorage() &&
var->getType().isDestructedType() == QualType::DK_nontrivial_c_struct)
setFunctionHasBranchProtectedScope();
// Warn about externally-visible variables being defined without a
// prior declaration. We only want to do this for global
// declarations, but we also specifically need to avoid doing it for
// class members because the linkage of an anonymous class can
// change if it's later given a typedef name.
if (var->isThisDeclarationADefinition() &&
var->getDeclContext()->getRedeclContext()->isFileContext() &&
var->isExternallyVisible() && var->hasLinkage() &&
!var->isInline() && !var->getDescribedVarTemplate() &&
!isTemplateInstantiation(var->getTemplateSpecializationKind()) &&
!getDiagnostics().isIgnored(diag::warn_missing_variable_declarations,
var->getLocation())) {
// Find a previous declaration that's not a definition.
VarDecl *prev = var->getPreviousDecl();
while (prev && prev->isThisDeclarationADefinition())
prev = prev->getPreviousDecl();
if (!prev) {
Diag(var->getLocation(), diag::warn_missing_variable_declarations) << var;
Diag(var->getTypeSpecStartLoc(), diag::note_static_for_internal_linkage)
<< /* variable */ 0;
}
}
// Cache the result of checking for constant initialization.
Optional<bool> CacheHasConstInit;
const Expr *CacheCulprit = nullptr;
auto checkConstInit = [&]() mutable {
if (!CacheHasConstInit)
CacheHasConstInit = var->getInit()->isConstantInitializer(
Context, var->getType()->isReferenceType(), &CacheCulprit);
return *CacheHasConstInit;
};
if (var->getTLSKind() == VarDecl::TLS_Static) {
if (var->getType().isDestructedType()) {
// GNU C++98 edits for __thread, [basic.start.term]p3:
// The type of an object with thread storage duration shall not
// have a non-trivial destructor.
Diag(var->getLocation(), diag::err_thread_nontrivial_dtor);
if (getLangOpts().CPlusPlus11)
Diag(var->getLocation(), diag::note_use_thread_local);
} else if (getLangOpts().CPlusPlus && var->hasInit()) {
if (!checkConstInit()) {
// GNU C++98 edits for __thread, [basic.start.init]p4:
// An object of thread storage duration shall not require dynamic
// initialization.
// FIXME: Need strict checking here.
Diag(CacheCulprit->getExprLoc(), diag::err_thread_dynamic_init)
<< CacheCulprit->getSourceRange();
if (getLangOpts().CPlusPlus11)
Diag(var->getLocation(), diag::note_use_thread_local);
}
}
}
// Apply section attributes and pragmas to global variables.
bool GlobalStorage = var->hasGlobalStorage();
if (GlobalStorage && var->isThisDeclarationADefinition() &&
!inTemplateInstantiation()) {
PragmaStack<StringLiteral *> *Stack = nullptr;
int SectionFlags = ASTContext::PSF_Implicit | ASTContext::PSF_Read;
if (var->getType().isConstQualified())
Stack = &ConstSegStack;
else if (!var->getInit()) {
Stack = &BSSSegStack;
SectionFlags |= ASTContext::PSF_Write;
} else {
Stack = &DataSegStack;
SectionFlags |= ASTContext::PSF_Write;
}
if (Stack->CurrentValue && !var->hasAttr<SectionAttr>()) {
var->addAttr(SectionAttr::CreateImplicit(
Context, SectionAttr::Declspec_allocate,
Stack->CurrentValue->getString(), Stack->CurrentPragmaLocation));
}
if (const SectionAttr *SA = var->getAttr<SectionAttr>())
if (UnifySection(SA->getName(), SectionFlags, var))
var->dropAttr<SectionAttr>();
// Apply the init_seg attribute if this has an initializer. If the
// initializer turns out to not be dynamic, we'll end up ignoring this
// attribute.
if (CurInitSeg && var->getInit())
var->addAttr(InitSegAttr::CreateImplicit(Context, CurInitSeg->getString(),
CurInitSegLoc));
}
// All the following checks are C++ only.
if (!getLangOpts().CPlusPlus) {
// If this variable must be emitted, add it as an initializer for the
// current module.
if (Context.DeclMustBeEmitted(var) && !ModuleScopes.empty())
Context.addModuleInitializer(ModuleScopes.back().Module, var);
return;
}
if (auto *DD = dyn_cast<DecompositionDecl>(var))
CheckCompleteDecompositionDeclaration(DD);
QualType type = var->getType();
if (type->isDependentType()) return;
if (var->hasAttr<BlocksAttr>())
getCurFunction()->addByrefBlockVar(var);
Expr *Init = var->getInit();
bool IsGlobal = GlobalStorage && !var->isStaticLocal();
QualType baseType = Context.getBaseElementType(type);
if (Init && !Init->isValueDependent()) {
if (var->isConstexpr()) {
SmallVector<PartialDiagnosticAt, 8> Notes;
if (!var->evaluateValue(Notes) || !var->isInitICE()) {
SourceLocation DiagLoc = var->getLocation();
// If the note doesn't add any useful information other than a source
// location, fold it into the primary diagnostic.
if (Notes.size() == 1 && Notes[0].second.getDiagID() ==
diag::note_invalid_subexpr_in_const_expr) {
DiagLoc = Notes[0].first;
Notes.clear();
}
Diag(DiagLoc, diag::err_constexpr_var_requires_const_init)
<< var << Init->getSourceRange();
for (unsigned I = 0, N = Notes.size(); I != N; ++I)
Diag(Notes[I].first, Notes[I].second);
}
} else if (var->mightBeUsableInConstantExpressions(Context)) {
// Check whether the initializer of a const variable of integral or
// enumeration type is an ICE now, since we can't tell whether it was
// initialized by a constant expression if we check later.
var->checkInitIsICE();
}
// Don't emit further diagnostics about constexpr globals since they
// were just diagnosed.
if (!var->isConstexpr() && GlobalStorage &&
var->hasAttr<RequireConstantInitAttr>()) {
// FIXME: Need strict checking in C++03 here.
bool DiagErr = getLangOpts().CPlusPlus11
? !var->checkInitIsICE() : !checkConstInit();
if (DiagErr) {
auto attr = var->getAttr<RequireConstantInitAttr>();
Diag(var->getLocation(), diag::err_require_constant_init_failed)
<< Init->getSourceRange();
Diag(attr->getLocation(), diag::note_declared_required_constant_init_here)
<< attr->getRange();
if (getLangOpts().CPlusPlus11) {
APValue Value;
SmallVector<PartialDiagnosticAt, 8> Notes;
Init->EvaluateAsInitializer(Value, getASTContext(), var, Notes);
for (auto &it : Notes)
Diag(it.first, it.second);
} else {
Diag(CacheCulprit->getExprLoc(),
diag::note_invalid_subexpr_in_const_expr)
<< CacheCulprit->getSourceRange();
}
}
}
else if (!var->isConstexpr() && IsGlobal &&
!getDiagnostics().isIgnored(diag::warn_global_constructor,
var->getLocation())) {
// Warn about globals which don't have a constant initializer. Don't
// warn about globals with a non-trivial destructor because we already
// warned about them.
CXXRecordDecl *RD = baseType->getAsCXXRecordDecl();
if (!(RD && !RD->hasTrivialDestructor())) {
if (!checkConstInit())
Diag(var->getLocation(), diag::warn_global_constructor)
<< Init->getSourceRange();
}
}
}
// Require the destructor.
if (const RecordType *recordType = baseType->getAs<RecordType>())
FinalizeVarWithDestructor(var, recordType);
// If this variable must be emitted, add it as an initializer for the current
// module.
if (Context.DeclMustBeEmitted(var) && !ModuleScopes.empty())
Context.addModuleInitializer(ModuleScopes.back().Module, var);
}
/// Determines if a variable's alignment is dependent.
static bool hasDependentAlignment(VarDecl *VD) {
if (VD->getType()->isDependentType())
return true;
for (auto *I : VD->specific_attrs<AlignedAttr>())
if (I->isAlignmentDependent())
return true;
return false;
}
/// Check if VD needs to be dllexport/dllimport due to being in a
/// dllexport/import function.
void Sema::CheckStaticLocalForDllExport(VarDecl *VD) {
assert(VD->isStaticLocal());
auto *FD = dyn_cast_or_null<FunctionDecl>(VD->getParentFunctionOrMethod());
// Find outermost function when VD is in lambda function.
while (FD && !getDLLAttr(FD) &&
!FD->hasAttr<DLLExportStaticLocalAttr>() &&
!FD->hasAttr<DLLImportStaticLocalAttr>()) {
FD = dyn_cast_or_null<FunctionDecl>(FD->getParentFunctionOrMethod());
}
if (!FD)
return;
// Static locals inherit dll attributes from their function.
if (Attr *A = getDLLAttr(FD)) {
auto *NewAttr = cast<InheritableAttr>(A->clone(getASTContext()));
NewAttr->setInherited(true);
VD->addAttr(NewAttr);
} else if (Attr *A = FD->getAttr<DLLExportStaticLocalAttr>()) {
auto *NewAttr = ::new (getASTContext()) DLLExportAttr(A->getRange(),
getASTContext(),
A->getSpellingListIndex());
NewAttr->setInherited(true);
VD->addAttr(NewAttr);
// Export this function to enforce exporting this static variable even
// if it is not used in this compilation unit.
if (!FD->hasAttr<DLLExportAttr>())
FD->addAttr(NewAttr);
} else if (Attr *A = FD->getAttr<DLLImportStaticLocalAttr>()) {
auto *NewAttr = ::new (getASTContext()) DLLImportAttr(A->getRange(),
getASTContext(),
A->getSpellingListIndex());
NewAttr->setInherited(true);
VD->addAttr(NewAttr);
}
}
/// FinalizeDeclaration - called by ParseDeclarationAfterDeclarator to perform
/// any semantic actions necessary after any initializer has been attached.
void Sema::FinalizeDeclaration(Decl *ThisDecl) {
// Note that we are no longer parsing the initializer for this declaration.
ParsingInitForAutoVars.erase(ThisDecl);
VarDecl *VD = dyn_cast_or_null<VarDecl>(ThisDecl);
if (!VD)
return;
// Apply an implicit SectionAttr if '#pragma clang section bss|data|rodata' is active
if (VD->hasGlobalStorage() && VD->isThisDeclarationADefinition() &&
!inTemplateInstantiation() && !VD->hasAttr<SectionAttr>()) {
if (PragmaClangBSSSection.Valid)
VD->addAttr(PragmaClangBSSSectionAttr::CreateImplicit(Context,
PragmaClangBSSSection.SectionName,
PragmaClangBSSSection.PragmaLocation));
if (PragmaClangDataSection.Valid)
VD->addAttr(PragmaClangDataSectionAttr::CreateImplicit(Context,
PragmaClangDataSection.SectionName,
PragmaClangDataSection.PragmaLocation));
if (PragmaClangRodataSection.Valid)
VD->addAttr(PragmaClangRodataSectionAttr::CreateImplicit(Context,
PragmaClangRodataSection.SectionName,
PragmaClangRodataSection.PragmaLocation));
}
if (auto *DD = dyn_cast<DecompositionDecl>(ThisDecl)) {
for (auto *BD : DD->bindings()) {
FinalizeDeclaration(BD);
}
}
checkAttributesAfterMerging(*this, *VD);
// Perform TLS alignment check here after attributes attached to the variable
// which may affect the alignment have been processed. Only perform the check
// if the target has a maximum TLS alignment (zero means no constraints).
if (unsigned MaxAlign = Context.getTargetInfo().getMaxTLSAlign()) {
// Protect the check so that it's not performed on dependent types and
// dependent alignments (we can't determine the alignment in that case).
if (VD->getTLSKind() && !hasDependentAlignment(VD) &&
!VD->isInvalidDecl()) {
CharUnits MaxAlignChars = Context.toCharUnitsFromBits(MaxAlign);
if (Context.getDeclAlign(VD) > MaxAlignChars) {
Diag(VD->getLocation(), diag::err_tls_var_aligned_over_maximum)
<< (unsigned)Context.getDeclAlign(VD).getQuantity() << VD
<< (unsigned)MaxAlignChars.getQuantity();
}
}
}
if (VD->isStaticLocal()) {
CheckStaticLocalForDllExport(VD);
if (dyn_cast_or_null<FunctionDecl>(VD->getParentFunctionOrMethod())) {
// CUDA 8.0 E.3.9.4: Within the body of a __device__ or __global__
// function, only __shared__ variables or variables without any device
// memory qualifiers may be declared with static storage class.
// Note: It is unclear how a function-scope non-const static variable
// without a device memory qualifier would be implemented; therefore, only a
// static const variable without a device memory qualifier is allowed.
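// For example (illustrative):
//   __device__ void f() {
//     static int a;            // error in device code
//     static const int b = 0;  // OK: const, no device memory qualifier
//     __shared__ int c;        // OK: shared memory
//   }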
[&]() {
if (!getLangOpts().CUDA)
return;
if (VD->hasAttr<CUDASharedAttr>())
return;
if (VD->getType().isConstQualified() &&
!(VD->hasAttr<CUDADeviceAttr>() || VD->hasAttr<CUDAConstantAttr>()))
return;
if (CUDADiagIfDeviceCode(VD->getLocation(),
diag::err_device_static_local_var)
<< CurrentCUDATarget())
VD->setInvalidDecl();
}();
}
}
// Perform check for initializers of device-side global variables.
// CUDA allows empty constructors as initializers (see E.2.3.1, CUDA
// 7.5). We must also apply the same checks to all __shared__
// variables whether they are local or not. CUDA also allows
// constant initializers for __constant__ and __device__ variables.
if (getLangOpts().CUDA)
checkAllowedCUDAInitializer(VD);
// Grab the dllimport or dllexport attribute off of the VarDecl.
const InheritableAttr *DLLAttr = getDLLAttr(VD);
// Imported static data members cannot be defined out-of-line.
if (const auto *IA = dyn_cast_or_null<DLLImportAttr>(DLLAttr)) {
if (VD->isStaticDataMember() && VD->isOutOfLine() &&
VD->isThisDeclarationADefinition()) {
// We allow definitions of dllimport class template static data members
// with a warning.
CXXRecordDecl *Context =
cast<CXXRecordDecl>(VD->getFirstDecl()->getDeclContext());
bool IsClassTemplateMember =
isa<ClassTemplatePartialSpecializationDecl>(Context) ||
Context->getDescribedClassTemplate();
Diag(VD->getLocation(),
IsClassTemplateMember
? diag::warn_attribute_dllimport_static_field_definition
: diag::err_attribute_dllimport_static_field_definition);
Diag(IA->getLocation(), diag::note_attribute);
if (!IsClassTemplateMember)
VD->setInvalidDecl();
}
}
// dllimport/dllexport variables cannot be thread local, their TLS index
// isn't exported with the variable.
if (DLLAttr && VD->getTLSKind()) {
auto *F = dyn_cast_or_null<FunctionDecl>(VD->getParentFunctionOrMethod());
if (F && getDLLAttr(F)) {
assert(VD->isStaticLocal());
// But if this is a static local in a dllimport/dllexport function, the
// function will never be inlined, which means the var would never be
// imported, so having it marked import/export is safe.
} else {
Diag(VD->getLocation(), diag::err_attribute_dll_thread_local) << VD
<< DLLAttr;
VD->setInvalidDecl();
}
}
if (UsedAttr *Attr = VD->getAttr<UsedAttr>()) {
if (!Attr->isInherited() && !VD->isThisDeclarationADefinition()) {
Diag(Attr->getLocation(), diag::warn_attribute_ignored) << Attr;
VD->dropAttr<UsedAttr>();
}
}
const DeclContext *DC = VD->getDeclContext();
// If there's a #pragma GCC visibility in scope, and this isn't a class
// member, set the visibility of this variable.
if (DC->getRedeclContext()->isFileContext() && VD->isExternallyVisible())
AddPushedVisibilityAttribute(VD);
// FIXME: Warn on unused var template partial specializations.
if (VD->isFileVarDecl() && !isa<VarTemplatePartialSpecializationDecl>(VD))
MarkUnusedFileScopedDecl(VD);
// Now we have parsed the initializer and can update the table of magic
// tag values.
if (!VD->hasAttr<TypeTagForDatatypeAttr>() ||
!VD->getType()->isIntegralOrEnumerationType())
return;
for (const auto *I : ThisDecl->specific_attrs<TypeTagForDatatypeAttr>()) {
const Expr *MagicValueExpr = VD->getInit();
if (!MagicValueExpr) {
continue;
}
llvm::APSInt MagicValueInt;
if (!MagicValueExpr->isIntegerConstantExpr(MagicValueInt, Context)) {
Diag(I->getRange().getBegin(),
diag::err_type_tag_for_datatype_not_ice)
<< LangOpts.CPlusPlus << MagicValueExpr->getSourceRange();
continue;
}
if (MagicValueInt.getActiveBits() > 64) {
Diag(I->getRange().getBegin(),
diag::err_type_tag_for_datatype_too_large)
<< LangOpts.CPlusPlus << MagicValueExpr->getSourceRange();
continue;
}
uint64_t MagicValue = MagicValueInt.getZExtValue();
RegisterTypeTagForDatatype(I->getArgumentKind(),
MagicValue,
I->getMatchingCType(),
I->getLayoutCompatible(),
I->getMustBeNull());
}
}
static bool hasDeducedAuto(DeclaratorDecl *DD) {
auto *VD = dyn_cast<VarDecl>(DD);
return VD && !VD->getType()->hasAutoForTrailingReturnType();
}
Sema::DeclGroupPtrTy Sema::FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group) {
SmallVector<Decl*, 8> Decls;
if (DS.isTypeSpecOwned())
Decls.push_back(DS.getRepAsDecl());
DeclaratorDecl *FirstDeclaratorInGroup = nullptr;
DecompositionDecl *FirstDecompDeclaratorInGroup = nullptr;
bool DiagnosedMultipleDecomps = false;
DeclaratorDecl *FirstNonDeducedAutoInGroup = nullptr;
bool DiagnosedNonDeducedAuto = false;
for (unsigned i = 0, e = Group.size(); i != e; ++i) {
if (Decl *D = Group[i]) {
// For declarators, there are some additional syntactic-ish checks we need
// to perform.
if (auto *DD = dyn_cast<DeclaratorDecl>(D)) {
if (!FirstDeclaratorInGroup)
FirstDeclaratorInGroup = DD;
if (!FirstDecompDeclaratorInGroup)
FirstDecompDeclaratorInGroup = dyn_cast<DecompositionDecl>(D);
if (!FirstNonDeducedAutoInGroup && DS.hasAutoTypeSpec() &&
!hasDeducedAuto(DD))
FirstNonDeducedAutoInGroup = DD;
if (FirstDeclaratorInGroup != DD) {
// A decomposition declaration cannot be combined with any other
// declaration in the same group.
if (FirstDecompDeclaratorInGroup && !DiagnosedMultipleDecomps) {
Diag(FirstDecompDeclaratorInGroup->getLocation(),
diag::err_decomp_decl_not_alone)
<< FirstDeclaratorInGroup->getSourceRange()
<< DD->getSourceRange();
DiagnosedMultipleDecomps = true;
}
// A declarator that uses 'auto' in any way other than to declare a
// variable with a deduced type cannot be combined with any other
// declarator in the same group.
if (FirstNonDeducedAutoInGroup && !DiagnosedNonDeducedAuto) {
Diag(FirstNonDeducedAutoInGroup->getLocation(),
diag::err_auto_non_deduced_not_alone)
<< FirstNonDeducedAutoInGroup->getType()
->hasAutoForTrailingReturnType()
<< FirstDeclaratorInGroup->getSourceRange()
<< DD->getSourceRange();
DiagnosedNonDeducedAuto = true;
}
}
}
Decls.push_back(D);
}
}
if (DeclSpec::isDeclRep(DS.getTypeSpecType())) {
if (TagDecl *Tag = dyn_cast_or_null<TagDecl>(DS.getRepAsDecl())) {
handleTagNumbering(Tag, S);
if (FirstDeclaratorInGroup && !Tag->hasNameForLinkage() &&
getLangOpts().CPlusPlus)
Context.addDeclaratorForUnnamedTagDecl(Tag, FirstDeclaratorInGroup);
}
}
return BuildDeclaratorGroup(Decls);
}
/// BuildDeclaratorGroup - convert a list of declarations into a declaration
/// group, performing any necessary semantic checking.
Sema::DeclGroupPtrTy
Sema::BuildDeclaratorGroup(MutableArrayRef<Decl *> Group) {
// C++14 [dcl.spec.auto]p7: (DR1347)
// If the type that replaces the placeholder type is not the same in each
// deduction, the program is ill-formed.
if (Group.size() > 1) {
QualType Deduced;
VarDecl *DeducedDecl = nullptr;
for (unsigned i = 0, e = Group.size(); i != e; ++i) {
VarDecl *D = dyn_cast<VarDecl>(Group[i]);
if (!D || D->isInvalidDecl())
break;
DeducedType *DT = D->getType()->getContainedDeducedType();
if (!DT || DT->getDeducedType().isNull())
continue;
if (Deduced.isNull()) {
Deduced = DT->getDeducedType();
DeducedDecl = D;
} else if (!Context.hasSameType(DT->getDeducedType(), Deduced)) {
auto *AT = dyn_cast<AutoType>(DT);
Diag(D->getTypeSourceInfo()->getTypeLoc().getBeginLoc(),
diag::err_auto_different_deductions)
<< (AT ? (unsigned)AT->getKeyword() : 3)
<< Deduced << DeducedDecl->getDeclName()
<< DT->getDeducedType() << D->getDeclName()
<< DeducedDecl->getInit()->getSourceRange()
<< D->getInit()->getSourceRange();
D->setInvalidDecl();
break;
}
}
}
ActOnDocumentableDecls(Group);
return DeclGroupPtrTy::make(
DeclGroupRef::Create(Context, Group.data(), Group.size()));
}
void Sema::ActOnDocumentableDecl(Decl *D) {
ActOnDocumentableDecls(D);
}
void Sema::ActOnDocumentableDecls(ArrayRef<Decl *> Group) {
// Don't parse the comment if Doxygen diagnostics are ignored.
if (Group.empty() || !Group[0])
return;
if (Diags.isIgnored(diag::warn_doc_param_not_found,
Group[0]->getLocation()) &&
Diags.isIgnored(diag::warn_unknown_comment_command_name,
Group[0]->getLocation()))
return;
if (Group.size() >= 2) {
// This is a decl group. Normally it will contain only declarations
// produced from declarator list. But in case we have any definitions or
// additional declaration references:
// 'typedef struct S {} S;'
// 'typedef struct S *S;'
// 'struct S *pS;'
// FinalizeDeclaratorGroup adds these as separate declarations.
Decl *MaybeTagDecl = Group[0];
if (MaybeTagDecl && isa<TagDecl>(MaybeTagDecl)) {
Group = Group.slice(1);
}
}
// See if there are any new comments that are not attached to a decl.
ArrayRef<RawComment *> Comments = Context.getRawCommentList().getComments();
if (!Comments.empty() &&
!Comments.back()->isAttached()) {
// There is at least one comment that is not attached to a decl.
// Maybe it should be attached to one of these decls?
//
// Note that this way we pick up not only comments that precede the
// declaration, but also comments that *follow* the declaration -- thanks to
// the lookahead in the lexer: we've consumed the semicolon and looked
// ahead through comments.
for (unsigned i = 0, e = Group.size(); i != e; ++i)
Context.getCommentForDecl(Group[i], &PP);
}
}
/// Common checks for a parameter-declaration that should apply to both function
/// parameters and non-type template parameters.
void Sema::CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D) {
// Check that there are no default arguments inside the type of this
// parameter.
if (getLangOpts().CPlusPlus)
CheckExtraCXXDefaultArguments(D);
// Parameter declarators cannot be qualified (C++ [dcl.meaning]p1).
if (D.getCXXScopeSpec().isSet()) {
Diag(D.getIdentifierLoc(), diag::err_qualified_param_declarator)
<< D.getCXXScopeSpec().getRange();
}
// [dcl.meaning]p1: An unqualified-id occurring in a declarator-id shall be a
// simple identifier except [...irrelevant cases...].
switch (D.getName().getKind()) {
case UnqualifiedIdKind::IK_Identifier:
break;
case UnqualifiedIdKind::IK_OperatorFunctionId:
case UnqualifiedIdKind::IK_ConversionFunctionId:
case UnqualifiedIdKind::IK_LiteralOperatorId:
case UnqualifiedIdKind::IK_ConstructorName:
case UnqualifiedIdKind::IK_DestructorName:
case UnqualifiedIdKind::IK_ImplicitSelfParam:
case UnqualifiedIdKind::IK_DeductionGuideName:
Diag(D.getIdentifierLoc(), diag::err_bad_parameter_name)
<< GetNameForDeclarator(D).getName();
break;
case UnqualifiedIdKind::IK_TemplateId:
case UnqualifiedIdKind::IK_ConstructorTemplateId:
// GetNameForDeclarator would not produce a useful name in this case.
Diag(D.getIdentifierLoc(), diag::err_bad_parameter_name_template_id);
break;
}
}
/// ActOnParamDeclarator - Called from Parser::ParseFunctionDeclarator()
/// to introduce parameters into function prototype scope.
Decl *Sema::ActOnParamDeclarator(Scope *S, Declarator &D) {
const DeclSpec &DS = D.getDeclSpec();
// Verify C99 6.7.5.3p2: The only SCS allowed is 'register'.
// C++03 [dcl.stc]p2 also permits 'auto'.
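// For example (illustrative):
//   void f(register int a);  // OK (deprecated in C++11, an extension in C++17)
//   void g(static int b);    // error: invalid storage class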
StorageClass SC = SC_None;
if (DS.getStorageClassSpec() == DeclSpec::SCS_register) {
SC = SC_Register;
// In C++11, the 'register' storage class specifier is deprecated.
// In C++17, it is not allowed, but we tolerate it as an extension.
if (getLangOpts().CPlusPlus11) {
Diag(DS.getStorageClassSpecLoc(),
getLangOpts().CPlusPlus17 ? diag::ext_register_storage_class
: diag::warn_deprecated_register)
<< FixItHint::CreateRemoval(DS.getStorageClassSpecLoc());
}
} else if (getLangOpts().CPlusPlus &&
DS.getStorageClassSpec() == DeclSpec::SCS_auto) {
SC = SC_Auto;
} else if (DS.getStorageClassSpec() != DeclSpec::SCS_unspecified) {
Diag(DS.getStorageClassSpecLoc(),
diag::err_invalid_storage_class_in_func_decl);
D.getMutableDeclSpec().ClearStorageClassSpecs();
}
if (DeclSpec::TSCS TSCS = DS.getThreadStorageClassSpec())
Diag(DS.getThreadStorageClassSpecLoc(), diag::err_invalid_thread)
<< DeclSpec::getSpecifierName(TSCS);
if (DS.isInlineSpecified())
Diag(DS.getInlineSpecLoc(), diag::err_inline_non_function)
<< getLangOpts().CPlusPlus17;
if (DS.hasConstexprSpecifier())
Diag(DS.getConstexprSpecLoc(), diag::err_invalid_constexpr)
<< 0 << (D.getDeclSpec().getConstexprSpecifier() == CSK_consteval);
DiagnoseFunctionSpecifiers(DS);
CheckFunctionOrTemplateParamDeclarator(S, D);
TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
QualType parmDeclType = TInfo->getType();
// Check for redeclaration of parameters, e.g. int foo(int x, int x);
IdentifierInfo *II = D.getIdentifier();
if (II) {
LookupResult R(*this, II, D.getIdentifierLoc(), LookupOrdinaryName,
ForVisibleRedeclaration);
LookupName(R, S);
if (R.isSingleResult()) {
NamedDecl *PrevDecl = R.getFoundDecl();
if (PrevDecl->isTemplateParameter()) {
// Maybe we will complain about the shadowed template parameter.
DiagnoseTemplateParameterShadow(D.getIdentifierLoc(), PrevDecl);
// Just pretend that we didn't see the previous declaration.
PrevDecl = nullptr;
} else if (S->isDeclScope(PrevDecl)) {
Diag(D.getIdentifierLoc(), diag::err_param_redefinition) << II;
Diag(PrevDecl->getLocation(), diag::note_previous_declaration);
// Recover by removing the name
II = nullptr;
D.SetIdentifier(nullptr, D.getIdentifierLoc());
D.setInvalidType(true);
}
}
}
// Temporarily put parameter variables in the translation unit, not
// the enclosing context. This prevents them from accidentally
// looking like class members in C++.
ParmVarDecl *New =
CheckParameter(Context.getTranslationUnitDecl(), D.getBeginLoc(),
D.getIdentifierLoc(), II, parmDeclType, TInfo, SC);
if (D.isInvalidType())
New->setInvalidDecl();
assert(S->isFunctionPrototypeScope());
assert(S->getFunctionPrototypeDepth() >= 1);
New->setScopeInfo(S->getFunctionPrototypeDepth() - 1,
S->getNextFunctionPrototypeIndex());
// Add the parameter declaration into this scope.
S->AddDecl(New);
if (II)
IdResolver.AddDecl(New);
ProcessDeclAttributes(S, New, D);
if (D.getDeclSpec().isModulePrivateSpecified())
Diag(New->getLocation(), diag::err_module_private_local)
<< 1 << New->getDeclName()
<< SourceRange(D.getDeclSpec().getModulePrivateSpecLoc())
<< FixItHint::CreateRemoval(D.getDeclSpec().getModulePrivateSpecLoc());
if (New->hasAttr<BlocksAttr>()) {
Diag(New->getLocation(), diag::err_block_on_nonlocal);
}
return New;
}
/// Synthesizes a variable for a parameter arising from a
/// typedef.
ParmVarDecl *Sema::BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T) {
/* FIXME: setting StartLoc == Loc.
Would it be worthwhile to modify callers so as to provide a proper source
location for unnamed parameters, embedding the parameter's type? */
ParmVarDecl *Param = ParmVarDecl::Create(Context, DC, Loc, Loc, nullptr,
T, Context.getTrivialTypeSourceInfo(T, Loc),
SC_None, nullptr);
Param->setImplicit();
return Param;
}
void Sema::DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters) {
// Don't diagnose unused-parameter errors in template instantiations; we
// will already have done so in the template itself.
if (inTemplateInstantiation())
return;
for (const ParmVarDecl *Parameter : Parameters) {
if (!Parameter->isReferenced() && Parameter->getDeclName() &&
!Parameter->hasAttr<UnusedAttr>()) {
Diag(Parameter->getLocation(), diag::warn_unused_parameter)
<< Parameter->getDeclName();
}
}
}
void Sema::DiagnoseSizeOfParametersAndReturnValue(
ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D) {
if (LangOpts.NumLargeByValueCopy == 0) // No check.
return;
// Warn if the return value is pass-by-value and larger than the specified
// threshold.
if (!ReturnTy->isDependentType() && ReturnTy.isPODType(Context)) {
unsigned Size = Context.getTypeSizeInChars(ReturnTy).getQuantity();
if (Size > LangOpts.NumLargeByValueCopy)
Diag(D->getLocation(), diag::warn_return_value_size)
<< D->getDeclName() << Size;
}
// Warn if any parameter is pass-by-value and larger than the specified
// threshold.
for (const ParmVarDecl *Parameter : Parameters) {
QualType T = Parameter->getType();
if (T->isDependentType() || !T.isPODType(Context))
continue;
unsigned Size = Context.getTypeSizeInChars(T).getQuantity();
if (Size > LangOpts.NumLargeByValueCopy)
Diag(Parameter->getLocation(), diag::warn_parameter_size)
<< Parameter->getDeclName() << Size;
}
}
ParmVarDecl *Sema::CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC) {
// In ARC, infer a lifetime qualifier for appropriate parameter types.
if (getLangOpts().ObjCAutoRefCount &&
T.getObjCLifetime() == Qualifiers::OCL_None &&
T->isObjCLifetimeType()) {
Qualifiers::ObjCLifetime lifetime;
// Special cases for arrays:
// - if it's const, use __unsafe_unretained
// - otherwise, it's an error
if (T->isArrayType()) {
if (!T.isConstQualified()) {
if (DelayedDiagnostics.shouldDelayDiagnostics())
DelayedDiagnostics.add(
sema::DelayedDiagnostic::makeForbiddenType(
NameLoc, diag::err_arc_array_param_no_ownership, T, false));
else
Diag(NameLoc, diag::err_arc_array_param_no_ownership)
<< TSInfo->getTypeLoc().getSourceRange();
}
lifetime = Qualifiers::OCL_ExplicitNone;
} else {
lifetime = T->getObjCARCImplicitLifetime();
}
T = Context.getLifetimeQualifiedType(T, lifetime);
}
ParmVarDecl *New = ParmVarDecl::Create(Context, DC, StartLoc, NameLoc, Name,
Context.getAdjustedParameterType(T),
TSInfo, SC, nullptr);
if (New->getType().hasNonTrivialToPrimitiveDestructCUnion() ||
New->getType().hasNonTrivialToPrimitiveCopyCUnion())
checkNonTrivialCUnion(New->getType(), New->getLocation(),
NTCUC_FunctionParam, NTCUK_Destruct|NTCUK_Copy);
// Parameters cannot be abstract class types.
// For record types, this is done by the AbstractClassUsageDiagnoser once
// the class has been completely parsed.
if (!CurContext->isRecord() &&
RequireNonAbstractType(NameLoc, T, diag::err_abstract_type_in_decl,
AbstractParamType))
New->setInvalidDecl();
// Parameter declarators cannot be interface types. All ObjC objects are
// passed by reference.
if (T->isObjCObjectType()) {
SourceLocation TypeEndLoc =
getLocForEndOfToken(TSInfo->getTypeLoc().getEndLoc());
Diag(NameLoc,
diag::err_object_cannot_be_passed_returned_by_value) << 1 << T
<< FixItHint::CreateInsertion(TypeEndLoc, "*");
T = Context.getObjCObjectPointerType(T);
New->setType(T);
}
// ISO/IEC TR 18037 S6.7.3: "The type of an object with automatic storage
// duration shall not be qualified by an address-space qualifier."
// Since all parameters have automatic storage duration, they cannot have
// an address space.
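// For example (illustrative, OpenCL):
//   void f(__global int g);     // error: parameter qualified with an address space
//   void h(__global int a[4]);  // OK: array parameters may carry an address space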
if (T.getAddressSpace() != LangAS::Default &&
// OpenCL allows function arguments declared to be an array of a type
// to be qualified with an address space.
!(getLangOpts().OpenCL &&
(T->isArrayType() || T.getAddressSpace() == LangAS::opencl_private))) {
Diag(NameLoc, diag::err_arg_with_address_space);
New->setInvalidDecl();
}
return New;
}
void Sema::ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls) {
DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
// Verify 6.9.1p6: 'every identifier in the identifier list shall be declared'
// for a K&R function.
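// For example (illustrative):
//   int f(a, b) int a; { return a + b; }  // 'b' gets an implicit 'int b;' and a warning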
if (!FTI.hasPrototype) {
for (int i = FTI.NumParams; i != 0; /* decrement in loop */) {
--i;
if (FTI.Params[i].Param == nullptr) {
SmallString<256> Code;
llvm::raw_svector_ostream(Code)
<< " int " << FTI.Params[i].Ident->getName() << ";\n";
Diag(FTI.Params[i].IdentLoc, diag::ext_param_not_declared)
<< FTI.Params[i].Ident
<< FixItHint::CreateInsertion(LocAfterDecls, Code);
// Implicitly declare the argument as type 'int' for lack of a better
// type.
AttributeFactory attrs;
DeclSpec DS(attrs);
const char* PrevSpec; // unused
unsigned DiagID; // unused
DS.SetTypeSpecType(DeclSpec::TST_int, FTI.Params[i].IdentLoc, PrevSpec,
DiagID, Context.getPrintingPolicy());
// Use the identifier location for the type source range.
DS.SetRangeStart(FTI.Params[i].IdentLoc);
DS.SetRangeEnd(FTI.Params[i].IdentLoc);
Declarator ParamD(DS, DeclaratorContext::KNRTypeListContext);
ParamD.SetIdentifier(FTI.Params[i].Ident, FTI.Params[i].IdentLoc);
FTI.Params[i].Param = ActOnParamDeclarator(S, ParamD);
}
}
}
}
Decl *
Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody) {
assert(getCurFunctionDecl() == nullptr && "Function parsing confused");
assert(D.isFunctionDeclarator() && "Not a function declarator!");
Scope *ParentScope = FnBodyScope->getParent();
D.setFunctionDefinitionKind(FDK_Definition);
Decl *DP = HandleDeclarator(ParentScope, D, TemplateParameterLists);
return ActOnStartOfFunctionDef(FnBodyScope, DP, SkipBody);
}
void Sema::ActOnFinishInlineFunctionDef(FunctionDecl *D) {
Consumer.HandleInlineFunctionDefinition(D);
}
static bool
ShouldWarnAboutMissingPrototype(const FunctionDecl *FD,
const FunctionDecl *&PossiblePrototype) {
// Don't warn about invalid declarations.
if (FD->isInvalidDecl())
return false;
// Or declarations that aren't global.
if (!FD->isGlobal())
return false;
// Don't warn about C++ member functions.
if (isa<CXXMethodDecl>(FD))
return false;
// Don't warn about 'main'.
if (FD->isMain())
return false;
// Don't warn about inline functions.
if (FD->isInlined())
return false;
// Don't warn about function templates.
if (FD->getDescribedFunctionTemplate())
return false;
// Don't warn about function template specializations.
if (FD->isFunctionTemplateSpecialization())
return false;
// Don't warn for OpenCL kernels.
if (FD->hasAttr<OpenCLKernelAttr>())
return false;
// Don't warn on explicitly deleted functions.
if (FD->isDeleted())
return false;
for (const FunctionDecl *Prev = FD->getPreviousDecl();
Prev; Prev = Prev->getPreviousDecl()) {
// Ignore any declarations that occur in function or method
// scope, because they aren't visible from the header.
if (Prev->getLexicalDeclContext()->isFunctionOrMethod())
continue;
PossiblePrototype = Prev;
return Prev->getType()->isFunctionNoProtoType();
}
return true;
}
void
Sema::CheckForFunctionRedefinition(FunctionDecl *FD,
const FunctionDecl *EffectiveDefinition,
SkipBodyInfo *SkipBody) {
const FunctionDecl *Definition = EffectiveDefinition;
if (!Definition && !FD->isDefined(Definition) && !FD->isCXXClassMember()) {
// If this is a friend function defined in a class template, it does not
// have a body until it is used, nevertheless it is a definition, see
// [temp.inst]p2:
//
// ... for the purpose of determining whether an instantiated redeclaration
// is valid according to [basic.def.odr] and [class.mem], a declaration that
// corresponds to a definition in the template is considered to be a
// definition.
//
// The following code must produce a redefinition error:
//
// template<typename T> struct C20 { friend void func_20() {} };
// C20<int> c20i;
// void func_20() {}
//
for (auto I : FD->redecls()) {
if (I != FD && !I->isInvalidDecl() &&
I->getFriendObjectKind() != Decl::FOK_None) {
if (FunctionDecl *Original = I->getInstantiatedFromMemberFunction()) {
if (FunctionDecl *OrigFD = FD->getInstantiatedFromMemberFunction()) {
// A merged copy of the same function, instantiated as a member of
// the same class, is OK.
if (declaresSameEntity(OrigFD, Original) &&
declaresSameEntity(cast<Decl>(I->getLexicalDeclContext()),
cast<Decl>(FD->getLexicalDeclContext())))
continue;
}
if (Original->isThisDeclarationADefinition()) {
Definition = I;
break;
}
}
}
}
}
if (!Definition)
// Similar to friend functions, a friend function template may be a
// definition and yet not have a body if it is instantiated in a class
// template.
if (FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate()) {
for (auto I : FTD->redecls()) {
auto D = cast<FunctionTemplateDecl>(I);
if (D != FTD) {
assert(!D->isThisDeclarationADefinition() &&
"More than one definition in redeclaration chain");
if (D->getFriendObjectKind() != Decl::FOK_None)
if (FunctionTemplateDecl *FT =
D->getInstantiatedFromMemberTemplate()) {
if (FT->isThisDeclarationADefinition()) {
Definition = D->getTemplatedDecl();
break;
}
}
}
}
}
if (!Definition)
return;
if (canRedefineFunction(Definition, getLangOpts()))
return;
// Don't emit an error when this is redefinition of a typo-corrected
// definition.
if (TypoCorrectedFunctionDefinitions.count(Definition))
return;
// If we don't have a visible definition of the function, and it's inline or
// a template, skip the new definition.
if (SkipBody && !hasVisibleDefinition(Definition) &&
(Definition->getFormalLinkage() == InternalLinkage ||
Definition->isInlined() ||
Definition->getDescribedFunctionTemplate() ||
Definition->getNumTemplateParameterLists())) {
SkipBody->ShouldSkip = true;
SkipBody->Previous = const_cast<FunctionDecl*>(Definition);
if (auto *TD = Definition->getDescribedFunctionTemplate())
makeMergedDefinitionVisible(TD);
makeMergedDefinitionVisible(const_cast<FunctionDecl*>(Definition));
return;
}
if (getLangOpts().GNUMode && Definition->isInlineSpecified() &&
Definition->getStorageClass() == SC_Extern)
Diag(FD->getLocation(), diag::err_redefinition_extern_inline)
<< FD->getDeclName() << getLangOpts().CPlusPlus;
else
Diag(FD->getLocation(), diag::err_redefinition) << FD->getDeclName();
Diag(Definition->getLocation(), diag::note_previous_definition);
FD->setInvalidDecl();
}
static void RebuildLambdaScopeInfo(CXXMethodDecl *CallOperator,
Sema &S) {
CXXRecordDecl *const LambdaClass = CallOperator->getParent();
LambdaScopeInfo *LSI = S.PushLambdaScope();
LSI->CallOperator = CallOperator;
LSI->Lambda = LambdaClass;
LSI->ReturnType = CallOperator->getReturnType();
const LambdaCaptureDefault LCD = LambdaClass->getLambdaCaptureDefault();
if (LCD == LCD_None)
LSI->ImpCaptureStyle = CapturingScopeInfo::ImpCap_None;
else if (LCD == LCD_ByCopy)
LSI->ImpCaptureStyle = CapturingScopeInfo::ImpCap_LambdaByval;
else if (LCD == LCD_ByRef)
LSI->ImpCaptureStyle = CapturingScopeInfo::ImpCap_LambdaByref;
DeclarationNameInfo DNI = CallOperator->getNameInfo();
LSI->IntroducerRange = DNI.getCXXOperatorNameRange();
LSI->Mutable = !CallOperator->isConst();
// Add the captures to the LSI so they can be noted as already
// captured within tryCaptureVar.
auto I = LambdaClass->field_begin();
for (const auto &C : LambdaClass->captures()) {
if (C.capturesVariable()) {
VarDecl *VD = C.getCapturedVar();
if (VD->isInitCapture())
S.CurrentInstantiationScope->InstantiatedLocal(VD, VD);
QualType CaptureType = VD->getType();
const bool ByRef = C.getCaptureKind() == LCK_ByRef;
LSI->addCapture(VD, /*IsBlock*/false, ByRef,
/*RefersToEnclosingVariableOrCapture*/true, C.getLocation(),
/*EllipsisLoc*/C.isPackExpansion()
? C.getEllipsisLoc() : SourceLocation(),
CaptureType, /*Invalid*/false);
} else if (C.capturesThis()) {
LSI->addThisCapture(/*Nested*/ false, C.getLocation(), I->getType(),
C.getCaptureKind() == LCK_StarThis);
} else {
LSI->addVLATypeCapture(C.getLocation(), I->getCapturedVLAType(),
I->getType());
}
++I;
}
}
Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D,
SkipBodyInfo *SkipBody) {
if (!D) {
// Parsing the function declaration failed in some way. Push on a fake scope
// anyway so we can try to parse the function body.
PushFunctionScope();
PushExpressionEvaluationContext(ExprEvalContexts.back().Context);
return D;
}
FunctionDecl *FD = nullptr;
if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(D))
FD = FunTmpl->getTemplatedDecl();
else
FD = cast<FunctionDecl>(D);
// Do not push if it is a lambda because one is already pushed when building
// the lambda in ActOnStartOfLambdaDefinition().
if (!isLambdaCallOperator(FD))
PushExpressionEvaluationContext(ExprEvalContexts.back().Context);
// Check for defining attributes before the check for redefinition.
if (const auto *Attr = FD->getAttr<AliasAttr>()) {
Diag(Attr->getLocation(), diag::err_alias_is_definition) << FD << 0;
FD->dropAttr<AliasAttr>();
FD->setInvalidDecl();
}
if (const auto *Attr = FD->getAttr<IFuncAttr>()) {
Diag(Attr->getLocation(), diag::err_alias_is_definition) << FD << 1;
FD->dropAttr<IFuncAttr>();
FD->setInvalidDecl();
}
// See if this is a redefinition. If 'will have body' is already set, then
// these checks were already performed when it was set.
if (!FD->willHaveBody() && !FD->isLateTemplateParsed()) {
CheckForFunctionRedefinition(FD, nullptr, SkipBody);
// If we're skipping the body, we're done. Don't enter the scope.
if (SkipBody && SkipBody->ShouldSkip)
return D;
}
// Mark this function as "will have a body eventually". This lets users
// call e.g. isInlineDefinitionExternallyVisible while we're still parsing
// this function.
FD->setWillHaveBody();
// If we are instantiating a generic lambda call operator, push
// a LambdaScopeInfo onto the function stack. But use the information
// that's already been calculated (ActOnLambdaExpr) to prime the current
// LambdaScopeInfo.
// When the template operator is being specialized, the LambdaScopeInfo
// has to be properly restored so that tryCaptureVariable doesn't try
// to capture any new variables. In addition, when calculating potential
// captures during transformation of nested lambdas, it is necessary to
// have the LSI properly restored.
if (isGenericLambdaCallOperatorSpecialization(FD)) {
assert(inTemplateInstantiation() &&
"There should be an active template instantiation on the stack "
"when instantiating a generic lambda!");
RebuildLambdaScopeInfo(cast<CXXMethodDecl>(D), *this);
} else {
// Enter a new function scope
PushFunctionScope();
}
// Builtin functions cannot be defined.
if (unsigned BuiltinID = FD->getBuiltinID()) {
if (!Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID) &&
!Context.BuiltinInfo.isPredefinedRuntimeFunction(BuiltinID)) {
Diag(FD->getLocation(), diag::err_builtin_definition) << FD;
FD->setInvalidDecl();
}
}
// The return type of a function definition must be complete
// (C99 6.9.1p3, C++ [dcl.fct]p6).
QualType ResultType = FD->getReturnType();
if (!ResultType->isDependentType() && !ResultType->isVoidType() &&
!FD->isInvalidDecl() &&
RequireCompleteType(FD->getLocation(), ResultType,
diag::err_func_def_incomplete_result))
FD->setInvalidDecl();
if (FnBodyScope)
PushDeclContext(FnBodyScope, FD);
// Check the validity of our function parameters
CheckParmsForFunctionDef(FD->parameters(),
/*CheckParameterNames=*/true);
// Add non-parameter declarations already in the function to the current
// scope.
if (FnBodyScope) {
for (Decl *NPD : FD->decls()) {
auto *NonParmDecl = dyn_cast<NamedDecl>(NPD);
if (!NonParmDecl)
continue;
assert(!isa<ParmVarDecl>(NonParmDecl) &&
"parameters should not be in newly created FD yet");
// If the decl has a name, make it accessible in the current scope.
if (NonParmDecl->getDeclName())
PushOnScopeChains(NonParmDecl, FnBodyScope, /*AddToContext=*/false);
// Similarly, dive into enums and fish their constants out, making them
// accessible in this scope.
if (auto *ED = dyn_cast<EnumDecl>(NonParmDecl)) {
for (auto *EI : ED->enumerators())
PushOnScopeChains(EI, FnBodyScope, /*AddToContext=*/false);
}
}
}
// Introduce our parameters into the function scope
for (auto Param : FD->parameters()) {
Param->setOwningFunction(FD);
// If this has an identifier, add it to the scope stack.
if (Param->getIdentifier() && FnBodyScope) {
CheckShadow(FnBodyScope, Param);
PushOnScopeChains(Param, FnBodyScope);
}
}
// Ensure that the function's exception specification is instantiated.
if (const FunctionProtoType *FPT = FD->getType()->getAs<FunctionProtoType>())
ResolveExceptionSpec(D->getLocation(), FPT);
// dllimport cannot be applied to non-inline function definitions.
if (FD->hasAttr<DLLImportAttr>() && !FD->isInlined() &&
!FD->isTemplateInstantiation()) {
assert(!FD->hasAttr<DLLExportAttr>());
Diag(FD->getLocation(), diag::err_attribute_dllimport_function_definition);
FD->setInvalidDecl();
return D;
}
// We want to attach documentation to original Decl (which might be
// a function template).
ActOnDocumentableDecl(D);
if (getCurLexicalContext()->isObjCContainer() &&
getCurLexicalContext()->getDeclKind() != Decl::ObjCCategoryImpl &&
getCurLexicalContext()->getDeclKind() != Decl::ObjCImplementation)
Diag(FD->getLocation(), diag::warn_function_def_in_objc_container);
return D;
}
/// Given the set of return statements within a function body,
/// compute the variables that are subject to the named return value
/// optimization.
///
/// Each of the variables that is subject to the named return value
/// optimization will be marked as NRVO variables in the AST, and any
/// return statement that has a marked NRVO variable as its NRVO candidate can
/// use the named return value optimization.
///
/// This function applies a very simplistic algorithm for NRVO: if every return
/// statement in the scope of a variable has the same NRVO candidate, that
/// candidate is an NRVO variable.
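///
/// For example (illustrative):
/// \code
///   X f() {
///     X x;
///     return x;  // every return names 'x', so 'x' becomes an NRVO variable
///   }
/// \endcode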
void Sema::computeNRVO(Stmt *Body, FunctionScopeInfo *Scope) {
ReturnStmt **Returns = Scope->Returns.data();
for (unsigned I = 0, E = Scope->Returns.size(); I != E; ++I) {
if (const VarDecl *NRVOCandidate = Returns[I]->getNRVOCandidate()) {
if (!NRVOCandidate->isNRVOVariable())
Returns[I]->setNRVOCandidate(nullptr);
}
}
}
bool Sema::canDelayFunctionBody(const Declarator &D) {
// We can't delay parsing the body of a constexpr function template (yet).
if (D.getDeclSpec().hasConstexprSpecifier())
return false;
// We can't delay parsing the body of a function template with a deduced
// return type (yet).
if (D.getDeclSpec().hasAutoTypeSpec()) {
// If the placeholder introduces a non-deduced trailing return type,
// we can still delay parsing it.
if (D.getNumTypeObjects()) {
const auto &Outer = D.getTypeObject(D.getNumTypeObjects() - 1);
if (Outer.Kind == DeclaratorChunk::Function &&
Outer.Fun.hasTrailingReturnType()) {
QualType Ty = GetTypeFromParser(Outer.Fun.getTrailingReturnType());
return Ty.isNull() || !Ty->isUndeducedType();
}
}
return false;
}
return true;
}
bool Sema::canSkipFunctionBody(Decl *D) {
// We cannot skip the body of a function (or function template) which is
// constexpr, since we may need to evaluate its body in order to parse the
// rest of the file.
// We cannot skip the body of a function with an undeduced return type,
// because any callers of that function need to know the type.
if (const FunctionDecl *FD = D->getAsFunction()) {
if (FD->isConstexpr())
return false;
// We can't simply call Type::isUndeducedType here, because inside template
// auto can be deduced to a dependent type, which is not considered
// "undeduced".
if (FD->getReturnType()->getContainedDeducedType())
return false;
}
return Consumer.shouldSkipFunctionBody(D);
}
Decl *Sema::ActOnSkippedFunctionBody(Decl *Decl) {
if (!Decl)
return nullptr;
if (FunctionDecl *FD = Decl->getAsFunction())
FD->setHasSkippedBody();
else if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(Decl))
MD->setHasSkippedBody();
return Decl;
}
Decl *Sema::ActOnFinishFunctionBody(Decl *D, Stmt *BodyArg) {
return ActOnFinishFunctionBody(D, BodyArg, false);
}
/// RAII object that pops an ExpressionEvaluationContext when exiting a function
/// body.
class ExitFunctionBodyRAII {
public:
ExitFunctionBodyRAII(Sema &S, bool IsLambda) : S(S), IsLambda(IsLambda) {}
~ExitFunctionBodyRAII() {
if (!IsLambda)
S.PopExpressionEvaluationContext();
}
private:
Sema &S;
bool IsLambda = false;
};
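// Illustrative sketch (added; 'enqueue' and '_ivar' are hypothetical names):
// the check below flags implicit retains of 'self' that happen inside an
// escaping block, e.g. via an ivar access such as:
//
// \code
//   - (void)run {
//     enqueue(^{ _ivar = 0; });  // warns; fix-it suggests 'self->_ivar'
//   }
// \endcode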
static void diagnoseImplicitlyRetainedSelf(Sema &S) {
llvm::DenseMap<const BlockDecl *, bool> EscapeInfo;
auto IsOrNestedInEscapingBlock = [&](const BlockDecl *BD) {
if (EscapeInfo.count(BD))
return EscapeInfo[BD];
bool R = false;
const BlockDecl *CurBD = BD;
do {
R = !CurBD->doesNotEscape();
if (R)
break;
CurBD = CurBD->getParent()->getInnermostBlockDecl();
} while (CurBD);
return EscapeInfo[BD] = R;
};
// If the location where 'self' is implicitly retained is inside an escaping
// block, emit a diagnostic.
for (const std::pair<SourceLocation, const BlockDecl *> &P :
S.ImplicitlyRetainedSelfLocs)
if (IsOrNestedInEscapingBlock(P.second))
S.Diag(P.first, diag::warn_implicitly_retains_self)
<< FixItHint::CreateInsertion(P.first, "self->");
}
Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
bool IsInstantiation) {
FunctionDecl *FD = dcl ? dcl->getAsFunction() : nullptr;
sema::AnalysisBasedWarnings::Policy WP = AnalysisWarnings.getDefaultPolicy();
sema::AnalysisBasedWarnings::Policy *ActivePolicy = nullptr;
if (getLangOpts().Coroutines && getCurFunction()->isCoroutine())
CheckCompletedCoroutineBody(FD, Body);
// Do not call PopExpressionEvaluationContext() if it is a lambda because one
// is already popped when finishing the lambda in BuildLambdaExpr(). This is
// meant to pop the context added in ActOnStartOfFunctionDef().
ExitFunctionBodyRAII ExitRAII(*this, isLambdaCallOperator(FD));
if (FD) {
FD->setBody(Body);
FD->setWillHaveBody(false);
if (getLangOpts().CPlusPlus14) {
if (!FD->isInvalidDecl() && Body && !FD->isDependentContext() &&
FD->getReturnType()->isUndeducedType()) {
// If the function has a deduced result type but contains no 'return'
// statements, the result type as written must be exactly 'auto', and
// the deduced result type is 'void'.
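// For example (illustrative): "auto f() {}" deduces 'void', whereas
// "auto *f() {}" cannot deduce 'void' and is diagnosed below.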
if (!FD->getReturnType()->getAs<AutoType>()) {
Diag(dcl->getLocation(), diag::err_auto_fn_no_return_but_not_auto)
<< FD->getReturnType();
FD->setInvalidDecl();
} else {
// Substitute 'void' for the 'auto' in the type.
TypeLoc ResultType = getReturnTypeLoc(FD);
Context.adjustDeducedFunctionResultType(
FD, SubstAutoType(ResultType.getType(), Context.VoidTy));
}
}
} else if (getLangOpts().CPlusPlus11 && isLambdaCallOperator(FD)) {
// In C++11, we don't use 'auto' deduction rules for lambda call
// operators because we don't support return type deduction.
auto *LSI = getCurLambda();
if (LSI->HasImplicitReturnType) {
deduceClosureReturnType(*LSI);
// C++11 [expr.prim.lambda]p4:
// [...] if there are no return statements in the compound-statement
// [the deduced type is] the type void
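// For example (illustrative): "[]{}" has deduced return type 'void'.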
QualType RetType =
LSI->ReturnType.isNull() ? Context.VoidTy : LSI->ReturnType;
// Update the return type to the deduced type.
const FunctionProtoType *Proto =
FD->getType()->getAs<FunctionProtoType>();
FD->setType(Context.getFunctionType(RetType, Proto->getParamTypes(),
Proto->getExtProtoInfo()));
}
}
// If the function implicitly returns zero (like 'main') or is naked,
// don't complain about missing return statements.
if (FD->hasImplicitReturnZero() || FD->hasAttr<NakedAttr>())
WP.disableCheckFallThrough();
// MSVC permits the use of a pure specifier (=0) on a function definition
// at class scope; warn about this non-standard construct.
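// e.g. (illustrative): struct S { virtual int f() = 0 { return 0; } };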
if (getLangOpts().MicrosoftExt && FD->isPure() && !FD->isOutOfLine())
Diag(FD->getLocation(), diag::ext_pure_function_definition);
if (!FD->isInvalidDecl()) {
// Don't diagnose unused parameters of defaulted or deleted functions.
if (!FD->isDeleted() && !FD->isDefaulted() && !FD->hasSkippedBody())
DiagnoseUnusedParameters(FD->parameters());
DiagnoseSizeOfParametersAndReturnValue(FD->parameters(),
FD->getReturnType(), FD);
// If this is a structor, we need a vtable.
if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(FD))
MarkVTableUsed(FD->getLocation(), Constructor->getParent());
else if (CXXDestructorDecl *Destructor = dyn_cast<CXXDestructorDecl>(FD))
MarkVTableUsed(FD->getLocation(), Destructor->getParent());
// Try to apply the named return value optimization. We have to check
// if we can do this here because lambdas keep return statements around
// to deduce an implicit return type.
if (FD->getReturnType()->isRecordType() &&
(!getLangOpts().CPlusPlus || !FD->isDependentContext()))
computeNRVO(Body, getCurFunction());
}
// GNU warning -Wmissing-prototypes:
// Warn if a global function is defined without a previous
// prototype declaration. This warning is issued even if the
// definition itself provides a prototype. The aim is to detect
// global functions that fail to be declared in header files.
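// Illustrative example: a file-scope "int f(int x) { return x; }" with no
// prior "int f(int);" declaration triggers this warning.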
const FunctionDecl *PossiblePrototype = nullptr;
if (ShouldWarnAboutMissingPrototype(FD, PossiblePrototype)) {
Diag(FD->getLocation(), diag::warn_missing_prototype) << FD;
if (PossiblePrototype) {
// We found a declaration that is not a prototype,
// but that could be a zero-parameter prototype.
if (TypeSourceInfo *TI = PossiblePrototype->getTypeSourceInfo()) {
TypeLoc TL = TI->getTypeLoc();
if (FunctionNoProtoTypeLoc FTL = TL.getAs<FunctionNoProtoTypeLoc>())
Diag(PossiblePrototype->getLocation(),
diag::note_declaration_not_a_prototype)
<< (FD->getNumParams() != 0)
<< (FD->getNumParams() == 0
? FixItHint::CreateInsertion(FTL.getRParenLoc(), "void")
: FixItHint{});
}
} else {
Diag(FD->getTypeSpecStartLoc(), diag::note_static_for_internal_linkage)
<< /* function */ 1
<< (FD->getStorageClass() == SC_None
? FixItHint::CreateInsertion(FD->getTypeSpecStartLoc(),
"static ")
: FixItHint{});
}
// GNU warning -Wstrict-prototypes
// Warn if K&R function is defined without a previous declaration.
// This warning is issued only if the definition itself does not provide
// a prototype. Only K&R definitions do not provide a prototype.
// An empty list in a function declarator that is part of a definition
// of that function specifies that the function has no parameters
// (C99 6.7.5.3p14)
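// Illustrative K&R-style definition this warns about:
//   int f(a) int a; { return a; }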
if (!FD->hasWrittenPrototype() && FD->getNumParams() > 0 &&
!LangOpts.CPlusPlus) {
TypeSourceInfo *TI = FD->getTypeSourceInfo();
TypeLoc TL = TI->getTypeLoc();
FunctionTypeLoc FTL = TL.getAsAdjusted<FunctionTypeLoc>();
Diag(FTL.getLParenLoc(), diag::warn_strict_prototypes) << 2;
}
}
// Warn on CPUDispatch with an actual body.
if (FD->isMultiVersion() && FD->hasAttr<CPUDispatchAttr>() && Body)
if (const auto *CmpndBody = dyn_cast<CompoundStmt>(Body))
if (!CmpndBody->body_empty())
Diag(CmpndBody->body_front()->getBeginLoc(),
diag::warn_dispatch_body_ignored);
if (auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
const CXXMethodDecl *KeyFunction;
if (MD->isOutOfLine() && (MD = MD->getCanonicalDecl()) &&
MD->isVirtual() &&
(KeyFunction = Context.getCurrentKeyFunction(MD->getParent())) &&
MD == KeyFunction->getCanonicalDecl()) {
// Update the key-function state if necessary for this ABI.
if (FD->isInlined() &&
!Context.getTargetInfo().getCXXABI().canKeyFunctionBeInline()) {
Context.setNonKeyFunction(MD);
// If the newly-chosen key function is already defined, then we
// need to mark the vtable as used retroactively.
KeyFunction = Context.getCurrentKeyFunction(MD->getParent());
const FunctionDecl *Definition;
if (KeyFunction && KeyFunction->isDefined(Definition))
MarkVTableUsed(Definition->getLocation(), MD->getParent(), true);
} else {
// We just defined the key function; mark the vtable as used.
MarkVTableUsed(FD->getLocation(), MD->getParent(), true);
}
}
}
assert((FD == getCurFunctionDecl() || getCurLambda()->CallOperator == FD) &&
"Function parsing confused");
} else if (ObjCMethodDecl *MD = dyn_cast_or_null<ObjCMethodDecl>(dcl)) {
assert(MD == getCurMethodDecl() && "Method parsing confused");
MD->setBody(Body);
if (!MD->isInvalidDecl()) {
DiagnoseSizeOfParametersAndReturnValue(MD->parameters(),
MD->getReturnType(), MD);
if (Body)
computeNRVO(Body, getCurFunction());
}
if (getCurFunction()->ObjCShouldCallSuper) {
Diag(MD->getEndLoc(), diag::warn_objc_missing_super_call)
<< MD->getSelector().getAsString();
getCurFunction()->ObjCShouldCallSuper = false;
}
if (getCurFunction()->ObjCWarnForNoDesignatedInitChain) {
const ObjCMethodDecl *InitMethod = nullptr;
bool isDesignated =
MD->isDesignatedInitializerForTheInterface(&InitMethod);
assert(isDesignated && InitMethod);
(void)isDesignated;
auto superIsNSObject = [&](const ObjCMethodDecl *MD) {
auto IFace = MD->getClassInterface();
if (!IFace)
return false;
auto SuperD = IFace->getSuperClass();
if (!SuperD)
return false;
return SuperD->getIdentifier() ==
NSAPIObj->getNSClassId(NSAPI::ClassId_NSObject);
};
// Don't issue this warning for unavailable inits or direct subclasses
// of NSObject.
if (!MD->isUnavailable() && !superIsNSObject(MD)) {
Diag(MD->getLocation(),
diag::warn_objc_designated_init_missing_super_call);
Diag(InitMethod->getLocation(),
diag::note_objc_designated_init_marked_here);
}
getCurFunction()->ObjCWarnForNoDesignatedInitChain = false;
}
if (getCurFunction()->ObjCWarnForNoInitDelegation) {
// Don't issue this warning for unavailable inits.
if (!MD->isUnavailable())
Diag(MD->getLocation(),
diag::warn_objc_secondary_init_missing_init_call);
getCurFunction()->ObjCWarnForNoInitDelegation = false;
}
diagnoseImplicitlyRetainedSelf(*this);
} else {
// Parsing the function declaration failed in some way. Pop the fake scope
// we pushed on.
PopFunctionScopeInfo(ActivePolicy, dcl);
return nullptr;
}
if (Body && getCurFunction()->HasPotentialAvailabilityViolations)
DiagnoseUnguardedAvailabilityViolations(dcl);
assert(!getCurFunction()->ObjCShouldCallSuper &&
"This should only be set for ObjC methods, which should have been "
"handled in the block above.");
// Verify and clean out per-function state.
if (Body && (!FD || !FD->isDefaulted())) {
// C++ constructors that have function-try-blocks can't have return
// statements in the handlers of that block. (C++ [except.handle]p14)
// Verify this.
if (FD && isa<CXXConstructorDecl>(FD) && isa<CXXTryStmt>(Body))
DiagnoseReturnInConstructorExceptionHandler(cast<CXXTryStmt>(Body));
// Verify that gotos and switch cases don't jump into scopes illegally.
if (getCurFunction()->NeedsScopeChecking() &&
!PP.isCodeCompletionEnabled())
DiagnoseInvalidJumps(Body);
if (CXXDestructorDecl *Destructor = dyn_cast<CXXDestructorDecl>(dcl)) {
if (!Destructor->getParent()->isDependentType())
CheckDestructor(Destructor);
MarkBaseAndMemberDestructorsReferenced(Destructor->getLocation(),
Destructor->getParent());
}
// If any errors have occurred, clear out any temporaries that may have
// been left over. This ensures that these temporaries won't be picked up
// for deletion in some later function.
if (getDiagnostics().hasErrorOccurred() ||
getDiagnostics().getSuppressAllDiagnostics()) {
DiscardCleanupsInEvaluationContext();
}
if (!getDiagnostics().hasUncompilableErrorOccurred() &&
!isa<FunctionTemplateDecl>(dcl)) {
// Since the body is valid, issue any analysis-based warnings that are
// enabled.
ActivePolicy = &WP;
}
if (!IsInstantiation && FD && FD->isConstexpr() && !FD->isInvalidDecl() &&
(!CheckConstexprFunctionDecl(FD) ||
!CheckConstexprFunctionBody(FD, Body)))
FD->setInvalidDecl();
if (FD && FD->hasAttr<NakedAttr>()) {
for (const Stmt *S : Body->children()) {
// Allow local register variables without initializer as they don't
// require prologue.
bool RegisterVariables = false;
if (auto *DS = dyn_cast<DeclStmt>(S)) {
for (const auto *Decl : DS->decls()) {
if (const auto *Var = dyn_cast<VarDecl>(Decl)) {
RegisterVariables =
Var->hasAttr<AsmLabelAttr>() && !Var->hasInit();
if (!RegisterVariables)
break;
}
}
}
if (RegisterVariables)
continue;
if (!isa<AsmStmt>(S) && !isa<NullStmt>(S)) {
Diag(S->getBeginLoc(), diag::err_non_asm_stmt_in_naked_function);
Diag(FD->getAttr<NakedAttr>()->getLocation(), diag::note_attribute);
FD->setInvalidDecl();
break;
}
}
}
assert(ExprCleanupObjects.size() ==
ExprEvalContexts.back().NumCleanupObjects &&
"Leftover temporaries in function");
assert(!Cleanup.exprNeedsCleanups() && "Unaccounted cleanups in function");
assert(MaybeODRUseExprs.empty() &&
"Leftover expressions for odr-use checking");
}
if (!IsInstantiation)
PopDeclContext();
PopFunctionScopeInfo(ActivePolicy, dcl);
// If any errors have occurred, clear out any temporaries that may have
// been left over. This ensures that these temporaries won't be picked up
// for deletion in some later function.
if (getDiagnostics().hasErrorOccurred()) {
DiscardCleanupsInEvaluationContext();
}
return dcl;
}
/// When we finish delayed parsing of an attribute, we must attach it to the
/// relevant Decl.
void Sema::ActOnFinishDelayedAttribute(Scope *S, Decl *D,
ParsedAttributes &Attrs) {
// Always attach attributes to the underlying decl.
if (TemplateDecl *TD = dyn_cast<TemplateDecl>(D))
D = TD->getTemplatedDecl();
ProcessDeclAttributeList(S, D, Attrs);
if (CXXMethodDecl *Method = dyn_cast_or_null<CXXMethodDecl>(D))
if (Method->isStatic())
checkThisInStaticMemberFunctionAttributes(Method);
}
/// ImplicitlyDefineFunction - An undeclared identifier was used in a function
/// call, forming a call to an implicitly defined function (per C99 6.5.1p2).
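/// Illustrative example (added): in C89, with no declaration of 'bar' in
/// scope,
/// \code
///   int main(void) { return bar(0); }  /* 'bar' is treated as 'int bar()' */
/// \endcode
/// is accepted with a warning; C99 treats the construct as an extension.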
NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
IdentifierInfo &II, Scope *S) {
// Find the scope in which the identifier is injected and the corresponding
// DeclContext.
// FIXME: C89 does not say what happens if there is no enclosing block scope.
// In that case, we inject the declaration into the translation unit scope
// instead.
Scope *BlockScope = S;
while (!BlockScope->isCompoundStmtScope() && BlockScope->getParent())
BlockScope = BlockScope->getParent();
Scope *ContextScope = BlockScope;
while (!ContextScope->getEntity())
ContextScope = ContextScope->getParent();
ContextRAII SavedContext(*this, ContextScope->getEntity());
// Before we produce a declaration for an implicitly defined
// function, see whether there was a locally-scoped declaration of
// this name as a function or variable. If so, use that
// (non-visible) declaration, and complain about it.
NamedDecl *ExternCPrev = findLocallyScopedExternCDecl(&II);
if (ExternCPrev) {
// We still need to inject the function into the enclosing block scope so
// that later (non-call) uses can see it.
PushOnScopeChains(ExternCPrev, BlockScope, /*AddToContext*/false);
// C89 footnote 38:
// If in fact it is not defined as having type "function returning int",
// the behavior is undefined.
if (!isa<FunctionDecl>(ExternCPrev) ||
!Context.typesAreCompatible(
cast<FunctionDecl>(ExternCPrev)->getType(),
Context.getFunctionNoProtoType(Context.IntTy))) {
Diag(Loc, diag::ext_use_out_of_scope_declaration)
<< ExternCPrev << !getLangOpts().C99;
Diag(ExternCPrev->getLocation(), diag::note_previous_declaration);
return ExternCPrev;
}
}
// Implicitly declaring a function is an extension in C99 but legal in C90;
// warn about it either way.
unsigned diag_id;
if (II.getName().startswith("__builtin_"))
diag_id = diag::warn_builtin_unknown;
// OpenCL v2.0 s6.9.u - Implicit function declaration is not supported.
else if (getLangOpts().OpenCL)
diag_id = diag::err_opencl_implicit_function_decl;
else if (getLangOpts().C99)
diag_id = diag::ext_implicit_function_decl;
else
diag_id = diag::warn_implicit_function_decl;
Diag(Loc, diag_id) << &II;
// If we found a prior declaration of this function, don't bother building
// another one. We've already pushed that one into scope, so there's nothing
// more to do.
if (ExternCPrev)
return ExternCPrev;
// Because typo correction is expensive, only do it if the implicit
// function declaration is going to be treated as an error.
if (Diags.getDiagnosticLevel(diag_id, Loc) >= DiagnosticsEngine::Error) {
TypoCorrection Corrected;
DeclFilterCCC<FunctionDecl> CCC{};
if (S && (Corrected =
CorrectTypo(DeclarationNameInfo(&II, Loc), LookupOrdinaryName,
S, nullptr, CCC, CTK_NonError)))
diagnoseTypo(Corrected, PDiag(diag::note_function_suggestion),
/*ErrorRecovery*/false);
}
// Set a Declarator for the implicit definition: int foo();
const char *Dummy;
AttributeFactory attrFactory;
DeclSpec DS(attrFactory);
unsigned DiagID;
bool Error = DS.SetTypeSpecType(DeclSpec::TST_int, Loc, Dummy, DiagID,
Context.getPrintingPolicy());
(void)Error; // Silence warning.
assert(!Error && "Error setting up implicit decl!");
SourceLocation NoLoc;
Declarator D(DS, DeclaratorContext::BlockContext);
D.AddTypeInfo(DeclaratorChunk::getFunction(/*HasProto=*/false,
/*IsAmbiguous=*/false,
/*LParenLoc=*/NoLoc,
/*Params=*/nullptr,
/*NumParams=*/0,
/*EllipsisLoc=*/NoLoc,
/*RParenLoc=*/NoLoc,
/*RefQualifierIsLvalueRef=*/true,
/*RefQualifierLoc=*/NoLoc,
/*MutableLoc=*/NoLoc, EST_None,
/*ESpecRange=*/SourceRange(),
/*Exceptions=*/nullptr,
/*ExceptionRanges=*/nullptr,
/*NumExceptions=*/0,
/*NoexceptExpr=*/nullptr,
/*ExceptionSpecTokens=*/nullptr,
/*DeclsInPrototype=*/None, Loc,
Loc, D),
std::move(DS.getAttributes()), SourceLocation());
D.SetIdentifier(&II, Loc);
// Insert this function into the enclosing block scope.
FunctionDecl *FD = cast<FunctionDecl>(ActOnDeclarator(BlockScope, D));
FD->setImplicit();
AddKnownFunctionAttributes(FD);
return FD;
}
/// Adds any function attributes that we know a priori based on
/// the declaration of this function.
///
/// These attributes can apply both to implicitly-declared builtins
/// (like __builtin___printf_chk) and to library-declared functions
/// like NSLog or printf.
///
/// We need to check for duplicate attributes both here and where user-written
/// attributes are applied to declarations.
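/// Illustrative sketch (added): given the plain declaration
/// \code
///   int printf(const char *, ...);
/// \endcode
/// the builtin is recognized and implicitly gains format(printf, 1, 2), so
/// -Wformat checking applies to calls through this declaration.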
void Sema::AddKnownFunctionAttributes(FunctionDecl *FD) {
if (FD->isInvalidDecl())
return;
// If this is a built-in function, map its builtin attributes to
// actual attributes.
if (unsigned BuiltinID = FD->getBuiltinID()) {
// Handle printf-formatting attributes.
unsigned FormatIdx;
bool HasVAListArg;
if (Context.BuiltinInfo.isPrintfLike(BuiltinID, FormatIdx, HasVAListArg)) {
if (!FD->hasAttr<FormatAttr>()) {
const char *fmt = "printf";
unsigned int NumParams = FD->getNumParams();
if (FormatIdx < NumParams && // NumParams may be 0 (e.g. vfprintf)
FD->getParamDecl(FormatIdx)->getType()->isObjCObjectPointerType())
fmt = "NSString";
FD->addAttr(FormatAttr::CreateImplicit(Context,
&Context.Idents.get(fmt),
FormatIdx+1,
HasVAListArg ? 0 : FormatIdx+2,
FD->getLocation()));
}
}
if (Context.BuiltinInfo.isScanfLike(BuiltinID, FormatIdx,
HasVAListArg)) {
if (!FD->hasAttr<FormatAttr>())
FD->addAttr(FormatAttr::CreateImplicit(Context,
&Context.Idents.get("scanf"),
FormatIdx+1,
HasVAListArg ? 0 : FormatIdx+2,
FD->getLocation()));
}
// Handle automatically recognized callbacks.
SmallVector<int, 4> Encoding;
if (!FD->hasAttr<CallbackAttr>() &&
Context.BuiltinInfo.performsCallback(BuiltinID, Encoding))
FD->addAttr(CallbackAttr::CreateImplicit(
Context, Encoding.data(), Encoding.size(), FD->getLocation()));
// Mark const if we don't care about errno and that is the only thing
// preventing the function from being const. This allows IRgen to use LLVM
// intrinsics for such functions.
if (!getLangOpts().MathErrno && !FD->hasAttr<ConstAttr>() &&
Context.BuiltinInfo.isConstWithoutErrno(BuiltinID))
FD->addAttr(ConstAttr::CreateImplicit(Context, FD->getLocation()));
// We make "fma" on some platforms const because we know it does not set
// errno in those environments even though it could set errno based on the
// C standard.
const llvm::Triple &Trip = Context.getTargetInfo().getTriple();
if ((Trip.isGNUEnvironment() || Trip.isAndroid() || Trip.isOSMSVCRT()) &&
!FD->hasAttr<ConstAttr>()) {
switch (BuiltinID) {
case Builtin::BI__builtin_fma:
case Builtin::BI__builtin_fmaf:
case Builtin::BI__builtin_fmal:
case Builtin::BIfma:
case Builtin::BIfmaf:
case Builtin::BIfmal:
FD->addAttr(ConstAttr::CreateImplicit(Context, FD->getLocation()));
break;
default:
break;
}
}
if (Context.BuiltinInfo.isReturnsTwice(BuiltinID) &&
!FD->hasAttr<ReturnsTwiceAttr>())
FD->addAttr(ReturnsTwiceAttr::CreateImplicit(Context,
FD->getLocation()));
if (Context.BuiltinInfo.isNoThrow(BuiltinID) && !FD->hasAttr<NoThrowAttr>())
FD->addAttr(NoThrowAttr::CreateImplicit(Context, FD->getLocation()));
if (Context.BuiltinInfo.isPure(BuiltinID) && !FD->hasAttr<PureAttr>())
FD->addAttr(PureAttr::CreateImplicit(Context, FD->getLocation()));
if (Context.BuiltinInfo.isConst(BuiltinID) && !FD->hasAttr<ConstAttr>())
FD->addAttr(ConstAttr::CreateImplicit(Context, FD->getLocation()));
if (getLangOpts().CUDA && Context.BuiltinInfo.isTSBuiltin(BuiltinID) &&
!FD->hasAttr<CUDADeviceAttr>() && !FD->hasAttr<CUDAHostAttr>()) {
// Add the appropriate attribute, depending on the CUDA compilation mode
// and which target the builtin belongs to. For example, during host
// compilation, aux builtins are __device__, while the rest are __host__.
if (getLangOpts().CUDAIsDevice !=
Context.BuiltinInfo.isAuxBuiltinID(BuiltinID))
FD->addAttr(CUDADeviceAttr::CreateImplicit(Context, FD->getLocation()));
else
FD->addAttr(CUDAHostAttr::CreateImplicit(Context, FD->getLocation()));
}
}
// If C++ exceptions are enabled but we are told extern "C" functions cannot
// throw, add an implicit nothrow attribute to any extern "C" function we come
// across.
if (getLangOpts().CXXExceptions && getLangOpts().ExternCNoUnwind &&
FD->isExternC() && !FD->hasAttr<NoThrowAttr>()) {
const auto *FPT = FD->getType()->getAs<FunctionProtoType>();
if (!FPT || FPT->getExceptionSpecType() == EST_None)
FD->addAttr(NoThrowAttr::CreateImplicit(Context, FD->getLocation()));
}
IdentifierInfo *Name = FD->getIdentifier();
if (!Name)
return;
if ((!getLangOpts().CPlusPlus &&
FD->getDeclContext()->isTranslationUnit()) ||
(isa<LinkageSpecDecl>(FD->getDeclContext()) &&
cast<LinkageSpecDecl>(FD->getDeclContext())->getLanguage() ==
LinkageSpecDecl::lang_c)) {
// Okay: this could be a libc/libm/Objective-C function we know
// about.
} else
return;
if (Name->isStr("asprintf") || Name->isStr("vasprintf")) {
// FIXME: asprintf and vasprintf aren't C99 functions. Should they be
// target-specific builtins, perhaps?
if (!FD->hasAttr<FormatAttr>())
FD->addAttr(FormatAttr::CreateImplicit(Context,
&Context.Idents.get("printf"), 2,
Name->isStr("vasprintf") ? 0 : 3,
FD->getLocation()));
}
if (Name->isStr("__CFStringMakeConstantString")) {
// We already have a __builtin___CFStringMakeConstantString,
// but builds that use -fno-constant-cfstrings don't go through that.
if (!FD->hasAttr<FormatArgAttr>())
FD->addAttr(FormatArgAttr::CreateImplicit(Context, ParamIdx(1, FD),
FD->getLocation()));
}
}
TypedefDecl *Sema::ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo) {
assert(D.getIdentifier() && "Wrong callback for declspec without declarator");
assert(!T.isNull() && "GetTypeForDeclarator() returned null type");
if (!TInfo) {
assert(D.isInvalidType() && "no declarator info for valid type");
TInfo = Context.getTrivialTypeSourceInfo(T);
}
// Scope manipulation handled by caller.
TypedefDecl *NewTD =
TypedefDecl::Create(Context, CurContext, D.getBeginLoc(),
D.getIdentifierLoc(), D.getIdentifier(), TInfo);
// Bail out immediately if we have an invalid declaration.
if (D.isInvalidType()) {
NewTD->setInvalidDecl();
return NewTD;
}
if (D.getDeclSpec().isModulePrivateSpecified()) {
if (CurContext->isFunctionOrMethod())
Diag(NewTD->getLocation(), diag::err_module_private_local)
<< 2 << NewTD->getDeclName()
<< SourceRange(D.getDeclSpec().getModulePrivateSpecLoc())
<< FixItHint::CreateRemoval(D.getDeclSpec().getModulePrivateSpecLoc());
else
NewTD->setModulePrivate();
}
// C++ [dcl.typedef]p8:
// If the typedef declaration defines an unnamed class (or
// enum), the first typedef-name declared by the declaration
// to be that class type (or enum type) is used to denote the
// class type (or enum type) for linkage purposes only.
// We need to check whether the type was declared in the declaration.
switch (D.getDeclSpec().getTypeSpecType()) {
case TST_enum:
case TST_struct:
case TST_interface:
case TST_union:
case TST_class: {
TagDecl *tagFromDeclSpec = cast<TagDecl>(D.getDeclSpec().getRepAsDecl());
setTagNameForLinkagePurposes(tagFromDeclSpec, NewTD);
break;
}
default:
break;
}
return NewTD;
}
/// Check that this is a valid underlying type for an enum declaration.
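/// For illustration: 'enum E : long {};' passes this check, while
/// 'enum E : float {};' is rejected because the underlying type must be an
/// integral (or dependent) type.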
bool Sema::CheckEnumUnderlyingType(TypeSourceInfo *TI) {
SourceLocation UnderlyingLoc = TI->getTypeLoc().getBeginLoc();
QualType T = TI->getType();
if (T->isDependentType())
return false;
if (const BuiltinType *BT = T->getAs<BuiltinType>())
if (BT->isInteger())
return false;
Diag(UnderlyingLoc, diag::err_enum_invalid_underlying) << T;
return true;
}
/// Check whether this is a valid redeclaration of a previous enumeration.
/// \return true if the redeclaration was invalid.
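/// Illustrative mismatches this rejects (added examples):
/// \code
///   enum class A;  enum A;          // scoped vs. unscoped
///   enum B : int;  enum B : long;   // conflicting fixed underlying types
///   enum C : int;  enum C;          // fixed vs. unfixed
/// \endcode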
bool Sema::CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev) {
if (IsScoped != Prev->isScoped()) {
Diag(EnumLoc, diag::err_enum_redeclare_scoped_mismatch)
<< Prev->isScoped();
Diag(Prev->getLocation(), diag::note_previous_declaration);
return true;
}
if (IsFixed && Prev->isFixed()) {
if (!EnumUnderlyingTy->isDependentType() &&
!Prev->getIntegerType()->isDependentType() &&
!Context.hasSameUnqualifiedType(EnumUnderlyingTy,
Prev->getIntegerType())) {
// TODO: Highlight the underlying type of the redeclaration.
Diag(EnumLoc, diag::err_enum_redeclare_type_mismatch)
<< EnumUnderlyingTy << Prev->getIntegerType();
Diag(Prev->getLocation(), diag::note_previous_declaration)
<< Prev->getIntegerTypeRange();
return true;
}
} else if (IsFixed != Prev->isFixed()) {
Diag(EnumLoc, diag::err_enum_redeclare_fixed_mismatch)
<< Prev->isFixed();
Diag(Prev->getLocation(), diag::note_previous_declaration);
return true;
}
return false;
}
/// Get diagnostic %select index for tag kind for
/// redeclaration diagnostic message.
/// WARNING: Indexes apply to particular diagnostics only!
///
/// \returns diagnostic %select index.
static unsigned getRedeclDiagFromTagKind(TagTypeKind Tag) {
switch (Tag) {
case TTK_Struct: return 0;
case TTK_Interface: return 1;
case TTK_Class: return 2;
default: llvm_unreachable("Invalid tag kind for redecl diagnostic!");
}
}
/// Determine if tag kind is a class-key compatible with
/// class for redeclaration (class, struct, or __interface).
///
/// \returns true iff the tag kind is compatible.
static bool isClassCompatTagKind(TagTypeKind Tag)
{
return Tag == TTK_Struct || Tag == TTK_Class || Tag == TTK_Interface;
}
Sema::NonTagKind Sema::getNonTagTypeDeclKind(const Decl *PrevDecl,
TagTypeKind TTK) {
if (isa<TypedefDecl>(PrevDecl))
return NTK_Typedef;
else if (isa<TypeAliasDecl>(PrevDecl))
return NTK_TypeAlias;
else if (isa<ClassTemplateDecl>(PrevDecl))
return NTK_Template;
else if (isa<TypeAliasTemplateDecl>(PrevDecl))
return NTK_TypeAliasTemplate;
else if (isa<TemplateTemplateParmDecl>(PrevDecl))
return NTK_TemplateTemplateArgument;
switch (TTK) {
case TTK_Struct:
case TTK_Interface:
case TTK_Class:
return getLangOpts().CPlusPlus ? NTK_NonClass : NTK_NonStruct;
case TTK_Union:
return NTK_NonUnion;
case TTK_Enum:
return NTK_NonEnum;
}
llvm_unreachable("invalid TTK");
}
/// Determine whether a tag with a given kind is acceptable
/// as a redeclaration of the given tag declaration.
///
/// \returns true if the new tag kind is acceptable, false otherwise.
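/// For illustration: 'class X; struct X {};' is an acceptable redeclaration
/// (though it may draw -Wmismatched-tags), while 'union Y; struct Y;' is
/// rejected because 'union' and 'struct' never agree.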
bool Sema::isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name) {
// C++ [dcl.type.elab]p3:
// The class-key or enum keyword present in the
// elaborated-type-specifier shall agree in kind with the
// declaration to which the name in the elaborated-type-specifier
// refers. This rule also applies to the form of
// elaborated-type-specifier that declares a class-name or
// friend class since it can be construed as referring to the
// definition of the class. Thus, in any
// elaborated-type-specifier, the enum keyword shall be used to
// refer to an enumeration (7.2), the union class-key shall be
// used to refer to a union (clause 9), and either the class or
// struct class-key shall be used to refer to a class (clause 9)
// declared using the class or struct class-key.
TagTypeKind OldTag = Previous->getTagKind();
if (OldTag != NewTag &&
!(isClassCompatTagKind(OldTag) && isClassCompatTagKind(NewTag)))
return false;
// Tags are compatible, but we might still want to warn on mismatched tags.
// Non-class tags can't be mismatched at this point.
if (!isClassCompatTagKind(NewTag))
return true;
// Declarations for which -Wmismatched-tags is disabled are entirely ignored
// by our warning analysis. We don't want to warn about mismatches with (eg)
// declarations in system headers that are designed to be specialized, but if
// a user asks us to warn, we should warn if their code contains mismatched
// declarations.
auto IsIgnoredLoc = [&](SourceLocation Loc) {
return getDiagnostics().isIgnored(diag::warn_struct_class_tag_mismatch,
Loc);
};
if (IsIgnoredLoc(NewTagLoc))
return true;
auto IsIgnored = [&](const TagDecl *Tag) {
return IsIgnoredLoc(Tag->getLocation());
};
while (IsIgnored(Previous)) {
Previous = Previous->getPreviousDecl();
if (!Previous)
return true;
OldTag = Previous->getTagKind();
}
bool isTemplate = false;
if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(Previous))
isTemplate = Record->getDescribedClassTemplate();
if (inTemplateInstantiation()) {
if (OldTag != NewTag) {
// In a template instantiation, do not offer fix-its for tag mismatches
// since they usually mess up the template instead of fixing the problem.
Diag(NewTagLoc, diag::warn_struct_class_tag_mismatch)
<< getRedeclDiagFromTagKind(NewTag) << isTemplate << Name
<< getRedeclDiagFromTagKind(OldTag);
// FIXME: Note previous location?
}
return true;
}
if (isDefinition) {
// On definitions, check all previous tags and issue a fix-it for each
// one that doesn't match the current tag.
if (Previous->getDefinition()) {
// Don't suggest fix-its for redefinitions.
return true;
}
bool previousMismatch = false;
for (const TagDecl *I : Previous->redecls()) {
if (I->getTagKind() != NewTag) {
// Ignore previous declarations for which the warning was disabled.
if (IsIgnored(I))
continue;
if (!previousMismatch) {
previousMismatch = true;
Diag(NewTagLoc, diag::warn_struct_class_previous_tag_mismatch)
<< getRedeclDiagFromTagKind(NewTag) << isTemplate << Name
<< getRedeclDiagFromTagKind(I->getTagKind());
}
Diag(I->getInnerLocStart(), diag::note_struct_class_suggestion)
<< getRedeclDiagFromTagKind(NewTag)
<< FixItHint::CreateReplacement(I->getInnerLocStart(),
TypeWithKeyword::getTagTypeKindName(NewTag));
}
}
return true;
}
// Identify the prevailing tag kind: this is the kind of the definition (if
// there is a non-ignored definition), or otherwise the kind of the prior
// (non-ignored) declaration.
const TagDecl *PrevDef = Previous->getDefinition();
if (PrevDef && IsIgnored(PrevDef))
PrevDef = nullptr;
const TagDecl *Redecl = PrevDef ? PrevDef : Previous;
if (Redecl->getTagKind() != NewTag) {
Diag(NewTagLoc, diag::warn_struct_class_tag_mismatch)
<< getRedeclDiagFromTagKind(NewTag) << isTemplate << Name
<< getRedeclDiagFromTagKind(OldTag);
Diag(Redecl->getLocation(), diag::note_previous_use);
// If there is a previous definition, suggest a fix-it.
if (PrevDef) {
Diag(NewTagLoc, diag::note_struct_class_suggestion)
<< getRedeclDiagFromTagKind(Redecl->getTagKind())
<< FixItHint::CreateReplacement(SourceRange(NewTagLoc),
TypeWithKeyword::getTagTypeKindName(Redecl->getTagKind()));
}
}
return true;
}
/// Add a minimal nested name specifier fixit hint to allow lookup of a tag name
/// from an outer enclosing namespace or file scope inside a friend declaration.
/// This should provide the commented out code in the following snippet:
/// namespace N {
/// struct X;
/// namespace M {
/// struct Y { friend struct /*N::*/ X; };
/// }
/// }
static FixItHint createFriendTagNNSFixIt(Sema &SemaRef, NamedDecl *ND, Scope *S,
SourceLocation NameLoc) {
// While the decl is in a namespace, do repeated lookup of that name and see
// if we get the same namespace back. If we do not, continue until
// translation unit scope, at which point we have a fully qualified NNS.
SmallVector<IdentifierInfo *, 4> Namespaces;
DeclContext *DC = ND->getDeclContext()->getRedeclContext();
for (; !DC->isTranslationUnit(); DC = DC->getParent()) {
// This tag should be declared in a namespace, which can only be enclosed by
// other namespaces. Bail if there's an anonymous namespace in the chain.
NamespaceDecl *Namespace = dyn_cast<NamespaceDecl>(DC);
if (!Namespace || Namespace->isAnonymousNamespace())
return FixItHint();
IdentifierInfo *II = Namespace->getIdentifier();
Namespaces.push_back(II);
NamedDecl *Lookup = SemaRef.LookupSingleName(
S, II, NameLoc, Sema::LookupNestedNameSpecifierName);
if (Lookup == Namespace)
break;
}
// Once we have all the namespaces, reverse them to go outermost first, and
// build an NNS.
SmallString<64> Insertion;
llvm::raw_svector_ostream OS(Insertion);
if (DC->isTranslationUnit())
OS << "::";
std::reverse(Namespaces.begin(), Namespaces.end());
for (auto *II : Namespaces)
OS << II->getName() << "::";
return FixItHint::CreateInsertion(NameLoc, Insertion);
}
/// Determine whether a tag originally declared in context \p OldDC can
/// be redeclared with an unqualified name in \p NewDC (assuming name lookup
/// found a declaration in \p OldDC as a previous decl, perhaps through a
/// using-declaration).
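/// Hedged sketch of the MSVC-mode case (added; the names are hypothetical):
/// \code
///   namespace N { struct S; }
///   using N::S;
///   struct S {};  // OldDC 'N' and NewDC '::' are related by enclosure
/// \endcode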
static bool isAcceptableTagRedeclContext(Sema &S, DeclContext *OldDC,
DeclContext *NewDC) {
OldDC = OldDC->getRedeclContext();
NewDC = NewDC->getRedeclContext();
if (OldDC->Equals(NewDC))
return true;
// In MSVC mode, we allow a redeclaration if the contexts are related (either
// encloses the other).
if (S.getLangOpts().MSVCCompat &&
(OldDC->Encloses(NewDC) || NewDC->Encloses(OldDC)))
return true;
return false;
}
/// This is invoked when we see 'struct foo' or 'struct {'. In the
/// former case, Name will be non-null. In the latter case, Name will be null.
/// TagSpec indicates what kind of tag this is. TUK indicates whether this is a
/// reference/declaration/definition of a tag.
///
/// \param IsTypeSpecifier \c true if this is a type-specifier (or
/// trailing-type-specifier) other than one in an alias-declaration.
///
/// \param SkipBody If non-null, will be set to indicate if the caller should
/// skip the definition of this tag and treat it as if it were a declaration.
Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attrs, AccessSpecifier AS,
SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists,
bool &OwnedDecl, bool &IsDependent,
SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody) {
// If this is not a definition, it must have a name.
IdentifierInfo *OrigName = Name;
assert((Name != nullptr || TUK == TUK_Definition) &&
"Nameless record must be a definition!");
assert(TemplateParameterLists.size() == 0 || TUK != TUK_Reference);
OwnedDecl = false;
TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
bool ScopedEnum = ScopedEnumKWLoc.isValid();
// FIXME: Check member specializations more carefully.
bool isMemberSpecialization = false;
bool Invalid = false;
// We only need to do this matching if we have template parameters
// or a scope specifier, which also conveniently avoids this work
// for non-C++ cases.
if (TemplateParameterLists.size() > 0 ||
(SS.isNotEmpty() && TUK != TUK_Reference)) {
if (TemplateParameterList *TemplateParams =
MatchTemplateParametersToScopeSpecifier(
KWLoc, NameLoc, SS, nullptr, TemplateParameterLists,
TUK == TUK_Friend, isMemberSpecialization, Invalid)) {
if (Kind == TTK_Enum) {
Diag(KWLoc, diag::err_enum_template);
return nullptr;
}
if (TemplateParams->size() > 0) {
// This is a declaration or definition of a class template (which may
// be a member of another template).
if (Invalid)
return nullptr;
OwnedDecl = false;
DeclResult Result = CheckClassTemplate(
S, TagSpec, TUK, KWLoc, SS, Name, NameLoc, Attrs, TemplateParams,
AS, ModulePrivateLoc,
/*FriendLoc*/ SourceLocation(), TemplateParameterLists.size() - 1,
TemplateParameterLists.data(), SkipBody);
return Result.get();
} else {
// The "template<>" header is extraneous.
Diag(TemplateParams->getTemplateLoc(), diag::err_template_tag_noparams)
<< TypeWithKeyword::getTagTypeKindName(Kind) << Name;
isMemberSpecialization = true;
}
}
}
// Figure out the underlying type if this is an enum declaration. We need to do
// this early, because it's needed to detect if this is an incompatible
// redeclaration.
llvm::PointerUnion<const Type*, TypeSourceInfo*> EnumUnderlying;
bool IsFixed = !UnderlyingType.isUnset() || ScopedEnum;
if (Kind == TTK_Enum) {
if (UnderlyingType.isInvalid() || (!UnderlyingType.get() && ScopedEnum)) {
// No underlying type explicitly specified, or we failed to parse the
// type, default to int.
EnumUnderlying = Context.IntTy.getTypePtr();
} else if (UnderlyingType.get()) {
// C++0x 7.2p2: The type-specifier-seq of an enum-base shall name an
// integral type; any cv-qualification is ignored.
TypeSourceInfo *TI = nullptr;
GetTypeFromParser(UnderlyingType.get(), &TI);
EnumUnderlying = TI;
if (CheckEnumUnderlyingType(TI))
// Recover by falling back to int.
EnumUnderlying = Context.IntTy.getTypePtr();
if (DiagnoseUnexpandedParameterPack(TI->getTypeLoc().getBeginLoc(), TI,
UPPC_FixedUnderlyingType))
EnumUnderlying = Context.IntTy.getTypePtr();
} else if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
// For MSVC ABI compatibility, unfixed enums must use an underlying type
// of 'int'. However, if this is an unfixed forward declaration, don't set
// the underlying type unless the user enables -fms-compatibility. This
// makes unfixed forward declared enums incomplete and is more conforming.
if (TUK == TUK_Definition || getLangOpts().MSVCCompat)
EnumUnderlying = Context.IntTy.getTypePtr();
}
}
DeclContext *SearchDC = CurContext;
DeclContext *DC = CurContext;
bool isStdBadAlloc = false;
bool isStdAlignValT = false;
RedeclarationKind Redecl = forRedeclarationInCurContext();
if (TUK == TUK_Friend || TUK == TUK_Reference)
Redecl = NotForRedeclaration;
/// Create a new tag decl in C/ObjC. Since the ODR-like semantics implemented
/// for C/ObjC require structural equivalence checking, the returned decl
/// here is passed back to the parser, allowing the tag body to be parsed.
auto createTagFromNewDecl = [&]() -> TagDecl * {
assert(!getLangOpts().CPlusPlus && "not meant for C++ usage");
// If there is an identifier, use the location of the identifier as the
// location of the decl, otherwise use the location of the struct/union
// keyword.
SourceLocation Loc = NameLoc.isValid() ? NameLoc : KWLoc;
TagDecl *New = nullptr;
if (Kind == TTK_Enum) {
New = EnumDecl::Create(Context, SearchDC, KWLoc, Loc, Name, nullptr,
ScopedEnum, ScopedEnumUsesClassTag, IsFixed);
// If this is an undefined enum, bail.
if (TUK != TUK_Definition && !Invalid)
return nullptr;
if (EnumUnderlying) {
EnumDecl *ED = cast<EnumDecl>(New);
if (TypeSourceInfo *TI = EnumUnderlying.dyn_cast<TypeSourceInfo *>())
ED->setIntegerTypeSourceInfo(TI);
else
ED->setIntegerType(QualType(EnumUnderlying.get<const Type *>(), 0));
ED->setPromotionType(ED->getIntegerType());
}
} else { // struct/union
New = RecordDecl::Create(Context, Kind, SearchDC, KWLoc, Loc, Name,
nullptr);
}
if (RecordDecl *RD = dyn_cast<RecordDecl>(New)) {
// Add alignment attributes if necessary; these attributes are checked
// when the ASTContext lays out the structure.
//
// It is important for implementing the correct semantics that this
// happen here (in ActOnTag). The #pragma pack stack is
// maintained as a result of parser callbacks which can occur at
// many points during the parsing of a struct declaration (because
// the #pragma tokens are effectively skipped over during the
// parsing of the struct).
if (TUK == TUK_Definition && (!SkipBody || !SkipBody->ShouldSkip)) {
AddAlignmentAttributesForRecord(RD);
AddMsStructLayoutForRecord(RD);
}
}
New->setLexicalDeclContext(CurContext);
return New;
};
LookupResult Previous(*this, Name, NameLoc, LookupTagName, Redecl);
if (Name && SS.isNotEmpty()) {
// We have a nested-name tag ('struct foo::bar').
// Check for invalid 'foo::'.
if (SS.isInvalid()) {
Name = nullptr;
goto CreateNewDecl;
}
// If this is a friend or a reference to a class in a dependent
// context, don't try to make a decl for it.
if (TUK == TUK_Friend || TUK == TUK_Reference) {
DC = computeDeclContext(SS, false);
if (!DC) {
IsDependent = true;
return nullptr;
}
} else {
DC = computeDeclContext(SS, true);
if (!DC) {
Diag(SS.getRange().getBegin(), diag::err_dependent_nested_name_spec)
<< SS.getRange();
return nullptr;
}
}
if (RequireCompleteDeclContext(SS, DC))
return nullptr;
SearchDC = DC;
// Look-up name inside 'foo::'.
LookupQualifiedName(Previous, DC);
if (Previous.isAmbiguous())
return nullptr;
if (Previous.empty()) {
// Name lookup did not find anything. However, if the
// nested-name-specifier refers to the current instantiation,
// and that current instantiation has any dependent base
// classes, we might find something at instantiation time: treat
// this as a dependent elaborated-type-specifier.
// But this only makes any sense for reference-like lookups.
if (Previous.wasNotFoundInCurrentInstantiation() &&
(TUK == TUK_Reference || TUK == TUK_Friend)) {
IsDependent = true;
return nullptr;
}
// A tag 'foo::bar' must already exist.
Diag(NameLoc, diag::err_not_tag_in_scope)
<< Kind << Name << DC << SS.getRange();
Name = nullptr;
Invalid = true;
goto CreateNewDecl;
}
} else if (Name) {
// C++14 [class.mem]p14:
// If T is the name of a class, then each of the following shall have a
// name different from T:
// -- every member of class T that is itself a type
if (TUK != TUK_Reference && TUK != TUK_Friend &&
DiagnoseClassNameShadow(SearchDC, DeclarationNameInfo(Name, NameLoc)))
return nullptr;
// If this is a named struct, check to see if there was a previous forward
// declaration or definition.
// FIXME: We're looking into outer scopes here, even when we
// shouldn't be. Doing so can result in ambiguities that we
// shouldn't be diagnosing.
LookupName(Previous, S);
// When declaring or defining a tag, ignore ambiguities introduced
// by types using'ed into this scope.
if (Previous.isAmbiguous() &&
(TUK == TUK_Definition || TUK == TUK_Declaration)) {
LookupResult::Filter F = Previous.makeFilter();
while (F.hasNext()) {
NamedDecl *ND = F.next();
if (!ND->getDeclContext()->getRedeclContext()->Equals(
SearchDC->getRedeclContext()))
F.erase();
}
F.done();
}
// C++11 [namespace.memdef]p3:
// If the name in a friend declaration is neither qualified nor
// a template-id and the declaration is a function or an
// elaborated-type-specifier, the lookup to determine whether
// the entity has been previously declared shall not consider
// any scopes outside the innermost enclosing namespace.
//
// MSVC doesn't implement the above rule for types, so a friend tag
// declaration may be a redeclaration of a type declared in an enclosing
// scope. They do implement this rule for friend functions.
//
// Does it matter that this should be by scope instead of by
// semantic context?
if (!Previous.empty() && TUK == TUK_Friend) {
DeclContext *EnclosingNS = SearchDC->getEnclosingNamespaceContext();
LookupResult::Filter F = Previous.makeFilter();
bool FriendSawTagOutsideEnclosingNamespace = false;
while (F.hasNext()) {
NamedDecl *ND = F.next();
DeclContext *DC = ND->getDeclContext()->getRedeclContext();
if (DC->isFileContext() &&
!EnclosingNS->Encloses(ND->getDeclContext())) {
if (getLangOpts().MSVCCompat)
FriendSawTagOutsideEnclosingNamespace = true;
else
F.erase();
}
}
F.done();
// Diagnose this MSVC extension in the easy case where lookup would have
// unambiguously found something outside the enclosing namespace.
if (Previous.isSingleResult() && FriendSawTagOutsideEnclosingNamespace) {
NamedDecl *ND = Previous.getFoundDecl();
Diag(NameLoc, diag::ext_friend_tag_redecl_outside_namespace)
<< createFriendTagNNSFixIt(*this, ND, S, NameLoc);
}
}
// Note: there used to be some attempt at recovery here.
if (Previous.isAmbiguous())
return nullptr;
if (!getLangOpts().CPlusPlus && TUK != TUK_Reference) {
// FIXME: This makes sure that we ignore the contexts associated
// with C structs, unions, and enums when looking for a matching
// tag declaration or definition. See the similar lookup tweak
// in Sema::LookupName; is there a better way to deal with this?
while (isa<RecordDecl>(SearchDC) || isa<EnumDecl>(SearchDC))
SearchDC = SearchDC->getParent();
}
}
if (Previous.isSingleResult() &&
Previous.getFoundDecl()->isTemplateParameter()) {
// Maybe we will complain about the shadowed template parameter.
DiagnoseTemplateParameterShadow(NameLoc, Previous.getFoundDecl());
// Just pretend that we didn't see the previous declaration.
Previous.clear();
}
if (getLangOpts().CPlusPlus && Name && DC && StdNamespace &&
DC->Equals(getStdNamespace())) {
if (Name->isStr("bad_alloc")) {
// This is a declaration of or a reference to "std::bad_alloc".
isStdBadAlloc = true;
// If std::bad_alloc has been implicitly declared (but made invisible to
// name lookup), fill in this implicit declaration as the previous
// declaration, so that the declarations get chained appropriately.
if (Previous.empty() && StdBadAlloc)
Previous.addDecl(getStdBadAlloc());
} else if (Name->isStr("align_val_t")) {
isStdAlignValT = true;
if (Previous.empty() && StdAlignValT)
Previous.addDecl(getStdAlignValT());
}
}
// If we didn't find a previous declaration, and this is a reference
// (or friend reference), move to the correct scope. In C++, we
// also need to do a redeclaration lookup there, just in case
// there's a shadow friend decl.
if (Name && Previous.empty() &&
(TUK == TUK_Reference || TUK == TUK_Friend || IsTemplateParamOrArg)) {
if (Invalid) goto CreateNewDecl;
assert(SS.isEmpty());
if (TUK == TUK_Reference || IsTemplateParamOrArg) {
// C++ [basic.scope.pdecl]p5:
// -- for an elaborated-type-specifier of the form
//
// class-key identifier
//
// if the elaborated-type-specifier is used in the
// decl-specifier-seq or parameter-declaration-clause of a
// function defined in namespace scope, the identifier is
// declared as a class-name in the namespace that contains
// the declaration; otherwise, except as a friend
// declaration, the identifier is declared in the smallest
// non-class, non-function-prototype scope that contains the
// declaration.
//
// C99 6.7.2.3p8 has a similar (but not identical!) provision for
// C structs and unions.
//
// It is an error in C++ to declare (rather than define) an enum
// type, including via an elaborated type specifier. We'll
// diagnose that later; for now, declare the enum in the same
// scope as we would have picked for any other tag type.
//
// GNU C also supports this behavior as part of its incomplete
// enum types extension, while GNU C++ does not.
//
// Find the context where we'll be declaring the tag.
// FIXME: We would like to maintain the current DeclContext as the
// lexical context.
SearchDC = getTagInjectionContext(SearchDC);
// Find the scope where we'll be declaring the tag.
S = getTagInjectionScope(S, getLangOpts());
} else {
assert(TUK == TUK_Friend);
// C++ [namespace.memdef]p3:
// If a friend declaration in a non-local class first declares a
// class or function, the friend class or function is a member of
// the innermost enclosing namespace.
SearchDC = SearchDC->getEnclosingNamespaceContext();
}
// In C++, we need to do a redeclaration lookup to properly
// diagnose some problems.
// FIXME: redeclaration lookup is also used (with and without C++) to find a
// hidden declaration so that we don't get ambiguity errors when using a
// type declared by an elaborated-type-specifier. In C that is not correct
// and we should instead merge compatible types found by lookup.
if (getLangOpts().CPlusPlus) {
Previous.setRedeclarationKind(forRedeclarationInCurContext());
LookupQualifiedName(Previous, SearchDC);
} else {
Previous.setRedeclarationKind(forRedeclarationInCurContext());
LookupName(Previous, S);
}
}
// If we have a known previous declaration to use, then use it.
if (Previous.empty() && SkipBody && SkipBody->Previous)
Previous.addDecl(SkipBody->Previous);
if (!Previous.empty()) {
NamedDecl *PrevDecl = Previous.getFoundDecl();
NamedDecl *DirectPrevDecl = Previous.getRepresentativeDecl();
// It's okay to have a tag decl in the same scope as a typedef
// which hides a tag decl in the same scope. Finding this
// insanity with a redeclaration lookup can only actually happen
// in C++.
//
// This is also okay for elaborated-type-specifiers, which is
// technically forbidden by the current standard but which is
// okay according to the likely resolution of an open issue;
// see http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#407
if (getLangOpts().CPlusPlus) {
if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(PrevDecl)) {
if (const TagType *TT = TD->getUnderlyingType()->getAs<TagType>()) {
TagDecl *Tag = TT->getDecl();
if (Tag->getDeclName() == Name &&
Tag->getDeclContext()->getRedeclContext()
->Equals(TD->getDeclContext()->getRedeclContext())) {
PrevDecl = Tag;
Previous.clear();
Previous.addDecl(Tag);
Previous.resolveKind();
}
}
}
}
// If this is a redeclaration of a using shadow declaration, it must
// declare a tag in the same context. In MSVC mode, we allow a
// redefinition if either context is within the other.
if (auto *Shadow = dyn_cast<UsingShadowDecl>(DirectPrevDecl)) {
auto *OldTag = dyn_cast<TagDecl>(PrevDecl);
if (SS.isEmpty() && TUK != TUK_Reference && TUK != TUK_Friend &&
isDeclInScope(Shadow, SearchDC, S, isMemberSpecialization) &&
!(OldTag && isAcceptableTagRedeclContext(
*this, OldTag->getDeclContext(), SearchDC))) {
Diag(KWLoc, diag::err_using_decl_conflict_reverse);
Diag(Shadow->getTargetDecl()->getLocation(),
diag::note_using_decl_target);
Diag(Shadow->getUsingDecl()->getLocation(), diag::note_using_decl)
<< 0;
// Recover by ignoring the old declaration.
Previous.clear();
goto CreateNewDecl;
}
}
if (TagDecl *PrevTagDecl = dyn_cast<TagDecl>(PrevDecl)) {
// If this is a use of a previous tag, or if the tag is already declared
// in the same scope (so that the definition/declaration completes or
// re-mentions the tag), reuse the decl.
if (TUK == TUK_Reference || TUK == TUK_Friend ||
isDeclInScope(DirectPrevDecl, SearchDC, S,
SS.isNotEmpty() || isMemberSpecialization)) {
// Make sure that this wasn't declared as an enum and now used as a
// struct or something similar.
if (!isAcceptableTagRedeclaration(PrevTagDecl, Kind,
TUK == TUK_Definition, KWLoc,
Name)) {
bool SafeToContinue
= (PrevTagDecl->getTagKind() != TTK_Enum &&
Kind != TTK_Enum);
if (SafeToContinue)
Diag(KWLoc, diag::err_use_with_wrong_tag)
<< Name
<< FixItHint::CreateReplacement(SourceRange(KWLoc),
PrevTagDecl->getKindName());
else
Diag(KWLoc, diag::err_use_with_wrong_tag) << Name;
Diag(PrevTagDecl->getLocation(), diag::note_previous_use);
if (SafeToContinue)
Kind = PrevTagDecl->getTagKind();
else {
// Recover by making this an anonymous redefinition.
Name = nullptr;
Previous.clear();
Invalid = true;
}
}
if (Kind == TTK_Enum && PrevTagDecl->getTagKind() == TTK_Enum) {
const EnumDecl *PrevEnum = cast<EnumDecl>(PrevTagDecl);
// If this is an elaborated-type-specifier for a scoped enumeration,
// the 'class' keyword is not necessary and not permitted.
if (TUK == TUK_Reference || TUK == TUK_Friend) {
if (ScopedEnum)
Diag(ScopedEnumKWLoc, diag::err_enum_class_reference)
<< PrevEnum->isScoped()
<< FixItHint::CreateRemoval(ScopedEnumKWLoc);
return PrevTagDecl;
}
QualType EnumUnderlyingTy;
if (TypeSourceInfo *TI = EnumUnderlying.dyn_cast<TypeSourceInfo*>())
EnumUnderlyingTy = TI->getType().getUnqualifiedType();
else if (const Type *T = EnumUnderlying.dyn_cast<const Type*>())
EnumUnderlyingTy = QualType(T, 0);
// All conflicts with previous declarations are recovered by
// returning the previous declaration, unless this is a definition,
// in which case we want the caller to bail out.
if (CheckEnumRedeclaration(NameLoc.isValid() ? NameLoc : KWLoc,
ScopedEnum, EnumUnderlyingTy,
IsFixed, PrevEnum))
return TUK == TUK_Declaration ? PrevTagDecl : nullptr;
}
// C++11 [class.mem]p1:
// A member shall not be declared twice in the member-specification,
// except that a nested class or member class template can be declared
// and then later defined.
if (TUK == TUK_Declaration && PrevDecl->isCXXClassMember() &&
S->isDeclScope(PrevDecl)) {
Diag(NameLoc, diag::ext_member_redeclared);
Diag(PrevTagDecl->getLocation(), diag::note_previous_declaration);
}
if (!Invalid) {
// If this is a use, just return the declaration we found, unless
// we have attributes.
if (TUK == TUK_Reference || TUK == TUK_Friend) {
if (!Attrs.empty()) {
// FIXME: Diagnose these attributes. For now, we create a new
// declaration to hold them.
} else if (TUK == TUK_Reference &&
(PrevTagDecl->getFriendObjectKind() ==
Decl::FOK_Undeclared ||
PrevDecl->getOwningModule() != getCurrentModule()) &&
SS.isEmpty()) {
// This declaration is a reference to an existing entity, but
// has different visibility from that entity: it either makes
// a friend visible or it makes a type visible in a new module.
// In either case, create a new declaration. We only do this if
// the declaration would have meant the same thing if no prior
// declaration were found, that is, if it was found in the same
// scope where we would have injected a declaration.
if (!getTagInjectionContext(CurContext)->getRedeclContext()
->Equals(PrevDecl->getDeclContext()->getRedeclContext()))
return PrevTagDecl;
// This is in the injected scope, create a new declaration in
// that scope.
S = getTagInjectionScope(S, getLangOpts());
} else {
return PrevTagDecl;
}
}
// Diagnose attempts to redefine a tag.
if (TUK == TUK_Definition) {
if (NamedDecl *Def = PrevTagDecl->getDefinition()) {
// If we're defining a specialization and the previous definition
// is from an implicit instantiation, don't emit an error
// here; we'll catch this in the general case below.
bool IsExplicitSpecializationAfterInstantiation = false;
if (isMemberSpecialization) {
if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Def))
IsExplicitSpecializationAfterInstantiation =
RD->getTemplateSpecializationKind() !=
TSK_ExplicitSpecialization;
else if (EnumDecl *ED = dyn_cast<EnumDecl>(Def))
IsExplicitSpecializationAfterInstantiation =
ED->getTemplateSpecializationKind() !=
TSK_ExplicitSpecialization;
}
// Note that clang allows ODR-like semantics for ObjC/C, i.e., do
// not keep more than one definition around (merge them). However,
// ensure the decl passes the structural compatibility check in
// C11 6.2.7/1 (or 6.1.2.6/1 in C89).
NamedDecl *Hidden = nullptr;
if (SkipBody && !hasVisibleDefinition(Def, &Hidden)) {
// There is a definition of this tag, but it is not visible. We
// explicitly make use of C++'s one definition rule here, and
// assume that this definition is identical to the hidden one
// we already have. Make the existing definition visible and
// use it in place of this one.
if (!getLangOpts().CPlusPlus) {
// Postpone making the old definition visible until after we
// complete parsing the new one and do the structural
// comparison.
SkipBody->CheckSameAsPrevious = true;
SkipBody->New = createTagFromNewDecl();
SkipBody->Previous = Def;
return Def;
} else {
SkipBody->ShouldSkip = true;
SkipBody->Previous = Def;
makeMergedDefinitionVisible(Hidden);
// Carry on and handle it like a normal definition. We'll
// skip starting the definition later.
}
} else if (!IsExplicitSpecializationAfterInstantiation) {
// A redeclaration in function prototype scope in C isn't
// visible elsewhere, so merely issue a warning.
if (!getLangOpts().CPlusPlus && S->containedInPrototypeScope())
Diag(NameLoc, diag::warn_redefinition_in_param_list) << Name;
else
Diag(NameLoc, diag::err_redefinition) << Name;
notePreviousDefinition(Def,
NameLoc.isValid() ? NameLoc : KWLoc);
// If this is a redefinition, recover by making this
// struct be anonymous, which will make any later
// references get the previous definition.
Name = nullptr;
Previous.clear();
Invalid = true;
}
} else {
// If the type is currently being defined, complain
// about a nested redefinition.
auto *TD = Context.getTagDeclType(PrevTagDecl)->getAsTagDecl();
if (TD->isBeingDefined()) {
Diag(NameLoc, diag::err_nested_redefinition) << Name;
Diag(PrevTagDecl->getLocation(),
diag::note_previous_definition);
Name = nullptr;
Previous.clear();
Invalid = true;
}
}
// Okay, this is definition of a previously declared or referenced
// tag. We're going to create a new Decl for it.
}
// Okay, we're going to make a redeclaration. If this is some kind
// of reference, make sure we build the redeclaration in the same DC
// as the original, and ignore the current access specifier.
if (TUK == TUK_Friend || TUK == TUK_Reference) {
SearchDC = PrevTagDecl->getDeclContext();
AS = AS_none;
}
}
// If we get here we have (another) forward declaration or we
// have a definition. Just create a new decl.
} else {
// If we get here, this is a definition of a new tag type in a nested
// scope, e.g. "struct foo; void bar() { struct foo; }", just create a
// new decl/type. We set PrevDecl to NULL so that the entities
// have distinct types.
Previous.clear();
}
// If we get here, we're going to create a new Decl. If PrevDecl
// is non-NULL, it's a definition of the tag declared by
// PrevDecl. If it's NULL, we have a new definition.
// Otherwise, PrevDecl is not a tag, but was found with tag
// lookup. This is only actually possible in C++, where a few
// things like templates still live in the tag namespace.
} else {
// Use a better diagnostic if an elaborated-type-specifier
// found the wrong kind of type on the first
// (non-redeclaration) lookup.
if ((TUK == TUK_Reference || TUK == TUK_Friend) &&
!Previous.isForRedeclaration()) {
NonTagKind NTK = getNonTagTypeDeclKind(PrevDecl, Kind);
Diag(NameLoc, diag::err_tag_reference_non_tag) << PrevDecl << NTK
<< Kind;
Diag(PrevDecl->getLocation(), diag::note_declared_at);
Invalid = true;
// Otherwise, only diagnose if the declaration is in scope.
} else if (!isDeclInScope(DirectPrevDecl, SearchDC, S,
SS.isNotEmpty() || isMemberSpecialization)) {
// do nothing
// Diagnose implicit declarations introduced by elaborated types.
} else if (TUK == TUK_Reference || TUK == TUK_Friend) {
NonTagKind NTK = getNonTagTypeDeclKind(PrevDecl, Kind);
Diag(NameLoc, diag::err_tag_reference_conflict) << NTK;
Diag(PrevDecl->getLocation(), diag::note_previous_decl) << PrevDecl;
Invalid = true;
// Otherwise it's a declaration. Call out a particularly common
// case here.
} else if (TypedefNameDecl *TND = dyn_cast<TypedefNameDecl>(PrevDecl)) {
unsigned Kind = 0;
if (isa<TypeAliasDecl>(PrevDecl)) Kind = 1;
Diag(NameLoc, diag::err_tag_definition_of_typedef)
<< Name << Kind << TND->getUnderlyingType();
Diag(PrevDecl->getLocation(), diag::note_previous_decl) << PrevDecl;
Invalid = true;
// Otherwise, diagnose.
} else {
// The tag name clashes with something else in the target scope,
// issue an error and recover by making this tag be anonymous.
Diag(NameLoc, diag::err_redefinition_different_kind) << Name;
notePreviousDefinition(PrevDecl, NameLoc);
Name = nullptr;
Invalid = true;
}
// The existing declaration isn't relevant to us; we're in a
// new scope, so clear out the previous declaration.
Previous.clear();
}
}
CreateNewDecl:
TagDecl *PrevDecl = nullptr;
if (Previous.isSingleResult())
PrevDecl = cast<TagDecl>(Previous.getFoundDecl());
// If there is an identifier, use the location of the identifier as the
// location of the decl, otherwise use the location of the struct/union
// keyword.
SourceLocation Loc = NameLoc.isValid() ? NameLoc : KWLoc;
// Otherwise, create a new declaration. If there is a previous
// declaration of the same entity, the two will be linked via
// PrevDecl.
TagDecl *New;
if (Kind == TTK_Enum) {
// FIXME: Tag decls should be chained to any simultaneous vardecls, e.g.:
// enum X { A, B, C } D; D should chain to X.
New = EnumDecl::Create(Context, SearchDC, KWLoc, Loc, Name,
cast_or_null<EnumDecl>(PrevDecl), ScopedEnum,
ScopedEnumUsesClassTag, IsFixed);
if (isStdAlignValT && (!StdAlignValT || getStdAlignValT()->isImplicit()))
StdAlignValT = cast<EnumDecl>(New);
// If this is an undefined enum, warn.
if (TUK != TUK_Definition && !Invalid) {
TagDecl *Def;
if (IsFixed && cast<EnumDecl>(New)->isFixed()) {
// C++0x: 7.2p2: opaque-enum-declaration.
// Conflicts are diagnosed above. Do nothing.
}
else if (PrevDecl && (Def = cast<EnumDecl>(PrevDecl)->getDefinition())) {
Diag(Loc, diag::ext_forward_ref_enum_def)
<< New;
Diag(Def->getLocation(), diag::note_previous_definition);
} else {
unsigned DiagID = diag::ext_forward_ref_enum;
if (getLangOpts().MSVCCompat)
DiagID = diag::ext_ms_forward_ref_enum;
else if (getLangOpts().CPlusPlus)
DiagID = diag::err_forward_ref_enum;
Diag(Loc, DiagID);
}
}
if (EnumUnderlying) {
EnumDecl *ED = cast<EnumDecl>(New);
if (TypeSourceInfo *TI = EnumUnderlying.dyn_cast<TypeSourceInfo*>())
ED->setIntegerTypeSourceInfo(TI);
else
ED->setIntegerType(QualType(EnumUnderlying.get<const Type*>(), 0));
ED->setPromotionType(ED->getIntegerType());
assert(ED->isComplete() && "enum with type should be complete");
}
} else {
// struct/union/class
// FIXME: Tag decls should be chained to any simultaneous vardecls, e.g.:
// struct X { int A; } D; D should chain to X.
if (getLangOpts().CPlusPlus) {
// FIXME: Look for a way to use RecordDecl for simple structs.
New = CXXRecordDecl::Create(Context, Kind, SearchDC, KWLoc, Loc, Name,
cast_or_null<CXXRecordDecl>(PrevDecl));
if (isStdBadAlloc && (!StdBadAlloc || getStdBadAlloc()->isImplicit()))
StdBadAlloc = cast<CXXRecordDecl>(New);
} else
New = RecordDecl::Create(Context, Kind, SearchDC, KWLoc, Loc, Name,
cast_or_null<RecordDecl>(PrevDecl));
}
// C++11 [dcl.type]p3:
// A type-specifier-seq shall not define a class or enumeration [...].
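// For example, "sizeof(struct S { int i; })" defines S inside a
// type-specifier-seq and is therefore rejected in C++.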
if (getLangOpts().CPlusPlus && (IsTypeSpecifier || IsTemplateParamOrArg) &&
TUK == TUK_Definition) {
Diag(New->getLocation(), diag::err_type_defined_in_type_specifier)
<< Context.getTagDeclType(New);
Invalid = true;
}
if (!Invalid && getLangOpts().CPlusPlus && TUK == TUK_Definition &&
DC->getDeclKind() == Decl::Enum) {
Diag(New->getLocation(), diag::err_type_defined_in_enum)
<< Context.getTagDeclType(New);
Invalid = true;
}
// Maybe add qualifier info.
if (SS.isNotEmpty()) {
if (SS.isSet()) {
// If this is either a declaration or a definition, check the
// nested-name-specifier against the current context.
if ((TUK == TUK_Definition || TUK == TUK_Declaration) &&
diagnoseQualifiedDeclaration(SS, DC, OrigName, Loc,
isMemberSpecialization))
Invalid = true;
New->setQualifierInfo(SS.getWithLocInContext(Context));
if (TemplateParameterLists.size() > 0) {
New->setTemplateParameterListsInfo(Context, TemplateParameterLists);
}
}
else
Invalid = true;
}
if (RecordDecl *RD = dyn_cast<RecordDecl>(New)) {
// Add alignment attributes if necessary; these attributes are checked when
// the ASTContext lays out the structure.
//
// It is important for implementing the correct semantics that this
// happen here (in ActOnTag). The #pragma pack stack is
// maintained as a result of parser callbacks which can occur at
// many points during the parsing of a struct declaration (because
// the #pragma tokens are effectively skipped over during the
// parsing of the struct).
if (TUK == TUK_Definition && (!SkipBody || !SkipBody->ShouldSkip)) {
AddAlignmentAttributesForRecord(RD);
AddMsStructLayoutForRecord(RD);
}
}
if (ModulePrivateLoc.isValid()) {
if (isMemberSpecialization)
Diag(New->getLocation(), diag::err_module_private_specialization)
<< 2
<< FixItHint::CreateRemoval(ModulePrivateLoc);
// __module_private__ does not apply to local classes. However, we only
// diagnose this as an error when the declaration specifiers are
// freestanding. Here, we just ignore the __module_private__.
else if (!SearchDC->isFunctionOrMethod())
New->setModulePrivate();
}
// If this is a specialization of a member class (of a class template),
// check the specialization.
if (isMemberSpecialization && CheckMemberSpecialization(New, Previous))
Invalid = true;
// If we're declaring or defining a tag in function prototype scope in C,
// note that this type can only be used within the function and add it to
// the list of decls to inject into the function definition scope.
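// For example, in C:
//   void f(struct S *p); // declares 'struct S' visible only inside f;
//                        // see warn_decl_in_param_list below.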
if ((Name || Kind == TTK_Enum) &&
getNonFieldDeclScope(S)->isFunctionPrototypeScope()) {
if (getLangOpts().CPlusPlus) {
// C++ [dcl.fct]p6:
// Types shall not be defined in return or parameter types.
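// For example, "void f(struct S { int x; } s);" is ill-formed in C++.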
if (TUK == TUK_Definition && !IsTypeSpecifier) {
Diag(Loc, diag::err_type_defined_in_param_type)
<< Name;
Invalid = true;
}
} else if (!PrevDecl) {
Diag(Loc, diag::warn_decl_in_param_list) << Context.getTagDeclType(New);
}
}
if (Invalid)
New->setInvalidDecl();
// Set the lexical context. If the tag has a C++ scope specifier, the
// lexical context will be different from the semantic context.
New->setLexicalDeclContext(CurContext);
// Mark this as a friend decl if applicable.
// In Microsoft mode, a friend declaration also acts as a forward
// declaration so we always pass true to setObjectOfFriendDecl to make
// the tag name visible.
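// For example, under -fms-compatibility:
//   class C { friend class F; };
//   F *p; // accepted: the friend declaration also forward-declared F.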
if (TUK == TUK_Friend)
New->setObjectOfFriendDecl(getLangOpts().MSVCCompat);
// Set the access specifier.
if (!Invalid && SearchDC->isRecord())
SetMemberAccessSpecifier(New, PrevDecl, AS);
if (PrevDecl)
CheckRedeclarationModuleOwnership(New, PrevDecl);
if (TUK == TUK_Definition && (!SkipBody || !SkipBody->ShouldSkip))
New->startDefinition();
ProcessDeclAttributeList(S, New, Attrs);
AddPragmaAttributes(S, New);
// If this has an identifier, add it to the scope stack.
if (TUK == TUK_Friend) {
// We might be replacing an existing declaration in the lookup tables;
// if so, borrow its access specifier.
if (PrevDecl)
New->setAccess(PrevDecl->getAccess());
DeclContext *DC = New->getDeclContext()->getRedeclContext();
DC->makeDeclVisibleInContext(New);
if (Name) // can be null along some error paths
if (Scope *EnclosingScope = getScopeForDeclContext(S, DC))
PushOnScopeChains(New, EnclosingScope, /* AddToContext = */ false);
} else if (Name) {
S = getNonFieldDeclScope(S);
PushOnScopeChains(New, S, true);
} else {
CurContext->addDecl(New);
}
// If this is the C FILE type, notify the AST context.
if (IdentifierInfo *II = New->getIdentifier())
if (!New->isInvalidDecl() &&
New->getDeclContext()->getRedeclContext()->isTranslationUnit() &&
II->isStr("FILE"))
Context.setFILEDecl(New);
if (PrevDecl)
mergeDeclAttributes(New, PrevDecl);
// If there's a #pragma GCC visibility in scope, set the visibility of this
// record.
AddPushedVisibilityAttribute(New);
if (isMemberSpecialization && !New->isInvalidDecl())
CompleteMemberSpecialization(New, Previous);
OwnedDecl = true;
// In C++, don't return an invalid declaration. We can't recover well from
// the cases where we make the type anonymous.
if (Invalid && getLangOpts().CPlusPlus) {
if (New->isBeingDefined())
if (auto RD = dyn_cast<RecordDecl>(New))
RD->completeDefinition();
return nullptr;
} else if (SkipBody && SkipBody->ShouldSkip) {
return SkipBody->Previous;
} else {
return New;
}
}
void Sema::ActOnTagStartDefinition(Scope *S, Decl *TagD) {
AdjustDeclIfTemplate(TagD);
TagDecl *Tag = cast<TagDecl>(TagD);
// Enter the tag context.
PushDeclContext(S, Tag);
ActOnDocumentableDecl(TagD);
// If there's a #pragma GCC visibility in scope, set the visibility of this
// record.
AddPushedVisibilityAttribute(Tag);
}
bool Sema::ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody) {
if (!hasStructuralCompatLayout(Prev, SkipBody.New))
return false;
// Make the previous decl visible.
makeMergedDefinitionVisible(SkipBody.Previous);
return true;
}
Decl *Sema::ActOnObjCContainerStartDefinition(Decl *IDecl) {
assert(isa<ObjCContainerDecl>(IDecl) &&
"ActOnObjCContainerStartDefinition - Not ObjCContainerDecl");
DeclContext *OCD = cast<DeclContext>(IDecl);
assert(getContainingDC(OCD) == CurContext &&
"The next DeclContext should be lexically contained in the current one.");
CurContext = OCD;
return IDecl;
}
void Sema::ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagD,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc) {
AdjustDeclIfTemplate(TagD);
CXXRecordDecl *Record = cast<CXXRecordDecl>(TagD);
FieldCollector->StartClass();
if (!Record->getIdentifier())
return;
if (FinalLoc.isValid())
Record->addAttr(new (Context)
FinalAttr(FinalLoc, Context, IsFinalSpelledSealed));
// C++ [class]p2:
// [...] The class-name is also inserted into the scope of the
// class itself; this is known as the injected-class-name. For
// purposes of access checking, the injected-class-name is treated
// as if it were a public member name.
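// For example, within "class C { C *next; };" the name C refers to
// the class itself via its injected-class-name.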
CXXRecordDecl *InjectedClassName = CXXRecordDecl::Create(
Context, Record->getTagKind(), CurContext, Record->getBeginLoc(),
Record->getLocation(), Record->getIdentifier(),
/*PrevDecl=*/nullptr,
/*DelayTypeCreation=*/true);
Context.getTypeDeclType(InjectedClassName, Record);
InjectedClassName->setImplicit();
InjectedClassName->setAccess(AS_public);
if (ClassTemplateDecl *Template = Record->getDescribedClassTemplate())
InjectedClassName->setDescribedClassTemplate(Template);
PushOnScopeChains(InjectedClassName, S);
assert(InjectedClassName->isInjectedClassName() &&
"Broken injected-class-name");
}
void Sema::ActOnTagFinishDefinition(Scope *S, Decl *TagD,
SourceRange BraceRange) {
AdjustDeclIfTemplate(TagD);
TagDecl *Tag = cast<TagDecl>(TagD);
Tag->setBraceRange(BraceRange);
// Make sure we "complete" the definition even if it is invalid.
if (Tag->isBeingDefined()) {
assert(Tag->isInvalidDecl() && "We should already have completed it");
if (RecordDecl *RD = dyn_cast<RecordDecl>(Tag))
RD->completeDefinition();
}
if (isa<CXXRecordDecl>(Tag)) {
FieldCollector->FinishClass();
}
// Exit this scope of this tag's definition.
PopDeclContext();
if (getCurLexicalContext()->isObjCContainer() &&
Tag->getDeclContext()->isFileContext())
Tag->setTopLevelDeclInObjCContainer();
// Notify the consumer that we've defined a tag.
if (!Tag->isInvalidDecl())
Consumer.HandleTagDeclDefinition(Tag);
}
void Sema::ActOnObjCContainerFinishDefinition() {
// Exit this scope of this interface definition.
PopDeclContext();
}
void Sema::ActOnObjCTemporaryExitContainerContext(DeclContext *DC) {
assert(DC == CurContext && "Mismatch of container contexts");
OriginalLexicalContext = DC;
ActOnObjCContainerFinishDefinition();
}
void Sema::ActOnObjCReenterContainerContext(DeclContext *DC) {
ActOnObjCContainerStartDefinition(cast<Decl>(DC));
OriginalLexicalContext = nullptr;
}
void Sema::ActOnTagDefinitionError(Scope *S, Decl *TagD) {
AdjustDeclIfTemplate(TagD);
TagDecl *Tag = cast<TagDecl>(TagD);
Tag->setInvalidDecl();
// Make sure we "complete" the definition even if it is invalid.
if (Tag->isBeingDefined()) {
if (RecordDecl *RD = dyn_cast<RecordDecl>(Tag))
RD->completeDefinition();
}
// We're undoing ActOnTagStartDefinition here, not
// ActOnStartCXXMemberDeclarations, so we don't have to mess with
// the FieldCollector.
PopDeclContext();
}
// Note that FieldName may be null for anonymous bitfields.
ExprResult Sema::VerifyBitField(SourceLocation FieldLoc,
IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth) {
// Default to true; that shouldn't confuse checks for emptiness
if (ZeroWidth)
*ZeroWidth = true;
// C99 6.7.2.1p4 - verify the field type.
// C++ 9.6p3: A bit-field shall have integral or enumeration type.
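// For example, "struct S { float f : 3; };" is ill-formed, while
// "int i : 3;" declares a valid bit-field.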
if (!FieldTy->isDependentType() && !FieldTy->isIntegralOrEnumerationType()) {
// Handle incomplete types with specific error.
if (RequireCompleteType(FieldLoc, FieldTy, diag::err_field_incomplete))
return ExprError();
if (FieldName)
return Diag(FieldLoc, diag::err_not_integral_type_bitfield)
<< FieldName << FieldTy << BitWidth->getSourceRange();
return Diag(FieldLoc, diag::err_not_integral_type_anon_bitfield)
<< FieldTy << BitWidth->getSourceRange();
} else if (DiagnoseUnexpandedParameterPack(const_cast<Expr *>(BitWidth),
UPPC_BitFieldWidth))
return ExprError();
// If the bit-width is type- or value-dependent, don't try to check
// it now.
if (BitWidth->isValueDependent() || BitWidth->isTypeDependent())
return BitWidth;
llvm::APSInt Value;
ExprResult ICE = VerifyIntegerConstantExpression(BitWidth, &Value);
if (ICE.isInvalid())
return ICE;
BitWidth = ICE.get();
if (Value != 0 && ZeroWidth)
*ZeroWidth = false;
// Zero-width bitfield is ok for anonymous field.
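// For example, "struct S { int a : 4; int : 0; int b : 4; };" uses an
// unnamed zero-width bit-field to start b in a new allocation unit,
// whereas a named "int b : 0;" is diagnosed below.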
if (Value == 0 && FieldName)
return Diag(FieldLoc, diag::err_bitfield_has_zero_width) << FieldName;
if (Value.isSigned() && Value.isNegative()) {
if (FieldName)
return Diag(FieldLoc, diag::err_bitfield_has_negative_width)
<< FieldName << Value.toString(10);
return Diag(FieldLoc, diag::err_anon_bitfield_has_negative_width)
<< Value.toString(10);
}
if (!FieldTy->isDependentType()) {
uint64_t TypeStorageSize = Context.getTypeSize(FieldTy);
uint64_t TypeWidth = Context.getIntWidth(FieldTy);
bool BitfieldIsOverwide = Value.ugt(TypeWidth);
// Over-wide bitfields are an error in C or when using the MSVC bitfield
// ABI.
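// For example, with a 32-bit int, "int i : 40;" violates C11 6.7.2.1p4,
// but outside the MSVC ABI it is only a warning in C++ (see below).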
bool CStdConstraintViolation =
BitfieldIsOverwide && !getLangOpts().CPlusPlus;
bool MSBitfieldViolation =
Value.ugt(TypeStorageSize) &&
(IsMsStruct || Context.getTargetInfo().getCXXABI().isMicrosoft());
if (CStdConstraintViolation || MSBitfieldViolation) {
unsigned DiagWidth =
CStdConstraintViolation ? TypeWidth : TypeStorageSize;
if (FieldName)
return Diag(FieldLoc, diag::err_bitfield_width_exceeds_type_width)
<< FieldName << (unsigned)Value.getZExtValue()
<< !CStdConstraintViolation << DiagWidth;
return Diag(FieldLoc, diag::err_anon_bitfield_width_exceeds_type_width)
<< (unsigned)Value.getZExtValue() << !CStdConstraintViolation
<< DiagWidth;
}
// Warn on types where the user might conceivably expect to get all
// specified bits as value bits: that's all integral types other than
// 'bool'.
if (BitfieldIsOverwide && !FieldTy->isBooleanType()) {
if (FieldName)
Diag(FieldLoc, diag::warn_bitfield_width_exceeds_type_width)
<< FieldName << (unsigned)Value.getZExtValue()
<< (unsigned)TypeWidth;
else
Diag(FieldLoc, diag::warn_anon_bitfield_width_exceeds_type_width)
<< (unsigned)Value.getZExtValue() << (unsigned)TypeWidth;
}
}
return BitWidth;
}
/// ActOnField - Each field of a C struct/union is passed into this in order
/// to create a FieldDecl object for it.
Decl *Sema::ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth) {
FieldDecl *Res = HandleField(S, cast_or_null<RecordDecl>(TagD),
DeclStart, D, static_cast<Expr*>(BitfieldWidth),
/*InitStyle=*/ICIS_NoInit, AS_public);
return Res;
}
/// HandleField - Analyze a field of a C struct or a C++ data member.
///
FieldDecl *Sema::HandleField(Scope *S, RecordDecl *Record,
SourceLocation DeclStart,
Declarator &D, Expr *BitWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS) {
if (D.isDecompositionDeclarator()) {
const DecompositionDeclarator &Decomp = D.getDecompositionDeclarator();
Diag(Decomp.getLSquareLoc(), diag::err_decomp_decl_context)
<< Decomp.getSourceRange();
return nullptr;
}
IdentifierInfo *II = D.getIdentifier();
SourceLocation Loc = DeclStart;
if (II) Loc = D.getIdentifierLoc();
TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
QualType T = TInfo->getType();
if (getLangOpts().CPlusPlus) {
CheckExtraCXXDefaultArguments(D);
if (DiagnoseUnexpandedParameterPack(D.getIdentifierLoc(), TInfo,
UPPC_DataMemberType)) {
D.setInvalidType();
T = Context.IntTy;
TInfo = Context.getTrivialTypeSourceInfo(T, Loc);
}
}
DiagnoseFunctionSpecifiers(D.getDeclSpec());
if (D.getDeclSpec().isInlineSpecified())
Diag(D.getDeclSpec().getInlineSpecLoc(), diag::err_inline_non_function)
<< getLangOpts().CPlusPlus17;
if (DeclSpec::TSCS TSCS = D.getDeclSpec().getThreadStorageClassSpec())
Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(),
diag::err_invalid_thread)
<< DeclSpec::getSpecifierName(TSCS);
// Check to see if this name was declared as a member previously
NamedDecl *PrevDecl = nullptr;
LookupResult Previous(*this, II, Loc, LookupMemberName,
ForVisibleRedeclaration);
LookupName(Previous, S);
switch (Previous.getResultKind()) {
case LookupResult::Found:
case LookupResult::FoundUnresolvedValue:
PrevDecl = Previous.getAsSingle<NamedDecl>();
break;
case LookupResult::FoundOverloaded:
PrevDecl = Previous.getRepresentativeDecl();
break;
case LookupResult::NotFound:
case LookupResult::NotFoundInCurrentInstantiation:
case LookupResult::Ambiguous:
break;
}
Previous.suppressDiagnostics();
if (PrevDecl && PrevDecl->isTemplateParameter()) {
// Maybe we will complain about the shadowed template parameter.
DiagnoseTemplateParameterShadow(D.getIdentifierLoc(), PrevDecl);
// Just pretend that we didn't see the previous declaration.
PrevDecl = nullptr;
}
if (PrevDecl && !isDeclInScope(PrevDecl, Record, S))
PrevDecl = nullptr;
bool Mutable
= (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_mutable);
SourceLocation TSSL = D.getBeginLoc();
FieldDecl *NewFD
= CheckFieldDecl(II, T, TInfo, Record, Loc, Mutable, BitWidth, InitStyle,
TSSL, AS, PrevDecl, &D);
if (NewFD->isInvalidDecl())
Record->setInvalidDecl();
if (D.getDeclSpec().isModulePrivateSpecified())
NewFD->setModulePrivate();
if (NewFD->isInvalidDecl() && PrevDecl) {
// Don't introduce NewFD into scope; there's already something
// with the same name in the same scope.
} else if (II) {
PushOnScopeChains(NewFD, S);
} else
Record->addDecl(NewFD);
return NewFD;
}
/// Build a new FieldDecl and check its well-formedness.
///
/// This routine builds a new FieldDecl given the fields name, type,
/// record, etc. \p PrevDecl should refer to any previous declaration
/// with the same name and in the same scope as the field to be
/// created.
///
/// \returns a new FieldDecl.
///
/// \todo The Declarator argument is a hack. It will be removed once
FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D) {
IdentifierInfo *II = Name.getAsIdentifierInfo();
bool InvalidDecl = false;
if (D) InvalidDecl = D->isInvalidType();
// If we receive a broken type, recover by assuming 'int' and
// marking this declaration as invalid.
if (T.isNull()) {
InvalidDecl = true;
T = Context.IntTy;
}
QualType EltTy = Context.getBaseElementType(T);
if (!EltTy->isDependentType()) {
if (RequireCompleteType(Loc, EltTy, diag::err_field_incomplete)) {
// Fields of incomplete type force their record to be invalid.
Record->setInvalidDecl();
InvalidDecl = true;
} else {
NamedDecl *Def;
EltTy->isIncompleteType(&Def);
if (Def && Def->isInvalidDecl()) {
Record->setInvalidDecl();
InvalidDecl = true;
}
}
}
// TR 18037 does not allow fields to be declared with an address space.
if (T.getQualifiers().hasAddressSpace() || T->isDependentAddressSpaceType() ||
T->getBaseElementTypeUnsafe()->isDependentAddressSpaceType()) {
Diag(Loc, diag::err_field_with_address_space);
Record->setInvalidDecl();
InvalidDecl = true;
}
if (LangOpts.OpenCL) {
// OpenCL v1.2 s6.9b,r & OpenCL v2.0 s6.12.5 - The following types cannot be
// used as structure or union fields: image, sampler, event or block types.
if (T->isEventT() || T->isImageType() || T->isSamplerT() ||
T->isBlockPointerType()) {
Diag(Loc, diag::err_opencl_type_struct_or_union_field) << T;
Record->setInvalidDecl();
InvalidDecl = true;
}
// OpenCL v1.2 s6.9.c: bitfields are not supported.
if (BitWidth) {
Diag(Loc, diag::err_opencl_bitfields);
InvalidDecl = true;
}
}
// Anonymous bit-fields cannot be cv-qualified (CWG 2229).
if (!InvalidDecl && getLangOpts().CPlusPlus && !II && BitWidth &&
T.hasQualifiers()) {
InvalidDecl = true;
Diag(Loc, diag::err_anon_bitfield_qualifiers);
}
// C99 6.7.2.1p8: A member of a structure or union may have any type other
// than a variably modified type.
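// For example, "void f(int n) { struct S { int a[n]; }; }" is rejected
// because a variable length array cannot be a struct member.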
if (!InvalidDecl && T->isVariablyModifiedType()) {
bool SizeIsNegative;
llvm::APSInt Oversized;
TypeSourceInfo *FixedTInfo =
TryToFixInvalidVariablyModifiedTypeSourceInfo(TInfo, Context,
SizeIsNegative,
Oversized);
if (FixedTInfo) {
Diag(Loc, diag::warn_illegal_constant_array_size);
TInfo = FixedTInfo;
T = FixedTInfo->getType();
} else {
if (SizeIsNegative)
Diag(Loc, diag::err_typecheck_negative_array_size);
else if (Oversized.getBoolValue())
Diag(Loc, diag::err_array_too_large)
<< Oversized.toString(10);
else
Diag(Loc, diag::err_typecheck_field_variable_size);
InvalidDecl = true;
}
}
// Fields cannot have abstract class types
if (!InvalidDecl && RequireNonAbstractType(Loc, T,
diag::err_abstract_type_in_decl,
AbstractFieldType))
InvalidDecl = true;
bool ZeroWidth = false;
if (InvalidDecl)
BitWidth = nullptr;
// If this is declared as a bit-field, check the bit-field.
if (BitWidth) {
BitWidth = VerifyBitField(Loc, II, T, Record->isMsStruct(Context), BitWidth,
&ZeroWidth).get();
if (!BitWidth) {
InvalidDecl = true;
BitWidth = nullptr;
ZeroWidth = false;
}
}
// Check that 'mutable' is consistent with the type of the declaration.
if (!InvalidDecl && Mutable) {
unsigned DiagID = 0;
if (T->isReferenceType())
DiagID = getLangOpts().MSVCCompat ? diag::ext_mutable_reference
: diag::err_mutable_reference;
else if (T.isConstQualified())
DiagID = diag::err_mutable_const;
if (DiagID) {
SourceLocation ErrLoc = Loc;
if (D && D->getDeclSpec().getStorageClassSpecLoc().isValid())
ErrLoc = D->getDeclSpec().getStorageClassSpecLoc();
Diag(ErrLoc, DiagID);
if (DiagID != diag::ext_mutable_reference) {
Mutable = false;
InvalidDecl = true;
}
}
}
// C++11 [class.union]p8 (DR1460):
// At most one variant member of a union may have a
// brace-or-equal-initializer.
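// For example, "union U { int a = 1; int b = 2; };" is ill-formed.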
if (InitStyle != ICIS_NoInit)
checkDuplicateDefaultInit(*this, cast<CXXRecordDecl>(Record), Loc);
FieldDecl *NewFD = FieldDecl::Create(Context, Record, TSSL, Loc, II, T, TInfo,
BitWidth, Mutable, InitStyle);
if (InvalidDecl)
NewFD->setInvalidDecl();
if (PrevDecl && !isa<TagDecl>(PrevDecl)) {
Diag(Loc, diag::err_duplicate_member) << II;
Diag(PrevDecl->getLocation(), diag::note_previous_declaration);
NewFD->setInvalidDecl();
}
if (!InvalidDecl && getLangOpts().CPlusPlus) {
if (Record->isUnion()) {
if (const RecordType *RT = EltTy->getAs<RecordType>()) {
CXXRecordDecl* RDecl = cast<CXXRecordDecl>(RT->getDecl());
if (RDecl->getDefinition()) {
// C++ [class.union]p1: An object of a class with a non-trivial
// constructor, a non-trivial copy constructor, a non-trivial
// destructor, or a non-trivial copy assignment operator
// cannot be a member of a union, nor can an array of such
// objects.
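// For example, "union U { std::string s; };" is ill-formed in C++98;
// in C++11 the union's corresponding special members are instead
// defined as deleted.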
if (CheckNontrivialField(NewFD))
NewFD->setInvalidDecl();
}
}
// C++ [class.union]p1: If a union contains a member of reference type,
// the program is ill-formed, except when compiling with MSVC extensions
// enabled.
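// For example, "union U { int &r; };" is an error, downgraded to an
// extension warning under -fms-extensions.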
if (EltTy->isReferenceType()) {
Diag(NewFD->getLocation(), getLangOpts().MicrosoftExt ?
diag::ext_union_member_of_reference_type :
diag::err_union_member_of_reference_type)
<< NewFD->getDeclName() << EltTy;
if (!getLangOpts().MicrosoftExt)
NewFD->setInvalidDecl();
}
}
}
// FIXME: We need to pass in the attributes given an AST
// representation, not a parser representation.
if (D) {
// FIXME: The current scope is almost... but not entirely... correct here.
ProcessDeclAttributes(getCurScope(), NewFD, *D);
if (NewFD->hasAttrs())
CheckAlignasUnderalignment(NewFD);
}
// In auto-retain/release, infer strong retention for fields of
// retainable type.
if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(NewFD))
NewFD->setInvalidDecl();
if (T.isObjCGCWeak())
Diag(Loc, diag::warn_attribute_weak_on_field);
NewFD->setAccess(AS);
return NewFD;
}
bool Sema::CheckNontrivialField(FieldDecl *FD) {
assert(FD);
assert(getLangOpts().CPlusPlus && "valid check only for C++");
if (FD->isInvalidDecl() || FD->getType()->isDependentType())
return false;
QualType EltTy = Context.getBaseElementType(FD->getType());
if (const RecordType *RT = EltTy->getAs<RecordType>()) {
CXXRecordDecl *RDecl = cast<CXXRecordDecl>(RT->getDecl());
if (RDecl->getDefinition()) {
// We check for copy constructors before constructors
// because otherwise we'll never get complaints about
// copy constructors.
CXXSpecialMember member = CXXInvalid;
// We're required to check for any non-trivial constructors. Since the
// implicit default constructor is suppressed if there are any
// user-declared constructors, we just need to check that there is a
// trivial default constructor and a trivial copy constructor. (We don't
// worry about move constructors here, since this is a C++98 check.)
if (RDecl->hasNonTrivialCopyConstructor())
member = CXXCopyConstructor;
else if (!RDecl->hasTrivialDefaultConstructor())
member = CXXDefaultConstructor;
else if (RDecl->hasNonTrivialCopyAssignment())
member = CXXCopyAssignment;
else if (RDecl->hasNonTrivialDestructor())
member = CXXDestructor;
if (member != CXXInvalid) {
if (!getLangOpts().CPlusPlus11 &&
getLangOpts().ObjCAutoRefCount && RDecl->hasObjectMember()) {
// Objective-C++ ARC: it is an error to have a non-trivial field of
// a union. However, system headers in Objective-C programs
// occasionally have Objective-C lifetime objects within unions,
// and rather than cause the program to fail, we make those
// members unavailable.
SourceLocation Loc = FD->getLocation();
if (getSourceManager().isInSystemHeader(Loc)) {
if (!FD->hasAttr<UnavailableAttr>())
FD->addAttr(UnavailableAttr::CreateImplicit(Context, "",
UnavailableAttr::IR_ARCFieldWithOwnership, Loc));
return false;
}
}
Diag(FD->getLocation(), getLangOpts().CPlusPlus11 ?
diag::warn_cxx98_compat_nontrivial_union_or_anon_struct_member :
diag::err_illegal_union_or_anon_struct_member)
<< FD->getParent()->isUnion() << FD->getDeclName() << member;
DiagnoseNontrivial(RDecl, member);
return !getLangOpts().CPlusPlus11;
}
}
}
return false;
}
/// TranslateIvarVisibility - Translate visibility from a token ID to an
/// AST enum value.
static ObjCIvarDecl::AccessControl
TranslateIvarVisibility(tok::ObjCKeywordKind ivarVisibility) {
switch (ivarVisibility) {
default: llvm_unreachable("Unknown visibility kind");
case tok::objc_private: return ObjCIvarDecl::Private;
case tok::objc_public: return ObjCIvarDecl::Public;
case tok::objc_protected: return ObjCIvarDecl::Protected;
case tok::objc_package: return ObjCIvarDecl::Package;
}
}
/// ActOnIvar - Each ivar field of an objective-c class is passed into this
/// in order to create an IvarDecl object for it.
Decl *Sema::ActOnIvar(Scope *S,
SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind Visibility) {
IdentifierInfo *II = D.getIdentifier();
Expr *BitWidth = (Expr*)BitfieldWidth;
SourceLocation Loc = DeclStart;
if (II) Loc = D.getIdentifierLoc();
// FIXME: Unnamed fields can be handled in various different ways, for
// example, unnamed unions inject all members into the struct namespace!
TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
QualType T = TInfo->getType();
if (BitWidth) {
// 6.7.2.1p3, 6.7.2.1p4
BitWidth = VerifyBitField(Loc, II, T, /*IsMsStruct*/false, BitWidth).get();
if (!BitWidth)
D.setInvalidType();
} else {
// Not a bitfield.
// validate II.
}
if (T->isReferenceType()) {
Diag(Loc, diag::err_ivar_reference_type);
D.setInvalidType();
}
// C99 6.7.2.1p8: A member of a structure or union may have any type other
// than a variably modified type.
else if (T->isVariablyModifiedType()) {
Diag(Loc, diag::err_typecheck_ivar_variable_size);
D.setInvalidType();
}
// Get the visibility (access control) for this ivar.
ObjCIvarDecl::AccessControl ac =
Visibility != tok::objc_not_keyword ? TranslateIvarVisibility(Visibility)
: ObjCIvarDecl::None;
// Must set ivar's DeclContext to its enclosing interface.
ObjCContainerDecl *EnclosingDecl = cast<ObjCContainerDecl>(CurContext);
if (!EnclosingDecl || EnclosingDecl->isInvalidDecl())
return nullptr;
ObjCContainerDecl *EnclosingContext;
if (ObjCImplementationDecl *IMPDecl =
dyn_cast<ObjCImplementationDecl>(EnclosingDecl)) {
if (LangOpts.ObjCRuntime.isFragile()) {
// Case of ivar declared in an implementation. Context is that of its class.
EnclosingContext = IMPDecl->getClassInterface();
assert(EnclosingContext && "Implementation has no class interface!");
}
else
EnclosingContext = EnclosingDecl;
} else {
if (ObjCCategoryDecl *CDecl =
dyn_cast<ObjCCategoryDecl>(EnclosingDecl)) {
if (LangOpts.ObjCRuntime.isFragile() || !CDecl->IsClassExtension()) {
Diag(Loc, diag::err_misplaced_ivar) << CDecl->IsClassExtension();
return nullptr;
}
}
EnclosingContext = EnclosingDecl;
}
// Construct the decl.
ObjCIvarDecl *NewID = ObjCIvarDecl::Create(Context, EnclosingContext,
DeclStart, Loc, II, T,
TInfo, ac, (Expr *)BitfieldWidth);
if (II) {
NamedDecl *PrevDecl = LookupSingleName(S, II, Loc, LookupMemberName,
ForVisibleRedeclaration);
if (PrevDecl && isDeclInScope(PrevDecl, EnclosingContext, S)
&& !isa<TagDecl>(PrevDecl)) {
Diag(Loc, diag::err_duplicate_member) << II;
Diag(PrevDecl->getLocation(), diag::note_previous_declaration);
NewID->setInvalidDecl();
}
}
// Process attributes attached to the ivar.
ProcessDeclAttributes(S, NewID, D);
if (D.isInvalidType())
NewID->setInvalidDecl();
// In ARC, infer 'retaining' for ivars of retainable type.
if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(NewID))
NewID->setInvalidDecl();
if (D.getDeclSpec().isModulePrivateSpecified())
NewID->setModulePrivate();
if (II) {
// FIXME: When interfaces are DeclContexts, we'll need to add
// these to the interface.
S->AddDecl(NewID);
IdResolver.AddDecl(NewID);
}
if (LangOpts.ObjCRuntime.isNonFragile() &&
!NewID->isInvalidDecl() && isa<ObjCInterfaceDecl>(EnclosingDecl))
Diag(Loc, diag::warn_ivars_in_interface);
return NewID;
}
/// ActOnLastBitfield - This routine handles synthesized bit-field rules for
/// class and class extensions. For every class \@interface and class
/// extension \@interface, if the last ivar is a bitfield of any type,
/// then add an implicit `char :0` ivar to the end of that interface.
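/// For example, given "@interface I { unsigned x : 4; }" under a
/// non-fragile runtime, an implicit "char : 0" ivar is appended after x.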
void Sema::ActOnLastBitfield(SourceLocation DeclLoc,
SmallVectorImpl<Decl *> &AllIvarDecls) {
if (LangOpts.ObjCRuntime.isFragile() || AllIvarDecls.empty())
return;
Decl *ivarDecl = AllIvarDecls[AllIvarDecls.size()-1];
ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(ivarDecl);
if (!Ivar->isBitField() || Ivar->isZeroLengthBitField(Context))
return;
ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(CurContext);
if (!ID) {
if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(CurContext)) {
if (!CD->IsClassExtension())
return;
}
// No need to add this to end of @implementation.
else
return;
}
// All conditions are met. Add a new bitfield to the tail end of ivars.
llvm::APInt Zero(Context.getTypeSize(Context.IntTy), 0);
Expr * BW = IntegerLiteral::Create(Context, Zero, Context.IntTy, DeclLoc);
Ivar = ObjCIvarDecl::Create(Context, cast<ObjCContainerDecl>(CurContext),
DeclLoc, DeclLoc, nullptr,
Context.CharTy,
Context.getTrivialTypeSourceInfo(Context.CharTy,
DeclLoc),
ObjCIvarDecl::Private, BW,
true);
AllIvarDecls.push_back(Ivar);
}
void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &Attrs) {
assert(EnclosingDecl && "missing record or interface decl");
// If this is an Objective-C @implementation or category and we have
// new fields here we should reset the layout of the interface since
// it will now change.
if (!Fields.empty() && isa<ObjCContainerDecl>(EnclosingDecl)) {
ObjCContainerDecl *DC = cast<ObjCContainerDecl>(EnclosingDecl);
switch (DC->getKind()) {
default: break;
case Decl::ObjCCategory:
Context.ResetObjCLayout(cast<ObjCCategoryDecl>(DC)->getClassInterface());
break;
case Decl::ObjCImplementation:
Context.ResetObjCLayout(cast<ObjCImplementationDecl>(DC)->getClassInterface());
break;
}
}
RecordDecl *Record = dyn_cast<RecordDecl>(EnclosingDecl);
CXXRecordDecl *CXXRecord = dyn_cast<CXXRecordDecl>(EnclosingDecl);
// Start counting up the number of named members; make sure to include
// members of anonymous structs and unions in the total.
unsigned NumNamedMembers = 0;
if (Record) {
for (const auto *I : Record->decls()) {
if (const auto *IFD = dyn_cast<IndirectFieldDecl>(I))
if (IFD->getDeclName())
++NumNamedMembers;
}
}
// Verify that all the fields are okay.
SmallVector<FieldDecl*, 32> RecFields;
for (ArrayRef<Decl *>::iterator i = Fields.begin(), end = Fields.end();
i != end; ++i) {
FieldDecl *FD = cast<FieldDecl>(*i);
// Get the type for the field.
const Type *FDTy = FD->getType().getTypePtr();
if (!FD->isAnonymousStructOrUnion()) {
// Remember all fields written by the user.
RecFields.push_back(FD);
}
// If the field is already invalid for some reason, don't emit more
// diagnostics about it.
if (FD->isInvalidDecl()) {
EnclosingDecl->setInvalidDecl();
continue;
}
// C99 6.7.2.1p2:
// A structure or union shall not contain a member with
// incomplete or function type (hence, a structure shall not
// contain an instance of itself, but may contain a pointer to
// an instance of itself), except that the last member of a
// structure with more than one named member may have incomplete
// array type; such a structure (and any union containing,
// possibly recursively, a member that is such a structure)
// shall not be a member of a structure or an element of an
// array.
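// For example, "struct S { int n; int data[]; };" declares a valid
// flexible array member, while "struct T { int data[]; int n; };"
// places it before another member and is diagnosed below.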
bool IsLastField = (i + 1 == Fields.end());
if (FDTy->isFunctionType()) {
// Field declared as a function.
Diag(FD->getLocation(), diag::err_field_declared_as_function)
<< FD->getDeclName();
FD->setInvalidDecl();
EnclosingDecl->setInvalidDecl();
continue;
} else if (FDTy->isIncompleteArrayType() &&
(Record || isa<ObjCContainerDecl>(EnclosingDecl))) {
if (Record) {
// Flexible array member.
// Microsoft and g++ are more permissive regarding flexible arrays;
// they accept a flexible array in a union and also
// as the sole element of a struct/class.
unsigned DiagID = 0;
if (!Record->isUnion() && !IsLastField) {
Diag(FD->getLocation(), diag::err_flexible_array_not_at_end)
<< FD->getDeclName() << FD->getType() << Record->getTagKind();
Diag((*(i + 1))->getLocation(), diag::note_next_field_declaration);
FD->setInvalidDecl();
EnclosingDecl->setInvalidDecl();
continue;
} else if (Record->isUnion())
DiagID = getLangOpts().MicrosoftExt
? diag::ext_flexible_array_union_ms
: getLangOpts().CPlusPlus
? diag::ext_flexible_array_union_gnu
: diag::err_flexible_array_union;
else if (NumNamedMembers < 1)
DiagID = getLangOpts().MicrosoftExt
? diag::ext_flexible_array_empty_aggregate_ms
: getLangOpts().CPlusPlus
? diag::ext_flexible_array_empty_aggregate_gnu
: diag::err_flexible_array_empty_aggregate;
if (DiagID)
Diag(FD->getLocation(), DiagID) << FD->getDeclName()
<< Record->getTagKind();
// While the layout of types that contain virtual bases is not specified
// by the C++ standard, both the Itanium and Microsoft C++ ABIs place
// virtual bases after the derived members. This would make a flexible
// array member declared at the end of an object not adjacent to the end
// of the type.
if (CXXRecord && CXXRecord->getNumVBases() != 0)
Diag(FD->getLocation(), diag::err_flexible_array_virtual_base)
<< FD->getDeclName() << Record->getTagKind();
if (!getLangOpts().C99)
Diag(FD->getLocation(), diag::ext_c99_flexible_array_member)
<< FD->getDeclName() << Record->getTagKind();
// If the element type has a non-trivial destructor, we would not
// implicitly destroy the elements, so disallow it for now.
//
// FIXME: GCC allows this. We should probably either implicitly delete
// the destructor of the containing class, or just allow this.
QualType BaseElem = Context.getBaseElementType(FD->getType());
if (!BaseElem->isDependentType() && BaseElem.isDestructedType()) {
Diag(FD->getLocation(), diag::err_flexible_array_has_nontrivial_dtor)
<< FD->getDeclName() << FD->getType();
FD->setInvalidDecl();
EnclosingDecl->setInvalidDecl();
continue;
}
// Okay, we have a legal flexible array member at the end of the struct.
Record->setHasFlexibleArrayMember(true);
} else {
// In an ObjCContainerDecl, ivars with incomplete array type are
// accepted unless they are followed by another ivar. That check is done
// elsewhere, after synthesized ivars are known.
}
} else if (!FDTy->isDependentType() &&
RequireCompleteType(FD->getLocation(), FD->getType(),
diag::err_field_incomplete)) {
// Incomplete type
FD->setInvalidDecl();
EnclosingDecl->setInvalidDecl();
continue;
} else if (const RecordType *FDTTy = FDTy->getAs<RecordType>()) {
if (Record && FDTTy->getDecl()->hasFlexibleArrayMember()) {
// A type which contains a flexible array member is considered to be a
// flexible array member.
Record->setHasFlexibleArrayMember(true);
if (!Record->isUnion()) {
// If this is a struct/class and this is not the last element, reject
// it. Note that GCC supports variable sized arrays in the middle of
// structures.
if (!IsLastField)
Diag(FD->getLocation(), diag::ext_variable_sized_type_in_struct)
<< FD->getDeclName() << FD->getType();
else {
// We support flexible arrays at the end of structs in
// other structs as an extension.
Diag(FD->getLocation(), diag::ext_flexible_array_in_struct)
<< FD->getDeclName();
}
}
}
if (isa<ObjCContainerDecl>(EnclosingDecl) &&
RequireNonAbstractType(FD->getLocation(), FD->getType(),
diag::err_abstract_type_in_decl,
AbstractIvarType)) {
// Ivars cannot have abstract class types
FD->setInvalidDecl();
}
if (Record && FDTTy->getDecl()->hasObjectMember())
Record->setHasObjectMember(true);
if (Record && FDTTy->getDecl()->hasVolatileMember())
Record->setHasVolatileMember(true);
} else if (FDTy->isObjCObjectType()) {
// A field cannot be an Objective-C object.
Diag(FD->getLocation(), diag::err_statically_allocated_object)
<< FixItHint::CreateInsertion(FD->getLocation(), "*");
QualType T = Context.getObjCObjectPointerType(FD->getType());
FD->setType(T);
} else if (getLangOpts().ObjC &&
getLangOpts().getGC() != LangOptions::NonGC &&
Record && !Record->hasObjectMember()) {
if (FD->getType()->isObjCObjectPointerType() ||
FD->getType().isObjCGCStrong())
Record->setHasObjectMember(true);
else if (Context.getAsArrayType(FD->getType())) {
QualType BaseType = Context.getBaseElementType(FD->getType());
if (BaseType->isRecordType() &&
BaseType->getAs<RecordType>()->getDecl()->hasObjectMember())
Record->setHasObjectMember(true);
else if (BaseType->isObjCObjectPointerType() ||
BaseType.isObjCGCStrong())
Record->setHasObjectMember(true);
}
}
if (Record && !getLangOpts().CPlusPlus && !FD->hasAttr<UnavailableAttr>()) {
QualType FT = FD->getType();
if (FT.isNonTrivialToPrimitiveDefaultInitialize()) {
Record->setNonTrivialToPrimitiveDefaultInitialize(true);
if (FT.hasNonTrivialToPrimitiveDefaultInitializeCUnion() ||
Record->isUnion())
Record->setHasNonTrivialToPrimitiveDefaultInitializeCUnion(true);
}
QualType::PrimitiveCopyKind PCK = FT.isNonTrivialToPrimitiveCopy();
if (PCK != QualType::PCK_Trivial && PCK != QualType::PCK_VolatileTrivial) {
Record->setNonTrivialToPrimitiveCopy(true);
if (FT.hasNonTrivialToPrimitiveCopyCUnion() || Record->isUnion())
Record->setHasNonTrivialToPrimitiveCopyCUnion(true);
}
if (FT.isDestructedType()) {
Record->setNonTrivialToPrimitiveDestroy(true);
Record->setParamDestroyedInCallee(true);
if (FT.hasNonTrivialToPrimitiveDestructCUnion() || Record->isUnion())
Record->setHasNonTrivialToPrimitiveDestructCUnion(true);
}
if (const auto *RT = FT->getAs<RecordType>()) {
if (RT->getDecl()->getArgPassingRestrictions() ==
RecordDecl::APK_CanNeverPassInRegs)
Record->setArgPassingRestrictions(RecordDecl::APK_CanNeverPassInRegs);
} else if (FT.getQualifiers().getObjCLifetime() == Qualifiers::OCL_Weak)
Record->setArgPassingRestrictions(RecordDecl::APK_CanNeverPassInRegs);
}
if (Record && FD->getType().isVolatileQualified())
Record->setHasVolatileMember(true);
// Keep track of the number of named members.
if (FD->getIdentifier())
++NumNamedMembers;
}
// Okay, we successfully defined 'Record'.
if (Record) {
bool Completed = false;
if (CXXRecord) {
if (!CXXRecord->isInvalidDecl()) {
// Set access bits correctly on the directly-declared conversions.
for (CXXRecordDecl::conversion_iterator
I = CXXRecord->conversion_begin(),
E = CXXRecord->conversion_end(); I != E; ++I)
I.setAccess((*I)->getAccess());
}
if (!CXXRecord->isDependentType()) {
// Add any implicitly-declared members to this class.
AddImplicitlyDeclaredMembersToClass(CXXRecord);
if (!CXXRecord->isInvalidDecl()) {
// If we have virtual base classes, we may end up finding multiple
// final overriders for a given virtual function. Check for this
// problem now.
if (CXXRecord->getNumVBases()) {
CXXFinalOverriderMap FinalOverriders;
CXXRecord->getFinalOverriders(FinalOverriders);
for (CXXFinalOverriderMap::iterator M = FinalOverriders.begin(),
MEnd = FinalOverriders.end();
M != MEnd; ++M) {
for (OverridingMethods::iterator SO = M->second.begin(),
SOEnd = M->second.end();
SO != SOEnd; ++SO) {
assert(SO->second.size() > 0 &&
"Virtual function without overriding functions?");
if (SO->second.size() == 1)
continue;
// C++ [class.virtual]p2:
// In a derived class, if a virtual member function of a base
// class subobject has more than one final overrider the
// program is ill-formed.
Diag(Record->getLocation(), diag::err_multiple_final_overriders)
<< (const NamedDecl *)M->first << Record;
Diag(M->first->getLocation(),
diag::note_overridden_virtual_function);
for (OverridingMethods::overriding_iterator
OM = SO->second.begin(),
OMEnd = SO->second.end();
OM != OMEnd; ++OM)
Diag(OM->Method->getLocation(), diag::note_final_overrider)
<< (const NamedDecl *)M->first << OM->Method->getParent();
Record->setInvalidDecl();
}
}
CXXRecord->completeDefinition(&FinalOverriders);
Completed = true;
}
}
}
}
if (!Completed)
Record->completeDefinition();
// Handle attributes before checking the layout.
ProcessDeclAttributeList(S, Record, Attrs);
// We may have deferred checking for a deleted destructor. Check now.
if (CXXRecord) {
auto *Dtor = CXXRecord->getDestructor();
if (Dtor && Dtor->isImplicit() &&
ShouldDeleteSpecialMember(Dtor, CXXDestructor)) {
CXXRecord->setImplicitDestructorIsDeleted();
SetDeclDeleted(Dtor, CXXRecord->getLocation());
}
}
if (Record->hasAttrs()) {
CheckAlignasUnderalignment(Record);
if (const MSInheritanceAttr *IA = Record->getAttr<MSInheritanceAttr>())
checkMSInheritanceAttrOnDefinition(cast<CXXRecordDecl>(Record),
IA->getRange(), IA->getBestCase(),
IA->getSemanticSpelling());
}
// Check if the structure/union declaration is a type that can have zero
// size in C. For C this is a language extension, for C++ it may cause
// compatibility problems.
bool CheckForZeroSize;
if (!getLangOpts().CPlusPlus) {
CheckForZeroSize = true;
} else {
// For C++, filter out types that cannot be referenced in C code.
CXXRecordDecl *CXXRecord = cast<CXXRecordDecl>(Record);
CheckForZeroSize =
CXXRecord->getLexicalDeclContext()->isExternCContext() &&
!CXXRecord->isDependentType() &&
CXXRecord->isCLike();
}
if (CheckForZeroSize) {
bool ZeroSize = true;
bool IsEmpty = true;
unsigned NonBitFields = 0;
for (RecordDecl::field_iterator I = Record->field_begin(),
E = Record->field_end();
(NonBitFields == 0 || ZeroSize) && I != E; ++I) {
IsEmpty = false;
if (I->isUnnamedBitfield()) {
if (!I->isZeroLengthBitField(Context))
ZeroSize = false;
} else {
++NonBitFields;
QualType FieldType = I->getType();
if (FieldType->isIncompleteType() ||
!Context.getTypeSizeInChars(FieldType).isZero())
ZeroSize = false;
}
}
// Empty structs are an extension in C (C99 6.7.2.1p7). They are
// allowed in C++, but warn if the declaration is inside an
// extern "C" block.
if (ZeroSize) {
Diag(RecLoc, getLangOpts().CPlusPlus ?
diag::warn_zero_size_struct_union_in_extern_c :
diag::warn_zero_size_struct_union_compat)
<< IsEmpty << Record->isUnion() << (NonBitFields > 1);
}
// Structs without named members are an extension in C (C99 6.7.2.1p7),
// but are accepted by GCC.
if (NonBitFields == 0 && !getLangOpts().CPlusPlus) {
Diag(RecLoc, IsEmpty ? diag::ext_empty_struct_union :
diag::ext_no_named_members_in_struct_union)
<< Record->isUnion();
}
}
} else {
ObjCIvarDecl **ClsFields =
reinterpret_cast<ObjCIvarDecl**>(RecFields.data());
if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(EnclosingDecl)) {
ID->setEndOfDefinitionLoc(RBrac);
// Add ivars to the class's DeclContext.
for (unsigned i = 0, e = RecFields.size(); i != e; ++i) {
ClsFields[i]->setLexicalDeclContext(ID);
ID->addDecl(ClsFields[i]);
}
// Must enforce the rule that ivars in the base classes may not be
// duplicates.
if (ID->getSuperClass())
DiagnoseDuplicateIvars(ID, ID->getSuperClass());
} else if (ObjCImplementationDecl *IMPDecl =
dyn_cast<ObjCImplementationDecl>(EnclosingDecl)) {
assert(IMPDecl && "ActOnFields - missing ObjCImplementationDecl");
for (unsigned I = 0, N = RecFields.size(); I != N; ++I)
// An ivar declared in an @implementation never belongs to the
// implementation; it is only in the implementation's lexical context.
ClsFields[I]->setLexicalDeclContext(IMPDecl);
CheckImplementationIvars(IMPDecl, ClsFields, RecFields.size(), RBrac);
IMPDecl->setIvarLBraceLoc(LBrac);
IMPDecl->setIvarRBraceLoc(RBrac);
} else if (ObjCCategoryDecl *CDecl =
dyn_cast<ObjCCategoryDecl>(EnclosingDecl)) {
// Case of ivars in a class extension; all other cases have been
// reported as errors elsewhere.
// FIXME. Class extension does not have a LocEnd field.
// CDecl->setLocEnd(RBrac);
// Add ivars to the class extension's DeclContext.
// Diagnose redeclaration of private ivars.
ObjCInterfaceDecl *IDecl = CDecl->getClassInterface();
for (unsigned i = 0, e = RecFields.size(); i != e; ++i) {
if (IDecl) {
if (const ObjCIvarDecl *ClsIvar =
IDecl->getIvarDecl(ClsFields[i]->getIdentifier())) {
Diag(ClsFields[i]->getLocation(),
diag::err_duplicate_ivar_declaration);
Diag(ClsIvar->getLocation(), diag::note_previous_definition);
continue;
}
for (const auto *Ext : IDecl->known_extensions()) {
if (const ObjCIvarDecl *ClsExtIvar
= Ext->getIvarDecl(ClsFields[i]->getIdentifier())) {
Diag(ClsFields[i]->getLocation(),
diag::err_duplicate_ivar_declaration);
Diag(ClsExtIvar->getLocation(), diag::note_previous_definition);
continue;
}
}
}
ClsFields[i]->setLexicalDeclContext(CDecl);
CDecl->addDecl(ClsFields[i]);
}
CDecl->setIvarLBraceLoc(LBrac);
CDecl->setIvarRBraceLoc(RBrac);
}
}
}
/// Determine whether the given integral value is representable within
/// the given type T.
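/// For example, 255 is representable in the 8 bits of unsigned char
/// but not in signed char, whose sign bit is excluded below.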
static bool isRepresentableIntegerValue(ASTContext &Context,
llvm::APSInt &Value,
QualType T) {
assert((T->isIntegralType(Context) || T->isEnumeralType()) &&
"Integral type required!");
unsigned BitWidth = Context.getIntWidth(T);
if (Value.isUnsigned() || Value.isNonNegative()) {
if (T->isSignedIntegerOrEnumerationType())
--BitWidth;
return Value.getActiveBits() <= BitWidth;
}
return Value.getMinSignedBits() <= BitWidth;
}
// Given an integral type, return the next larger integral type
// (or a null QualType if no such type exists).
static QualType getNextLargerIntegralType(ASTContext &Context, QualType T) {
// FIXME: Int128/UInt128 support, which also needs to be introduced into
// enum checking below.
assert((T->isIntegralType(Context) ||
T->isEnumeralType()) && "Integral type required!");
const unsigned NumTypes = 4;
QualType SignedIntegralTypes[NumTypes] = {
Context.ShortTy, Context.IntTy, Context.LongTy, Context.LongLongTy
};
QualType UnsignedIntegralTypes[NumTypes] = {
Context.UnsignedShortTy, Context.UnsignedIntTy, Context.UnsignedLongTy,
Context.UnsignedLongLongTy
};
unsigned BitWidth = Context.getTypeSize(T);
QualType *Types = T->isSignedIntegerOrEnumerationType()? SignedIntegralTypes
: UnsignedIntegralTypes;
for (unsigned I = 0; I != NumTypes; ++I)
if (Context.getTypeSize(Types[I]) > BitWidth)
return Types[I];
return QualType();
}
EnumConstantDecl *Sema::CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *Val) {
unsigned IntWidth = Context.getTargetInfo().getIntWidth();
llvm::APSInt EnumVal(IntWidth);
QualType EltTy;
if (Val && DiagnoseUnexpandedParameterPack(Val, UPPC_EnumeratorValue))
Val = nullptr;
if (Val)
Val = DefaultLvalueConversion(Val).get();
if (Val) {
if (Enum->isDependentType() || Val->isTypeDependent())
EltTy = Context.DependentTy;
else {
if (getLangOpts().CPlusPlus11 && Enum->isFixed() &&
!getLangOpts().MSVCCompat) {
// C++11 [dcl.enum]p5: If the underlying type is fixed, [...] the
// constant-expression in the enumerator-definition shall be a converted
// constant expression of the underlying type.
EltTy = Enum->getIntegerType();
ExprResult Converted =
CheckConvertedConstantExpression(Val, EltTy, EnumVal,
CCEK_Enumerator);
if (Converted.isInvalid())
Val = nullptr;
else
Val = Converted.get();
} else if (!Val->isValueDependent() &&
!(Val = VerifyIntegerConstantExpression(Val,
&EnumVal).get())) {
// C99 6.7.2.2p2: Make sure we have an integer constant expression.
} else {
if (Enum->isComplete()) {
EltTy = Enum->getIntegerType();
// In Obj-C and Microsoft mode, require the enumeration value to be
// representable in the underlying type of the enumeration. In C++11,
// we perform a non-narrowing conversion as part of converted constant
// expression checking.
if (!isRepresentableIntegerValue(Context, EnumVal, EltTy)) {
if (getLangOpts().MSVCCompat) {
Diag(IdLoc, diag::ext_enumerator_too_large) << EltTy;
Val = ImpCastExprToType(Val, EltTy, CK_IntegralCast).get();
} else
Diag(IdLoc, diag::err_enumerator_too_large) << EltTy;
} else
Val = ImpCastExprToType(Val, EltTy,
EltTy->isBooleanType() ?
CK_IntegralToBoolean : CK_IntegralCast)
.get();
} else if (getLangOpts().CPlusPlus) {
// C++11 [dcl.enum]p5:
// If the underlying type is not fixed, the type of each enumerator
// is the type of its initializing value:
// - If an initializer is specified for an enumerator, the
// initializing value has the same type as the expression.
EltTy = Val->getType();
} else {
// C99 6.7.2.2p2:
// The expression that defines the value of an enumeration constant
// shall be an integer constant expression that has a value
// representable as an int.
// Complain if the value is not representable in an int.
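// For example, "enum E { Big = 2147483648 };" with a 32-bit int draws
// this extension diagnostic in C.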
if (!isRepresentableIntegerValue(Context, EnumVal, Context.IntTy))
Diag(IdLoc, diag::ext_enum_value_not_int)
<< EnumVal.toString(10) << Val->getSourceRange()
<< (EnumVal.isUnsigned() || EnumVal.isNonNegative());
else if (!Context.hasSameType(Val->getType(), Context.IntTy)) {
// Force the type of the expression to 'int'.
Val = ImpCastExprToType(Val, Context.IntTy, CK_IntegralCast).get();
}
EltTy = Val->getType();
}
}
}
}
if (!Val) {
if (Enum->isDependentType())
EltTy = Context.DependentTy;
else if (!LastEnumConst) {
// C++0x [dcl.enum]p5:
// If the underlying type is not fixed, the type of each enumerator
// is the type of its initializing value:
// - If no initializer is specified for the first enumerator, the
// initializing value has an unspecified integral type.
//
// GCC uses 'int' for its unspecified integral type, as does
// C99 6.7.2.2p3.
if (Enum->isFixed()) {
EltTy = Enum->getIntegerType();
}
else {
EltTy = Context.IntTy;
}
} else {
// Assign the last value + 1.
EnumVal = LastEnumConst->getInitVal();
++EnumVal;
EltTy = LastEnumConst->getType();
// Check for overflow on increment.
if (EnumVal < LastEnumConst->getInitVal()) {
// C++0x [dcl.enum]p5:
// If the underlying type is not fixed, the type of each enumerator
// is the type of its initializing value:
//
// - Otherwise the type of the initializing value is the same as
// the type of the initializing value of the preceding enumerator
// unless the incremented value is not representable in that type,
// in which case the type is an unspecified integral type
// sufficient to contain the incremented value. If no such type
// exists, the program is ill-formed.
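// For example, "enum E { A = INT_MAX, B };" forces B into a larger
// integral type when the underlying type is not fixed, and is an
// error when it is fixed to int.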
QualType T = getNextLargerIntegralType(Context, EltTy);
if (T.isNull() || Enum->isFixed()) {
// There is no integral type large enough to represent this
// value. Complain, then allow the value to wrap around.
EnumVal = LastEnumConst->getInitVal();
EnumVal = EnumVal.zext(EnumVal.getBitWidth() * 2);
++EnumVal;
if (Enum->isFixed())
// When the underlying type is fixed, this is ill-formed.
Diag(IdLoc, diag::err_enumerator_wrapped)
<< EnumVal.toString(10)
<< EltTy;
else
Diag(IdLoc, diag::ext_enumerator_increment_too_large)
<< EnumVal.toString(10);
} else {
EltTy = T;
}
// Retrieve the last enumerator's value, extend that type to the
// type that is supposed to be large enough to represent the incremented
// value, then increment.
EnumVal = LastEnumConst->getInitVal();
EnumVal.setIsSigned(EltTy->isSignedIntegerOrEnumerationType());
EnumVal = EnumVal.zextOrTrunc(Context.getIntWidth(EltTy));
++EnumVal;
// If we're not in C++, diagnose the overflow of enumerator values,
// which in C99 means that the enumerator value is not representable in
// an int (C99 6.7.2.2p2). However, we support GCC's extension that
// permits enumerator values that are representable in some larger
// integral type.
if (!getLangOpts().CPlusPlus && !T.isNull())
Diag(IdLoc, diag::warn_enum_value_overflow);
} else if (!getLangOpts().CPlusPlus &&
!isRepresentableIntegerValue(Context, EnumVal, EltTy)) {
// Enforce C99 6.7.2.2p2 even when we compute the next value.
Diag(IdLoc, diag::ext_enum_value_not_int)
<< EnumVal.toString(10) << 1;
}
}
}
if (!EltTy->isDependentType()) {
// Make the enumerator value match the signedness and size of the
// enumerator's type.
EnumVal = EnumVal.extOrTrunc(Context.getIntWidth(EltTy));
EnumVal.setIsSigned(EltTy->isSignedIntegerOrEnumerationType());
}
return EnumConstantDecl::Create(Context, Enum, IdLoc, Id, EltTy,
Val, EnumVal);
}
Sema::SkipBodyInfo Sema::shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc) {
if (!(getLangOpts().Modules || getLangOpts().ModulesLocalVisibility) ||
!getLangOpts().CPlusPlus)
return SkipBodyInfo();
// We have an anonymous enum definition. Look up the first enumerator to
// determine if we should merge the definition with an existing one and
// skip the body.
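// Illustrative scenario (editor's note): with -fmodules, two textual copies
// of 'enum { kAnswer = 42 };' may be entered; looking up 'kAnswer' can find
// the enumerator of a hidden prior definition, in which case this body is
// skipped and merged with it.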
NamedDecl *PrevDecl = LookupSingleName(S, II, IILoc, LookupOrdinaryName,
forRedeclarationInCurContext());
auto *PrevECD = dyn_cast_or_null<EnumConstantDecl>(PrevDecl);
if (!PrevECD)
return SkipBodyInfo();
EnumDecl *PrevED = cast<EnumDecl>(PrevECD->getDeclContext());
NamedDecl *Hidden;
if (!PrevED->getDeclName() && !hasVisibleDefinition(PrevED, &Hidden)) {
SkipBodyInfo Skip;
Skip.Previous = Hidden;
return Skip;
}
return SkipBodyInfo();
}
Decl *Sema::ActOnEnumConstant(Scope *S, Decl *theEnumDecl, Decl *lastEnumConst,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val) {
EnumDecl *TheEnumDecl = cast<EnumDecl>(theEnumDecl);
EnumConstantDecl *LastEnumConst =
cast_or_null<EnumConstantDecl>(lastEnumConst);
// The scope passed in may not be a decl scope. Zip up the scope tree until
// we find one that is.
S = getNonFieldDeclScope(S);
// Verify that there isn't already something declared with this name in this
// scope.
LookupResult R(*this, Id, IdLoc, LookupOrdinaryName, ForVisibleRedeclaration);
LookupName(R, S);
NamedDecl *PrevDecl = R.getAsSingle<NamedDecl>();
if (PrevDecl && PrevDecl->isTemplateParameter()) {
// Maybe we will complain about the shadowed template parameter.
DiagnoseTemplateParameterShadow(IdLoc, PrevDecl);
// Just pretend that we didn't see the previous declaration.
PrevDecl = nullptr;
}
// C++ [class.mem]p15:
// If T is the name of a class, then each of the following shall have a name
// different from T:
// - every enumerator of every member of class T that is an unscoped
// enumerated type
if (getLangOpts().CPlusPlus && !TheEnumDecl->isScoped())
DiagnoseClassNameShadow(TheEnumDecl->getDeclContext(),
DeclarationNameInfo(Id, IdLoc));
EnumConstantDecl *New =
CheckEnumConstant(TheEnumDecl, LastEnumConst, IdLoc, Id, Val);
if (!New)
return nullptr;
if (PrevDecl) {
if (!TheEnumDecl->isScoped() && isa<ValueDecl>(PrevDecl)) {
// Check for other kinds of shadowing not already handled.
CheckShadow(New, PrevDecl, R);
}
// When in C++, we may get a TagDecl with the same name; in this case the
// enum constant will 'hide' the tag.
assert((getLangOpts().CPlusPlus || !isa<TagDecl>(PrevDecl)) &&
"Received TagDecl when not in C++!");
if (!isa<TagDecl>(PrevDecl) && isDeclInScope(PrevDecl, CurContext, S)) {
if (isa<EnumConstantDecl>(PrevDecl))
Diag(IdLoc, diag::err_redefinition_of_enumerator) << Id;
else
Diag(IdLoc, diag::err_redefinition) << Id;
notePreviousDefinition(PrevDecl, IdLoc);
return nullptr;
}
}
// Process attributes.
ProcessDeclAttributeList(S, New, Attrs);
AddPragmaAttributes(S, New);
// Register this decl in the current scope stack.
New->setAccess(TheEnumDecl->getAccess());
PushOnScopeChains(New, S);
ActOnDocumentableDecl(New);
return New;
}
// Returns true when the enum initial expression does not trigger the
// duplicate enum warning. A few common cases are exempted as follows:
// Element2 = Element1
// Element2 = Element1 + 1
// Element2 = Element1 - 1
// Where Element2 and Element1 are from the same enum.
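// For example (illustrative, editor's note):
//   enum E { A, B, C = A + 1 };  // C == B, but the pattern is exempted
//   enum F { X, Y = 0 };         // warns: Y duplicates the implicit X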
static bool ValidDuplicateEnum(EnumConstantDecl *ECD, EnumDecl *Enum) {
Expr *InitExpr = ECD->getInitExpr();
if (!InitExpr)
return true;
InitExpr = InitExpr->IgnoreImpCasts();
if (BinaryOperator *BO = dyn_cast<BinaryOperator>(InitExpr)) {
if (!BO->isAdditiveOp())
return true;
IntegerLiteral *IL = dyn_cast<IntegerLiteral>(BO->getRHS());
if (!IL)
return true;
if (IL->getValue() != 1)
return true;
InitExpr = BO->getLHS();
}
// This checks if the elements are from the same enum.
DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(InitExpr);
if (!DRE)
return true;
EnumConstantDecl *EnumConstant = dyn_cast<EnumConstantDecl>(DRE->getDecl());
if (!EnumConstant)
return true;
if (cast<EnumDecl>(TagDecl::castFromDeclContext(ECD->getDeclContext())) !=
Enum)
return true;
return false;
}
// Emits a warning when an element is implicitly given a value that
// a previous element has already been assigned.
static void CheckForDuplicateEnumValues(Sema &S, ArrayRef<Decl *> Elements,
EnumDecl *Enum, QualType EnumType) {
// Avoid anonymous enums
if (!Enum->getIdentifier())
return;
// Only check for small enums.
if (Enum->getNumPositiveBits() > 63 || Enum->getNumNegativeBits() > 64)
return;
if (S.Diags.isIgnored(diag::warn_duplicate_enum_values, Enum->getLocation()))
return;
typedef SmallVector<EnumConstantDecl *, 3> ECDVector;
typedef SmallVector<std::unique_ptr<ECDVector>, 3> DuplicatesVector;
typedef llvm::PointerUnion<EnumConstantDecl*, ECDVector*> DeclOrVector;
typedef std::unordered_map<int64_t, DeclOrVector> ValueToVectorMap;
// Use int64_t as the key to avoid needing special handling for map keys.
auto EnumConstantToKey = [](const EnumConstantDecl *D) {
llvm::APSInt Val = D->getInitVal();
return Val.isSigned() ? Val.getSExtValue() : Val.getZExtValue();
};
DuplicatesVector DupVector;
ValueToVectorMap EnumMap;
// Populate the EnumMap with all values represented by enum constants without
// an initializer.
for (auto *Element : Elements) {
EnumConstantDecl *ECD = cast_or_null<EnumConstantDecl>(Element);
// Null EnumConstantDecl means a previous diagnostic has been emitted for
// this constant. Skip this enum since it may be ill-formed.
if (!ECD) {
return;
}
// Constants with initializers are handled in the next loop.
if (ECD->getInitExpr())
continue;
// Duplicate values are handled in the next loop.
EnumMap.insert({EnumConstantToKey(ECD), ECD});
}
if (EnumMap.size() == 0)
return;
// Create vectors for any values that have duplicates.
for (auto *Element : Elements) {
// The last loop returned if any constant was null.
EnumConstantDecl *ECD = cast<EnumConstantDecl>(Element);
if (!ValidDuplicateEnum(ECD, Enum))
continue;
auto Iter = EnumMap.find(EnumConstantToKey(ECD));
if (Iter == EnumMap.end())
continue;
DeclOrVector& Entry = Iter->second;
if (EnumConstantDecl *D = Entry.dyn_cast<EnumConstantDecl*>()) {
// Ensure constants are different.
if (D == ECD)
continue;
// Create new vector and push values onto it.
auto Vec = llvm::make_unique<ECDVector>();
Vec->push_back(D);
Vec->push_back(ECD);
// Update entry to point to the duplicates vector.
Entry = Vec.get();
// Store the vector somewhere we can consult later for quick emission of
// diagnostics.
DupVector.emplace_back(std::move(Vec));
continue;
}
ECDVector *Vec = Entry.get<ECDVector*>();
// Make sure constants are not added more than once.
if (*Vec->begin() == ECD)
continue;
Vec->push_back(ECD);
}
// Emit diagnostics.
for (const auto &Vec : DupVector) {
assert(Vec->size() > 1 && "ECDVector should have at least 2 elements.");
// Emit warning for one enum constant.
auto *FirstECD = Vec->front();
S.Diag(FirstECD->getLocation(), diag::warn_duplicate_enum_values)
<< FirstECD << FirstECD->getInitVal().toString(10)
<< FirstECD->getSourceRange();
// Emit one note for each of the remaining enum constants with
// the same value.
for (auto *ECD : llvm::make_range(Vec->begin() + 1, Vec->end()))
S.Diag(ECD->getLocation(), diag::note_duplicate_element)
<< ECD << ECD->getInitVal().toString(10)
<< ECD->getSourceRange();
}
}
bool Sema::IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const {
assert(ED->isClosedFlag() && "looking for value in non-flag or open enum");
assert(ED->isCompleteDefinition() && "expected enum definition");
auto R = FlagBitsCache.insert(std::make_pair(ED, llvm::APInt()));
llvm::APInt &FlagBits = R.first->second;
if (R.second) {
for (auto *E : ED->enumerators()) {
const auto &EVal = E->getInitVal();
// Only single-bit enumerators introduce new flag values.
if (EVal.isPowerOf2())
FlagBits = FlagBits.zextOrSelf(EVal.getBitWidth()) | EVal;
}
}
// A value is in a flag enum if either its bits are a subset of the enum's
// flag bits (the first condition) or we are allowing masks and the same is
// true of its complement (the second condition). When masks are allowed, we
// allow the common idiom of ~(enum1 | enum2) to be a valid enum value.
//
// While it's true that any value could be used as a mask, the assumption is
// that a mask will have all of the insignificant bits set. Anything else is
// likely a logic error.
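// Illustrative example (editor's sketch):
//   enum __attribute__((flag_enum)) F { A = 1, B = 2, Mask = ~(A | B) };
// 'Mask' is accepted when AllowMask is true, because its complement (A | B)
// uses only known flag bits.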
llvm::APInt FlagMask = ~FlagBits.zextOrTrunc(Val.getBitWidth());
return !(FlagMask & Val) || (AllowMask && !(FlagMask & ~Val));
}
void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDeclX, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attrs) {
EnumDecl *Enum = cast<EnumDecl>(EnumDeclX);
QualType EnumType = Context.getTypeDeclType(Enum);
ProcessDeclAttributeList(S, Enum, Attrs);
if (Enum->isDependentType()) {
for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
EnumConstantDecl *ECD =
cast_or_null<EnumConstantDecl>(Elements[i]);
if (!ECD) continue;
ECD->setType(EnumType);
}
Enum->completeDefinition(Context.DependentTy, Context.DependentTy, 0, 0);
return;
}
// TODO: If the result value doesn't fit in an int, it must be a long or long
// long value. ISO C does not support this, but GCC does as an extension;
// emit a warning.
unsigned IntWidth = Context.getTargetInfo().getIntWidth();
unsigned CharWidth = Context.getTargetInfo().getCharWidth();
unsigned ShortWidth = Context.getTargetInfo().getShortWidth();
// Verify that all the values are okay, compute the size of the values, and
// reverse the list.
unsigned NumNegativeBits = 0;
unsigned NumPositiveBits = 0;
// Keep track of whether all elements have type int.
bool AllElementsInt = true;
for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
EnumConstantDecl *ECD =
cast_or_null<EnumConstantDecl>(Elements[i]);
if (!ECD) continue; // Already issued a diagnostic.
const llvm::APSInt &InitVal = ECD->getInitVal();
// Keep track of the size of positive and negative values.
if (InitVal.isUnsigned() || InitVal.isNonNegative())
NumPositiveBits = std::max(NumPositiveBits,
(unsigned)InitVal.getActiveBits());
else
NumNegativeBits = std::max(NumNegativeBits,
(unsigned)InitVal.getMinSignedBits());
// Keep track of whether every enum element has type int (very common).
if (AllElementsInt)
AllElementsInt = ECD->getType() == Context.IntTy;
}
// Figure out the type that should be used for this enum.
QualType BestType;
unsigned BestWidth;
// C++0x N3000 [conv.prom]p3:
// An rvalue of an unscoped enumeration type whose underlying
// type is not fixed can be converted to an rvalue of the first
// of the following types that can represent all the values of
// the enumeration: int, unsigned int, long int, unsigned long
// int, long long int, or unsigned long long int.
// C99 6.4.4.3p2:
// An identifier declared as an enumeration constant has type int.
// The C99 rule is modified by a GCC extension.
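// Illustrative example (editor's note, assuming a 32-bit 'int'):
//   enum E { Big = 0x80000000 };
// NumPositiveBits is 32, so BestType below becomes 'unsigned int', and the
// promotion target stays 'unsigned int' since the value does not fit in 'int'.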
QualType BestPromotionType;
bool Packed = Enum->hasAttr<PackedAttr>();
// -fshort-enums is equivalent to specifying the packed attribute on all
// enum definitions.
if (LangOpts.ShortEnums)
Packed = true;
// If the enum already has a type because it is fixed or dictated by the
// target, promote that type instead of analyzing the enumerators.
if (Enum->isComplete()) {
BestType = Enum->getIntegerType();
if (BestType->isPromotableIntegerType())
BestPromotionType = Context.getPromotedIntegerType(BestType);
else
BestPromotionType = BestType;
BestWidth = Context.getIntWidth(BestType);
}
else if (NumNegativeBits) {
// If there is a negative value, figure out the smallest integer type (of
// int/long/longlong) that fits.
// If it's packed, check also if it fits a char or a short.
if (Packed && NumNegativeBits <= CharWidth && NumPositiveBits < CharWidth) {
BestType = Context.SignedCharTy;
BestWidth = CharWidth;
} else if (Packed && NumNegativeBits <= ShortWidth &&
NumPositiveBits < ShortWidth) {
BestType = Context.ShortTy;
BestWidth = ShortWidth;
} else if (NumNegativeBits <= IntWidth && NumPositiveBits < IntWidth) {
BestType = Context.IntTy;
BestWidth = IntWidth;
} else {
BestWidth = Context.getTargetInfo().getLongWidth();
if (NumNegativeBits <= BestWidth && NumPositiveBits < BestWidth) {
BestType = Context.LongTy;
} else {
BestWidth = Context.getTargetInfo().getLongLongWidth();
if (NumNegativeBits > BestWidth || NumPositiveBits >= BestWidth)
Diag(Enum->getLocation(), diag::ext_enum_too_large);
BestType = Context.LongLongTy;
}
}
BestPromotionType = (BestWidth <= IntWidth ? Context.IntTy : BestType);
} else {
// If there is no negative value, figure out the smallest type that fits
// all of the enumerator values.
// If it's packed, check also if it fits a char or a short.
if (Packed && NumPositiveBits <= CharWidth) {
BestType = Context.UnsignedCharTy;
BestPromotionType = Context.IntTy;
BestWidth = CharWidth;
} else if (Packed && NumPositiveBits <= ShortWidth) {
BestType = Context.UnsignedShortTy;
BestPromotionType = Context.IntTy;
BestWidth = ShortWidth;
} else if (NumPositiveBits <= IntWidth) {
BestType = Context.UnsignedIntTy;
BestWidth = IntWidth;
BestPromotionType
= (NumPositiveBits == BestWidth || !getLangOpts().CPlusPlus)
? Context.UnsignedIntTy : Context.IntTy;
} else if (NumPositiveBits <=
(BestWidth = Context.getTargetInfo().getLongWidth())) {
BestType = Context.UnsignedLongTy;
BestPromotionType
= (NumPositiveBits == BestWidth || !getLangOpts().CPlusPlus)
? Context.UnsignedLongTy : Context.LongTy;
} else {
BestWidth = Context.getTargetInfo().getLongLongWidth();
assert(NumPositiveBits <= BestWidth &&
"How could an initializer get larger than ULL?");
BestType = Context.UnsignedLongLongTy;
BestPromotionType
= (NumPositiveBits == BestWidth || !getLangOpts().CPlusPlus)
? Context.UnsignedLongLongTy : Context.LongLongTy;
}
}
// Loop over all of the enumerator constants, changing their types to match
// the type of the enum if needed.
for (auto *D : Elements) {
auto *ECD = cast_or_null<EnumConstantDecl>(D);
if (!ECD) continue; // Already issued a diagnostic.
// Standard C says the enumerators have int type, but we allow, as an
// extension, the enumerators to be larger than int size. If each
// enumerator value fits in an int, type it as an int; otherwise type it the
// same as the enumerator decl itself. This means that in "enum { X = 1U }",
// X has type 'int', not 'unsigned'.
// Determine whether the value fits into an int.
llvm::APSInt InitVal = ECD->getInitVal();
// If it fits into an integer type, force it. Otherwise force it to match
// the enum decl type.
QualType NewTy;
unsigned NewWidth;
bool NewSign;
if (!getLangOpts().CPlusPlus &&
!Enum->isFixed() &&
isRepresentableIntegerValue(Context, InitVal, Context.IntTy)) {
NewTy = Context.IntTy;
NewWidth = IntWidth;
NewSign = true;
} else if (ECD->getType() == BestType) {
// Already the right type!
if (getLangOpts().CPlusPlus)
// C++ [dcl.enum]p4: Following the closing brace of an
// enum-specifier, each enumerator has the type of its
// enumeration.
ECD->setType(EnumType);
continue;
} else {
NewTy = BestType;
NewWidth = BestWidth;
NewSign = BestType->isSignedIntegerOrEnumerationType();
}
// Adjust the APSInt value.
InitVal = InitVal.extOrTrunc(NewWidth);
InitVal.setIsSigned(NewSign);
ECD->setInitVal(InitVal);
// Adjust the Expr initializer and type.
if (ECD->getInitExpr() &&
!Context.hasSameType(NewTy, ECD->getInitExpr()->getType()))
ECD->setInitExpr(ImplicitCastExpr::Create(Context, NewTy,
CK_IntegralCast,
ECD->getInitExpr(),
/*base paths*/ nullptr,
VK_RValue));
if (getLangOpts().CPlusPlus)
// C++ [dcl.enum]p4: Following the closing brace of an
// enum-specifier, each enumerator has the type of its
// enumeration.
ECD->setType(EnumType);
else
ECD->setType(NewTy);
}
Enum->completeDefinition(BestType, BestPromotionType,
NumPositiveBits, NumNegativeBits);
CheckForDuplicateEnumValues(*this, Elements, Enum, EnumType);
if (Enum->isClosedFlag()) {
for (Decl *D : Elements) {
EnumConstantDecl *ECD = cast_or_null<EnumConstantDecl>(D);
if (!ECD) continue; // Already issued a diagnostic.
llvm::APSInt InitVal = ECD->getInitVal();
if (InitVal != 0 && !InitVal.isPowerOf2() &&
!IsValueInFlagEnum(Enum, InitVal, true))
Diag(ECD->getLocation(), diag::warn_flag_enum_constant_out_of_range)
<< ECD << Enum;
}
}
// Now that the enum type is defined, ensure it's not been underaligned.
if (Enum->hasAttrs())
CheckAlignasUnderalignment(Enum);
}
Decl *Sema::ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation StartLoc,
SourceLocation EndLoc) {
StringLiteral *AsmString = cast<StringLiteral>(expr);
FileScopeAsmDecl *New = FileScopeAsmDecl::Create(Context, CurContext,
AsmString, StartLoc,
EndLoc);
CurContext->addDecl(New);
return New;
}
void Sema::ActOnPragmaRedefineExtname(IdentifierInfo* Name,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation NameLoc,
SourceLocation AliasNameLoc) {
NamedDecl *PrevDecl = LookupSingleName(TUScope, Name, NameLoc,
LookupOrdinaryName);
AsmLabelAttr *Attr =
AsmLabelAttr::CreateImplicit(Context, AliasName->getName(), AliasNameLoc);
// If a declaration that:
// 1) declares a function or a variable
// 2) has external linkage
// already exists, add a label attribute to it.
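// Illustrative usage (editor's sketch):
//   #pragma redefine_extname statvfs statvfs64
//   int statvfs(const char *, struct statvfs *);  // emitted as 'statvfs64'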
if (PrevDecl && (isa<FunctionDecl>(PrevDecl) || isa<VarDecl>(PrevDecl))) {
if (isDeclExternC(PrevDecl))
PrevDecl->addAttr(Attr);
else
Diag(PrevDecl->getLocation(), diag::warn_redefine_extname_not_applied)
<< /*Variable*/(isa<FunctionDecl>(PrevDecl) ? 0 : 1) << PrevDecl;
// Otherwise, add a label attribute to ExtnameUndeclaredIdentifiers.
} else
(void)ExtnameUndeclaredIdentifiers.insert(std::make_pair(Name, Attr));
}
void Sema::ActOnPragmaWeakID(IdentifierInfo* Name,
SourceLocation PragmaLoc,
SourceLocation NameLoc) {
Decl *PrevDecl = LookupSingleName(TUScope, Name, NameLoc, LookupOrdinaryName);
if (PrevDecl) {
PrevDecl->addAttr(WeakAttr::CreateImplicit(Context, PragmaLoc));
} else {
(void)WeakUndeclaredIdentifiers.insert(
std::pair<IdentifierInfo*,WeakInfo>
(Name, WeakInfo((IdentifierInfo*)nullptr, NameLoc)));
}
}
void Sema::ActOnPragmaWeakAlias(IdentifierInfo* Name,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation NameLoc,
SourceLocation AliasNameLoc) {
Decl *PrevDecl = LookupSingleName(TUScope, AliasName, AliasNameLoc,
LookupOrdinaryName);
WeakInfo W = WeakInfo(Name, NameLoc);
if (PrevDecl && (isa<FunctionDecl>(PrevDecl) || isa<VarDecl>(PrevDecl))) {
if (!PrevDecl->hasAttr<AliasAttr>())
if (NamedDecl *ND = dyn_cast<NamedDecl>(PrevDecl))
DeclApplyPragmaWeak(TUScope, ND, W);
} else {
(void)WeakUndeclaredIdentifiers.insert(
std::pair<IdentifierInfo*,WeakInfo>(AliasName, W));
}
}
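// Illustrative usage of the two pragma forms above (editor's sketch):
//   #pragma weak func                      // ActOnPragmaWeakID
//   #pragma weak weak_name = strong_name   // ActOnPragmaWeakAlias
// Names not yet declared are queued in WeakUndeclaredIdentifiers, and the
// attribute is applied once the declaration is seen.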
Decl *Sema::getObjCDeclContext() const {
return (dyn_cast_or_null<ObjCContainerDecl>(CurContext));
}
Index: stable/12/contrib/llvm-project/clang/lib/Sema/SemaType.cpp
===================================================================
--- stable/12/contrib/llvm-project/clang/lib/Sema/SemaType.cpp (revision 356464)
+++ stable/12/contrib/llvm-project/clang/lib/Sema/SemaType.cpp (revision 356465)
@@ -1,8447 +1,8454 @@
//===--- SemaType.cpp - Semantic Analysis for Types -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements type-related semantic analysis.
//
//===----------------------------------------------------------------------===//
#include "TypeLocBuilder.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/ASTStructuralEquivalence.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeLocVisitor.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/DelayedDiagnostic.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
#include "clang/Sema/TemplateInstCallback.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/ErrorHandling.h"
using namespace clang;
enum TypeDiagSelector {
TDS_Function,
TDS_Pointer,
TDS_ObjCObjOrBlock
};
/// isOmittedBlockReturnType - Return true if this declarator is missing a
/// return type because this is an omitted return type on a block literal.
static bool isOmittedBlockReturnType(const Declarator &D) {
if (D.getContext() != DeclaratorContext::BlockLiteralContext ||
D.getDeclSpec().hasTypeSpecifier())
return false;
if (D.getNumTypeObjects() == 0)
return true; // ^{ ... }
if (D.getNumTypeObjects() == 1 &&
D.getTypeObject(0).Kind == DeclaratorChunk::Function)
return true; // ^(int X, float Y) { ... }
return false;
}
/// diagnoseBadTypeAttribute - Diagnoses a type attribute which
/// doesn't apply to the given type.
static void diagnoseBadTypeAttribute(Sema &S, const ParsedAttr &attr,
QualType type) {
TypeDiagSelector WhichType;
bool useExpansionLoc = true;
switch (attr.getKind()) {
case ParsedAttr::AT_ObjCGC:
WhichType = TDS_Pointer;
break;
case ParsedAttr::AT_ObjCOwnership:
WhichType = TDS_ObjCObjOrBlock;
break;
default:
// Assume everything else was a function attribute.
WhichType = TDS_Function;
useExpansionLoc = false;
break;
}
SourceLocation loc = attr.getLoc();
StringRef name = attr.getName()->getName();
// The GC attributes are usually written with macros; special-case them.
IdentifierInfo *II = attr.isArgIdent(0) ? attr.getArgAsIdent(0)->Ident
: nullptr;
if (useExpansionLoc && loc.isMacroID() && II) {
if (II->isStr("strong")) {
if (S.findMacroSpelling(loc, "__strong")) name = "__strong";
} else if (II->isStr("weak")) {
if (S.findMacroSpelling(loc, "__weak")) name = "__weak";
}
}
S.Diag(loc, diag::warn_type_attribute_wrong_type) << name << WhichType
<< type;
}
// objc_gc applies to Objective-C pointers or, otherwise, to the
// smallest available pointer type (i.e. 'void*' in 'void**').
#define OBJC_POINTER_TYPE_ATTRS_CASELIST \
case ParsedAttr::AT_ObjCGC: \
case ParsedAttr::AT_ObjCOwnership
// Calling convention attributes.
#define CALLING_CONV_ATTRS_CASELIST \
case ParsedAttr::AT_CDecl: \
case ParsedAttr::AT_FastCall: \
case ParsedAttr::AT_StdCall: \
case ParsedAttr::AT_ThisCall: \
case ParsedAttr::AT_RegCall: \
case ParsedAttr::AT_Pascal: \
case ParsedAttr::AT_SwiftCall: \
case ParsedAttr::AT_VectorCall: \
case ParsedAttr::AT_AArch64VectorPcs: \
case ParsedAttr::AT_MSABI: \
case ParsedAttr::AT_SysVABI: \
case ParsedAttr::AT_Pcs: \
case ParsedAttr::AT_IntelOclBicc: \
case ParsedAttr::AT_PreserveMost: \
case ParsedAttr::AT_PreserveAll
// Function type attributes.
#define FUNCTION_TYPE_ATTRS_CASELIST \
case ParsedAttr::AT_NSReturnsRetained: \
case ParsedAttr::AT_NoReturn: \
case ParsedAttr::AT_Regparm: \
case ParsedAttr::AT_AnyX86NoCallerSavedRegisters: \
case ParsedAttr::AT_AnyX86NoCfCheck: \
CALLING_CONV_ATTRS_CASELIST
// Microsoft-specific type qualifiers.
#define MS_TYPE_ATTRS_CASELIST \
case ParsedAttr::AT_Ptr32: \
case ParsedAttr::AT_Ptr64: \
case ParsedAttr::AT_SPtr: \
case ParsedAttr::AT_UPtr
// Nullability qualifiers.
#define NULLABILITY_TYPE_ATTRS_CASELIST \
case ParsedAttr::AT_TypeNonNull: \
case ParsedAttr::AT_TypeNullable: \
case ParsedAttr::AT_TypeNullUnspecified
namespace {
/// An object which stores processing state for the entire
/// GetTypeForDeclarator process.
class TypeProcessingState {
Sema &sema;
/// The declarator being processed.
Declarator &declarator;
/// The index of the declarator chunk we're currently processing.
/// May be the total number of valid chunks, indicating the
/// DeclSpec.
unsigned chunkIndex;
/// Whether there are non-trivial modifications to the decl spec.
bool trivial;
/// Whether we saved the attributes in the decl spec.
bool hasSavedAttrs;
/// The original set of attributes on the DeclSpec.
SmallVector<ParsedAttr *, 2> savedAttrs;
/// A list of attributes whose uselessness we diagnose once
/// processing is complete.
SmallVector<ParsedAttr *, 2> ignoredTypeAttrs;
/// Attributes corresponding to AttributedTypeLocs that we have not yet
/// populated.
// FIXME: The two-phase mechanism by which we construct Types and fill
// their TypeLocs makes it hard to correctly assign these. We keep the
// attributes in creation order as an attempt to make them line up
// properly.
using TypeAttrPair = std::pair<const AttributedType*, const Attr*>;
SmallVector<TypeAttrPair, 8> AttrsForTypes;
bool AttrsForTypesSorted = true;
/// MacroQualifiedTypes mapping to macro expansion locations that will be
/// stored in a MacroQualifiedTypeLoc.
llvm::DenseMap<const MacroQualifiedType *, SourceLocation> LocsForMacros;
/// Flag to indicate we parsed a noderef attribute. This is used for
/// validating that noderef was used on a pointer or array.
bool parsedNoDeref;
public:
TypeProcessingState(Sema &sema, Declarator &declarator)
: sema(sema), declarator(declarator),
chunkIndex(declarator.getNumTypeObjects()), trivial(true),
hasSavedAttrs(false), parsedNoDeref(false) {}
Sema &getSema() const {
return sema;
}
Declarator &getDeclarator() const {
return declarator;
}
bool isProcessingDeclSpec() const {
return chunkIndex == declarator.getNumTypeObjects();
}
unsigned getCurrentChunkIndex() const {
return chunkIndex;
}
void setCurrentChunkIndex(unsigned idx) {
assert(idx <= declarator.getNumTypeObjects());
chunkIndex = idx;
}
ParsedAttributesView &getCurrentAttributes() const {
if (isProcessingDeclSpec())
return getMutableDeclSpec().getAttributes();
return declarator.getTypeObject(chunkIndex).getAttrs();
}
/// Save the current set of attributes on the DeclSpec.
void saveDeclSpecAttrs() {
// Don't try to save them multiple times.
if (hasSavedAttrs) return;
DeclSpec &spec = getMutableDeclSpec();
for (ParsedAttr &AL : spec.getAttributes())
savedAttrs.push_back(&AL);
trivial &= savedAttrs.empty();
hasSavedAttrs = true;
}
/// Record that we had nowhere to put the given type attribute.
/// We will diagnose such attributes later.
void addIgnoredTypeAttr(ParsedAttr &attr) {
ignoredTypeAttrs.push_back(&attr);
}
/// Diagnose all the ignored type attributes, given that the
/// declarator worked out to the given type.
void diagnoseIgnoredTypeAttrs(QualType type) const {
for (auto *Attr : ignoredTypeAttrs)
diagnoseBadTypeAttribute(getSema(), *Attr, type);
}
/// Get an attributed type for the given attribute, and remember the Attr
/// object so that we can attach it to the AttributedTypeLoc.
QualType getAttributedType(Attr *A, QualType ModifiedType,
QualType EquivType) {
QualType T =
sema.Context.getAttributedType(A->getKind(), ModifiedType, EquivType);
AttrsForTypes.push_back({cast<AttributedType>(T.getTypePtr()), A});
AttrsForTypesSorted = false;
return T;
}
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. Also replace \p TypeWithAuto in \c TypeAttrPair if
/// necessary.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement) {
QualType T = sema.ReplaceAutoType(TypeWithAuto, Replacement);
if (auto *AttrTy = TypeWithAuto->getAs<AttributedType>()) {
// An attributed type should still be an attributed type after replacement.
auto *NewAttrTy = cast<AttributedType>(T.getTypePtr());
for (TypeAttrPair &A : AttrsForTypes) {
if (A.first == AttrTy)
A.first = NewAttrTy;
}
AttrsForTypesSorted = false;
}
return T;
}
/// Extract and remove the Attr* for a given attributed type.
const Attr *takeAttrForAttributedType(const AttributedType *AT) {
if (!AttrsForTypesSorted) {
llvm::stable_sort(AttrsForTypes, llvm::less_first());
AttrsForTypesSorted = true;
}
// FIXME: This is quadratic if we have lots of reuses of the same
// attributed type.
for (auto It = std::partition_point(
AttrsForTypes.begin(), AttrsForTypes.end(),
[=](const TypeAttrPair &A) { return A.first < AT; });
It != AttrsForTypes.end() && It->first == AT; ++It) {
if (It->second) {
const Attr *Result = It->second;
It->second = nullptr;
return Result;
}
}
llvm_unreachable("no Attr* for AttributedType*");
}
SourceLocation
getExpansionLocForMacroQualifiedType(const MacroQualifiedType *MQT) const {
auto FoundLoc = LocsForMacros.find(MQT);
assert(FoundLoc != LocsForMacros.end() &&
"Unable to find macro expansion location for MacroQualifedType");
return FoundLoc->second;
}
void setExpansionLocForMacroQualifiedType(const MacroQualifiedType *MQT,
SourceLocation Loc) {
LocsForMacros[MQT] = Loc;
}
void setParsedNoDeref(bool parsed) { parsedNoDeref = parsed; }
bool didParseNoDeref() const { return parsedNoDeref; }
~TypeProcessingState() {
if (trivial) return;
restoreDeclSpecAttrs();
}
private:
DeclSpec &getMutableDeclSpec() const {
return const_cast<DeclSpec&>(declarator.getDeclSpec());
}
void restoreDeclSpecAttrs() {
assert(hasSavedAttrs);
getMutableDeclSpec().getAttributes().clearListOnly();
for (ParsedAttr *AL : savedAttrs)
getMutableDeclSpec().getAttributes().addAtEnd(AL);
}
};
} // end anonymous namespace
static void moveAttrFromListToList(ParsedAttr &attr,
ParsedAttributesView &fromList,
ParsedAttributesView &toList) {
fromList.remove(&attr);
toList.addAtEnd(&attr);
}
/// The location of a type attribute.
enum TypeAttrLocation {
/// The attribute is in the decl-specifier-seq.
TAL_DeclSpec,
/// The attribute is part of a DeclaratorChunk.
TAL_DeclChunk,
/// The attribute is immediately after the declaration's name.
TAL_DeclName
};
static void processTypeAttrs(TypeProcessingState &state, QualType &type,
TypeAttrLocation TAL, ParsedAttributesView &attrs);
static bool handleFunctionTypeAttr(TypeProcessingState &state, ParsedAttr &attr,
QualType &type);
static bool handleMSPointerTypeQualifierAttr(TypeProcessingState &state,
ParsedAttr &attr, QualType &type);
static bool handleObjCGCTypeAttr(TypeProcessingState &state, ParsedAttr &attr,
QualType &type);
static bool handleObjCOwnershipTypeAttr(TypeProcessingState &state,
ParsedAttr &attr, QualType &type);
static bool handleObjCPointerTypeAttr(TypeProcessingState &state,
ParsedAttr &attr, QualType &type) {
if (attr.getKind() == ParsedAttr::AT_ObjCGC)
return handleObjCGCTypeAttr(state, attr, type);
assert(attr.getKind() == ParsedAttr::AT_ObjCOwnership);
return handleObjCOwnershipTypeAttr(state, attr, type);
}
/// Given the index of a declarator chunk, check whether that chunk
/// directly specifies the return type of a function and, if so, find
/// an appropriate place for it.
///
/// \param i - a notional index; the search starts immediately
/// inside it
///
/// \param onlyBlockPointers Whether we should only look into block
/// pointer types (vs. all pointer types).
static DeclaratorChunk *maybeMovePastReturnType(Declarator &declarator,
unsigned i,
bool onlyBlockPointers) {
assert(i <= declarator.getNumTypeObjects());
DeclaratorChunk *result = nullptr;
// First, look inwards past parens for a function declarator.
for (; i != 0; --i) {
DeclaratorChunk &fnChunk = declarator.getTypeObject(i-1);
switch (fnChunk.Kind) {
case DeclaratorChunk::Paren:
continue;
// If we find anything except a function, bail out.
case DeclaratorChunk::Pointer:
case DeclaratorChunk::BlockPointer:
case DeclaratorChunk::Array:
case DeclaratorChunk::Reference:
case DeclaratorChunk::MemberPointer:
case DeclaratorChunk::Pipe:
return result;
// If we do find a function declarator, scan inwards from that,
// looking for a (block-)pointer declarator.
case DeclaratorChunk::Function:
for (--i; i != 0; --i) {
DeclaratorChunk &ptrChunk = declarator.getTypeObject(i-1);
switch (ptrChunk.Kind) {
case DeclaratorChunk::Paren:
case DeclaratorChunk::Array:
case DeclaratorChunk::Function:
case DeclaratorChunk::Reference:
case DeclaratorChunk::Pipe:
continue;
case DeclaratorChunk::MemberPointer:
case DeclaratorChunk::Pointer:
if (onlyBlockPointers)
continue;
LLVM_FALLTHROUGH;
case DeclaratorChunk::BlockPointer:
result = &ptrChunk;
goto continue_outer;
}
llvm_unreachable("bad declarator chunk kind");
}
// If we run out of declarators doing that, we're done.
return result;
}
llvm_unreachable("bad declarator chunk kind");
// Okay, reconsider from our new point.
continue_outer: ;
}
// Ran out of chunks, bail out.
return result;
}
/// Given that an objc_gc attribute was written somewhere on a
/// declaration *other* than on the declarator itself (for which, use
/// distributeObjCPointerTypeAttrFromDeclarator), and given that it
/// didn't apply in whatever position it was written in, try to move
/// it to a more appropriate position.
static void distributeObjCPointerTypeAttr(TypeProcessingState &state,
ParsedAttr &attr, QualType type) {
Declarator &declarator = state.getDeclarator();
// Move it to the outermost normal or block pointer declarator.
for (unsigned i = state.getCurrentChunkIndex(); i != 0; --i) {
DeclaratorChunk &chunk = declarator.getTypeObject(i-1);
switch (chunk.Kind) {
case DeclaratorChunk::Pointer:
case DeclaratorChunk::BlockPointer: {
// But don't move an ARC ownership attribute to the return type
// of a block.
DeclaratorChunk *destChunk = nullptr;
if (state.isProcessingDeclSpec() &&
attr.getKind() == ParsedAttr::AT_ObjCOwnership)
destChunk = maybeMovePastReturnType(declarator, i - 1,
/*onlyBlockPointers=*/true);
if (!destChunk) destChunk = &chunk;
moveAttrFromListToList(attr, state.getCurrentAttributes(),
destChunk->getAttrs());
return;
}
case DeclaratorChunk::Paren:
case DeclaratorChunk::Array:
continue;
// We may be starting at the return type of a block.
case DeclaratorChunk::Function:
if (state.isProcessingDeclSpec() &&
attr.getKind() == ParsedAttr::AT_ObjCOwnership) {
if (DeclaratorChunk *dest = maybeMovePastReturnType(
declarator, i,
/*onlyBlockPointers=*/true)) {
moveAttrFromListToList(attr, state.getCurrentAttributes(),
dest->getAttrs());
return;
}
}
goto error;
// Don't walk through these.
case DeclaratorChunk::Reference:
case DeclaratorChunk::MemberPointer:
case DeclaratorChunk::Pipe:
goto error;
}
}
error:
diagnoseBadTypeAttribute(state.getSema(), attr, type);
}
/// Distribute an objc_gc type attribute that was written on the
/// declarator.
static void distributeObjCPointerTypeAttrFromDeclarator(
TypeProcessingState &state, ParsedAttr &attr, QualType &declSpecType) {
Declarator &declarator = state.getDeclarator();
// objc_gc goes on the innermost pointer to something that's not a
// pointer.
unsigned innermost = -1U;
bool considerDeclSpec = true;
for (unsigned i = 0, e = declarator.getNumTypeObjects(); i != e; ++i) {
DeclaratorChunk &chunk = declarator.getTypeObject(i);
switch (chunk.Kind) {
case DeclaratorChunk::Pointer:
case DeclaratorChunk::BlockPointer:
innermost = i;
continue;
case DeclaratorChunk::Reference:
case DeclaratorChunk::MemberPointer:
case DeclaratorChunk::Paren:
case DeclaratorChunk::Array:
case DeclaratorChunk::Pipe:
continue;
case DeclaratorChunk::Function:
considerDeclSpec = false;
goto done;
}
}
done:
// That might actually be the decl spec if we weren't blocked by
// anything in the declarator.
if (considerDeclSpec) {
if (handleObjCPointerTypeAttr(state, attr, declSpecType)) {
// Splice the attribute into the decl spec. Prevents the
// attribute from being applied multiple times and gives
// the source-location-filler something to work with.
state.saveDeclSpecAttrs();
declarator.getMutableDeclSpec().getAttributes().takeOneFrom(
declarator.getAttributes(), &attr);
return;
}
}
// Otherwise, if we found an appropriate chunk, splice the attribute
// into it.
if (innermost != -1U) {
moveAttrFromListToList(attr, declarator.getAttributes(),
declarator.getTypeObject(innermost).getAttrs());
return;
}
// Otherwise, diagnose when we're done building the type.
declarator.getAttributes().remove(&attr);
state.addIgnoredTypeAttr(attr);
}
/// A function type attribute was written somewhere in a declaration
/// *other* than on the declarator itself or in the decl spec. Given
/// that it didn't apply in whatever position it was written in, try
/// to move it to a more appropriate position.
static void distributeFunctionTypeAttr(TypeProcessingState &state,
ParsedAttr &attr, QualType type) {
Declarator &declarator = state.getDeclarator();
// Try to push the attribute from the return type of a function to
// the function itself.
for (unsigned i = state.getCurrentChunkIndex(); i != 0; --i) {
DeclaratorChunk &chunk = declarator.getTypeObject(i-1);
switch (chunk.Kind) {
case DeclaratorChunk::Function:
moveAttrFromListToList(attr, state.getCurrentAttributes(),
chunk.getAttrs());
return;
case DeclaratorChunk::Paren:
case DeclaratorChunk::Pointer:
case DeclaratorChunk::BlockPointer:
case DeclaratorChunk::Array:
case DeclaratorChunk::Reference:
case DeclaratorChunk::MemberPointer:
case DeclaratorChunk::Pipe:
continue;
}
}
diagnoseBadTypeAttribute(state.getSema(), attr, type);
}
/// Try to distribute a function type attribute to the innermost
/// function chunk or type. Returns true if the attribute was
/// distributed, false if no location was found.
static bool distributeFunctionTypeAttrToInnermost(
TypeProcessingState &state, ParsedAttr &attr,
ParsedAttributesView &attrList, QualType &declSpecType) {
Declarator &declarator = state.getDeclarator();
// Put it on the innermost function chunk, if there is one.
for (unsigned i = 0, e = declarator.getNumTypeObjects(); i != e; ++i) {
DeclaratorChunk &chunk = declarator.getTypeObject(i);
if (chunk.Kind != DeclaratorChunk::Function) continue;
moveAttrFromListToList(attr, attrList, chunk.getAttrs());
return true;
}
return handleFunctionTypeAttr(state, attr, declSpecType);
}
/// A function type attribute was written in the decl spec. Try to
/// apply it somewhere.
static void distributeFunctionTypeAttrFromDeclSpec(TypeProcessingState &state,
ParsedAttr &attr,
QualType &declSpecType) {
state.saveDeclSpecAttrs();
// C++11 attributes before the decl specifiers actually appertain to
// the declarators. Move them straight there. We don't support the
// 'put them wherever you like' semantics we allow for GNU attributes.
if (attr.isCXX11Attribute()) {
moveAttrFromListToList(attr, state.getCurrentAttributes(),
state.getDeclarator().getAttributes());
return;
}
// Try to distribute to the innermost.
if (distributeFunctionTypeAttrToInnermost(
state, attr, state.getCurrentAttributes(), declSpecType))
return;
// If that failed, diagnose the bad attribute when the declarator is
// fully built.
state.addIgnoredTypeAttr(attr);
}
/// A function type attribute was written on the declarator. Try to
/// apply it somewhere.
static void distributeFunctionTypeAttrFromDeclarator(TypeProcessingState &state,
ParsedAttr &attr,
QualType &declSpecType) {
Declarator &declarator = state.getDeclarator();
// Try to distribute to the innermost.
if (distributeFunctionTypeAttrToInnermost(
state, attr, declarator.getAttributes(), declSpecType))
return;
// If that failed, diagnose the bad attribute when the declarator is
// fully built.
declarator.getAttributes().remove(&attr);
state.addIgnoredTypeAttr(attr);
}
/// Given that there are attributes written on the declarator
/// itself, try to distribute any type attributes to the appropriate
/// declarator chunk.
///
/// These are attributes like the following:
/// int f ATTR;
/// int (f ATTR)();
/// but not necessarily this:
/// int f() ATTR;
static void distributeTypeAttrsFromDeclarator(TypeProcessingState &state,
QualType &declSpecType) {
// Collect all the type attributes from the declarator itself.
assert(!state.getDeclarator().getAttributes().empty() &&
"declarator has no attrs!");
// The called functions in this loop actually remove things from the current
// list, so iterating over the existing list isn't possible. Instead, make a
// non-owning copy and iterate over that.
ParsedAttributesView AttrsCopy{state.getDeclarator().getAttributes()};
for (ParsedAttr &attr : AttrsCopy) {
// Do not distribute C++11 attributes. They have strict rules for what
// they appertain to.
if (attr.isCXX11Attribute())
continue;
switch (attr.getKind()) {
OBJC_POINTER_TYPE_ATTRS_CASELIST:
distributeObjCPointerTypeAttrFromDeclarator(state, attr, declSpecType);
break;
FUNCTION_TYPE_ATTRS_CASELIST:
distributeFunctionTypeAttrFromDeclarator(state, attr, declSpecType);
break;
MS_TYPE_ATTRS_CASELIST:
// Microsoft type attributes cannot go after the declarator-id.
continue;
NULLABILITY_TYPE_ATTRS_CASELIST:
// Nullability specifiers cannot go after the declarator-id.
// Objective-C __kindof does not get distributed.
case ParsedAttr::AT_ObjCKindOf:
continue;
default:
break;
}
}
}
/// Add a synthetic '()' to a block-literal declarator if it is
/// required, given the return type.
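/// For instance (illustrative, editor's note): in the block literal
/// '^{ return 0; }' no parameter list is written, so a prototyped '()'
/// function chunk is prepended and the block behaves as if declared
/// '^(void){ return 0; }'.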
static void maybeSynthesizeBlockSignature(TypeProcessingState &state,
QualType declSpecType) {
Declarator &declarator = state.getDeclarator();
// First, check whether the declarator would produce a function,
// i.e. whether the innermost semantic chunk is a function.
if (declarator.isFunctionDeclarator()) {
// If so, make that declarator a prototyped declarator.
declarator.getFunctionTypeInfo().hasPrototype = true;
return;
}
// If there are any type objects, the type as written won't name a
// function, regardless of the decl spec type. This is because a
// block signature declarator is always an abstract-declarator, and
// abstract-declarators can't just be parentheses chunks. Therefore
// we need to build a function chunk unless there are no type
// objects and the decl spec type is a function.
if (!declarator.getNumTypeObjects() && declSpecType->isFunctionType())
return;
// Note that there *are* cases with invalid declarators where
// declarators consist solely of parentheses. In general, these
// occur only in failed efforts to make function declarators, so
// faking up the function chunk is still the right thing to do.
// Otherwise, we need to fake up a function declarator.
SourceLocation loc = declarator.getBeginLoc();
// ...and *prepend* it to the declarator.
SourceLocation NoLoc;
declarator.AddInnermostTypeInfo(DeclaratorChunk::getFunction(
/*HasProto=*/true,
/*IsAmbiguous=*/false,
/*LParenLoc=*/NoLoc,
/*ArgInfo=*/nullptr,
/*NumParams=*/0,
/*EllipsisLoc=*/NoLoc,
/*RParenLoc=*/NoLoc,
/*RefQualifierIsLvalueRef=*/true,
/*RefQualifierLoc=*/NoLoc,
/*MutableLoc=*/NoLoc, EST_None,
/*ESpecRange=*/SourceRange(),
/*Exceptions=*/nullptr,
/*ExceptionRanges=*/nullptr,
/*NumExceptions=*/0,
/*NoexceptExpr=*/nullptr,
/*ExceptionSpecTokens=*/nullptr,
/*DeclsInPrototype=*/None, loc, loc, declarator));
// For consistency, make sure the state still has us as processing
// the decl spec.
assert(state.getCurrentChunkIndex() == declarator.getNumTypeObjects() - 1);
state.setCurrentChunkIndex(declarator.getNumTypeObjects());
}
static void diagnoseAndRemoveTypeQualifiers(Sema &S, const DeclSpec &DS,
unsigned &TypeQuals,
QualType TypeSoFar,
unsigned RemoveTQs,
unsigned DiagID) {
// If this occurs outside a template instantiation, warn the user about
// it; they probably didn't mean to specify a redundant qualifier.
typedef std::pair<DeclSpec::TQ, SourceLocation> QualLoc;
for (QualLoc Qual : {QualLoc(DeclSpec::TQ_const, DS.getConstSpecLoc()),
QualLoc(DeclSpec::TQ_restrict, DS.getRestrictSpecLoc()),
QualLoc(DeclSpec::TQ_volatile, DS.getVolatileSpecLoc()),
QualLoc(DeclSpec::TQ_atomic, DS.getAtomicSpecLoc())}) {
if (!(RemoveTQs & Qual.first))
continue;
if (!S.inTemplateInstantiation()) {
if (TypeQuals & Qual.first)
S.Diag(Qual.second, DiagID)
<< DeclSpec::getSpecifierName(Qual.first) << TypeSoFar
<< FixItHint::CreateRemoval(Qual.second);
}
TypeQuals &= ~Qual.first;
}
}
/// Return true if this is an omitted block return type. Also check type
/// attributes and type qualifiers when returning true.
static bool checkOmittedBlockReturnType(Sema &S, Declarator &declarator,
QualType Result) {
if (!isOmittedBlockReturnType(declarator))
return false;
// Warn if we see type attributes for omitted return type on a block literal.
SmallVector<ParsedAttr *, 2> ToBeRemoved;
for (ParsedAttr &AL : declarator.getMutableDeclSpec().getAttributes()) {
if (AL.isInvalid() || !AL.isTypeAttr())
continue;
S.Diag(AL.getLoc(),
diag::warn_block_literal_attributes_on_omitted_return_type)
<< AL.getName();
ToBeRemoved.push_back(&AL);
}
// Remove bad attributes from the list.
for (ParsedAttr *AL : ToBeRemoved)
declarator.getMutableDeclSpec().getAttributes().remove(AL);
// Warn if we see type qualifiers for omitted return type on a block literal.
const DeclSpec &DS = declarator.getDeclSpec();
unsigned TypeQuals = DS.getTypeQualifiers();
diagnoseAndRemoveTypeQualifiers(S, DS, TypeQuals, Result, (unsigned)-1,
diag::warn_block_literal_qualifiers_on_omitted_return_type);
declarator.getMutableDeclSpec().ClearTypeQualifiers();
return true;
}
/// Apply Objective-C type arguments to the given type.
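/// For example (illustrative, editor's note): applying the type argument
/// <NSString *> to 'NSArray' forms 'NSArray<NSString *>'; supplying type
/// arguments to a class that declares no type parameters is diagnosed below.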
static QualType applyObjCTypeArgs(Sema &S, SourceLocation loc, QualType type,
ArrayRef<TypeSourceInfo *> typeArgs,
SourceRange typeArgsRange,
bool failOnError = false) {
// We can only apply type arguments to an Objective-C class type.
const auto *objcObjectType = type->getAs<ObjCObjectType>();
if (!objcObjectType || !objcObjectType->getInterface()) {
S.Diag(loc, diag::err_objc_type_args_non_class)
<< type
<< typeArgsRange;
if (failOnError)
return QualType();
return type;
}
// The class type must be parameterized.
ObjCInterfaceDecl *objcClass = objcObjectType->getInterface();
ObjCTypeParamList *typeParams = objcClass->getTypeParamList();
if (!typeParams) {
S.Diag(loc, diag::err_objc_type_args_non_parameterized_class)
<< objcClass->getDeclName()
<< FixItHint::CreateRemoval(typeArgsRange);
if (failOnError)
return QualType();
return type;
}
// The type must not already be specialized.
if (objcObjectType->isSpecialized()) {
S.Diag(loc, diag::err_objc_type_args_specialized_class)
<< type
<< FixItHint::CreateRemoval(typeArgsRange);
if (failOnError)
return QualType();
return type;
}
// Check the type arguments.
SmallVector<QualType, 4> finalTypeArgs;
unsigned numTypeParams = typeParams->size();
bool anyPackExpansions = false;
for (unsigned i = 0, n = typeArgs.size(); i != n; ++i) {
TypeSourceInfo *typeArgInfo = typeArgs[i];
QualType typeArg = typeArgInfo->getType();
// Type arguments cannot have explicit qualifiers or nullability.
// We ignore indirect sources of these, e.g. behind typedefs or
// template arguments.
if (TypeLoc qual = typeArgInfo->getTypeLoc().findExplicitQualifierLoc()) {
bool diagnosed = false;
SourceRange rangeToRemove;
if (auto attr = qual.getAs<AttributedTypeLoc>()) {
rangeToRemove = attr.getLocalSourceRange();
if (attr.getTypePtr()->getImmediateNullability()) {
typeArg = attr.getTypePtr()->getModifiedType();
S.Diag(attr.getBeginLoc(),
diag::err_objc_type_arg_explicit_nullability)
<< typeArg << FixItHint::CreateRemoval(rangeToRemove);
diagnosed = true;
}
}
if (!diagnosed) {
S.Diag(qual.getBeginLoc(), diag::err_objc_type_arg_qualified)
<< typeArg << typeArg.getQualifiers().getAsString()
<< FixItHint::CreateRemoval(rangeToRemove);
}
}
// Remove qualifiers even if they're non-local.
typeArg = typeArg.getUnqualifiedType();
finalTypeArgs.push_back(typeArg);
if (typeArg->getAs<PackExpansionType>())
anyPackExpansions = true;
// Find the corresponding type parameter, if there is one.
ObjCTypeParamDecl *typeParam = nullptr;
if (!anyPackExpansions) {
if (i < numTypeParams) {
typeParam = typeParams->begin()[i];
} else {
// Too many arguments.
S.Diag(loc, diag::err_objc_type_args_wrong_arity)
<< false
<< objcClass->getDeclName()
<< (unsigned)typeArgs.size()
<< numTypeParams;
S.Diag(objcClass->getLocation(), diag::note_previous_decl)
<< objcClass;
if (failOnError)
return QualType();
return type;
}
}
// Objective-C object pointer types must be substitutable for the bounds.
if (const auto *typeArgObjC = typeArg->getAs<ObjCObjectPointerType>()) {
// If we don't have a type parameter to match against, assume
// everything is fine. There was a prior pack expansion that
// means we won't be able to match anything.
if (!typeParam) {
assert(anyPackExpansions && "Too many arguments?");
continue;
}
// Retrieve the bound.
QualType bound = typeParam->getUnderlyingType();
const auto *boundObjC = bound->getAs<ObjCObjectPointerType>();
// Determine whether the type argument is substitutable for the bound.
if (typeArgObjC->isObjCIdType()) {
// When the type argument is 'id', the only acceptable type
// parameter bound is 'id'.
if (boundObjC->isObjCIdType())
continue;
} else if (S.Context.canAssignObjCInterfaces(boundObjC, typeArgObjC)) {
// Otherwise, we follow the assignability rules.
continue;
}
// Diagnose the mismatch.
S.Diag(typeArgInfo->getTypeLoc().getBeginLoc(),
diag::err_objc_type_arg_does_not_match_bound)
<< typeArg << bound << typeParam->getDeclName();
S.Diag(typeParam->getLocation(), diag::note_objc_type_param_here)
<< typeParam->getDeclName();
if (failOnError)
return QualType();
return type;
}
// Block pointer types are permitted for unqualified 'id' bounds.
if (typeArg->isBlockPointerType()) {
// If we don't have a type parameter to match against, assume
// everything is fine. There was a prior pack expansion that
// means we won't be able to match anything.
if (!typeParam) {
assert(anyPackExpansions && "Too many arguments?");
continue;
}
// Retrieve the bound.
QualType bound = typeParam->getUnderlyingType();
if (bound->isBlockCompatibleObjCPointerType(S.Context))
continue;
// Diagnose the mismatch.
S.Diag(typeArgInfo->getTypeLoc().getBeginLoc(),
diag::err_objc_type_arg_does_not_match_bound)
<< typeArg << bound << typeParam->getDeclName();
S.Diag(typeParam->getLocation(), diag::note_objc_type_param_here)
<< typeParam->getDeclName();
if (failOnError)
return QualType();
return type;
}
// Dependent types will be checked at instantiation time.
if (typeArg->isDependentType()) {
continue;
}
// Diagnose non-id-compatible type arguments.
S.Diag(typeArgInfo->getTypeLoc().getBeginLoc(),
diag::err_objc_type_arg_not_id_compatible)
<< typeArg << typeArgInfo->getTypeLoc().getSourceRange();
if (failOnError)
return QualType();
return type;
}
// Make sure we didn't have the wrong number of arguments.
if (!anyPackExpansions && finalTypeArgs.size() != numTypeParams) {
S.Diag(loc, diag::err_objc_type_args_wrong_arity)
<< (typeArgs.size() < typeParams->size())
<< objcClass->getDeclName()
<< (unsigned)finalTypeArgs.size()
<< (unsigned)numTypeParams;
S.Diag(objcClass->getLocation(), diag::note_previous_decl)
<< objcClass;
if (failOnError)
return QualType();
return type;
}
// Success. Form the specialized type.
return S.Context.getObjCObjectType(type, finalTypeArgs, { }, false);
}
QualType Sema::BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError) {
QualType Result = QualType(Decl->getTypeForDecl(), 0);
if (!Protocols.empty()) {
bool HasError;
Result = Context.applyObjCProtocolQualifiers(Result, Protocols,
HasError);
if (HasError) {
Diag(SourceLocation(), diag::err_invalid_protocol_qualifiers)
<< SourceRange(ProtocolLAngleLoc, ProtocolRAngleLoc);
if (FailOnError) Result = QualType();
}
if (FailOnError && Result.isNull())
return QualType();
}
return Result;
}
QualType Sema::BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError) {
QualType Result = BaseType;
if (!TypeArgs.empty()) {
Result = applyObjCTypeArgs(*this, Loc, Result, TypeArgs,
SourceRange(TypeArgsLAngleLoc,
TypeArgsRAngleLoc),
FailOnError);
if (FailOnError && Result.isNull())
return QualType();
}
if (!Protocols.empty()) {
bool HasError;
Result = Context.applyObjCProtocolQualifiers(Result, Protocols,
HasError);
if (HasError) {
Diag(Loc, diag::err_invalid_protocol_qualifiers)
<< SourceRange(ProtocolLAngleLoc, ProtocolRAngleLoc);
if (FailOnError) Result = QualType();
}
if (FailOnError && Result.isNull())
return QualType();
}
return Result;
}
TypeResult Sema::actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc) {
// Form id<protocol-list>.
QualType Result = Context.getObjCObjectType(
Context.ObjCBuiltinIdTy, { },
llvm::makeArrayRef(
(ObjCProtocolDecl * const *)protocols.data(),
protocols.size()),
false);
Result = Context.getObjCObjectPointerType(Result);
TypeSourceInfo *ResultTInfo = Context.CreateTypeSourceInfo(Result);
TypeLoc ResultTL = ResultTInfo->getTypeLoc();
auto ObjCObjectPointerTL = ResultTL.castAs<ObjCObjectPointerTypeLoc>();
ObjCObjectPointerTL.setStarLoc(SourceLocation()); // implicit
auto ObjCObjectTL = ObjCObjectPointerTL.getPointeeLoc()
.castAs<ObjCObjectTypeLoc>();
ObjCObjectTL.setHasBaseTypeAsWritten(false);
ObjCObjectTL.getBaseLoc().initialize(Context, SourceLocation());
// No type arguments.
ObjCObjectTL.setTypeArgsLAngleLoc(SourceLocation());
ObjCObjectTL.setTypeArgsRAngleLoc(SourceLocation());
// Fill in protocol qualifiers.
ObjCObjectTL.setProtocolLAngleLoc(lAngleLoc);
ObjCObjectTL.setProtocolRAngleLoc(rAngleLoc);
for (unsigned i = 0, n = protocols.size(); i != n; ++i)
ObjCObjectTL.setProtocolLoc(i, protocolLocs[i]);
// We're done. Return the completed type to the parser.
return CreateParsedType(Result, ResultTInfo);
}
TypeResult Sema::actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc) {
TypeSourceInfo *BaseTypeInfo = nullptr;
QualType T = GetTypeFromParser(BaseType, &BaseTypeInfo);
if (T.isNull())
return true;
// Handle missing type-source info.
if (!BaseTypeInfo)
BaseTypeInfo = Context.getTrivialTypeSourceInfo(T, Loc);
// Extract type arguments.
SmallVector<TypeSourceInfo *, 4> ActualTypeArgInfos;
for (unsigned i = 0, n = TypeArgs.size(); i != n; ++i) {
TypeSourceInfo *TypeArgInfo = nullptr;
QualType TypeArg = GetTypeFromParser(TypeArgs[i], &TypeArgInfo);
if (TypeArg.isNull()) {
ActualTypeArgInfos.clear();
break;
}
assert(TypeArgInfo && "No type source info?");
ActualTypeArgInfos.push_back(TypeArgInfo);
}
// Build the object type.
QualType Result = BuildObjCObjectType(
T, BaseTypeInfo->getTypeLoc().getSourceRange().getBegin(),
TypeArgsLAngleLoc, ActualTypeArgInfos, TypeArgsRAngleLoc,
ProtocolLAngleLoc,
llvm::makeArrayRef((ObjCProtocolDecl * const *)Protocols.data(),
Protocols.size()),
ProtocolLocs, ProtocolRAngleLoc,
/*FailOnError=*/false);
if (Result == T)
return BaseType;
// Create source information for this type.
TypeSourceInfo *ResultTInfo = Context.CreateTypeSourceInfo(Result);
TypeLoc ResultTL = ResultTInfo->getTypeLoc();
// For id<Proto1, Proto2> or Class<Proto1, Proto2>, we'll have an
// object pointer type. Fill in source information for it.
if (auto ObjCObjectPointerTL = ResultTL.getAs<ObjCObjectPointerTypeLoc>()) {
// The '*' is implicit.
ObjCObjectPointerTL.setStarLoc(SourceLocation());
ResultTL = ObjCObjectPointerTL.getPointeeLoc();
}
if (auto OTPTL = ResultTL.getAs<ObjCTypeParamTypeLoc>()) {
// Protocol qualifier information.
if (OTPTL.getNumProtocols() > 0) {
assert(OTPTL.getNumProtocols() == Protocols.size());
OTPTL.setProtocolLAngleLoc(ProtocolLAngleLoc);
OTPTL.setProtocolRAngleLoc(ProtocolRAngleLoc);
for (unsigned i = 0, n = Protocols.size(); i != n; ++i)
OTPTL.setProtocolLoc(i, ProtocolLocs[i]);
}
// We're done. Return the completed type to the parser.
return CreateParsedType(Result, ResultTInfo);
}
auto ObjCObjectTL = ResultTL.castAs<ObjCObjectTypeLoc>();
// Type argument information.
if (ObjCObjectTL.getNumTypeArgs() > 0) {
assert(ObjCObjectTL.getNumTypeArgs() == ActualTypeArgInfos.size());
ObjCObjectTL.setTypeArgsLAngleLoc(TypeArgsLAngleLoc);
ObjCObjectTL.setTypeArgsRAngleLoc(TypeArgsRAngleLoc);
for (unsigned i = 0, n = ActualTypeArgInfos.size(); i != n; ++i)
ObjCObjectTL.setTypeArgTInfo(i, ActualTypeArgInfos[i]);
} else {
ObjCObjectTL.setTypeArgsLAngleLoc(SourceLocation());
ObjCObjectTL.setTypeArgsRAngleLoc(SourceLocation());
}
// Protocol qualifier information.
if (ObjCObjectTL.getNumProtocols() > 0) {
assert(ObjCObjectTL.getNumProtocols() == Protocols.size());
ObjCObjectTL.setProtocolLAngleLoc(ProtocolLAngleLoc);
ObjCObjectTL.setProtocolRAngleLoc(ProtocolRAngleLoc);
for (unsigned i = 0, n = Protocols.size(); i != n; ++i)
ObjCObjectTL.setProtocolLoc(i, ProtocolLocs[i]);
} else {
ObjCObjectTL.setProtocolLAngleLoc(SourceLocation());
ObjCObjectTL.setProtocolRAngleLoc(SourceLocation());
}
// Base type.
ObjCObjectTL.setHasBaseTypeAsWritten(true);
if (ObjCObjectTL.getType() == T)
ObjCObjectTL.getBaseLoc().initializeFullCopy(BaseTypeInfo->getTypeLoc());
else
ObjCObjectTL.getBaseLoc().initialize(Context, Loc);
// We're done. Return the completed type to the parser.
return CreateParsedType(Result, ResultTInfo);
}
static OpenCLAccessAttr::Spelling
getImageAccess(const ParsedAttributesView &Attrs) {
for (const ParsedAttr &AL : Attrs)
if (AL.getKind() == ParsedAttr::AT_OpenCLAccess)
return static_cast<OpenCLAccessAttr::Spelling>(AL.getSemanticSpelling());
return OpenCLAccessAttr::Keyword_read_only;
}
/// Convert the specified declspec to the appropriate type
/// object.
/// \param state Specifies the declarator containing the declaration specifier
/// to be converted, along with other associated processing state.
/// \returns The type described by the declaration specifiers. This function
/// never returns null.
static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
// FIXME: Should move the logic from DeclSpec::Finish to here for validity
// checking.
Sema &S = state.getSema();
Declarator &declarator = state.getDeclarator();
DeclSpec &DS = declarator.getMutableDeclSpec();
SourceLocation DeclLoc = declarator.getIdentifierLoc();
if (DeclLoc.isInvalid())
DeclLoc = DS.getBeginLoc();
ASTContext &Context = S.Context;
QualType Result;
switch (DS.getTypeSpecType()) {
case DeclSpec::TST_void:
Result = Context.VoidTy;
break;
case DeclSpec::TST_char:
if (DS.getTypeSpecSign() == DeclSpec::TSS_unspecified)
Result = Context.CharTy;
else if (DS.getTypeSpecSign() == DeclSpec::TSS_signed)
Result = Context.SignedCharTy;
else {
assert(DS.getTypeSpecSign() == DeclSpec::TSS_unsigned &&
"Unknown TSS value");
Result = Context.UnsignedCharTy;
}
break;
case DeclSpec::TST_wchar:
if (DS.getTypeSpecSign() == DeclSpec::TSS_unspecified)
Result = Context.WCharTy;
else if (DS.getTypeSpecSign() == DeclSpec::TSS_signed) {
S.Diag(DS.getTypeSpecSignLoc(), diag::ext_invalid_sign_spec)
<< DS.getSpecifierName(DS.getTypeSpecType(),
Context.getPrintingPolicy());
Result = Context.getSignedWCharType();
} else {
assert(DS.getTypeSpecSign() == DeclSpec::TSS_unsigned &&
"Unknown TSS value");
S.Diag(DS.getTypeSpecSignLoc(), diag::ext_invalid_sign_spec)
<< DS.getSpecifierName(DS.getTypeSpecType(),
Context.getPrintingPolicy());
Result = Context.getUnsignedWCharType();
}
break;
case DeclSpec::TST_char8:
assert(DS.getTypeSpecSign() == DeclSpec::TSS_unspecified &&
"Unknown TSS value");
Result = Context.Char8Ty;
break;
case DeclSpec::TST_char16:
assert(DS.getTypeSpecSign() == DeclSpec::TSS_unspecified &&
"Unknown TSS value");
Result = Context.Char16Ty;
break;
case DeclSpec::TST_char32:
assert(DS.getTypeSpecSign() == DeclSpec::TSS_unspecified &&
"Unknown TSS value");
Result = Context.Char32Ty;
break;
case DeclSpec::TST_unspecified:
// If this is a missing declspec in a block literal return context, then it
// is inferred from the return statements inside the block.
// The declspec is always missing in a lambda expr context; it is either
// specified with a trailing return type or inferred.
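// e.g. the block literal '^{ return 42; }' gets an inferred 'int' return
// type from its return statement.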
if (S.getLangOpts().CPlusPlus14 &&
declarator.getContext() == DeclaratorContext::LambdaExprContext) {
// In C++1y, a lambda's implicit return type is 'auto'.
Result = Context.getAutoDeductType();
break;
} else if (declarator.getContext() ==
DeclaratorContext::LambdaExprContext ||
checkOmittedBlockReturnType(S, declarator,
Context.DependentTy)) {
Result = Context.DependentTy;
break;
}
// Unspecified typespec defaults to int in C90. However, the C90 grammar
// [C90 6.5] only allows a decl-spec if there was *some* type-specifier,
// type-qualifier, or storage-class-specifier. If not, emit an extwarn.
// Note that the one exception to this is function definitions, which are
// allowed to be completely missing a declspec. The parser already handles
// that case by pretending to have seen an 'int'.
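// e.g. in C89, 'static x;' declares 'x' as an int without complaint, while
// a bare 'x;' draws ext_missing_declspec.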
if (S.getLangOpts().ImplicitInt) {
// In C89 mode, we only warn if there is a completely missing declspec
// when one is not allowed.
if (DS.isEmpty()) {
S.Diag(DeclLoc, diag::ext_missing_declspec)
<< DS.getSourceRange()
<< FixItHint::CreateInsertion(DS.getBeginLoc(), "int");
}
} else if (!DS.hasTypeSpecifier()) {
// C99 and C++ require a type specifier. For example, C99 6.7.2p2 says:
// "At least one type specifier shall be given in the declaration
// specifiers in each declaration, and in the specifier-qualifier list in
// each struct declaration and type name."
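// e.g. 'const x = 10;' has no type specifier: an error in C++, but only an
// extension warning (defaulting to 'int') in C99.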
if (S.getLangOpts().CPlusPlus && !DS.isTypeSpecPipe()) {
S.Diag(DeclLoc, diag::err_missing_type_specifier)
<< DS.getSourceRange();
// When this occurs in C++ code, something is often very broken with the
// value being declared; poison it as invalid so we don't get chains of
// errors.
declarator.setInvalidType(true);
} else if ((S.getLangOpts().OpenCLVersion >= 200 ||
S.getLangOpts().OpenCLCPlusPlus) &&
DS.isTypeSpecPipe()) {
S.Diag(DeclLoc, diag::err_missing_actual_pipe_type)
<< DS.getSourceRange();
declarator.setInvalidType(true);
} else {
S.Diag(DeclLoc, diag::ext_missing_type_specifier)
<< DS.getSourceRange();
}
}
LLVM_FALLTHROUGH;
case DeclSpec::TST_int: {
if (DS.getTypeSpecSign() != DeclSpec::TSS_unsigned) {
switch (DS.getTypeSpecWidth()) {
case DeclSpec::TSW_unspecified: Result = Context.IntTy; break;
case DeclSpec::TSW_short: Result = Context.ShortTy; break;
case DeclSpec::TSW_long: Result = Context.LongTy; break;
case DeclSpec::TSW_longlong:
Result = Context.LongLongTy;
// 'long long' is a C99 or C++11 feature.
if (!S.getLangOpts().C99) {
if (S.getLangOpts().CPlusPlus)
S.Diag(DS.getTypeSpecWidthLoc(),
S.getLangOpts().CPlusPlus11 ?
diag::warn_cxx98_compat_longlong : diag::ext_cxx11_longlong);
else
S.Diag(DS.getTypeSpecWidthLoc(), diag::ext_c99_longlong);
}
break;
}
} else {
switch (DS.getTypeSpecWidth()) {
case DeclSpec::TSW_unspecified: Result = Context.UnsignedIntTy; break;
case DeclSpec::TSW_short: Result = Context.UnsignedShortTy; break;
case DeclSpec::TSW_long: Result = Context.UnsignedLongTy; break;
case DeclSpec::TSW_longlong:
Result = Context.UnsignedLongLongTy;
// 'long long' is a C99 or C++11 feature.
if (!S.getLangOpts().C99) {
if (S.getLangOpts().CPlusPlus)
S.Diag(DS.getTypeSpecWidthLoc(),
S.getLangOpts().CPlusPlus11 ?
diag::warn_cxx98_compat_longlong : diag::ext_cxx11_longlong);
else
S.Diag(DS.getTypeSpecWidthLoc(), diag::ext_c99_longlong);
}
break;
}
}
break;
}
case DeclSpec::TST_accum: {
switch (DS.getTypeSpecWidth()) {
case DeclSpec::TSW_short:
Result = Context.ShortAccumTy;
break;
case DeclSpec::TSW_unspecified:
Result = Context.AccumTy;
break;
case DeclSpec::TSW_long:
Result = Context.LongAccumTy;
break;
case DeclSpec::TSW_longlong:
llvm_unreachable("Unable to specify long long as _Accum width");
}
if (DS.getTypeSpecSign() == DeclSpec::TSS_unsigned)
Result = Context.getCorrespondingUnsignedType(Result);
if (DS.isTypeSpecSat())
Result = Context.getCorrespondingSaturatedType(Result);
break;
}
case DeclSpec::TST_fract: {
switch (DS.getTypeSpecWidth()) {
case DeclSpec::TSW_short:
Result = Context.ShortFractTy;
break;
case DeclSpec::TSW_unspecified:
Result = Context.FractTy;
break;
case DeclSpec::TSW_long:
Result = Context.LongFractTy;
break;
case DeclSpec::TSW_longlong:
llvm_unreachable("Unable to specify long long as _Fract width");
}
if (DS.getTypeSpecSign() == DeclSpec::TSS_unsigned)
Result = Context.getCorrespondingUnsignedType(Result);
if (DS.isTypeSpecSat())
Result = Context.getCorrespondingSaturatedType(Result);
break;
}
case DeclSpec::TST_int128:
if (!S.Context.getTargetInfo().hasInt128Type() &&
!(S.getLangOpts().OpenMP && S.getLangOpts().OpenMPIsDevice))
S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported)
<< "__int128";
if (DS.getTypeSpecSign() == DeclSpec::TSS_unsigned)
Result = Context.UnsignedInt128Ty;
else
Result = Context.Int128Ty;
break;
case DeclSpec::TST_float16:
// CUDA host and device may have different _Float16 support, therefore
// do not diagnose _Float16 usage to avoid false alarms.
// TODO: more precise diagnostics for CUDA.
if (!S.Context.getTargetInfo().hasFloat16Type() && !S.getLangOpts().CUDA &&
!(S.getLangOpts().OpenMP && S.getLangOpts().OpenMPIsDevice))
S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported)
<< "_Float16";
Result = Context.Float16Ty;
break;
case DeclSpec::TST_half: Result = Context.HalfTy; break;
case DeclSpec::TST_float: Result = Context.FloatTy; break;
case DeclSpec::TST_double:
if (DS.getTypeSpecWidth() == DeclSpec::TSW_long)
Result = Context.LongDoubleTy;
else
Result = Context.DoubleTy;
break;
case DeclSpec::TST_float128:
if (!S.Context.getTargetInfo().hasFloat128Type() &&
!(S.getLangOpts().OpenMP && S.getLangOpts().OpenMPIsDevice))
S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported)
<< "__float128";
Result = Context.Float128Ty;
break;
case DeclSpec::TST_bool: Result = Context.BoolTy; break; // _Bool or bool
case DeclSpec::TST_decimal32: // _Decimal32
case DeclSpec::TST_decimal64: // _Decimal64
case DeclSpec::TST_decimal128: // _Decimal128
S.Diag(DS.getTypeSpecTypeLoc(), diag::err_decimal_unsupported);
Result = Context.IntTy;
declarator.setInvalidType(true);
break;
case DeclSpec::TST_class:
case DeclSpec::TST_enum:
case DeclSpec::TST_union:
case DeclSpec::TST_struct:
case DeclSpec::TST_interface: {
TagDecl *D = dyn_cast_or_null<TagDecl>(DS.getRepAsDecl());
if (!D) {
// This can happen in C++ with ambiguous lookups.
Result = Context.IntTy;
declarator.setInvalidType(true);
break;
}
// If the type is deprecated or unavailable, diagnose it.
S.DiagnoseUseOfDecl(D, DS.getTypeSpecTypeNameLoc());
assert(DS.getTypeSpecWidth() == 0 && DS.getTypeSpecComplex() == 0 &&
DS.getTypeSpecSign() == 0 && "No qualifiers on tag names!");
// TypeQuals handled by caller.
Result = Context.getTypeDeclType(D);
// In both C and C++, make an ElaboratedType.
ElaboratedTypeKeyword Keyword
= ElaboratedType::getKeywordForTypeSpec(DS.getTypeSpecType());
Result = S.getElaboratedType(Keyword, DS.getTypeSpecScope(), Result,
DS.isTypeSpecOwned() ? D : nullptr);
break;
}
case DeclSpec::TST_typename: {
assert(DS.getTypeSpecWidth() == 0 && DS.getTypeSpecComplex() == 0 &&
DS.getTypeSpecSign() == 0 &&
"Can't handle qualifiers on typedef names yet!");
Result = S.GetTypeFromParser(DS.getRepAsType());
if (Result.isNull()) {
declarator.setInvalidType(true);
}
// TypeQuals handled by caller.
break;
}
case DeclSpec::TST_typeofType:
// FIXME: Preserve type source info.
Result = S.GetTypeFromParser(DS.getRepAsType());
assert(!Result.isNull() && "Didn't get a type for typeof?");
if (!Result->isDependentType())
if (const TagType *TT = Result->getAs<TagType>())
S.DiagnoseUseOfDecl(TT->getDecl(), DS.getTypeSpecTypeLoc());
// TypeQuals handled by caller.
Result = Context.getTypeOfType(Result);
break;
case DeclSpec::TST_typeofExpr: {
Expr *E = DS.getRepAsExpr();
assert(E && "Didn't get an expression for typeof?");
// TypeQuals handled by caller.
Result = S.BuildTypeofExprType(E, DS.getTypeSpecTypeLoc());
if (Result.isNull()) {
Result = Context.IntTy;
declarator.setInvalidType(true);
}
break;
}
case DeclSpec::TST_decltype: {
Expr *E = DS.getRepAsExpr();
assert(E && "Didn't get an expression for decltype?");
// TypeQuals handled by caller.
Result = S.BuildDecltypeType(E, DS.getTypeSpecTypeLoc());
if (Result.isNull()) {
Result = Context.IntTy;
declarator.setInvalidType(true);
}
break;
}
case DeclSpec::TST_underlyingType:
Result = S.GetTypeFromParser(DS.getRepAsType());
assert(!Result.isNull() && "Didn't get a type for __underlying_type?");
Result = S.BuildUnaryTransformType(Result,
UnaryTransformType::EnumUnderlyingType,
DS.getTypeSpecTypeLoc());
if (Result.isNull()) {
Result = Context.IntTy;
declarator.setInvalidType(true);
}
break;
case DeclSpec::TST_auto:
Result = Context.getAutoType(QualType(), AutoTypeKeyword::Auto, false);
break;
case DeclSpec::TST_auto_type:
Result = Context.getAutoType(QualType(), AutoTypeKeyword::GNUAutoType, false);
break;
case DeclSpec::TST_decltype_auto:
Result = Context.getAutoType(QualType(), AutoTypeKeyword::DecltypeAuto,
/*IsDependent*/ false);
break;
case DeclSpec::TST_unknown_anytype:
Result = Context.UnknownAnyTy;
break;
case DeclSpec::TST_atomic:
Result = S.GetTypeFromParser(DS.getRepAsType());
assert(!Result.isNull() && "Didn't get a type for _Atomic?");
Result = S.BuildAtomicType(Result, DS.getTypeSpecTypeLoc());
if (Result.isNull()) {
Result = Context.IntTy;
declarator.setInvalidType(true);
}
break;
#define GENERIC_IMAGE_TYPE(ImgType, Id) \
case DeclSpec::TST_##ImgType##_t: \
switch (getImageAccess(DS.getAttributes())) { \
case OpenCLAccessAttr::Keyword_write_only: \
Result = Context.Id##WOTy; \
break; \
case OpenCLAccessAttr::Keyword_read_write: \
Result = Context.Id##RWTy; \
break; \
case OpenCLAccessAttr::Keyword_read_only: \
Result = Context.Id##ROTy; \
break; \
} \
break;
#include "clang/Basic/OpenCLImageTypes.def"
case DeclSpec::TST_error:
Result = Context.IntTy;
declarator.setInvalidType(true);
break;
}
if (S.getLangOpts().OpenCL &&
S.checkOpenCLDisabledTypeDeclSpec(DS, Result))
declarator.setInvalidType(true);
bool IsFixedPointType = DS.getTypeSpecType() == DeclSpec::TST_accum ||
DS.getTypeSpecType() == DeclSpec::TST_fract;
// Only fixed-point types can be saturated.
if (DS.isTypeSpecSat() && !IsFixedPointType)
S.Diag(DS.getTypeSpecSatLoc(), diag::err_invalid_saturation_spec)
<< DS.getSpecifierName(DS.getTypeSpecType(),
Context.getPrintingPolicy());
// Handle complex types.
if (DS.getTypeSpecComplex() == DeclSpec::TSC_complex) {
if (S.getLangOpts().Freestanding)
S.Diag(DS.getTypeSpecComplexLoc(), diag::ext_freestanding_complex);
Result = Context.getComplexType(Result);
} else if (DS.isTypeAltiVecVector()) {
unsigned typeSize = static_cast<unsigned>(Context.getTypeSize(Result));
assert(typeSize > 0 && "type size for vector must be greater than 0 bits");
VectorType::VectorKind VecKind = VectorType::AltiVecVector;
if (DS.isTypeAltiVecPixel())
VecKind = VectorType::AltiVecPixel;
else if (DS.isTypeAltiVecBool())
VecKind = VectorType::AltiVecBool;
Result = Context.getVectorType(Result, 128/typeSize, VecKind);
}
// FIXME: Imaginary.
if (DS.getTypeSpecComplex() == DeclSpec::TSC_imaginary)
S.Diag(DS.getTypeSpecComplexLoc(), diag::err_imaginary_not_supported);
// Before we process any type attributes, synthesize a block literal
// function declarator if necessary.
if (declarator.getContext() == DeclaratorContext::BlockLiteralContext)
maybeSynthesizeBlockSignature(state, Result);
// Apply any type attributes from the decl spec. This may cause the
// list of type attributes to be temporarily saved while the type
// attributes are pushed around.
// Pipe attributes will be handled later (in GetFullTypeForDeclarator).
if (!DS.isTypeSpecPipe())
processTypeAttrs(state, Result, TAL_DeclSpec, DS.getAttributes());
// Apply const/volatile/restrict qualifiers to T.
if (unsigned TypeQuals = DS.getTypeQualifiers()) {
// Warn about CV qualifiers on function types.
// C99 6.7.3p8:
// If the specification of a function type includes any type qualifiers,
// the behavior is undefined.
// C++11 [dcl.fct]p7:
// The effect of a cv-qualifier-seq in a function declarator is not the
// same as adding cv-qualification on top of the function type. In the
// latter case, the cv-qualifiers are ignored.
if (TypeQuals && Result->isFunctionType()) {
diagnoseAndRemoveTypeQualifiers(
S, DS, TypeQuals, Result, DeclSpec::TQ_const | DeclSpec::TQ_volatile,
S.getLangOpts().CPlusPlus
? diag::warn_typecheck_function_qualifiers_ignored
: diag::warn_typecheck_function_qualifiers_unspecified);
// No diagnostic for 'restrict' or '_Atomic' applied to a
// function type; we'll diagnose those later, in BuildQualifiedType.
}
// C++11 [dcl.ref]p1:
// Cv-qualified references are ill-formed except when the
// cv-qualifiers are introduced through the use of a typedef-name
// or decltype-specifier, in which case the cv-qualifiers are ignored.
//
// There don't appear to be any other contexts in which a cv-qualified
// reference type could be formed, so the 'ill-formed' clause here appears
// to never happen.
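// e.g. given 'typedef int &IntRef;', a declaration 'const IntRef r = i;'
// simply ignores the 'const'.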
if (TypeQuals && Result->isReferenceType()) {
diagnoseAndRemoveTypeQualifiers(
S, DS, TypeQuals, Result,
DeclSpec::TQ_const | DeclSpec::TQ_volatile | DeclSpec::TQ_atomic,
diag::warn_typecheck_reference_qualifiers);
}
// C90 6.5.3 constraints: "The same type qualifier shall not appear more
// than once in the same specifier-list or qualifier-list, either directly
// or via one or more typedefs."
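// e.g. 'const const int x;' or 'typedef const int CInt; const CInt x;'
// each draw ext_duplicate_declspec here in C90 mode.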
if (!S.getLangOpts().C99 && !S.getLangOpts().CPlusPlus
&& TypeQuals & Result.getCVRQualifiers()) {
if (TypeQuals & DeclSpec::TQ_const && Result.isConstQualified()) {
S.Diag(DS.getConstSpecLoc(), diag::ext_duplicate_declspec)
<< "const";
}
if (TypeQuals & DeclSpec::TQ_volatile && Result.isVolatileQualified()) {
S.Diag(DS.getVolatileSpecLoc(), diag::ext_duplicate_declspec)
<< "volatile";
}
// C90 doesn't have restrict or _Atomic, so it doesn't force us to
// produce a warning in this case.
}
QualType Qualified = S.BuildQualifiedType(Result, DeclLoc, TypeQuals, &DS);
// If adding qualifiers fails, just use the unqualified type.
if (Qualified.isNull())
declarator.setInvalidType(true);
else
Result = Qualified;
}
assert(!Result.isNull() && "This function should not return a null type");
return Result;
}
static std::string getPrintableNameForEntity(DeclarationName Entity) {
if (Entity)
return Entity.getAsString();
return "type name";
}
QualType Sema::BuildQualifiedType(QualType T, SourceLocation Loc,
Qualifiers Qs, const DeclSpec *DS) {
if (T.isNull())
return QualType();
// Ignore any attempt to form a cv-qualified reference.
if (T->isReferenceType()) {
Qs.removeConst();
Qs.removeVolatile();
}
// Enforce C99 6.7.3p2: "Types other than pointer types derived from
// object or incomplete types shall not be restrict-qualified."
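// e.g. 'int *restrict p;' is fine, but 'int restrict i;' and
// 'void (*restrict fp)(void);' are rejected below.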
if (Qs.hasRestrict()) {
unsigned DiagID = 0;
QualType ProblemTy;
if (T->isAnyPointerType() || T->isReferenceType() ||
T->isMemberPointerType()) {
QualType EltTy;
if (T->isObjCObjectPointerType())
EltTy = T;
else if (const MemberPointerType *PTy = T->getAs<MemberPointerType>())
EltTy = PTy->getPointeeType();
else
EltTy = T->getPointeeType();
// If we have a pointer or reference, the pointee must have an object or
// incomplete type.
if (!EltTy->isIncompleteOrObjectType()) {
DiagID = diag::err_typecheck_invalid_restrict_invalid_pointee;
ProblemTy = EltTy;
}
} else if (!T->isDependentType()) {
DiagID = diag::err_typecheck_invalid_restrict_not_pointer;
ProblemTy = T;
}
if (DiagID) {
Diag(DS ? DS->getRestrictSpecLoc() : Loc, DiagID) << ProblemTy;
Qs.removeRestrict();
}
}
return Context.getQualifiedType(T, Qs);
}
QualType Sema::BuildQualifiedType(QualType T, SourceLocation Loc,
unsigned CVRAU, const DeclSpec *DS) {
if (T.isNull())
return QualType();
// Ignore any attempt to form a cv-qualified reference.
if (T->isReferenceType())
CVRAU &=
~(DeclSpec::TQ_const | DeclSpec::TQ_volatile | DeclSpec::TQ_atomic);
// Convert from DeclSpec::TQ to Qualifiers::TQ by just dropping TQ_atomic and
// TQ_unaligned.
unsigned CVR = CVRAU & ~(DeclSpec::TQ_atomic | DeclSpec::TQ_unaligned);
// C11 6.7.3/5:
// If the same qualifier appears more than once in the same
// specifier-qualifier-list, either directly or via one or more typedefs,
// the behavior is the same as if it appeared only once.
//
// It's not specified what happens when the _Atomic qualifier is applied to
// a type specified with the _Atomic specifier, but we assume that this
// should be treated as if the _Atomic qualifier appeared multiple times.
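// e.g. '_Atomic _Atomic(int) x;' is treated the same as '_Atomic(int) x;'.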
if (CVRAU & DeclSpec::TQ_atomic && !T->isAtomicType()) {
// C11 6.7.3/5:
// If other qualifiers appear along with the _Atomic qualifier in a
// specifier-qualifier-list, the resulting type is the so-qualified
// atomic type.
//
// Don't need to worry about array types here, since _Atomic can't be
// applied to such types.
SplitQualType Split = T.getSplitUnqualifiedType();
T = BuildAtomicType(QualType(Split.Ty, 0),
DS ? DS->getAtomicSpecLoc() : Loc);
if (T.isNull())
return T;
Split.Quals.addCVRQualifiers(CVR);
return BuildQualifiedType(T, Loc, Split.Quals);
}
Qualifiers Q = Qualifiers::fromCVRMask(CVR);
Q.setUnaligned(CVRAU & DeclSpec::TQ_unaligned);
return BuildQualifiedType(T, Loc, Q, DS);
}
/// Build a paren type including \p T.
QualType Sema::BuildParenType(QualType T) {
return Context.getParenType(T);
}
/// Given that we're building a pointer or reference to the given type,
/// infer the ARC lifetime qualifier for the pointee if it needs one.
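///
/// For example, a pointee of type 'const id' infers '__unsafe_unretained',
/// while a plain 'id' pointee in an evaluated context is diagnosed and
/// recovers as '__strong'.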
static QualType inferARCLifetimeForPointee(Sema &S, QualType type,
SourceLocation loc,
bool isReference) {
// Bail out if retention is unrequired or already specified.
if (!type->isObjCLifetimeType() ||
type.getObjCLifetime() != Qualifiers::OCL_None)
return type;
Qualifiers::ObjCLifetime implicitLifetime = Qualifiers::OCL_None;
// If the object type is const-qualified, we can safely use
// __unsafe_unretained. This is safe (because there are no read
// barriers), and it'll be safe to coerce anything but __weak* to
// the resulting type.
if (type.isConstQualified()) {
implicitLifetime = Qualifiers::OCL_ExplicitNone;
// Otherwise, check whether the static type does not require
// retaining. This currently only triggers for Class (possibly
// protocol-qualified, and arrays thereof).
} else if (type->isObjCARCImplicitlyUnretainedType()) {
implicitLifetime = Qualifiers::OCL_ExplicitNone;
// If we are in an unevaluated context, like sizeof, skip adding a
// qualification.
} else if (S.isUnevaluatedContext()) {
return type;
// If that failed, give an error and recover using __strong. __strong
// is the option most likely to prevent spurious second-order diagnostics,
// like when binding a reference to a field.
} else {
// These types can show up in private ivars in system headers, so
// we need this to not be an error in those cases. Instead we
// want to delay.
if (S.DelayedDiagnostics.shouldDelayDiagnostics()) {
S.DelayedDiagnostics.add(
sema::DelayedDiagnostic::makeForbiddenType(loc,
diag::err_arc_indirect_no_ownership, type, isReference));
} else {
S.Diag(loc, diag::err_arc_indirect_no_ownership) << type << isReference;
}
implicitLifetime = Qualifiers::OCL_Strong;
}
assert(implicitLifetime && "didn't infer any lifetime!");
Qualifiers qs;
qs.addObjCLifetime(implicitLifetime);
return S.Context.getQualifiedType(type, qs);
}
static std::string getFunctionQualifiersAsString(const FunctionProtoType *FnTy){
std::string Quals = FnTy->getMethodQuals().getAsString();
switch (FnTy->getRefQualifier()) {
case RQ_None:
break;
case RQ_LValue:
if (!Quals.empty())
Quals += ' ';
Quals += '&';
break;
case RQ_RValue:
if (!Quals.empty())
Quals += ' ';
Quals += "&&";
break;
}
return Quals;
}
namespace {
/// Kinds of declarator that cannot contain a qualified function type.
///
/// C++98 [dcl.fct]p4 / C++11 [dcl.fct]p6:
/// a function type with a cv-qualifier or a ref-qualifier can only appear
/// at the topmost level of a type.
///
/// Parens and member pointers are permitted. We don't diagnose array and
/// function declarators, because they don't allow function types at all.
///
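/// For example, 'void (*p)() const;' tries to form a pointer to a
/// const-qualified function type and is diagnosed here (as QFK_Pointer).
///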
/// The values of this enum are used in diagnostics.
enum QualifiedFunctionKind { QFK_BlockPointer, QFK_Pointer, QFK_Reference };
} // end anonymous namespace
/// Check whether the type T is a qualified function type, and if it is,
/// diagnose that it cannot be contained within the given kind of declarator.
static bool checkQualifiedFunction(Sema &S, QualType T, SourceLocation Loc,
QualifiedFunctionKind QFK) {
// Does T refer to a function type with a cv-qualifier or a ref-qualifier?
const FunctionProtoType *FPT = T->getAs<FunctionProtoType>();
if (!FPT || (FPT->getMethodQuals().empty() && FPT->getRefQualifier() == RQ_None))
return false;
S.Diag(Loc, diag::err_compound_qualified_function_type)
<< QFK << isa<FunctionType>(T.IgnoreParens()) << T
<< getFunctionQualifiersAsString(FPT);
return true;
}
/// Build a pointer type.
///
/// \param T The type to which we'll be building a pointer.
///
/// \param Loc The location of the entity whose type involves this
/// pointer type or, if there is no such entity, the location of the
/// type that will have pointer type.
///
/// \param Entity The name of the entity that involves the pointer
/// type, if known.
///
/// \returns A suitable pointer type, if there are no
/// errors. Otherwise, returns a NULL type.
QualType Sema::BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity) {
if (T->isReferenceType()) {
// C++ 8.3.2p4: There shall be no ... pointers to references ...
Diag(Loc, diag::err_illegal_decl_pointer_to_reference)
<< getPrintableNameForEntity(Entity) << T;
return QualType();
}
if (T->isFunctionType() && getLangOpts().OpenCL) {
Diag(Loc, diag::err_opencl_function_pointer);
return QualType();
}
if (checkQualifiedFunction(*this, T, Loc, QFK_Pointer))
return QualType();
assert(!T->isObjCObjectType() && "Should build ObjCObjectPointerType");
// In ARC, it is forbidden to build pointers to unqualified pointers.
if (getLangOpts().ObjCAutoRefCount)
T = inferARCLifetimeForPointee(*this, T, Loc, /*reference*/ false);
// Build the pointer type.
return Context.getPointerType(T);
}
/// Build a reference type.
///
/// \param T The type to which we'll be building a reference.
///
/// \param Loc The location of the entity whose type involves this
/// reference type or, if there is no such entity, the location of the
/// type that will have reference type.
///
/// \param Entity The name of the entity that involves the reference
/// type, if known.
///
/// \returns A suitable reference type, if there are no
/// errors. Otherwise, returns a NULL type.
QualType Sema::BuildReferenceType(QualType T, bool SpelledAsLValue,
SourceLocation Loc,
DeclarationName Entity) {
assert(Context.getCanonicalType(T) != Context.OverloadTy &&
"Unresolved overloaded function type");
// C++0x [dcl.ref]p6:
// If a typedef (7.1.3), a type template-parameter (14.3.1), or a
// decltype-specifier (7.1.6.2) denotes a type TR that is a reference to a
// type T, an attempt to create the type "lvalue reference to cv TR" creates
// the type "lvalue reference to T", while an attempt to create the type
// "rvalue reference to cv TR" creates the type TR.
bool LValueRef = SpelledAsLValue || T->getAs<LValueReferenceType>();
// C++ [dcl.ref]p4: There shall be no references to references.
//
// According to C++ DR 106, references to references are only
// diagnosed when they are written directly (e.g., "int & &"),
// but not when they happen via a typedef:
//
// typedef int& intref;
// typedef intref& intref2;
//
// Parser::ParseDeclaratorInternal diagnoses the case where
// references are written directly; here, we handle the
// collapsing of references-to-references as described in C++0x.
// DR 106 and 540 introduce reference-collapsing into C++98/03.
// C++ [dcl.ref]p1:
// A declarator that specifies the type "reference to cv void"
// is ill-formed.
if (T->isVoidType()) {
Diag(Loc, diag::err_reference_to_void);
return QualType();
}
if (checkQualifiedFunction(*this, T, Loc, QFK_Reference))
return QualType();
// In ARC, it is forbidden to build references to unqualified pointers.
if (getLangOpts().ObjCAutoRefCount)
T = inferARCLifetimeForPointee(*this, T, Loc, /*reference*/ true);
// Handle restrict on references.
if (LValueRef)
return Context.getLValueReferenceType(T, SpelledAsLValue);
return Context.getRValueReferenceType(T);
}
/// Build a Read-only Pipe type.
///
/// \param T The type to which we'll be building a Pipe.
///
/// \param Loc Currently unused.
///
/// \returns A suitable pipe type, if there are no errors. Otherwise, returns a
/// NULL type.
QualType Sema::BuildReadPipeType(QualType T, SourceLocation Loc) {
return Context.getReadPipeType(T);
}
/// Build a Write-only Pipe type.
///
/// \param T The type to which we'll be building a Pipe.
///
/// \param Loc Currently unused.
///
/// \returns A suitable pipe type, if there are no errors. Otherwise, returns a
/// NULL type.
QualType Sema::BuildWritePipeType(QualType T, SourceLocation Loc) {
return Context.getWritePipeType(T);
}
/// Check whether the specified array size makes the array type a VLA. If so,
/// return true; if not, return the size of the array in SizeVal.
static bool isArraySizeVLA(Sema &S, Expr *ArraySize, llvm::APSInt &SizeVal) {
// If the size is an ICE, it certainly isn't a VLA. If we're in a GNU mode
// (like gnu99, but not c99), accept any evaluatable value as an extension.
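// e.g. 'enum { N = 8 }; int a[N];' has an ICE bound, while 'int a[n]' with
// a runtime 'n' is a VLA.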
class VLADiagnoser : public Sema::VerifyICEDiagnoser {
public:
VLADiagnoser() : Sema::VerifyICEDiagnoser(true) {}
void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) override {
}
void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR) override {
S.Diag(Loc, diag::ext_vla_folded_to_constant) << SR;
}
} Diagnoser;
return S.VerifyIntegerConstantExpression(ArraySize, &SizeVal, Diagnoser,
S.LangOpts.GNUMode ||
S.LangOpts.OpenCL).isInvalid();
}
/// Build an array type.
///
/// \param T The type of each element in the array.
///
/// \param ASM C99 array size modifier (e.g., '*', 'static').
///
/// \param ArraySize Expression describing the size of the array.
///
/// \param Brackets The range from the opening '[' to the closing ']'.
///
/// \param Entity The name of the entity that involves the array
/// type, if known.
///
/// \returns A suitable array type, if there are no errors. Otherwise,
/// returns a NULL type.
QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity) {
SourceLocation Loc = Brackets.getBegin();
if (getLangOpts().CPlusPlus) {
// C++ [dcl.array]p1:
// T is called the array element type; this type shall not be a reference
// type, the (possibly cv-qualified) type void, a function type or an
// abstract class type.
//
// C++ [dcl.array]p3:
// When several "array of" specifications are adjacent, [...] only the
// first of the constant expressions that specify the bounds of the arrays
// may be omitted.
//
// Note: function types are handled in the common path with C.
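// e.g. 'int &a[4];', 'void a[4];', and arrays of an abstract class type
// are all rejected below.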
if (T->isReferenceType()) {
Diag(Loc, diag::err_illegal_decl_array_of_references)
<< getPrintableNameForEntity(Entity) << T;
return QualType();
}
if (T->isVoidType() || T->isIncompleteArrayType()) {
Diag(Loc, diag::err_illegal_decl_array_incomplete_type) << T;
return QualType();
}
if (RequireNonAbstractType(Brackets.getBegin(), T,
diag::err_array_of_abstract_type))
return QualType();
// Mentioning a member pointer type for an array type causes us to lock in
// an inheritance model, even if it's inside an unused typedef.
if (Context.getTargetInfo().getCXXABI().isMicrosoft())
if (const MemberPointerType *MPTy = T->getAs<MemberPointerType>())
if (!MPTy->getClass()->isDependentType())
(void)isCompleteType(Loc, T);
} else {
// C99 6.7.5.2p1: If the element type is an incomplete or function type,
// reject it (e.g. void ary[7], struct foo ary[7], void ary[7]())
if (RequireCompleteType(Loc, T,
diag::err_illegal_decl_array_incomplete_type))
return QualType();
}
if (T->isFunctionType()) {
Diag(Loc, diag::err_illegal_decl_array_of_functions)
<< getPrintableNameForEntity(Entity) << T;
return QualType();
}
if (const RecordType *EltTy = T->getAs<RecordType>()) {
// If the element type is a struct or union that contains a flexible
// array member, accept it as a GNU extension: C99 6.7.2.1p2.
if (EltTy->getDecl()->hasFlexibleArrayMember())
Diag(Loc, diag::ext_flexible_array_in_array) << T;
} else if (T->isObjCObjectType()) {
Diag(Loc, diag::err_objc_array_of_interfaces) << T;
return QualType();
}
// Do placeholder conversions on the array size expression.
if (ArraySize && ArraySize->hasPlaceholderType()) {
ExprResult Result = CheckPlaceholderExpr(ArraySize);
if (Result.isInvalid()) return QualType();
ArraySize = Result.get();
}
// Do lvalue-to-rvalue conversions on the array size expression.
if (ArraySize && !ArraySize->isRValue()) {
ExprResult Result = DefaultLvalueConversion(ArraySize);
if (Result.isInvalid())
return QualType();
ArraySize = Result.get();
}
// C99 6.7.5.2p1: The size expression shall have integer type.
// C++11 allows contextual conversions to such types.
if (!getLangOpts().CPlusPlus11 &&
ArraySize && !ArraySize->isTypeDependent() &&
!ArraySize->getType()->isIntegralOrUnscopedEnumerationType()) {
Diag(ArraySize->getBeginLoc(), diag::err_array_size_non_int)
<< ArraySize->getType() << ArraySize->getSourceRange();
return QualType();
}
llvm::APSInt ConstVal(Context.getTypeSize(Context.getSizeType()));
if (!ArraySize) {
if (ASM == ArrayType::Star)
T = Context.getVariableArrayType(T, nullptr, ASM, Quals, Brackets);
else
T = Context.getIncompleteArrayType(T, ASM, Quals);
} else if (ArraySize->isTypeDependent() || ArraySize->isValueDependent()) {
T = Context.getDependentSizedArrayType(T, ArraySize, ASM, Quals, Brackets);
} else if ((!T->isDependentType() && !T->isIncompleteType() &&
!T->isConstantSizeType()) ||
isArraySizeVLA(*this, ArraySize, ConstVal)) {
// Even in C++11, don't allow contextual conversions in the array bound
// of a VLA.
if (getLangOpts().CPlusPlus11 &&
!ArraySize->getType()->isIntegralOrUnscopedEnumerationType()) {
Diag(ArraySize->getBeginLoc(), diag::err_array_size_non_int)
<< ArraySize->getType() << ArraySize->getSourceRange();
return QualType();
}
// C99: an array with an element type that has a non-constant size is a VLA.
// C99: an array with a non-ICE size is a VLA. We accept any expression
// that we can fold to a non-zero positive value as an extension.
T = Context.getVariableArrayType(T, ArraySize, ASM, Quals, Brackets);
} else {
// C99 6.7.5.2p1: If the expression is a constant expression, it shall
// have a value greater than zero.
if (ConstVal.isSigned() && ConstVal.isNegative()) {
if (Entity)
Diag(ArraySize->getBeginLoc(), diag::err_decl_negative_array_size)
<< getPrintableNameForEntity(Entity) << ArraySize->getSourceRange();
else
Diag(ArraySize->getBeginLoc(), diag::err_typecheck_negative_array_size)
<< ArraySize->getSourceRange();
return QualType();
}
if (ConstVal == 0) {
// GCC accepts zero sized static arrays. We allow them when
// we're not in a SFINAE context.
Diag(ArraySize->getBeginLoc(), isSFINAEContext()
? diag::err_typecheck_zero_array_size
: diag::ext_typecheck_zero_array_size)
<< ArraySize->getSourceRange();
if (ASM == ArrayType::Static) {
Diag(ArraySize->getBeginLoc(),
diag::warn_typecheck_zero_static_array_size)
<< ArraySize->getSourceRange();
ASM = ArrayType::Normal;
}
} else if (!T->isDependentType() && !T->isVariablyModifiedType() &&
!T->isIncompleteType() && !T->isUndeducedType()) {
// Is the array too large?
unsigned ActiveSizeBits
= ConstantArrayType::getNumAddressingBits(Context, T, ConstVal);
if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context)) {
Diag(ArraySize->getBeginLoc(), diag::err_array_too_large)
<< ConstVal.toString(10) << ArraySize->getSourceRange();
return QualType();
}
}
T = Context.getConstantArrayType(T, ConstVal, ASM, Quals);
}
// OpenCL v1.2 s6.9.d: variable length arrays are not supported.
if (getLangOpts().OpenCL && T->isVariableArrayType()) {
Diag(Loc, diag::err_opencl_vla);
return QualType();
}
if (T->isVariableArrayType() && !Context.getTargetInfo().isVLASupported()) {
// CUDA device code and some other targets don't support VLAs.
targetDiag(Loc, (getLangOpts().CUDA && getLangOpts().CUDAIsDevice)
? diag::err_cuda_vla
: diag::err_vla_unsupported)
<< ((getLangOpts().CUDA && getLangOpts().CUDAIsDevice)
? CurrentCUDATarget()
: CFT_InvalidTarget);
}
// If this is not C99, extwarn about VLAs and C99 array size modifiers.
if (!getLangOpts().C99) {
if (T->isVariableArrayType()) {
// Prohibit the use of VLAs during template argument deduction.
if (isSFINAEContext()) {
Diag(Loc, diag::err_vla_in_sfinae);
return QualType();
}
// Just extwarn about VLAs.
else
Diag(Loc, diag::ext_vla);
} else if (ASM != ArrayType::Normal || Quals != 0)
Diag(Loc,
getLangOpts().CPlusPlus? diag::err_c99_array_usage_cxx
: diag::ext_c99_array_usage) << ASM;
}
if (T->isVariableArrayType()) {
// Warn about VLAs for -Wvla.
Diag(Loc, diag::warn_vla_used);
}
// OpenCL v2.0 s6.12.5 - Arrays of blocks are not supported.
// OpenCL v2.0 s6.16.13.1 - Arrays of pipe type are not supported.
// OpenCL v2.0 s6.9.b - Arrays of image/sampler type are not supported.
if (getLangOpts().OpenCL) {
const QualType ArrType = Context.getBaseElementType(T);
if (ArrType->isBlockPointerType() || ArrType->isPipeType() ||
ArrType->isSamplerT() || ArrType->isImageType()) {
Diag(Loc, diag::err_opencl_invalid_type_array) << ArrType;
return QualType();
}
}
return T;
}
QualType Sema::BuildVectorType(QualType CurType, Expr *SizeExpr,
SourceLocation AttrLoc) {
// The base type must be integer (not Boolean or enumeration) or float, and
// can't already be a vector.
if (!CurType->isDependentType() &&
(!CurType->isBuiltinType() || CurType->isBooleanType() ||
(!CurType->isIntegerType() && !CurType->isRealFloatingType()))) {
Diag(AttrLoc, diag::err_attribute_invalid_vector_type) << CurType;
return QualType();
}
if (SizeExpr->isTypeDependent() || SizeExpr->isValueDependent())
return Context.getDependentVectorType(CurType, SizeExpr, AttrLoc,
VectorType::GenericVector);
llvm::APSInt VecSize(32);
if (!SizeExpr->isIntegerConstantExpr(VecSize, Context)) {
Diag(AttrLoc, diag::err_attribute_argument_type)
<< "vector_size" << AANT_ArgumentIntegerConstant
<< SizeExpr->getSourceRange();
return QualType();
}
if (CurType->isDependentType())
return Context.getDependentVectorType(CurType, SizeExpr, AttrLoc,
VectorType::GenericVector);
unsigned VectorSize = static_cast<unsigned>(VecSize.getZExtValue() * 8);
unsigned TypeSize = static_cast<unsigned>(Context.getTypeSize(CurType));
if (VectorSize == 0) {
Diag(AttrLoc, diag::err_attribute_zero_size) << SizeExpr->getSourceRange();
return QualType();
}
// VectorSize is in bits; it must be a whole multiple of the element size.
if (VectorSize % TypeSize) {
Diag(AttrLoc, diag::err_attribute_invalid_size)
<< SizeExpr->getSourceRange();
return QualType();
}
if (VectorType::isVectorSizeTooLarge(VectorSize / TypeSize)) {
Diag(AttrLoc, diag::err_attribute_size_too_large)
<< SizeExpr->getSourceRange();
return QualType();
}
return Context.getVectorType(CurType, VectorSize / TypeSize,
VectorType::GenericVector);
}
/// Build an ext-vector type.
///
/// Run the required checks for the extended vector type.
QualType Sema::BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc) {
// Unlike gcc's vector_size attribute, we do not allow vectors to be defined
// in conjunction with complex types (pointers, arrays, functions, etc.).
//
// Additionally, OpenCL prohibits vectors of booleans (they're considered a
// reserved data type under OpenCL v2.0 s6.1.4), we don't support selects
// on bitvectors, and we have no well-defined ABI for bitvectors, so vectors
// of bool aren't allowed.
if ((!T->isDependentType() && !T->isIntegerType() &&
!T->isRealFloatingType()) ||
T->isBooleanType()) {
Diag(AttrLoc, diag::err_attribute_invalid_vector_type) << T;
return QualType();
}
if (!ArraySize->isTypeDependent() && !ArraySize->isValueDependent()) {
llvm::APSInt vecSize(32);
if (!ArraySize->isIntegerConstantExpr(vecSize, Context)) {
Diag(AttrLoc, diag::err_attribute_argument_type)
<< "ext_vector_type" << AANT_ArgumentIntegerConstant
<< ArraySize->getSourceRange();
return QualType();
}
// Unlike gcc's vector_size attribute, the size is specified as the
// number of elements, not the number of bytes.
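// e.g. 'typedef float float4 __attribute__((ext_vector_type(4)));' yields a
// 4-element vector, as would gcc-style 'vector_size(16)' on float.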
unsigned vectorSize = static_cast<unsigned>(vecSize.getZExtValue());
if (vectorSize == 0) {
Diag(AttrLoc, diag::err_attribute_zero_size)
<< ArraySize->getSourceRange();
return QualType();
}
if (VectorType::isVectorSizeTooLarge(vectorSize)) {
Diag(AttrLoc, diag::err_attribute_size_too_large)
<< ArraySize->getSourceRange();
return QualType();
}
return Context.getExtVectorType(T, vectorSize);
}
return Context.getDependentSizedExtVectorType(T, ArraySize, AttrLoc);
}
bool Sema::CheckFunctionReturnType(QualType T, SourceLocation Loc) {
if (T->isArrayType() || T->isFunctionType()) {
Diag(Loc, diag::err_func_returning_array_function)
<< T->isFunctionType() << T;
return true;
}
// Functions cannot return half FP.
if (T->isHalfType() && !getLangOpts().HalfArgsAndReturns) {
Diag(Loc, diag::err_parameters_retval_cannot_have_fp16_type) << 1 <<
FixItHint::CreateInsertion(Loc, "*");
return true;
}
// Methods cannot return interface types. All ObjC objects are
// passed by reference.
if (T->isObjCObjectType()) {
Diag(Loc, diag::err_object_cannot_be_passed_returned_by_value)
<< 0 << T << FixItHint::CreateInsertion(Loc, "*");
return true;
}
if (T.hasNonTrivialToPrimitiveDestructCUnion() ||
T.hasNonTrivialToPrimitiveCopyCUnion())
checkNonTrivialCUnion(T, Loc, NTCUC_FunctionReturn,
NTCUK_Destruct|NTCUK_Copy);
return false;
}
/// Check the extended parameter information. Most of the necessary
/// checking should occur when applying the parameter attribute; the
/// only other checks required are positional restrictions.
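///
/// For example, under the 'swiftcall' convention any 'swift_indirect_result'
/// parameters must form a prefix of the parameter list, and a
/// 'swift_error_result' parameter must immediately follow a 'swift_context'
/// parameter.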
static void checkExtParameterInfos(Sema &S, ArrayRef<QualType> paramTypes,
const FunctionProtoType::ExtProtoInfo &EPI,
llvm::function_ref<SourceLocation(unsigned)> getParamLoc) {
assert(EPI.ExtParameterInfos && "shouldn't get here without param infos");
bool hasCheckedSwiftCall = false;
auto checkForSwiftCC = [&](unsigned paramIndex) {
// Only do this once.
if (hasCheckedSwiftCall) return;
hasCheckedSwiftCall = true;
if (EPI.ExtInfo.getCC() == CC_Swift) return;
S.Diag(getParamLoc(paramIndex), diag::err_swift_param_attr_not_swiftcall)
<< getParameterABISpelling(EPI.ExtParameterInfos[paramIndex].getABI());
};
for (size_t paramIndex = 0, numParams = paramTypes.size();
paramIndex != numParams; ++paramIndex) {
switch (EPI.ExtParameterInfos[paramIndex].getABI()) {
// Nothing interesting to check for ordinary-ABI parameters.
case ParameterABI::Ordinary:
continue;
// swift_indirect_result parameters must be a prefix of the function
// arguments.
case ParameterABI::SwiftIndirectResult:
checkForSwiftCC(paramIndex);
if (paramIndex != 0 &&
EPI.ExtParameterInfos[paramIndex - 1].getABI()
!= ParameterABI::SwiftIndirectResult) {
S.Diag(getParamLoc(paramIndex),
diag::err_swift_indirect_result_not_first);
}
continue;
case ParameterABI::SwiftContext:
checkForSwiftCC(paramIndex);
continue;
// swift_error parameters must be preceded by a swift_context parameter.
case ParameterABI::SwiftErrorResult:
checkForSwiftCC(paramIndex);
if (paramIndex == 0 ||
EPI.ExtParameterInfos[paramIndex - 1].getABI() !=
ParameterABI::SwiftContext) {
S.Diag(getParamLoc(paramIndex),
diag::err_swift_error_result_not_after_swift_context);
}
continue;
}
llvm_unreachable("bad ABI kind");
}
}
QualType Sema::BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI) {
bool Invalid = false;
Invalid |= CheckFunctionReturnType(T, Loc);
for (unsigned Idx = 0, Cnt = ParamTypes.size(); Idx < Cnt; ++Idx) {
// FIXME: Loc is too imprecise here; should use proper locations for args.
QualType ParamType = Context.getAdjustedParameterType(ParamTypes[Idx]);
if (ParamType->isVoidType()) {
Diag(Loc, diag::err_param_with_void_type);
Invalid = true;
} else if (ParamType->isHalfType() && !getLangOpts().HalfArgsAndReturns) {
// Disallow half FP arguments.
Diag(Loc, diag::err_parameters_retval_cannot_have_fp16_type) << 0 <<
FixItHint::CreateInsertion(Loc, "*");
Invalid = true;
}
ParamTypes[Idx] = ParamType;
}
if (EPI.ExtParameterInfos) {
checkExtParameterInfos(*this, ParamTypes, EPI,
[=](unsigned i) { return Loc; });
}
if (EPI.ExtInfo.getProducesResult()) {
// This is just a warning, so we can't fail to build if we see it.
checkNSReturnsRetainedReturnType(Loc, T);
}
if (Invalid)
return QualType();
return Context.getFunctionType(T, ParamTypes, EPI);
}
/// Build a member pointer type \c T Class::*.
///
/// \param T the type to which the member pointer refers.
/// \param Class the class type into which the member pointer points.
/// \param Loc the location where this type begins
/// \param Entity the name of the entity that will have this member pointer type
///
/// \returns a member pointer type, if successful, or a NULL type if there was
/// an error.
QualType Sema::BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity) {
// Verify that we're not building a pointer to pointer to function with
// exception specification.
if (CheckDistantExceptionSpec(T)) {
Diag(Loc, diag::err_distant_exception_spec);
return QualType();
}
// C++ 8.3.3p3: A pointer to member shall not point to ... a member
// with reference type, or "cv void."
if (T->isReferenceType()) {
Diag(Loc, diag::err_illegal_decl_mempointer_to_reference)
<< getPrintableNameForEntity(Entity) << T;
return QualType();
}
if (T->isVoidType()) {
Diag(Loc, diag::err_illegal_decl_mempointer_to_void)
<< getPrintableNameForEntity(Entity);
return QualType();
}
if (!Class->isDependentType() && !Class->isRecordType()) {
Diag(Loc, diag::err_mempointer_in_nonclass_type) << Class;
return QualType();
}
// Adjust the default free function calling convention to the default method
// calling convention.
bool IsCtorOrDtor =
(Entity.getNameKind() == DeclarationName::CXXConstructorName) ||
(Entity.getNameKind() == DeclarationName::CXXDestructorName);
if (T->isFunctionType())
adjustMemberFunctionCC(T, /*IsStatic=*/false, IsCtorOrDtor, Loc);
return Context.getMemberPointerType(T, Class.getTypePtr());
}
/// Build a block pointer type.
///
/// \param T The type to which we'll be building a block pointer.
///
/// \param Loc The source location, used for diagnostics.
///
/// \param Entity The name of the entity that involves the block pointer
/// type, if known.
///
/// \returns A suitable block pointer type, if there are no
/// errors. Otherwise, returns a NULL type.
QualType Sema::BuildBlockPointerType(QualType T,
SourceLocation Loc,
DeclarationName Entity) {
if (!T->isFunctionType()) {
Diag(Loc, diag::err_nonfunction_block_type);
return QualType();
}
if (checkQualifiedFunction(*this, T, Loc, QFK_BlockPointer))
return QualType();
return Context.getBlockPointerType(T);
}
QualType Sema::GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo) {
QualType QT = Ty.get();
if (QT.isNull()) {
if (TInfo) *TInfo = nullptr;
return QualType();
}
TypeSourceInfo *DI = nullptr;
if (const LocInfoType *LIT = dyn_cast<LocInfoType>(QT)) {
QT = LIT->getType();
DI = LIT->getTypeSourceInfo();
}
if (TInfo) *TInfo = DI;
return QT;
}
static void transferARCOwnershipToDeclaratorChunk(TypeProcessingState &state,
Qualifiers::ObjCLifetime ownership,
unsigned chunkIndex);
/// Given that this is the declaration of a parameter under ARC,
/// attempt to infer attributes and such for pointer-to-whatever
/// types.
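///
/// For example, a parameter written 'NSError **error' is inferred as
/// 'NSError *__autoreleasing *error'.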
static void inferARCWriteback(TypeProcessingState &state,
QualType &declSpecType) {
Sema &S = state.getSema();
Declarator &declarator = state.getDeclarator();
// TODO: should we care about decl qualifiers?
// Check whether the declarator has the expected form. We walk
// from the inside out in order to make the block logic work.
unsigned outermostPointerIndex = 0;
bool isBlockPointer = false;
unsigned numPointers = 0;
for (unsigned i = 0, e = declarator.getNumTypeObjects(); i != e; ++i) {
unsigned chunkIndex = i;
DeclaratorChunk &chunk = declarator.getTypeObject(chunkIndex);
switch (chunk.Kind) {
case DeclaratorChunk::Paren:
// Ignore parens.
break;
case DeclaratorChunk::Reference:
case DeclaratorChunk::Pointer:
// Count the number of pointers. Treat references
// interchangeably as pointers; if they're mis-ordered, normal
// type building will discover that.
outermostPointerIndex = chunkIndex;
numPointers++;
break;
case DeclaratorChunk::BlockPointer:
// If we have a pointer to block pointer, that's an acceptable
// indirect reference; anything else is not an application of
// the rules.
if (numPointers != 1) return;
numPointers++;
outermostPointerIndex = chunkIndex;
isBlockPointer = true;
// We don't care about pointer structure in return values here.
goto done;
case DeclaratorChunk::Array: // suppress if written (id[])?
case DeclaratorChunk::Function:
case DeclaratorChunk::MemberPointer:
case DeclaratorChunk::Pipe:
return;
}
}
done:
// If we have *one* pointer, then we want to throw the qualifier on
// the declaration-specifiers, which means that it needs to be a
// retainable object type.
if (numPointers == 1) {
// If it's not a retainable object type, the rule doesn't apply.
if (!declSpecType->isObjCRetainableType()) return;
// If it already has lifetime, don't do anything.
if (declSpecType.getObjCLifetime()) return;
// Otherwise, modify the type in-place.
Qualifiers qs;
if (declSpecType->isObjCARCImplicitlyUnretainedType())
qs.addObjCLifetime(Qualifiers::OCL_ExplicitNone);
else
qs.addObjCLifetime(Qualifiers::OCL_Autoreleasing);
declSpecType = S.Context.getQualifiedType(declSpecType, qs);
// If we have *two* pointers, then we want to throw the qualifier on
// the outermost pointer.
} else if (numPointers == 2) {
// If we don't have a block pointer, we need to check whether the
// declaration-specifiers gave us something that will turn into a
// retainable object pointer after we slap the first pointer on it.
if (!isBlockPointer && !declSpecType->isObjCObjectType())
return;
// Look for an explicit lifetime attribute there.
DeclaratorChunk &chunk = declarator.getTypeObject(outermostPointerIndex);
if (chunk.Kind != DeclaratorChunk::Pointer &&
chunk.Kind != DeclaratorChunk::BlockPointer)
return;
for (const ParsedAttr &AL : chunk.getAttrs())
if (AL.getKind() == ParsedAttr::AT_ObjCOwnership)
return;
transferARCOwnershipToDeclaratorChunk(state, Qualifiers::OCL_Autoreleasing,
outermostPointerIndex);
// Any other number of pointers/references does not trigger the rule.
} else return;
// TODO: mark whether we did this inference?
}
void Sema::diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc,
SourceLocation VolatileQualLoc,
SourceLocation RestrictQualLoc,
SourceLocation AtomicQualLoc,
SourceLocation UnalignedQualLoc) {
if (!Quals)
return;
struct Qual {
const char *Name;
unsigned Mask;
SourceLocation Loc;
} const QualKinds[5] = {
{ "const", DeclSpec::TQ_const, ConstQualLoc },
{ "volatile", DeclSpec::TQ_volatile, VolatileQualLoc },
{ "restrict", DeclSpec::TQ_restrict, RestrictQualLoc },
{ "__unaligned", DeclSpec::TQ_unaligned, UnalignedQualLoc },
{ "_Atomic", DeclSpec::TQ_atomic, AtomicQualLoc }
};
SmallString<32> QualStr;
unsigned NumQuals = 0;
SourceLocation Loc;
FixItHint FixIts[5];
// Build a string naming the redundant qualifiers.
for (auto &E : QualKinds) {
if (Quals & E.Mask) {
if (!QualStr.empty()) QualStr += ' ';
QualStr += E.Name;
// If we have a location for the qualifier, offer a fixit.
SourceLocation QualLoc = E.Loc;
if (QualLoc.isValid()) {
FixIts[NumQuals] = FixItHint::CreateRemoval(QualLoc);
if (Loc.isInvalid() ||
getSourceManager().isBeforeInTranslationUnit(QualLoc, Loc))
Loc = QualLoc;
}
++NumQuals;
}
}
Diag(Loc.isInvalid() ? FallbackLoc : Loc, DiagID)
<< QualStr << NumQuals << FixIts[0] << FixIts[1] << FixIts[2] << FixIts[3];
}
// Diagnose pointless type qualifiers on the return type of a function.
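// e.g. 'const int f();' -- the 'const' on the return type is meaningless
// and is diagnosed with -Wignored-qualifiers.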
static void diagnoseRedundantReturnTypeQualifiers(Sema &S, QualType RetTy,
Declarator &D,
unsigned FunctionChunkIndex) {
if (D.getTypeObject(FunctionChunkIndex).Fun.hasTrailingReturnType()) {
// FIXME: TypeSourceInfo doesn't preserve location information for
// qualifiers.
S.diagnoseIgnoredQualifiers(diag::warn_qual_return_type,
RetTy.getLocalCVRQualifiers(),
D.getIdentifierLoc());
return;
}
for (unsigned OuterChunkIndex = FunctionChunkIndex + 1,
End = D.getNumTypeObjects();
OuterChunkIndex != End; ++OuterChunkIndex) {
DeclaratorChunk &OuterChunk = D.getTypeObject(OuterChunkIndex);
switch (OuterChunk.Kind) {
case DeclaratorChunk::Paren:
continue;
case DeclaratorChunk::Pointer: {
DeclaratorChunk::PointerTypeInfo &PTI = OuterChunk.Ptr;
S.diagnoseIgnoredQualifiers(
diag::warn_qual_return_type,
PTI.TypeQuals,
SourceLocation(),
SourceLocation::getFromRawEncoding(PTI.ConstQualLoc),
SourceLocation::getFromRawEncoding(PTI.VolatileQualLoc),
SourceLocation::getFromRawEncoding(PTI.RestrictQualLoc),
SourceLocation::getFromRawEncoding(PTI.AtomicQualLoc),
SourceLocation::getFromRawEncoding(PTI.UnalignedQualLoc));
return;
}
case DeclaratorChunk::Function:
case DeclaratorChunk::BlockPointer:
case DeclaratorChunk::Reference:
case DeclaratorChunk::Array:
case DeclaratorChunk::MemberPointer:
case DeclaratorChunk::Pipe:
// FIXME: We can't currently provide an accurate source location and a
// fix-it hint for these.
unsigned AtomicQual = RetTy->isAtomicType() ? DeclSpec::TQ_atomic : 0;
S.diagnoseIgnoredQualifiers(diag::warn_qual_return_type,
RetTy.getCVRQualifiers() | AtomicQual,
D.getIdentifierLoc());
return;
}
llvm_unreachable("unknown declarator chunk kind");
}
// If the qualifiers come from a conversion function type, don't diagnose
// them -- they're not necessarily redundant, since such a conversion
// operator can be explicitly called as "x.operator const int()".
if (D.getName().getKind() == UnqualifiedIdKind::IK_ConversionFunctionId)
return;
// Only parens remain between here and the decl specifiers. Diagnose any
// qualifiers that are present there.
S.diagnoseIgnoredQualifiers(diag::warn_qual_return_type,
D.getDeclSpec().getTypeQualifiers(),
D.getIdentifierLoc(),
D.getDeclSpec().getConstSpecLoc(),
D.getDeclSpec().getVolatileSpecLoc(),
D.getDeclSpec().getRestrictSpecLoc(),
D.getDeclSpec().getAtomicSpecLoc(),
D.getDeclSpec().getUnalignedSpecLoc());
}
static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
TypeSourceInfo *&ReturnTypeInfo) {
Sema &SemaRef = state.getSema();
Declarator &D = state.getDeclarator();
QualType T;
ReturnTypeInfo = nullptr;
// The TagDecl owned by the DeclSpec.
TagDecl *OwnedTagDecl = nullptr;
switch (D.getName().getKind()) {
case UnqualifiedIdKind::IK_ImplicitSelfParam:
case UnqualifiedIdKind::IK_OperatorFunctionId:
case UnqualifiedIdKind::IK_Identifier:
case UnqualifiedIdKind::IK_LiteralOperatorId:
case UnqualifiedIdKind::IK_TemplateId:
T = ConvertDeclSpecToType(state);
if (!D.isInvalidType() && D.getDeclSpec().isTypeSpecOwned()) {
OwnedTagDecl = cast<TagDecl>(D.getDeclSpec().getRepAsDecl());
// Owned declaration is embedded in declarator.
OwnedTagDecl->setEmbeddedInDeclarator(true);
}
break;
case UnqualifiedIdKind::IK_ConstructorName:
case UnqualifiedIdKind::IK_ConstructorTemplateId:
case UnqualifiedIdKind::IK_DestructorName:
// Constructors and destructors don't have return types. Use
// "void" instead.
T = SemaRef.Context.VoidTy;
processTypeAttrs(state, T, TAL_DeclSpec,
D.getMutableDeclSpec().getAttributes());
break;
case UnqualifiedIdKind::IK_DeductionGuideName:
// Deduction guides have a trailing return type and no type in their
// decl-specifier sequence. Use a placeholder return type for now.
T = SemaRef.Context.DependentTy;
break;
case UnqualifiedIdKind::IK_ConversionFunctionId:
// The result type of a conversion function is the type that it
// converts to.
T = SemaRef.GetTypeFromParser(D.getName().ConversionFunctionId,
&ReturnTypeInfo);
break;
}
if (!D.getAttributes().empty())
distributeTypeAttrsFromDeclarator(state, T);
// C++11 [dcl.spec.auto]p5: reject 'auto' if it is not in an allowed context.
if (DeducedType *Deduced = T->getContainedDeducedType()) {
AutoType *Auto = dyn_cast<AutoType>(Deduced);
int Error = -1;
// Is this an 'auto' or 'decltype(auto)' type (as opposed to __auto_type or
// class template argument deduction)?
bool IsCXXAutoType =
(Auto && Auto->getKeyword() != AutoTypeKeyword::GNUAutoType);
bool IsDeducedReturnType = false;
switch (D.getContext()) {
case DeclaratorContext::LambdaExprContext:
// Declared return type of a lambda-declarator is implicit and is always
// 'auto'.
break;
case DeclaratorContext::ObjCParameterContext:
case DeclaratorContext::ObjCResultContext:
case DeclaratorContext::PrototypeContext:
Error = 0;
break;
case DeclaratorContext::LambdaExprParameterContext:
// In C++14, generic lambdas allow 'auto' in their parameters.
if (!SemaRef.getLangOpts().CPlusPlus14 ||
!Auto || Auto->getKeyword() != AutoTypeKeyword::Auto)
Error = 16;
else {
// If auto is mentioned in a lambda parameter context, convert it to a
// template parameter type.
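// e.g. '[](auto x) { return x; }' behaves as if its call operator were
// declared 'template<class T> auto operator()(T x)'.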
sema::LambdaScopeInfo *LSI = SemaRef.getCurLambda();
assert(LSI && "No LambdaScopeInfo on the stack!");
const unsigned TemplateParameterDepth = LSI->AutoTemplateParameterDepth;
const unsigned AutoParameterPosition = LSI->TemplateParams.size();
const bool IsParameterPack = D.hasEllipsis();
// Create the TemplateTypeParmDecl here to retrieve the corresponding
// template parameter type. Template parameters are temporarily added
// to the TU until the associated TemplateDecl is created.
TemplateTypeParmDecl *CorrespondingTemplateParam =
TemplateTypeParmDecl::Create(
SemaRef.Context, SemaRef.Context.getTranslationUnitDecl(),
/*KeyLoc*/ SourceLocation(), /*NameLoc*/ D.getBeginLoc(),
TemplateParameterDepth, AutoParameterPosition,
/*Identifier*/ nullptr, false, IsParameterPack);
CorrespondingTemplateParam->setImplicit();
LSI->TemplateParams.push_back(CorrespondingTemplateParam);
// Replace the 'auto' in the function parameter with this invented
// template type parameter.
// FIXME: Retain some type sugar to indicate that this was written
// as 'auto'.
T = state.ReplaceAutoType(
T, QualType(CorrespondingTemplateParam->getTypeForDecl(), 0));
}
break;
case DeclaratorContext::MemberContext: {
if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static ||
D.isFunctionDeclarator())
break;
bool Cxx = SemaRef.getLangOpts().CPlusPlus;
switch (cast<TagDecl>(SemaRef.CurContext)->getTagKind()) {
case TTK_Enum: llvm_unreachable("unhandled tag kind");
case TTK_Struct: Error = Cxx ? 1 : 2; /* Struct member */ break;
case TTK_Union: Error = Cxx ? 3 : 4; /* Union member */ break;
case TTK_Class: Error = 5; /* Class member */ break;
case TTK_Interface: Error = 6; /* Interface member */ break;
}
if (D.getDeclSpec().isFriendSpecified())
Error = 20; // Friend type
break;
}
case DeclaratorContext::CXXCatchContext:
case DeclaratorContext::ObjCCatchContext:
Error = 7; // Exception declaration
break;
case DeclaratorContext::TemplateParamContext:
if (isa<DeducedTemplateSpecializationType>(Deduced))
Error = 19; // Template parameter
else if (!SemaRef.getLangOpts().CPlusPlus17)
Error = 8; // Template parameter (until C++17)
break;
case DeclaratorContext::BlockLiteralContext:
Error = 9; // Block literal
break;
case DeclaratorContext::TemplateArgContext:
// Within a template argument list, a deduced template specialization
// type will be reinterpreted as a template template argument.
if (isa<DeducedTemplateSpecializationType>(Deduced) &&
!D.getNumTypeObjects() &&
D.getDeclSpec().getParsedSpecifiers() == DeclSpec::PQ_TypeSpecifier)
break;
LLVM_FALLTHROUGH;
case DeclaratorContext::TemplateTypeArgContext:
Error = 10; // Template type argument
break;
case DeclaratorContext::AliasDeclContext:
case DeclaratorContext::AliasTemplateContext:
Error = 12; // Type alias
break;
case DeclaratorContext::TrailingReturnContext:
case DeclaratorContext::TrailingReturnVarContext:
if (!SemaRef.getLangOpts().CPlusPlus14 || !IsCXXAutoType)
Error = 13; // Function return type
IsDeducedReturnType = true;
break;
case DeclaratorContext::ConversionIdContext:
if (!SemaRef.getLangOpts().CPlusPlus14 || !IsCXXAutoType)
Error = 14; // conversion-type-id
IsDeducedReturnType = true;
break;
case DeclaratorContext::FunctionalCastContext:
if (isa<DeducedTemplateSpecializationType>(Deduced))
break;
LLVM_FALLTHROUGH;
case DeclaratorContext::TypeNameContext:
Error = 15; // Generic
break;
case DeclaratorContext::FileContext:
case DeclaratorContext::BlockContext:
case DeclaratorContext::ForContext:
case DeclaratorContext::InitStmtContext:
case DeclaratorContext::ConditionContext:
// FIXME: P0091R3 (erroneously) does not permit class template argument
// deduction in conditions, for-init-statements, and other declarations
// that are not simple-declarations.
break;
case DeclaratorContext::CXXNewContext:
// FIXME: P0091R3 does not permit class template argument deduction here,
// but we follow GCC and allow it anyway.
if (!IsCXXAutoType && !isa<DeducedTemplateSpecializationType>(Deduced))
Error = 17; // 'new' type
break;
case DeclaratorContext::KNRTypeListContext:
Error = 18; // K&R function parameter
break;
}
if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef)
Error = 11;
// In Objective-C it is an error to use 'auto' on a function declarator
// (and everywhere for '__auto_type').
if (D.isFunctionDeclarator() &&
(!SemaRef.getLangOpts().CPlusPlus11 || !IsCXXAutoType))
Error = 13;
bool HaveTrailing = false;
// C++11 [dcl.spec.auto]p2: 'auto' is always fine if the declarator
// contains a trailing return type. That is only legal at the outermost
// level. Check all declarator chunks (outermost first) anyway, to give
// better diagnostics.
// We don't support '__auto_type' with trailing return types.
// FIXME: Should we only do this for 'auto' and not 'decltype(auto)'?
if (SemaRef.getLangOpts().CPlusPlus11 && IsCXXAutoType &&
D.hasTrailingReturnType()) {
HaveTrailing = true;
Error = -1;
}
SourceRange AutoRange = D.getDeclSpec().getTypeSpecTypeLoc();
if (D.getName().getKind() == UnqualifiedIdKind::IK_ConversionFunctionId)
AutoRange = D.getName().getSourceRange();
if (Error != -1) {
unsigned Kind;
if (Auto) {
switch (Auto->getKeyword()) {
case AutoTypeKeyword::Auto: Kind = 0; break;
case AutoTypeKeyword::DecltypeAuto: Kind = 1; break;
case AutoTypeKeyword::GNUAutoType: Kind = 2; break;
}
} else {
assert(isa<DeducedTemplateSpecializationType>(Deduced) &&
"unknown auto type");
Kind = 3;
}
auto *DTST = dyn_cast<DeducedTemplateSpecializationType>(Deduced);
TemplateName TN = DTST ? DTST->getTemplateName() : TemplateName();
SemaRef.Diag(AutoRange.getBegin(), diag::err_auto_not_allowed)
<< Kind << Error << (int)SemaRef.getTemplateNameKindForDiagnostics(TN)
<< QualType(Deduced, 0) << AutoRange;
if (auto *TD = TN.getAsTemplateDecl())
SemaRef.Diag(TD->getLocation(), diag::note_template_decl_here);
T = SemaRef.Context.IntTy;
D.setInvalidType(true);
} else if (!HaveTrailing &&
D.getContext() != DeclaratorContext::LambdaExprContext) {
// If there was a trailing return type, we already got
// warn_cxx98_compat_trailing_return_type in the parser.
SemaRef.Diag(AutoRange.getBegin(),
D.getContext() ==
DeclaratorContext::LambdaExprParameterContext
? diag::warn_cxx11_compat_generic_lambda
: IsDeducedReturnType
? diag::warn_cxx11_compat_deduced_return_type
: diag::warn_cxx98_compat_auto_type_specifier)
<< AutoRange;
}
}
if (SemaRef.getLangOpts().CPlusPlus &&
OwnedTagDecl && OwnedTagDecl->isCompleteDefinition()) {
// Check the contexts where C++ forbids the declaration of a new class
// or enumeration in a type-specifier-seq.
unsigned DiagID = 0;
switch (D.getContext()) {
case DeclaratorContext::TrailingReturnContext:
case DeclaratorContext::TrailingReturnVarContext:
// Class and enumeration definitions are syntactically not allowed in
// trailing return types.
llvm_unreachable("parser should not have allowed this");
break;
case DeclaratorContext::FileContext:
case DeclaratorContext::MemberContext:
case DeclaratorContext::BlockContext:
case DeclaratorContext::ForContext:
case DeclaratorContext::InitStmtContext:
case DeclaratorContext::BlockLiteralContext:
case DeclaratorContext::LambdaExprContext:
// C++11 [dcl.type]p3:
// A type-specifier-seq shall not define a class or enumeration unless
// it appears in the type-id of an alias-declaration (7.1.3) that is not
// the declaration of a template-declaration.
case DeclaratorContext::AliasDeclContext:
break;
case DeclaratorContext::AliasTemplateContext:
DiagID = diag::err_type_defined_in_alias_template;
break;
case DeclaratorContext::TypeNameContext:
case DeclaratorContext::FunctionalCastContext:
case DeclaratorContext::ConversionIdContext:
case DeclaratorContext::TemplateParamContext:
case DeclaratorContext::CXXNewContext:
case DeclaratorContext::CXXCatchContext:
case DeclaratorContext::ObjCCatchContext:
case DeclaratorContext::TemplateArgContext:
case DeclaratorContext::TemplateTypeArgContext:
DiagID = diag::err_type_defined_in_type_specifier;
break;
case DeclaratorContext::PrototypeContext:
case DeclaratorContext::LambdaExprParameterContext:
case DeclaratorContext::ObjCParameterContext:
case DeclaratorContext::ObjCResultContext:
case DeclaratorContext::KNRTypeListContext:
// C++ [dcl.fct]p6:
// Types shall not be defined in return or parameter types.
DiagID = diag::err_type_defined_in_param_type;
break;
case DeclaratorContext::ConditionContext:
// C++ 6.4p2:
// The type-specifier-seq shall not contain typedef and shall not declare
// a new class or enumeration.
DiagID = diag::err_type_defined_in_condition;
break;
}
if (DiagID != 0) {
SemaRef.Diag(OwnedTagDecl->getLocation(), DiagID)
<< SemaRef.Context.getTypeDeclType(OwnedTagDecl);
D.setInvalidType(true);
}
}
assert(!T.isNull() && "This function should not return a null type");
return T;
}
/// Produce an appropriate diagnostic for an ambiguity between a function
/// declarator and a C++ direct-initializer.
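///
/// e.g. (illustrative) at block scope, "T var();" and "T var(T());" are
/// vexing parses: both declare functions even though they were probably
/// meant to declare and initialize a variable.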
static void warnAboutAmbiguousFunction(Sema &S, Declarator &D,
DeclaratorChunk &DeclType, QualType RT) {
const DeclaratorChunk::FunctionTypeInfo &FTI = DeclType.Fun;
assert(FTI.isAmbiguous && "no direct-initializer / function ambiguity");
// If the return type is void there is no ambiguity.
if (RT->isVoidType())
return;
// An initializer for a non-class type can have at most one argument.
if (!RT->isRecordType() && FTI.NumParams > 1)
return;
// An initializer for a reference must have exactly one argument.
if (RT->isReferenceType() && FTI.NumParams != 1)
return;
// Only warn if this declarator is declaring a function at block scope, and
// doesn't have a storage class (such as 'extern') specified.
if (!D.isFunctionDeclarator() ||
D.getFunctionDefinitionKind() != FDK_Declaration ||
!S.CurContext->isFunctionOrMethod() ||
D.getDeclSpec().getStorageClassSpec()
!= DeclSpec::SCS_unspecified)
return;
// Inside a condition, a direct initializer is not permitted. We allow one to
// be parsed in order to give better diagnostics in condition parsing.
if (D.getContext() == DeclaratorContext::ConditionContext)
return;
SourceRange ParenRange(DeclType.Loc, DeclType.EndLoc);
S.Diag(DeclType.Loc,
FTI.NumParams ? diag::warn_parens_disambiguated_as_function_declaration
: diag::warn_empty_parens_are_function_decl)
<< ParenRange;
// If the declaration looks like:
// T var1,
// f();
// and name lookup finds a function named 'f', then the ',' was
// probably intended to be a ';'.
if (!D.isFirstDeclarator() && D.getIdentifier()) {
FullSourceLoc Comma(D.getCommaLoc(), S.SourceMgr);
FullSourceLoc Name(D.getIdentifierLoc(), S.SourceMgr);
if (Comma.getFileID() != Name.getFileID() ||
Comma.getSpellingLineNumber() != Name.getSpellingLineNumber()) {
LookupResult Result(S, D.getIdentifier(), SourceLocation(),
Sema::LookupOrdinaryName);
if (S.LookupName(Result, S.getCurScope()))
S.Diag(D.getCommaLoc(), diag::note_empty_parens_function_call)
<< FixItHint::CreateReplacement(D.getCommaLoc(), ";")
<< D.getIdentifier();
Result.suppressDiagnostics();
}
}
if (FTI.NumParams > 0) {
// For a declaration with parameters, e.g. "T var(T());", suggest adding
// parens around the first parameter to turn the declaration into a
// variable declaration.
SourceRange Range = FTI.Params[0].Param->getSourceRange();
SourceLocation B = Range.getBegin();
SourceLocation E = S.getLocForEndOfToken(Range.getEnd());
// FIXME: Maybe we should suggest adding braces instead of parens
// in C++11 for classes that don't have an initializer_list constructor.
S.Diag(B, diag::note_additional_parens_for_variable_declaration)
<< FixItHint::CreateInsertion(B, "(")
<< FixItHint::CreateInsertion(E, ")");
} else {
// For a declaration without parameters, e.g. "T var();", suggest replacing
// the parens with an initializer to turn the declaration into a variable
// declaration.
const CXXRecordDecl *RD = RT->getAsCXXRecordDecl();
// Empty parens mean value-initialization, and no parens mean
// default initialization. These are equivalent if the default
// constructor is user-provided or if zero-initialization is a
// no-op.
if (RD && RD->hasDefinition() &&
(RD->isEmpty() || RD->hasUserProvidedDefaultConstructor()))
S.Diag(DeclType.Loc, diag::note_empty_parens_default_ctor)
<< FixItHint::CreateRemoval(ParenRange);
else {
std::string Init =
S.getFixItZeroInitializerForType(RT, ParenRange.getBegin());
if (Init.empty() && S.LangOpts.CPlusPlus11)
Init = "{}";
if (!Init.empty())
S.Diag(DeclType.Loc, diag::note_empty_parens_zero_initialize)
<< FixItHint::CreateReplacement(ParenRange, Init);
}
}
}
/// Produce an appropriate diagnostic for a declarator with top-level
/// parentheses.
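///
/// e.g. (illustrative) "int (x);" draws the redundant-parens warning, while
/// "A (::b);" is left alone because the parentheses affect the meaning.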
static void warnAboutRedundantParens(Sema &S, Declarator &D, QualType T) {
DeclaratorChunk &Paren = D.getTypeObject(D.getNumTypeObjects() - 1);
assert(Paren.Kind == DeclaratorChunk::Paren &&
"do not have redundant top-level parentheses");
// This is a syntactic check; we're not interested in cases that arise
// during template instantiation.
if (S.inTemplateInstantiation())
return;
// Check whether this could be intended to be a construction of a temporary
// object in C++ via a function-style cast.
bool CouldBeTemporaryObject =
S.getLangOpts().CPlusPlus && D.isExpressionContext() &&
!D.isInvalidType() && D.getIdentifier() &&
D.getDeclSpec().getParsedSpecifiers() == DeclSpec::PQ_TypeSpecifier &&
(T->isRecordType() || T->isDependentType()) &&
D.getDeclSpec().getTypeQualifiers() == 0 && D.isFirstDeclarator();
bool StartsWithDeclaratorId = true;
for (auto &C : D.type_objects()) {
switch (C.Kind) {
case DeclaratorChunk::Paren:
if (&C == &Paren)
continue;
LLVM_FALLTHROUGH;
case DeclaratorChunk::Pointer:
StartsWithDeclaratorId = false;
continue;
case DeclaratorChunk::Array:
if (!C.Arr.NumElts)
CouldBeTemporaryObject = false;
continue;
case DeclaratorChunk::Reference:
// FIXME: Suppress the warning here if there is no initializer; we're
// going to give an error anyway.
// We assume that something like 'T (&x) = y;' is highly likely to not
// be intended to be a temporary object.
CouldBeTemporaryObject = false;
StartsWithDeclaratorId = false;
continue;
case DeclaratorChunk::Function:
// In a new-type-id, function chunks require parentheses.
if (D.getContext() == DeclaratorContext::CXXNewContext)
return;
// FIXME: "A(f())" deserves a vexing-parse warning, not just a
// redundant-parens warning, but we don't know whether the function
// chunk was syntactically valid as an expression here.
CouldBeTemporaryObject = false;
continue;
case DeclaratorChunk::BlockPointer:
case DeclaratorChunk::MemberPointer:
case DeclaratorChunk::Pipe:
// These cannot appear in expressions.
CouldBeTemporaryObject = false;
StartsWithDeclaratorId = false;
continue;
}
}
// FIXME: If there is an initializer, assume that this is not intended to be
// a construction of a temporary object.
// Check whether the name has already been declared; if not, this is not a
// function-style cast.
if (CouldBeTemporaryObject) {
LookupResult Result(S, D.getIdentifier(), SourceLocation(),
Sema::LookupOrdinaryName);
if (!S.LookupName(Result, S.getCurScope()))
CouldBeTemporaryObject = false;
Result.suppressDiagnostics();
}
SourceRange ParenRange(Paren.Loc, Paren.EndLoc);
if (!CouldBeTemporaryObject) {
// If we have A (::B), the parentheses affect the meaning of the program.
// Suppress the warning in that case. Don't bother looking at the DeclSpec
// here: even (e.g.) "int ::x" is visually ambiguous even though it's
// formally unambiguous.
if (StartsWithDeclaratorId && D.getCXXScopeSpec().isValid()) {
for (NestedNameSpecifier *NNS = D.getCXXScopeSpec().getScopeRep(); NNS;
NNS = NNS->getPrefix()) {
if (NNS->getKind() == NestedNameSpecifier::Global)
return;
}
}
S.Diag(Paren.Loc, diag::warn_redundant_parens_around_declarator)
<< ParenRange << FixItHint::CreateRemoval(Paren.Loc)
<< FixItHint::CreateRemoval(Paren.EndLoc);
return;
}
S.Diag(Paren.Loc, diag::warn_parens_disambiguated_as_variable_declaration)
<< ParenRange << D.getIdentifier();
auto *RD = T->getAsCXXRecordDecl();
if (!RD || !RD->hasDefinition() || RD->hasNonTrivialDestructor())
S.Diag(Paren.Loc, diag::note_raii_guard_add_name)
<< FixItHint::CreateInsertion(Paren.Loc, " varname") << T
<< D.getIdentifier();
// FIXME: A cast to void is probably a better suggestion in cases where it's
// valid (when there is no initializer and we're not in a condition).
S.Diag(D.getBeginLoc(), diag::note_function_style_cast_add_parentheses)
<< FixItHint::CreateInsertion(D.getBeginLoc(), "(")
<< FixItHint::CreateInsertion(S.getLocForEndOfToken(D.getEndLoc()), ")");
S.Diag(Paren.Loc, diag::note_remove_parens_for_variable_declaration)
<< FixItHint::CreateRemoval(Paren.Loc)
<< FixItHint::CreateRemoval(Paren.EndLoc);
}
/// Helper for figuring out the default CC for a function declarator type. If
/// this is the outermost chunk, then we can determine the CC from the
/// declarator context. If not, then this could be either a member function
/// type or normal function type.
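///
/// A sketch of the distinction (illustrative declarations):
///   void (*fp)();    // not the declarator: plain function type
///   void (C::*mp)(); // member pointer: treated as an instance method
/// On targets where instance methods use a different default convention
/// (e.g. thiscall on 32-bit x86 Windows), the second case picks it up.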
static CallingConv getCCForDeclaratorChunk(
Sema &S, Declarator &D, const ParsedAttributesView &AttrList,
const DeclaratorChunk::FunctionTypeInfo &FTI, unsigned ChunkIndex) {
assert(D.getTypeObject(ChunkIndex).Kind == DeclaratorChunk::Function);
// Check for an explicit CC attribute.
for (const ParsedAttr &AL : AttrList) {
switch (AL.getKind()) {
CALLING_CONV_ATTRS_CASELIST : {
// Ignore attributes that don't validate or can't apply to the
// function type. We'll diagnose the failure to apply them in
// handleFunctionTypeAttr.
CallingConv CC;
if (!S.CheckCallingConvAttr(AL, CC) &&
(!FTI.isVariadic || supportsVariadicCall(CC))) {
return CC;
}
break;
}
default:
break;
}
}
bool IsCXXInstanceMethod = false;
if (S.getLangOpts().CPlusPlus) {
// Look inwards through parentheses to see if this chunk will form a
// member pointer type or if we're the declarator. Any type attributes
// between here and there will override the CC we choose here.
unsigned I = ChunkIndex;
bool FoundNonParen = false;
while (I && !FoundNonParen) {
--I;
if (D.getTypeObject(I).Kind != DeclaratorChunk::Paren)
FoundNonParen = true;
}
if (FoundNonParen) {
// If this chunk is not the declarator itself, it forms a regular function
// type unless it is part of a member pointer.
IsCXXInstanceMethod =
D.getTypeObject(I).Kind == DeclaratorChunk::MemberPointer;
} else if (D.getContext() == DeclaratorContext::LambdaExprContext) {
// This can only be a call operator for a lambda, which is an instance
// method.
IsCXXInstanceMethod = true;
} else {
// We're the innermost decl chunk, so must be a function declarator.
assert(D.isFunctionDeclarator());
// If we're inside a record, we're declaring a method, but it could be
// explicitly or implicitly static.
IsCXXInstanceMethod =
D.isFirstDeclarationOfMember() &&
D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_typedef &&
!D.isStaticMember();
}
}
CallingConv CC = S.Context.getDefaultCallingConvention(FTI.isVariadic,
IsCXXInstanceMethod);
// Attribute AT_OpenCLKernel affects the calling convention for SPIR
// and AMDGPU targets, hence it cannot be treated as a calling
// convention attribute. This is the simplest place to infer
// calling convention for OpenCL kernels.
if (S.getLangOpts().OpenCL) {
for (const ParsedAttr &AL : D.getDeclSpec().getAttributes()) {
if (AL.getKind() == ParsedAttr::AT_OpenCLKernel) {
CC = CC_OpenCLKernel;
break;
}
}
}
return CC;
}
namespace {
/// A simple notion of pointer kinds, which matches up with the various
/// pointer declarators.
enum class SimplePointerKind {
Pointer,
BlockPointer,
MemberPointer,
Array,
};
} // end anonymous namespace
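/// Map a NullabilityKind to its C keyword spelling, creating and caching the
/// IdentifierInfo on first use (_Nonnull, _Nullable, _Null_unspecified).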
IdentifierInfo *Sema::getNullabilityKeyword(NullabilityKind nullability) {
switch (nullability) {
case NullabilityKind::NonNull:
if (!Ident__Nonnull)
Ident__Nonnull = PP.getIdentifierInfo("_Nonnull");
return Ident__Nonnull;
case NullabilityKind::Nullable:
if (!Ident__Nullable)
Ident__Nullable = PP.getIdentifierInfo("_Nullable");
return Ident__Nullable;
case NullabilityKind::Unspecified:
if (!Ident__Null_unspecified)
Ident__Null_unspecified = PP.getIdentifierInfo("_Null_unspecified");
return Ident__Null_unspecified;
}
llvm_unreachable("Unknown nullability kind.");
}
/// Retrieve the identifier "NSError".
IdentifierInfo *Sema::getNSErrorIdent() {
if (!Ident_NSError)
Ident_NSError = PP.getIdentifierInfo("NSError");
return Ident_NSError;
}
/// Check whether there is a nullability attribute of any kind in the given
/// attribute list.
static bool hasNullabilityAttr(const ParsedAttributesView &attrs) {
for (const ParsedAttr &AL : attrs) {
if (AL.getKind() == ParsedAttr::AT_TypeNonNull ||
AL.getKind() == ParsedAttr::AT_TypeNullable ||
AL.getKind() == ParsedAttr::AT_TypeNullUnspecified)
return true;
}
return false;
}
namespace {
/// Describes the kind of a pointer a declarator describes.
enum class PointerDeclaratorKind {
// Not a pointer.
NonPointer,
// Single-level pointer.
SingleLevelPointer,
// Multi-level pointer (of any pointer kind).
MultiLevelPointer,
// CFFooRef*
MaybePointerToCFRef,
// CFErrorRef*
CFErrorRefPointer,
// NSError**
NSErrorPointerPointer,
};
/// Describes a declarator chunk wrapping a pointer that marks inference as
/// unexpected.
// These values must be kept in sync with diagnostics.
enum class PointerWrappingDeclaratorKind {
/// Pointer is top-level.
None = -1,
/// Pointer is an array element.
Array = 0,
/// Pointer is the referent type of a C++ reference.
Reference = 1
};
} // end anonymous namespace
/// Classify the given declarator, whose type-specifier is \c type, based on
/// what kind of pointer it refers to.
///
/// This is used to determine the default nullability.
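///
/// Illustrative classifications (hypothetical declarations):
///   int *p;          -> SingleLevelPointer
///   int **pp;        -> MaybePointerToCFRef (exactly two pointer levels)
///   NSError **err;   -> NSErrorPointerPointer (Objective-C)
///   CFErrorRef *err; -> CFErrorRefPointer (via the NSError bridge)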
static PointerDeclaratorKind
classifyPointerDeclarator(Sema &S, QualType type, Declarator &declarator,
PointerWrappingDeclaratorKind &wrappingKind) {
unsigned numNormalPointers = 0;
// For any dependent type, we consider it a non-pointer.
if (type->isDependentType())
return PointerDeclaratorKind::NonPointer;
// Look through the declarator chunks to identify pointers.
for (unsigned i = 0, n = declarator.getNumTypeObjects(); i != n; ++i) {
DeclaratorChunk &chunk = declarator.getTypeObject(i);
switch (chunk.Kind) {
case DeclaratorChunk::Array:
if (numNormalPointers == 0)
wrappingKind = PointerWrappingDeclaratorKind::Array;
break;
case DeclaratorChunk::Function:
case DeclaratorChunk::Pipe:
break;
case DeclaratorChunk::BlockPointer:
case DeclaratorChunk::MemberPointer:
return numNormalPointers > 0 ? PointerDeclaratorKind::MultiLevelPointer
: PointerDeclaratorKind::SingleLevelPointer;
case DeclaratorChunk::Paren:
break;
case DeclaratorChunk::Reference:
if (numNormalPointers == 0)
wrappingKind = PointerWrappingDeclaratorKind::Reference;
break;
case DeclaratorChunk::Pointer:
++numNormalPointers;
if (numNormalPointers > 2)
return PointerDeclaratorKind::MultiLevelPointer;
break;
}
}
// Then, dig into the type specifier itself.
unsigned numTypeSpecifierPointers = 0;
do {
// Decompose normal pointers.
if (auto ptrType = type->getAs<PointerType>()) {
++numNormalPointers;
if (numNormalPointers > 2)
return PointerDeclaratorKind::MultiLevelPointer;
type = ptrType->getPointeeType();
++numTypeSpecifierPointers;
continue;
}
// Decompose block pointers.
if (type->getAs<BlockPointerType>()) {
return numNormalPointers > 0 ? PointerDeclaratorKind::MultiLevelPointer
: PointerDeclaratorKind::SingleLevelPointer;
}
// Decompose member pointers.
if (type->getAs<MemberPointerType>()) {
return numNormalPointers > 0 ? PointerDeclaratorKind::MultiLevelPointer
: PointerDeclaratorKind::SingleLevelPointer;
}
// Look at Objective-C object pointers.
if (auto objcObjectPtr = type->getAs<ObjCObjectPointerType>()) {
++numNormalPointers;
++numTypeSpecifierPointers;
// If this is NSError**, report that.
if (auto objcClassDecl = objcObjectPtr->getInterfaceDecl()) {
if (objcClassDecl->getIdentifier() == S.getNSErrorIdent() &&
numNormalPointers == 2 && numTypeSpecifierPointers < 2) {
return PointerDeclaratorKind::NSErrorPointerPointer;
}
}
break;
}
// Look at Objective-C class types.
if (auto objcClass = type->getAs<ObjCInterfaceType>()) {
if (objcClass->getInterface()->getIdentifier() == S.getNSErrorIdent()) {
if (numNormalPointers == 2 && numTypeSpecifierPointers < 2)
return PointerDeclaratorKind::NSErrorPointerPointer;
}
break;
}
// If at this point we haven't seen a pointer, we won't see one.
if (numNormalPointers == 0)
return PointerDeclaratorKind::NonPointer;
if (auto recordType = type->getAs<RecordType>()) {
RecordDecl *recordDecl = recordType->getDecl();
bool isCFError = false;
if (S.CFError) {
// If we already know about CFError, test it directly.
isCFError = (S.CFError == recordDecl);
} else {
// Check whether this is CFError, which we identify based on its bridge
// to NSError. CFErrorRef used to be declared with "objc_bridge" but is
// now declared with "objc_bridge_mutable", so look for either one of
// the two attributes.
if (recordDecl->getTagKind() == TTK_Struct && numNormalPointers > 0) {
IdentifierInfo *bridgedType = nullptr;
if (auto bridgeAttr = recordDecl->getAttr<ObjCBridgeAttr>())
bridgedType = bridgeAttr->getBridgedType();
else if (auto bridgeAttr =
recordDecl->getAttr<ObjCBridgeMutableAttr>())
bridgedType = bridgeAttr->getBridgedType();
if (bridgedType == S.getNSErrorIdent()) {
S.CFError = recordDecl;
isCFError = true;
}
}
}
// If this is CFErrorRef*, report it as such.
if (isCFError && numNormalPointers == 2 && numTypeSpecifierPointers < 2) {
return PointerDeclaratorKind::CFErrorRefPointer;
}
break;
}
break;
} while (true);
switch (numNormalPointers) {
case 0:
return PointerDeclaratorKind::NonPointer;
case 1:
return PointerDeclaratorKind::SingleLevelPointer;
case 2:
return PointerDeclaratorKind::MaybePointerToCFRef;
default:
return PointerDeclaratorKind::MultiLevelPointer;
}
}
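/// Determine which file nullability-completeness checking should be charged
/// to for a pointer written at \p loc. Returns an invalid FileID inside
/// function, method, or closure contexts, for the main file, and for system
/// headers when system warnings are suppressed.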
static FileID getNullabilityCompletenessCheckFileID(Sema &S,
SourceLocation loc) {
// If we're anywhere in a function, method, or closure context, don't perform
// completeness checks.
for (DeclContext *ctx = S.CurContext; ctx; ctx = ctx->getParent()) {
if (ctx->isFunctionOrMethod())
return FileID();
if (ctx->isFileContext())
break;
}
// We only care about the expansion location.
loc = S.SourceMgr.getExpansionLoc(loc);
FileID file = S.SourceMgr.getFileID(loc);
if (file.isInvalid())
return FileID();
// Retrieve file information.
bool invalid = false;
const SrcMgr::SLocEntry &sloc = S.SourceMgr.getSLocEntry(file, &invalid);
if (invalid || !sloc.isFile())
return FileID();
// We don't want to perform completeness checks on the main file or in
// system headers.
const SrcMgr::FileInfo &fileInfo = sloc.getFile();
if (fileInfo.getIncludeLoc().isInvalid())
return FileID();
if (fileInfo.getFileCharacteristic() != SrcMgr::C_User &&
S.Diags.getSuppressSystemWarnings()) {
return FileID();
}
return file;
}
/// Creates a fix-it to insert a C-style nullability keyword at \p pointerLoc,
/// taking into account whitespace before and after.
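///
/// e.g. (illustrative) "int *ptr" becomes "int * _Nullable ptr"; the spaces
/// around the inserted keyword are dropped when the neighboring characters
/// already separate it (existing whitespace or '[' / ']').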
static void fixItNullability(Sema &S, DiagnosticBuilder &Diag,
SourceLocation PointerLoc,
NullabilityKind Nullability) {
assert(PointerLoc.isValid());
if (PointerLoc.isMacroID())
return;
SourceLocation FixItLoc = S.getLocForEndOfToken(PointerLoc);
if (!FixItLoc.isValid() || FixItLoc == PointerLoc)
return;
const char *NextChar = S.SourceMgr.getCharacterData(FixItLoc);
if (!NextChar)
return;
SmallString<32> InsertionTextBuf{" "};
InsertionTextBuf += getNullabilitySpelling(Nullability);
InsertionTextBuf += " ";
StringRef InsertionText = InsertionTextBuf.str();
if (isWhitespace(*NextChar)) {
InsertionText = InsertionText.drop_back();
} else if (NextChar[-1] == '[') {
if (NextChar[0] == ']')
InsertionText = InsertionText.drop_back().drop_front();
else
InsertionText = InsertionText.drop_front();
} else if (!isIdentifierBody(NextChar[0], /*allow dollar*/true) &&
!isIdentifierBody(NextChar[-1], /*allow dollar*/true)) {
InsertionText = InsertionText.drop_back().drop_front();
}
Diag << FixItHint::CreateInsertion(FixItLoc, InsertionText);
}
static void emitNullabilityConsistencyWarning(Sema &S,
SimplePointerKind PointerKind,
SourceLocation PointerLoc,
SourceLocation PointerEndLoc) {
assert(PointerLoc.isValid());
if (PointerKind == SimplePointerKind::Array) {
S.Diag(PointerLoc, diag::warn_nullability_missing_array);
} else {
S.Diag(PointerLoc, diag::warn_nullability_missing)
<< static_cast<unsigned>(PointerKind);
}
auto FixItLoc = PointerEndLoc.isValid() ? PointerEndLoc : PointerLoc;
if (FixItLoc.isMacroID())
return;
auto addFixIt = [&](NullabilityKind Nullability) {
auto Diag = S.Diag(FixItLoc, diag::note_nullability_fix_it);
Diag << static_cast<unsigned>(Nullability);
Diag << static_cast<unsigned>(PointerKind);
fixItNullability(S, Diag, FixItLoc, Nullability);
};
addFixIt(NullabilityKind::Nullable);
addFixIt(NullabilityKind::NonNull);
}
/// Complains about missing nullability if the file containing \p pointerLoc
/// has other uses of nullability (either the keywords or the \c assume_nonnull
/// pragma).
///
/// If the file has \e not seen other uses of nullability, this particular
/// pointer is saved for possible later diagnosis. See recordNullabilitySeen().
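///
/// For example (illustrative), once a header has used _Nonnull anywhere, a
/// later bare "int *p" parameter in that header is diagnosed, with fix-its
/// offering both the _Nullable and _Nonnull spellings.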
static void
checkNullabilityConsistency(Sema &S, SimplePointerKind pointerKind,
SourceLocation pointerLoc,
SourceLocation pointerEndLoc = SourceLocation()) {
// Determine which file we're performing consistency checking for.
FileID file = getNullabilityCompletenessCheckFileID(S, pointerLoc);
if (file.isInvalid())
return;
// If we haven't seen any type nullability in this file, we won't warn now
// about anything.
FileNullability &fileNullability = S.NullabilityMap[file];
if (!fileNullability.SawTypeNullability) {
// If this is the first pointer declarator in the file, and the appropriate
// warning is on, record it in case we need to diagnose it retroactively.
diag::kind diagKind;
if (pointerKind == SimplePointerKind::Array)
diagKind = diag::warn_nullability_missing_array;
else
diagKind = diag::warn_nullability_missing;
if (fileNullability.PointerLoc.isInvalid() &&
!S.Context.getDiagnostics().isIgnored(diagKind, pointerLoc)) {
fileNullability.PointerLoc = pointerLoc;
fileNullability.PointerEndLoc = pointerEndLoc;
fileNullability.PointerKind = static_cast<unsigned>(pointerKind);
}
return;
}
// Complain about missing nullability.
emitNullabilityConsistencyWarning(S, pointerKind, pointerLoc, pointerEndLoc);
}
/// Marks that a nullability feature has been used in the file containing
/// \p loc.
///
/// If this file already had pointer types in it that were missing nullability,
/// the first such instance is retroactively diagnosed.
///
/// \sa checkNullabilityConsistency
static void recordNullabilitySeen(Sema &S, SourceLocation loc) {
FileID file = getNullabilityCompletenessCheckFileID(S, loc);
if (file.isInvalid())
return;
FileNullability &fileNullability = S.NullabilityMap[file];
if (fileNullability.SawTypeNullability)
return;
fileNullability.SawTypeNullability = true;
// If we haven't seen any type nullability before, now we have. Retroactively
// diagnose the first unannotated pointer, if there was one.
if (fileNullability.PointerLoc.isInvalid())
return;
auto kind = static_cast<SimplePointerKind>(fileNullability.PointerKind);
emitNullabilityConsistencyWarning(S, kind, fileNullability.PointerLoc,
fileNullability.PointerEndLoc);
}
/// Returns true if any of the declarator chunks before \p endIndex include a
/// level of indirection: array, pointer, reference, or pointer-to-member.
///
/// Because declarator chunks are stored in outer-to-inner order, testing
/// every chunk before \p endIndex is testing all chunks that embed the current
/// chunk as part of their type.
///
/// It is legal to pass the result of Declarator::getNumTypeObjects() as the
/// end index, in which case all chunks are tested.
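///
/// e.g. (illustrative) for the declarator in "void f(int (*p)[4])", the
/// array chunk is embedded in a pointer chunk, so querying at the array's
/// index returns true.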
static bool hasOuterPointerLikeChunk(const Declarator &D, unsigned endIndex) {
unsigned i = endIndex;
while (i != 0) {
// Walk outwards along the declarator chunks.
--i;
const DeclaratorChunk &DC = D.getTypeObject(i);
switch (DC.Kind) {
case DeclaratorChunk::Paren:
break;
case DeclaratorChunk::Array:
case DeclaratorChunk::Pointer:
case DeclaratorChunk::Reference:
case DeclaratorChunk::MemberPointer:
return true;
case DeclaratorChunk::Function:
case DeclaratorChunk::BlockPointer:
case DeclaratorChunk::Pipe:
// These are invalid anyway, so just ignore.
break;
}
}
return false;
}
static bool IsNoDerefableChunk(DeclaratorChunk Chunk) {
return (Chunk.Kind == DeclaratorChunk::Pointer ||
Chunk.Kind == DeclaratorChunk::Array);
}
template<typename AttrT>
static AttrT *createSimpleAttr(ASTContext &Ctx, ParsedAttr &Attr) {
Attr.setUsedAsTypeAttr();
return ::new (Ctx)
AttrT(Attr.getRange(), Ctx, Attr.getAttributeSpellingListIndex());
}
static Attr *createNullabilityAttr(ASTContext &Ctx, ParsedAttr &Attr,
NullabilityKind NK) {
switch (NK) {
case NullabilityKind::NonNull:
return createSimpleAttr<TypeNonNullAttr>(Ctx, Attr);
case NullabilityKind::Nullable:
return createSimpleAttr<TypeNullableAttr>(Ctx, Attr);
case NullabilityKind::Unspecified:
return createSimpleAttr<TypeNullUnspecifiedAttr>(Ctx, Attr);
}
llvm_unreachable("unknown NullabilityKind");
}
// Diagnose whether this is a case with multiple address spaces.
// Returns true if this is an invalid case.
// ISO/IEC TR 18037 S5.3 (amending C99 6.7.3): "No type shall be qualified
// by qualifiers for two or more different address spaces."
static bool DiagnoseMultipleAddrSpaceAttributes(Sema &S, LangAS ASOld,
LangAS ASNew,
SourceLocation AttrLoc) {
if (ASOld != LangAS::Default) {
if (ASOld != ASNew) {
S.Diag(AttrLoc, diag::err_attribute_address_multiple_qualifiers);
return true;
}
// Emit a warning if they are identical; it's likely unintended.
S.Diag(AttrLoc,
diag::warn_attribute_address_multiple_identical_qualifiers);
}
return false;
}
static TypeSourceInfo *
GetTypeSourceInfoForDeclarator(TypeProcessingState &State,
QualType T, TypeSourceInfo *ReturnTypeInfo);
static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
QualType declSpecType,
TypeSourceInfo *TInfo) {
// The TypeSourceInfo that this function returns will not be a null type.
// If there is an error, this function will fill in a dummy type as fallback.
QualType T = declSpecType;
Declarator &D = state.getDeclarator();
Sema &S = state.getSema();
ASTContext &Context = S.Context;
const LangOptions &LangOpts = S.getLangOpts();
// The name we're declaring, if any.
DeclarationName Name;
if (D.getIdentifier())
Name = D.getIdentifier();
// Does this declaration declare a typedef-name?
bool IsTypedefName =
D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef ||
D.getContext() == DeclaratorContext::AliasDeclContext ||
D.getContext() == DeclaratorContext::AliasTemplateContext;
// Does T refer to a function type with a cv-qualifier or a ref-qualifier?
bool IsQualifiedFunction = T->isFunctionProtoType() &&
(!T->castAs<FunctionProtoType>()->getMethodQuals().empty() ||
T->castAs<FunctionProtoType>()->getRefQualifier() != RQ_None);
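// e.g. (illustrative) a member declared "void f() const &;" has both a
// cv-qualifier and a ref-qualifier on its function type.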
// If T is 'decltype(auto)', the only declarators we can have are parens
// and at most one function declarator if this is a function declaration.
// If T is a deduced class template specialization type, we can have no
// declarator chunks at all.
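// e.g. (illustrative) "decltype(auto) *p = ...;" and, under class template
// argument deduction, "std::pair *p = ...;" are both rejected below.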
if (auto *DT = T->getAs<DeducedType>()) {
const AutoType *AT = T->getAs<AutoType>();
bool IsClassTemplateDeduction = isa<DeducedTemplateSpecializationType>(DT);
if ((AT && AT->isDecltypeAuto()) || IsClassTemplateDeduction) {
for (unsigned I = 0, E = D.getNumTypeObjects(); I != E; ++I) {
unsigned Index = E - I - 1;
DeclaratorChunk &DeclChunk = D.getTypeObject(Index);
unsigned DiagId = IsClassTemplateDeduction
? diag::err_deduced_class_template_compound_type
: diag::err_decltype_auto_compound_type;
unsigned DiagKind = 0;
switch (DeclChunk.Kind) {
case DeclaratorChunk::Paren:
// FIXME: Rejecting this is a little silly.
if (IsClassTemplateDeduction) {
DiagKind = 4;
break;
}
continue;
case DeclaratorChunk::Function: {
if (IsClassTemplateDeduction) {
DiagKind = 3;
break;
}
unsigned FnIndex;
if (D.isFunctionDeclarationContext() &&
D.isFunctionDeclarator(FnIndex) && FnIndex == Index)
continue;
DiagId = diag::err_decltype_auto_function_declarator_not_declaration;
break;
}
case DeclaratorChunk::Pointer:
case DeclaratorChunk::BlockPointer:
case DeclaratorChunk::MemberPointer:
DiagKind = 0;
break;
case DeclaratorChunk::Reference:
DiagKind = 1;
break;
case DeclaratorChunk::Array:
DiagKind = 2;
break;
case DeclaratorChunk::Pipe:
break;
}
S.Diag(DeclChunk.Loc, DiagId) << DiagKind;
D.setInvalidType(true);
break;
}
}
}
// Determine whether we should infer _Nonnull on pointer types.
Optional<NullabilityKind> inferNullability;
bool inferNullabilityCS = false;
bool inferNullabilityInnerOnly = false;
bool inferNullabilityInnerOnlyComplete = false;
// Are we in an assume-nonnull region?
bool inAssumeNonNullRegion = false;
SourceLocation assumeNonNullLoc = S.PP.getPragmaAssumeNonNullLoc();
if (assumeNonNullLoc.isValid()) {
inAssumeNonNullRegion = true;
recordNullabilitySeen(S, assumeNonNullLoc);
}
// Whether to complain about missing nullability specifiers or not.
enum {
/// Never complain.
CAMN_No,
/// Complain on the inner pointers (but not the outermost
/// pointer).
CAMN_InnerPointers,
/// Complain about any pointers that don't have nullability
/// specified or inferred.
CAMN_Yes
} complainAboutMissingNullability = CAMN_No;
unsigned NumPointersRemaining = 0;
auto complainAboutInferringWithinChunk = PointerWrappingDeclaratorKind::None;
if (IsTypedefName) {
// For typedefs, we do not infer any nullability (the default),
// and we only complain about missing nullability specifiers on
// inner pointers.
complainAboutMissingNullability = CAMN_InnerPointers;
if (T->canHaveNullability(/*ResultIfUnknown*/false) &&
!T->getNullability(S.Context)) {
// Note that we allow but don't require nullability on dependent types.
++NumPointersRemaining;
}
for (unsigned i = 0, n = D.getNumTypeObjects(); i != n; ++i) {
DeclaratorChunk &chunk = D.getTypeObject(i);
switch (chunk.Kind) {
case DeclaratorChunk::Array:
case DeclaratorChunk::Function:
case DeclaratorChunk::Pipe:
break;
case DeclaratorChunk::BlockPointer:
case DeclaratorChunk::MemberPointer:
++NumPointersRemaining;
break;
case DeclaratorChunk::Paren:
case DeclaratorChunk::Reference:
continue;
case DeclaratorChunk::Pointer:
++NumPointersRemaining;
continue;
}
}
} else {
bool isFunctionOrMethod = false;
switch (auto context = state.getDeclarator().getContext()) {
case DeclaratorContext::ObjCParameterContext:
case DeclaratorContext::ObjCResultContext:
case DeclaratorContext::PrototypeContext:
case DeclaratorContext::TrailingReturnContext:
case DeclaratorContext::TrailingReturnVarContext:
isFunctionOrMethod = true;
LLVM_FALLTHROUGH;
case DeclaratorContext::MemberContext:
if (state.getDeclarator().isObjCIvar() && !isFunctionOrMethod) {
complainAboutMissingNullability = CAMN_No;
break;
}
// Weak properties are inferred to be nullable.
if (state.getDeclarator().isObjCWeakProperty() && inAssumeNonNullRegion) {
inferNullability = NullabilityKind::Nullable;
break;
}
LLVM_FALLTHROUGH;
case DeclaratorContext::FileContext:
case DeclaratorContext::KNRTypeListContext: {
complainAboutMissingNullability = CAMN_Yes;
// Nullability inference depends on the type and declarator.
auto wrappingKind = PointerWrappingDeclaratorKind::None;
switch (classifyPointerDeclarator(S, T, D, wrappingKind)) {
case PointerDeclaratorKind::NonPointer:
case PointerDeclaratorKind::MultiLevelPointer:
// Cannot infer nullability.
break;
case PointerDeclaratorKind::SingleLevelPointer:
// Infer _Nonnull if we are in an assumes-nonnull region.
if (inAssumeNonNullRegion) {
complainAboutInferringWithinChunk = wrappingKind;
inferNullability = NullabilityKind::NonNull;
inferNullabilityCS =
(context == DeclaratorContext::ObjCParameterContext ||
context == DeclaratorContext::ObjCResultContext);
}
break;
case PointerDeclaratorKind::CFErrorRefPointer:
case PointerDeclaratorKind::NSErrorPointerPointer:
// Within a function or method signature, infer _Nullable at both
// levels.
if (isFunctionOrMethod && inAssumeNonNullRegion)
inferNullability = NullabilityKind::Nullable;
break;
case PointerDeclaratorKind::MaybePointerToCFRef:
if (isFunctionOrMethod) {
// On pointer-to-pointer parameters marked cf_returns_retained or
// cf_returns_not_retained, if the outer pointer is explicit then
// infer the inner pointer as _Nullable.
auto hasCFReturnsAttr =
[](const ParsedAttributesView &AttrList) -> bool {
return AttrList.hasAttribute(ParsedAttr::AT_CFReturnsRetained) ||
AttrList.hasAttribute(ParsedAttr::AT_CFReturnsNotRetained);
};
if (const auto *InnermostChunk = D.getInnermostNonParenChunk()) {
if (hasCFReturnsAttr(D.getAttributes()) ||
hasCFReturnsAttr(InnermostChunk->getAttrs()) ||
hasCFReturnsAttr(D.getDeclSpec().getAttributes())) {
inferNullability = NullabilityKind::Nullable;
inferNullabilityInnerOnly = true;
}
}
}
break;
}
break;
}
case DeclaratorContext::ConversionIdContext:
complainAboutMissingNullability = CAMN_Yes;
break;
case DeclaratorContext::AliasDeclContext:
case DeclaratorContext::AliasTemplateContext:
case DeclaratorContext::BlockContext:
case DeclaratorContext::BlockLiteralContext:
case DeclaratorContext::ConditionContext:
case DeclaratorContext::CXXCatchContext:
case DeclaratorContext::CXXNewContext:
case DeclaratorContext::ForContext:
case DeclaratorContext::InitStmtContext:
case DeclaratorContext::LambdaExprContext:
case DeclaratorContext::LambdaExprParameterContext:
case DeclaratorContext::ObjCCatchContext:
case DeclaratorContext::TemplateParamContext:
case DeclaratorContext::TemplateArgContext:
case DeclaratorContext::TemplateTypeArgContext:
case DeclaratorContext::TypeNameContext:
case DeclaratorContext::FunctionalCastContext:
// Don't infer in these contexts.
break;
}
}
// Local function that returns true if its argument looks like a va_list.
auto isVaList = [&S](QualType T) -> bool {
auto *typedefTy = T->getAs<TypedefType>();
if (!typedefTy)
return false;
TypedefDecl *vaListTypedef = S.Context.getBuiltinVaListDecl();
do {
if (typedefTy->getDecl() == vaListTypedef)
return true;
if (auto *name = typedefTy->getDecl()->getIdentifier())
if (name->isStr("va_list"))
return true;
typedefTy = typedefTy->desugar()->getAs<TypedefType>();
} while (typedefTy);
return false;
};
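// e.g. (illustrative) this matches the builtin __builtin_va_list typedef
// directly, and also any typedef in the desugaring chain whose name is
// literally "va_list".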
// Local function that checks the nullability for a given pointer declarator.
// Returns true if _Nonnull was inferred.
auto inferPointerNullability =
[&](SimplePointerKind pointerKind, SourceLocation pointerLoc,
SourceLocation pointerEndLoc,
ParsedAttributesView &attrs, AttributePool &Pool) -> ParsedAttr * {
// We've seen a pointer.
if (NumPointersRemaining > 0)
--NumPointersRemaining;
// If a nullability attribute is present, there's nothing to do.
if (hasNullabilityAttr(attrs))
return nullptr;
// If we're supposed to infer nullability, do so now.
if (inferNullability && !inferNullabilityInnerOnlyComplete) {
ParsedAttr::Syntax syntax = inferNullabilityCS
? ParsedAttr::AS_ContextSensitiveKeyword
: ParsedAttr::AS_Keyword;
ParsedAttr *nullabilityAttr = Pool.create(
S.getNullabilityKeyword(*inferNullability), SourceRange(pointerLoc),
nullptr, SourceLocation(), nullptr, 0, syntax);
attrs.addAtEnd(nullabilityAttr);
if (inferNullabilityCS) {
state.getDeclarator().getMutableDeclSpec().getObjCQualifiers()
->setObjCDeclQualifier(ObjCDeclSpec::DQ_CSNullability);
}
if (pointerLoc.isValid() &&
complainAboutInferringWithinChunk !=
PointerWrappingDeclaratorKind::None) {
auto Diag =
S.Diag(pointerLoc, diag::warn_nullability_inferred_on_nested_type);
Diag << static_cast<int>(complainAboutInferringWithinChunk);
fixItNullability(S, Diag, pointerLoc, NullabilityKind::NonNull);
}
if (inferNullabilityInnerOnly)
inferNullabilityInnerOnlyComplete = true;
return nullabilityAttr;
}
// If we're supposed to complain about missing nullability, do so
// now if it's truly missing.
switch (complainAboutMissingNullability) {
case CAMN_No:
break;
case CAMN_InnerPointers:
if (NumPointersRemaining == 0)
break;
LLVM_FALLTHROUGH;
case CAMN_Yes:
checkNullabilityConsistency(S, pointerKind, pointerLoc, pointerEndLoc);
}
return nullptr;
};
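// e.g. (illustrative) between "#pragma clang assume_nonnull begin" and
// "#pragma clang assume_nonnull end", a file-scope "int *g;" gets _Nonnull
// inferred here; outside such a region it is only checked for consistency.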
// If the type itself could have nullability but does not, infer pointer
// nullability and perform consistency checking.
if (S.CodeSynthesisContexts.empty()) {
if (T->canHaveNullability(/*ResultIfUnknown*/false) &&
!T->getNullability(S.Context)) {
if (isVaList(T)) {
// Record that we've seen a pointer, but do nothing else.
if (NumPointersRemaining > 0)
--NumPointersRemaining;
} else {
SimplePointerKind pointerKind = SimplePointerKind::Pointer;
if (T->isBlockPointerType())
pointerKind = SimplePointerKind::BlockPointer;
else if (T->isMemberPointerType())
pointerKind = SimplePointerKind::MemberPointer;
if (auto *attr = inferPointerNullability(
pointerKind, D.getDeclSpec().getTypeSpecTypeLoc(),
D.getDeclSpec().getEndLoc(),
D.getMutableDeclSpec().getAttributes(),
D.getMutableDeclSpec().getAttributePool())) {
T = state.getAttributedType(
createNullabilityAttr(Context, *attr, *inferNullability), T, T);
}
}
}
if (complainAboutMissingNullability == CAMN_Yes &&
T->isArrayType() && !T->getNullability(S.Context) && !isVaList(T) &&
D.isPrototypeContext() &&
!hasOuterPointerLikeChunk(D, D.getNumTypeObjects())) {
checkNullabilityConsistency(S, SimplePointerKind::Array,
D.getDeclSpec().getTypeSpecTypeLoc());
}
}
bool ExpectNoDerefChunk =
state.getCurrentAttributes().hasAttribute(ParsedAttr::AT_NoDeref);
// Walk the DeclTypeInfo, building the recursive type as we go.
// DeclTypeInfos are ordered from the identifier out, which is
// opposite of what we want :).
for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) {
unsigned chunkIndex = e - i - 1;
state.setCurrentChunkIndex(chunkIndex);
DeclaratorChunk &DeclType = D.getTypeObject(chunkIndex);
IsQualifiedFunction &= DeclType.Kind == DeclaratorChunk::Paren;
switch (DeclType.Kind) {
case DeclaratorChunk::Paren:
if (i == 0)
warnAboutRedundantParens(S, D, T);
T = S.BuildParenType(T);
break;
case DeclaratorChunk::BlockPointer:
// If blocks are disabled, emit an error.
if (!LangOpts.Blocks)
S.Diag(DeclType.Loc, diag::err_blocks_disable) << LangOpts.OpenCL;
// Handle pointer nullability.
inferPointerNullability(SimplePointerKind::BlockPointer, DeclType.Loc,
DeclType.EndLoc, DeclType.getAttrs(),
state.getDeclarator().getAttributePool());
T = S.BuildBlockPointerType(T, D.getIdentifierLoc(), Name);
if (DeclType.Cls.TypeQuals || LangOpts.OpenCL) {
// OpenCL v2.0, s6.12.5 - Block variable declarations are implicitly
// qualified with const.
if (LangOpts.OpenCL)
DeclType.Cls.TypeQuals |= DeclSpec::TQ_const;
T = S.BuildQualifiedType(T, DeclType.Loc, DeclType.Cls.TypeQuals);
}
break;
case DeclaratorChunk::Pointer:
// Verify that we're not building a pointer to pointer to function with
// exception specification.
if (LangOpts.CPlusPlus && S.CheckDistantExceptionSpec(T)) {
S.Diag(D.getIdentifierLoc(), diag::err_distant_exception_spec);
D.setInvalidType(true);
// Build the type anyway.
}
// Handle pointer nullability
inferPointerNullability(SimplePointerKind::Pointer, DeclType.Loc,
DeclType.EndLoc, DeclType.getAttrs(),
state.getDeclarator().getAttributePool());
if (LangOpts.ObjC && T->getAs<ObjCObjectType>()) {
T = Context.getObjCObjectPointerType(T);
if (DeclType.Ptr.TypeQuals)
T = S.BuildQualifiedType(T, DeclType.Loc, DeclType.Ptr.TypeQuals);
break;
}
// OpenCL v2.0 s6.9b - Pointer to image/sampler cannot be used.
// OpenCL v2.0 s6.13.16.1 - Pointer to pipe cannot be used.
// OpenCL v2.0 s6.12.5 - Pointers to Blocks are not allowed.
if (LangOpts.OpenCL) {
if (T->isImageType() || T->isSamplerT() || T->isPipeType() ||
T->isBlockPointerType()) {
S.Diag(D.getIdentifierLoc(), diag::err_opencl_pointer_to_type) << T;
D.setInvalidType(true);
}
}
T = S.BuildPointerType(T, DeclType.Loc, Name);
if (DeclType.Ptr.TypeQuals)
T = S.BuildQualifiedType(T, DeclType.Loc, DeclType.Ptr.TypeQuals);
break;
case DeclaratorChunk::Reference: {
// Verify that we're not building a reference to pointer to function with
// exception specification.
if (LangOpts.CPlusPlus && S.CheckDistantExceptionSpec(T)) {
S.Diag(D.getIdentifierLoc(), diag::err_distant_exception_spec);
D.setInvalidType(true);
// Build the type anyway.
}
T = S.BuildReferenceType(T, DeclType.Ref.LValueRef, DeclType.Loc, Name);
if (DeclType.Ref.HasRestrict)
T = S.BuildQualifiedType(T, DeclType.Loc, Qualifiers::Restrict);
break;
}
case DeclaratorChunk::Array: {
// Verify that we're not building an array of pointers to function with
// exception specification.
if (LangOpts.CPlusPlus && S.CheckDistantExceptionSpec(T)) {
S.Diag(D.getIdentifierLoc(), diag::err_distant_exception_spec);
D.setInvalidType(true);
// Build the type anyway.
}
DeclaratorChunk::ArrayTypeInfo &ATI = DeclType.Arr;
Expr *ArraySize = static_cast<Expr*>(ATI.NumElts);
ArrayType::ArraySizeModifier ASM;
if (ATI.isStar)
ASM = ArrayType::Star;
else if (ATI.hasStatic)
ASM = ArrayType::Static;
else
ASM = ArrayType::Normal;
if (ASM == ArrayType::Star && !D.isPrototypeContext()) {
// FIXME: This check isn't quite right: it allows star in prototypes
// for function definitions, and disallows some edge cases detailed
// in http://gcc.gnu.org/ml/gcc-patches/2009-02/msg00133.html
S.Diag(DeclType.Loc, diag::err_array_star_outside_prototype);
ASM = ArrayType::Normal;
D.setInvalidType(true);
}
// C99 6.7.5.2p1: The optional type qualifiers and the keyword static
// shall appear only in a declaration of a function parameter with an
// array type, ...
if (ASM == ArrayType::Static || ATI.TypeQuals) {
if (!(D.isPrototypeContext() ||
D.getContext() == DeclaratorContext::KNRTypeListContext)) {
S.Diag(DeclType.Loc, diag::err_array_static_outside_prototype) <<
(ASM == ArrayType::Static ? "'static'" : "type qualifier");
// Remove the 'static' and the type qualifiers.
if (ASM == ArrayType::Static)
ASM = ArrayType::Normal;
ATI.TypeQuals = 0;
D.setInvalidType(true);
}
// C99 6.7.5.2p1: ... and then only in the outermost array type
// derivation.
if (hasOuterPointerLikeChunk(D, chunkIndex)) {
S.Diag(DeclType.Loc, diag::err_array_static_not_outermost) <<
(ASM == ArrayType::Static ? "'static'" : "type qualifier");
if (ASM == ArrayType::Static)
ASM = ArrayType::Normal;
ATI.TypeQuals = 0;
D.setInvalidType(true);
}
}
const AutoType *AT = T->getContainedAutoType();
// Allow arrays of auto if we are a generic lambda parameter.
// i.e. [](auto (&array)[5]) { return array[0]; }; OK
if (AT &&
D.getContext() != DeclaratorContext::LambdaExprParameterContext) {
// We've already diagnosed this for decltype(auto).
if (!AT->isDecltypeAuto())
S.Diag(DeclType.Loc, diag::err_illegal_decl_array_of_auto)
<< getPrintableNameForEntity(Name) << T;
T = QualType();
break;
}
// Array parameters can be marked nullable as well, although it's not
// necessary if they're marked 'static'.
if (complainAboutMissingNullability == CAMN_Yes &&
!hasNullabilityAttr(DeclType.getAttrs()) &&
ASM != ArrayType::Static &&
D.isPrototypeContext() &&
!hasOuterPointerLikeChunk(D, chunkIndex)) {
checkNullabilityConsistency(S, SimplePointerKind::Array, DeclType.Loc);
}
T = S.BuildArrayType(T, ASM, ArraySize, ATI.TypeQuals,
SourceRange(DeclType.Loc, DeclType.EndLoc), Name);
break;
}
case DeclaratorChunk::Function: {
// If the function declarator has a prototype (i.e. it is not () and
// does not have a K&R-style identifier list), then the arguments are part
// of the type, otherwise the argument list is ().
DeclaratorChunk::FunctionTypeInfo &FTI = DeclType.Fun;
IsQualifiedFunction =
FTI.hasMethodTypeQualifiers() || FTI.hasRefQualifier();
// Check for auto functions and trailing return type and adjust the
// return type accordingly.
if (!D.isInvalidType()) {
// trailing-return-type is only required if we're declaring a function,
// and not, for instance, a pointer to a function.
if (D.getDeclSpec().hasAutoTypeSpec() &&
!FTI.hasTrailingReturnType() && chunkIndex == 0) {
if (!S.getLangOpts().CPlusPlus14) {
S.Diag(D.getDeclSpec().getTypeSpecTypeLoc(),
D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto
? diag::err_auto_missing_trailing_return
: diag::err_deduced_return_type);
T = Context.IntTy;
D.setInvalidType(true);
} else {
S.Diag(D.getDeclSpec().getTypeSpecTypeLoc(),
diag::warn_cxx11_compat_deduced_return_type);
}
} else if (FTI.hasTrailingReturnType()) {
// T must be exactly 'auto' at this point. See CWG issue 681.
if (isa<ParenType>(T)) {
S.Diag(D.getBeginLoc(), diag::err_trailing_return_in_parens)
<< T << D.getSourceRange();
D.setInvalidType(true);
} else if (D.getName().getKind() ==
UnqualifiedIdKind::IK_DeductionGuideName) {
if (T != Context.DependentTy) {
S.Diag(D.getDeclSpec().getBeginLoc(),
diag::err_deduction_guide_with_complex_decl)
<< D.getSourceRange();
D.setInvalidType(true);
}
} else if (D.getContext() != DeclaratorContext::LambdaExprContext &&
(T.hasQualifiers() || !isa<AutoType>(T) ||
cast<AutoType>(T)->getKeyword() !=
AutoTypeKeyword::Auto)) {
S.Diag(D.getDeclSpec().getTypeSpecTypeLoc(),
diag::err_trailing_return_without_auto)
<< T << D.getDeclSpec().getSourceRange();
D.setInvalidType(true);
}
T = S.GetTypeFromParser(FTI.getTrailingReturnType(), &TInfo);
if (T.isNull()) {
// An error occurred parsing the trailing return type.
T = Context.IntTy;
D.setInvalidType(true);
}
} else {
// This function type is not the type of the entity being declared,
// so checking the 'auto' is not the responsibility of this chunk.
}
}
// C99 6.7.5.3p1: The return type may not be a function or array type.
// For conversion functions, we'll diagnose this particular error later.
if (!D.isInvalidType() && (T->isArrayType() || T->isFunctionType()) &&
(D.getName().getKind() !=
UnqualifiedIdKind::IK_ConversionFunctionId)) {
unsigned diagID = diag::err_func_returning_array_function;
// The chunk processed last (chunkIndex == 0) in a block literal context
// is the function chunk that represents the block itself.
if (chunkIndex == 0 &&
D.getContext() == DeclaratorContext::BlockLiteralContext)
diagID = diag::err_block_returning_array_function;
S.Diag(DeclType.Loc, diagID) << T->isFunctionType() << T;
T = Context.IntTy;
D.setInvalidType(true);
}
// Do not allow returning half FP value.
// FIXME: This really should be in BuildFunctionType.
if (T->isHalfType()) {
if (S.getLangOpts().OpenCL) {
if (!S.getOpenCLOptions().isEnabled("cl_khr_fp16")) {
S.Diag(D.getIdentifierLoc(), diag::err_opencl_invalid_return)
<< T << 0 /*pointer hint*/;
D.setInvalidType(true);
}
} else if (!S.getLangOpts().HalfArgsAndReturns) {
S.Diag(D.getIdentifierLoc(),
diag::err_parameters_retval_cannot_have_fp16_type) << 1;
D.setInvalidType(true);
}
}
if (LangOpts.OpenCL) {
// OpenCL v2.0 s6.12.5 - A block cannot be the return value of a
// function.
if (T->isBlockPointerType() || T->isImageType() || T->isSamplerT() ||
T->isPipeType()) {
S.Diag(D.getIdentifierLoc(), diag::err_opencl_invalid_return)
<< T << 1 /*hint off*/;
D.setInvalidType(true);
}
// OpenCL doesn't support variadic functions and blocks
// (s6.9.e and s6.12.5 OpenCL v2.0) except for printf.
// We also allow any toolchain-reserved identifiers here.
if (FTI.isVariadic &&
!(D.getIdentifier() &&
((D.getIdentifier()->getName() == "printf" &&
(LangOpts.OpenCLCPlusPlus || LangOpts.OpenCLVersion >= 120)) ||
D.getIdentifier()->getName().startswith("__")))) {
S.Diag(D.getIdentifierLoc(), diag::err_opencl_variadic_function);
D.setInvalidType(true);
}
}
// Methods cannot return interface types. All ObjC objects are
// passed by reference.
if (T->isObjCObjectType()) {
SourceLocation DiagLoc, FixitLoc;
if (TInfo) {
DiagLoc = TInfo->getTypeLoc().getBeginLoc();
FixitLoc = S.getLocForEndOfToken(TInfo->getTypeLoc().getEndLoc());
} else {
DiagLoc = D.getDeclSpec().getTypeSpecTypeLoc();
FixitLoc = S.getLocForEndOfToken(D.getDeclSpec().getEndLoc());
}
S.Diag(DiagLoc, diag::err_object_cannot_be_passed_returned_by_value)
<< 0 << T
<< FixItHint::CreateInsertion(FixitLoc, "*");
T = Context.getObjCObjectPointerType(T);
if (TInfo) {
TypeLocBuilder TLB;
TLB.pushFullCopy(TInfo->getTypeLoc());
ObjCObjectPointerTypeLoc TLoc = TLB.push<ObjCObjectPointerTypeLoc>(T);
TLoc.setStarLoc(FixitLoc);
TInfo = TLB.getTypeSourceInfo(Context, T);
}
D.setInvalidType(true);
}
// cv-qualifiers on return types are pointless except when the type is a
// class type in C++.
if ((T.getCVRQualifiers() || T->isAtomicType()) &&
!(S.getLangOpts().CPlusPlus &&
(T->isDependentType() || T->isRecordType()))) {
if (T->isVoidType() && !S.getLangOpts().CPlusPlus &&
D.getFunctionDefinitionKind() == FDK_Definition) {
// [6.9.1/3] qualified void return is invalid on a C
// function definition. Apparently ok on declarations and
// in C++ though (!)
S.Diag(DeclType.Loc, diag::err_func_returning_qualified_void) << T;
} else
diagnoseRedundantReturnTypeQualifiers(S, T, D, chunkIndex);
}
// Objective-C ARC ownership qualifiers are ignored on the function
// return type (by type canonicalization). Complain if this attribute
// was written here.
if (T.getQualifiers().hasObjCLifetime()) {
SourceLocation AttrLoc;
if (chunkIndex + 1 < D.getNumTypeObjects()) {
DeclaratorChunk ReturnTypeChunk = D.getTypeObject(chunkIndex + 1);
for (const ParsedAttr &AL : ReturnTypeChunk.getAttrs()) {
if (AL.getKind() == ParsedAttr::AT_ObjCOwnership) {
AttrLoc = AL.getLoc();
break;
}
}
}
if (AttrLoc.isInvalid()) {
for (const ParsedAttr &AL : D.getDeclSpec().getAttributes()) {
if (AL.getKind() == ParsedAttr::AT_ObjCOwnership) {
AttrLoc = AL.getLoc();
break;
}
}
}
if (AttrLoc.isValid()) {
// The ownership attributes are almost always written via
// the predefined
// __strong/__weak/__autoreleasing/__unsafe_unretained.
if (AttrLoc.isMacroID())
AttrLoc =
S.SourceMgr.getImmediateExpansionRange(AttrLoc).getBegin();
S.Diag(AttrLoc, diag::warn_arc_lifetime_result_type)
<< T.getQualifiers().getObjCLifetime();
}
}
if (LangOpts.CPlusPlus && D.getDeclSpec().hasTagDefinition()) {
// C++ [dcl.fct]p6:
// Types shall not be defined in return or parameter types.
TagDecl *Tag = cast<TagDecl>(D.getDeclSpec().getRepAsDecl());
S.Diag(Tag->getLocation(), diag::err_type_defined_in_result_type)
<< Context.getTypeDeclType(Tag);
}
// Exception specs are not allowed in typedefs. Complain, but add it
// anyway.
if (IsTypedefName && FTI.getExceptionSpecType() && !LangOpts.CPlusPlus17)
S.Diag(FTI.getExceptionSpecLocBeg(),
diag::err_exception_spec_in_typedef)
<< (D.getContext() == DeclaratorContext::AliasDeclContext ||
D.getContext() == DeclaratorContext::AliasTemplateContext);
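// e.g. before C++17 (hypothetical source), 'typedef void F() throw();'
// is diagnosed here; in C++17 the exception specification is part of the
// function type, so it is permitted.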
// If we see "T var();" or "T var(T());" at block scope, it is probably
// an attempt to initialize a variable, not a function declaration.
if (FTI.isAmbiguous)
warnAboutAmbiguousFunction(S, D, DeclType, T);
FunctionType::ExtInfo EI(
getCCForDeclaratorChunk(S, D, DeclType.getAttrs(), FTI, chunkIndex));
if (!FTI.NumParams && !FTI.isVariadic && !LangOpts.CPlusPlus
&& !LangOpts.OpenCL) {
// Simple void foo(), where the incoming T is the result type.
T = Context.getFunctionNoProtoType(T, EI);
} else {
// We allow a zero-parameter variadic function in C if the
// function is marked with the "overloadable" attribute. Scan
// for this attribute now.
if (!FTI.NumParams && FTI.isVariadic && !LangOpts.CPlusPlus)
if (!D.getAttributes().hasAttribute(ParsedAttr::AT_Overloadable))
S.Diag(FTI.getEllipsisLoc(), diag::err_ellipsis_first_param);
if (FTI.NumParams && FTI.Params[0].Param == nullptr) {
// C99 6.7.5.3p3: Reject int(x,y,z) when it's not a function
// definition.
S.Diag(FTI.Params[0].IdentLoc,
diag::err_ident_list_in_fn_declaration);
D.setInvalidType(true);
// Recover by creating a K&R-style function type.
T = Context.getFunctionNoProtoType(T, EI);
break;
}
FunctionProtoType::ExtProtoInfo EPI;
EPI.ExtInfo = EI;
EPI.Variadic = FTI.isVariadic;
EPI.HasTrailingReturn = FTI.hasTrailingReturnType();
EPI.TypeQuals.addCVRUQualifiers(
FTI.MethodQualifiers ? FTI.MethodQualifiers->getTypeQualifiers()
: 0);
EPI.RefQualifier = !FTI.hasRefQualifier()? RQ_None
: FTI.RefQualifierIsLValueRef? RQ_LValue
: RQ_RValue;
// Otherwise, we have a function with a parameter list that is
// potentially variadic.
SmallVector<QualType, 16> ParamTys;
ParamTys.reserve(FTI.NumParams);
SmallVector<FunctionProtoType::ExtParameterInfo, 16>
ExtParameterInfos(FTI.NumParams);
bool HasAnyInterestingExtParameterInfos = false;
for (unsigned i = 0, e = FTI.NumParams; i != e; ++i) {
ParmVarDecl *Param = cast<ParmVarDecl>(FTI.Params[i].Param);
QualType ParamTy = Param->getType();
assert(!ParamTy.isNull() && "Couldn't parse type?");
// Look for 'void'. void is allowed only as a single parameter to a
// function with no other parameters (C99 6.7.5.3p10). We record
// int(void) as a FunctionProtoType with an empty parameter list.
if (ParamTy->isVoidType()) {
// If this is something like 'float(int, void)', reject it. 'void'
// is an incomplete type (C99 6.2.5p19) and function decls cannot
// have parameters of incomplete type.
if (FTI.NumParams != 1 || FTI.isVariadic) {
S.Diag(DeclType.Loc, diag::err_void_only_param);
ParamTy = Context.IntTy;
Param->setType(ParamTy);
} else if (FTI.Params[i].Ident) {
// Reject, but continue to parse 'int(void abc)'.
S.Diag(FTI.Params[i].IdentLoc, diag::err_param_with_void_type);
ParamTy = Context.IntTy;
Param->setType(ParamTy);
} else {
// Reject, but continue to parse 'float(const void)'.
if (ParamTy.hasQualifiers())
S.Diag(DeclType.Loc, diag::err_void_param_qualified);
// Do not add 'void' to the list.
break;
}
} else if (ParamTy->isHalfType()) {
// Disallow half FP parameters.
// FIXME: This really should be in BuildFunctionType.
if (S.getLangOpts().OpenCL) {
if (!S.getOpenCLOptions().isEnabled("cl_khr_fp16")) {
S.Diag(Param->getLocation(),
diag::err_opencl_half_param) << ParamTy;
D.setInvalidType();
Param->setInvalidDecl();
}
} else if (!S.getLangOpts().HalfArgsAndReturns) {
S.Diag(Param->getLocation(),
diag::err_parameters_retval_cannot_have_fp16_type) << 0;
D.setInvalidType();
}
} else if (!FTI.hasPrototype) {
if (ParamTy->isPromotableIntegerType()) {
ParamTy = Context.getPromotedIntegerType(ParamTy);
Param->setKNRPromoted(true);
} else if (const BuiltinType* BTy = ParamTy->getAs<BuiltinType>()) {
if (BTy->getKind() == BuiltinType::Float) {
ParamTy = Context.DoubleTy;
Param->setKNRPromoted(true);
}
}
}
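// e.g. for the K&R-style definition (hypothetical C source)
//   int f(c, x) char c; float x; { return c; }
// the recorded parameter types are promoted to int and double, matching
// the default argument promotions that callers will apply.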
if (LangOpts.ObjCAutoRefCount && Param->hasAttr<NSConsumedAttr>()) {
ExtParameterInfos[i] = ExtParameterInfos[i].withIsConsumed(true);
HasAnyInterestingExtParameterInfos = true;
}
if (auto attr = Param->getAttr<ParameterABIAttr>()) {
ExtParameterInfos[i] =
ExtParameterInfos[i].withABI(attr->getABI());
HasAnyInterestingExtParameterInfos = true;
}
if (Param->hasAttr<PassObjectSizeAttr>()) {
ExtParameterInfos[i] = ExtParameterInfos[i].withHasPassObjectSize();
HasAnyInterestingExtParameterInfos = true;
}
if (Param->hasAttr<NoEscapeAttr>()) {
ExtParameterInfos[i] = ExtParameterInfos[i].withIsNoEscape(true);
HasAnyInterestingExtParameterInfos = true;
}
ParamTys.push_back(ParamTy);
}
if (HasAnyInterestingExtParameterInfos) {
EPI.ExtParameterInfos = ExtParameterInfos.data();
checkExtParameterInfos(S, ParamTys, EPI,
[&](unsigned i) { return FTI.Params[i].Param->getLocation(); });
}
SmallVector<QualType, 4> Exceptions;
SmallVector<ParsedType, 2> DynamicExceptions;
SmallVector<SourceRange, 2> DynamicExceptionRanges;
Expr *NoexceptExpr = nullptr;
if (FTI.getExceptionSpecType() == EST_Dynamic) {
// FIXME: It's rather inefficient to have to split into two vectors
// here.
unsigned N = FTI.getNumExceptions();
DynamicExceptions.reserve(N);
DynamicExceptionRanges.reserve(N);
for (unsigned I = 0; I != N; ++I) {
DynamicExceptions.push_back(FTI.Exceptions[I].Ty);
DynamicExceptionRanges.push_back(FTI.Exceptions[I].Range);
}
} else if (isComputedNoexcept(FTI.getExceptionSpecType())) {
NoexceptExpr = FTI.NoexceptExpr;
}
S.checkExceptionSpecification(D.isFunctionDeclarationContext(),
FTI.getExceptionSpecType(),
DynamicExceptions,
DynamicExceptionRanges,
NoexceptExpr,
Exceptions,
EPI.ExceptionSpec);
// FIXME: Set address space from attrs for C++ mode here.
// OpenCLCPlusPlus: A class member function has an address space.
auto IsClassMember = [&]() {
return (!state.getDeclarator().getCXXScopeSpec().isEmpty() &&
state.getDeclarator()
.getCXXScopeSpec()
.getScopeRep()
->getKind() == NestedNameSpecifier::TypeSpec) ||
state.getDeclarator().getContext() ==
DeclaratorContext::MemberContext;
};
if (state.getSema().getLangOpts().OpenCLCPlusPlus && IsClassMember()) {
LangAS ASIdx = LangAS::Default;
// Take address space attr if any and mark as invalid to avoid adding
// them later while creating QualType.
if (FTI.MethodQualifiers)
for (ParsedAttr &attr : FTI.MethodQualifiers->getAttributes()) {
LangAS ASIdxNew = attr.asOpenCLLangAS();
if (DiagnoseMultipleAddrSpaceAttributes(S, ASIdx, ASIdxNew,
attr.getLoc()))
D.setInvalidType(true);
else
ASIdx = ASIdxNew;
}
// If a class member function's address space is not set, set it to
// __generic.
LangAS AS =
(ASIdx == LangAS::Default ? LangAS::opencl_generic : ASIdx);
EPI.TypeQuals.addAddressSpace(AS);
}
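// e.g. in C++ for OpenCL (hypothetical source), for
//   struct S { void f(); };
// the method type of S::f picks up the implicit '__generic' address
// space here unless one was written among the method qualifiers.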
T = Context.getFunctionType(T, ParamTys, EPI);
}
break;
}
case DeclaratorChunk::MemberPointer: {
// The scope spec must refer to a class, or be dependent.
CXXScopeSpec &SS = DeclType.Mem.Scope();
QualType ClsType;
// Handle pointer nullability.
inferPointerNullability(SimplePointerKind::MemberPointer, DeclType.Loc,
DeclType.EndLoc, DeclType.getAttrs(),
state.getDeclarator().getAttributePool());
if (SS.isInvalid()) {
// Avoid emitting extra errors if we already errored on the scope.
D.setInvalidType(true);
} else if (S.isDependentScopeSpecifier(SS) ||
dyn_cast_or_null<CXXRecordDecl>(S.computeDeclContext(SS))) {
NestedNameSpecifier *NNS = SS.getScopeRep();
NestedNameSpecifier *NNSPrefix = NNS->getPrefix();
switch (NNS->getKind()) {
case NestedNameSpecifier::Identifier:
ClsType = Context.getDependentNameType(ETK_None, NNSPrefix,
NNS->getAsIdentifier());
break;
case NestedNameSpecifier::Namespace:
case NestedNameSpecifier::NamespaceAlias:
case NestedNameSpecifier::Global:
case NestedNameSpecifier::Super:
llvm_unreachable("Nested-name-specifier must name a type");
case NestedNameSpecifier::TypeSpec:
case NestedNameSpecifier::TypeSpecWithTemplate:
ClsType = QualType(NNS->getAsType(), 0);
// Note: if the NNS has a prefix and ClsType is a nondependent
// TemplateSpecializationType, then the NNS prefix is NOT included
// in ClsType; hence we wrap ClsType into an ElaboratedType.
// NOTE: in particular, no wrap occurs if ClsType already is an
// Elaborated, DependentName, or DependentTemplateSpecialization.
if (NNSPrefix && isa<TemplateSpecializationType>(NNS->getAsType()))
ClsType = Context.getElaboratedType(ETK_None, NNSPrefix, ClsType);
break;
}
} else {
S.Diag(DeclType.Mem.Scope().getBeginLoc(),
diag::err_illegal_decl_mempointer_in_nonclass)
<< (D.getIdentifier() ? D.getIdentifier()->getName() : "type name")
<< DeclType.Mem.Scope().getRange();
D.setInvalidType(true);
}
if (!ClsType.isNull())
T = S.BuildMemberPointerType(T, ClsType, DeclType.Loc,
D.getIdentifier());
if (T.isNull()) {
T = Context.IntTy;
D.setInvalidType(true);
} else if (DeclType.Mem.TypeQuals) {
T = S.BuildQualifiedType(T, DeclType.Loc, DeclType.Mem.TypeQuals);
}
break;
}
case DeclaratorChunk::Pipe: {
T = S.BuildReadPipeType(T, DeclType.Loc);
processTypeAttrs(state, T, TAL_DeclSpec,
D.getMutableDeclSpec().getAttributes());
break;
}
}
if (T.isNull()) {
D.setInvalidType(true);
T = Context.IntTy;
}
// See if there are any attributes on this declarator chunk.
processTypeAttrs(state, T, TAL_DeclChunk, DeclType.getAttrs());
if (DeclType.Kind != DeclaratorChunk::Paren) {
if (ExpectNoDerefChunk && !IsNoDerefableChunk(DeclType))
S.Diag(DeclType.Loc, diag::warn_noderef_on_non_pointer_or_array);
ExpectNoDerefChunk = state.didParseNoDeref();
}
}
if (ExpectNoDerefChunk)
S.Diag(state.getDeclarator().getBeginLoc(),
diag::warn_noderef_on_non_pointer_or_array);
// GNU warning -Wstrict-prototypes
// Warn if a function declaration is without a prototype.
// This warning is issued for all kinds of unprototyped function
// declarations (e.g. function-type typedefs, function pointers, etc.).
// C99 6.7.5.3p14:
// The empty list in a function declarator that is not part of a definition
// of that function specifies that no information about the number or types
// of the parameters is supplied.
if (!LangOpts.CPlusPlus && D.getFunctionDefinitionKind() == FDK_Declaration) {
bool IsBlock = false;
for (const DeclaratorChunk &DeclType : D.type_objects()) {
switch (DeclType.Kind) {
case DeclaratorChunk::BlockPointer:
IsBlock = true;
break;
case DeclaratorChunk::Function: {
const DeclaratorChunk::FunctionTypeInfo &FTI = DeclType.Fun;
// We suppress the warning when there's no LParen location, as this
// indicates the declaration was an implicit declaration, which gets
// warned about separately via -Wimplicit-function-declaration.
if (FTI.NumParams == 0 && !FTI.isVariadic && FTI.getLParenLoc().isValid())
S.Diag(DeclType.Loc, diag::warn_strict_prototypes)
<< IsBlock
<< FixItHint::CreateInsertion(FTI.getRParenLoc(), "void");
IsBlock = false;
break;
}
default:
break;
}
}
}
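// e.g. in C (hypothetical source), 'void f();' as a plain declaration
// warns here with a fix-it suggesting 'void f(void);'; implicit
// declarations are excluded above since they are already covered by
// -Wimplicit-function-declaration.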
assert(!T.isNull() && "T must not be null after this point");
if (LangOpts.CPlusPlus && T->isFunctionType()) {
const FunctionProtoType *FnTy = T->getAs<FunctionProtoType>();
assert(FnTy && "Why oh why is there not a FunctionProtoType here?");
// C++ 8.3.5p4:
// A cv-qualifier-seq shall only be part of the function type
// for a nonstatic member function, the function type to which a pointer
// to member refers, or the top-level function type of a function typedef
// declaration.
//
// Core issue 547 also allows cv-qualifiers on function types that are
// top-level template type arguments.
enum { NonMember, Member, DeductionGuide } Kind = NonMember;
if (D.getName().getKind() == UnqualifiedIdKind::IK_DeductionGuideName)
Kind = DeductionGuide;
else if (!D.getCXXScopeSpec().isSet()) {
if ((D.getContext() == DeclaratorContext::MemberContext ||
D.getContext() == DeclaratorContext::LambdaExprContext) &&
!D.getDeclSpec().isFriendSpecified())
Kind = Member;
} else {
DeclContext *DC = S.computeDeclContext(D.getCXXScopeSpec());
if (!DC || DC->isRecord())
Kind = Member;
}
// C++11 [dcl.fct]p6 (w/DR1417):
// An attempt to specify a function type with a cv-qualifier-seq or a
// ref-qualifier (including by typedef-name) is ill-formed unless it is:
// - the function type for a non-static member function,
// - the function type to which a pointer to member refers,
// - the top-level function type of a function typedef declaration or
// alias-declaration,
// - the type-id in the default argument of a type-parameter, or
// - the type-id of a template-argument for a type-parameter
//
// FIXME: Checking this here is insufficient. We accept-invalid on:
//
// template<typename T> struct S { void f(T); };
// S<int() const> s;
//
// ... for instance.
if (IsQualifiedFunction &&
!(Kind == Member &&
D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_static) &&
!IsTypedefName &&
D.getContext() != DeclaratorContext::TemplateArgContext &&
D.getContext() != DeclaratorContext::TemplateTypeArgContext) {
SourceLocation Loc = D.getBeginLoc();
SourceRange RemovalRange;
unsigned I;
if (D.isFunctionDeclarator(I)) {
SmallVector<SourceLocation, 4> RemovalLocs;
const DeclaratorChunk &Chunk = D.getTypeObject(I);
assert(Chunk.Kind == DeclaratorChunk::Function);
if (Chunk.Fun.hasRefQualifier())
RemovalLocs.push_back(Chunk.Fun.getRefQualifierLoc());
if (Chunk.Fun.hasMethodTypeQualifiers())
Chunk.Fun.MethodQualifiers->forEachQualifier(
[&](DeclSpec::TQ TypeQual, StringRef QualName,
SourceLocation SL) { RemovalLocs.push_back(SL); });
if (!RemovalLocs.empty()) {
llvm::sort(RemovalLocs,
BeforeThanCompare<SourceLocation>(S.getSourceManager()));
RemovalRange = SourceRange(RemovalLocs.front(), RemovalLocs.back());
Loc = RemovalLocs.front();
}
}
S.Diag(Loc, diag::err_invalid_qualified_function_type)
<< Kind << D.isFunctionDeclarator() << T
<< getFunctionQualifiersAsString(FnTy)
<< FixItHint::CreateRemoval(RemovalRange);
// Strip the cv-qualifiers and ref-qualifiers from the type.
FunctionProtoType::ExtProtoInfo EPI = FnTy->getExtProtoInfo();
EPI.TypeQuals.removeCVRQualifiers();
EPI.RefQualifier = RQ_None;
T = Context.getFunctionType(FnTy->getReturnType(), FnTy->getParamTypes(),
EPI);
// Rebuild any parens around the identifier in the function type.
for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) {
if (D.getTypeObject(i).Kind != DeclaratorChunk::Paren)
break;
T = S.BuildParenType(T);
}
}
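// For illustration (hypothetical C++ source):
//   void (*p)() const;            // ill-formed: recovered above by
//                                 // stripping the qualifier
//   typedef void F() const;       // OK: top-level type of a typedef
//   struct S { void f() const; }; // OK: non-static member function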
}
// Apply any undistributed attributes from the declarator.
processTypeAttrs(state, T, TAL_DeclName, D.getAttributes());
// Diagnose any ignored type attributes.
state.diagnoseIgnoredTypeAttrs(T);
// C++0x [dcl.constexpr]p9:
// A constexpr specifier used in an object declaration declares the object
// as const.
if (D.getDeclSpec().hasConstexprSpecifier() && T->isObjectType()) {
T.addConst();
}
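// e.g. 'constexpr int n = 5;' declares n with the type 'const int'.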
// If there was an ellipsis in the declarator, the declaration declares a
// parameter pack whose type may be a pack expansion type.
if (D.hasEllipsis()) {
// C++0x [dcl.fct]p13:
// A declarator-id or abstract-declarator containing an ellipsis shall
// only be used in a parameter-declaration. Such a parameter-declaration
// is a parameter pack (14.5.3). [...]
switch (D.getContext()) {
case DeclaratorContext::PrototypeContext:
case DeclaratorContext::LambdaExprParameterContext:
// C++0x [dcl.fct]p13:
// [...] When it is part of a parameter-declaration-clause, the
// parameter pack is a function parameter pack (14.5.3). The type T
// of the declarator-id of the function parameter pack shall contain
// a template parameter pack; each template parameter pack in T is
// expanded by the function parameter pack.
//
// We represent function parameter packs as function parameters whose
// type is a pack expansion.
if (!T->containsUnexpandedParameterPack()) {
S.Diag(D.getEllipsisLoc(),
diag::err_function_parameter_pack_without_parameter_packs)
<< T << D.getSourceRange();
D.setEllipsisLoc(SourceLocation());
} else {
T = Context.getPackExpansionType(T, None);
}
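// e.g. (hypothetical C++ source) in
//   template <typename ...Ts> void f(Ts ...xs);
// each Ts in the parameter's type is expanded by the pack xs, whereas
//   template <typename T> void g(T ...xs);
// is diagnosed above because T contains no unexpanded parameter pack.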
break;
case DeclaratorContext::TemplateParamContext:
// C++0x [temp.param]p15:
// If a template-parameter is a [...] is a parameter-declaration that
// declares a parameter pack (8.3.5), then the template-parameter is a
// template parameter pack (14.5.3).
//
// Note: core issue 778 clarifies that, if there are any unexpanded
// parameter packs in the type of the non-type template parameter, then
// it expands those parameter packs.
if (T->containsUnexpandedParameterPack())
T = Context.getPackExpansionType(T, None);
else
S.Diag(D.getEllipsisLoc(),
LangOpts.CPlusPlus11
? diag::warn_cxx98_compat_variadic_templates
: diag::ext_variadic_templates);
break;
case DeclaratorContext::FileContext:
case DeclaratorContext::KNRTypeListContext:
case DeclaratorContext::ObjCParameterContext: // FIXME: special diagnostic
// here?
case DeclaratorContext::ObjCResultContext: // FIXME: special diagnostic
// here?
case DeclaratorContext::TypeNameContext:
case DeclaratorContext::FunctionalCastContext:
case DeclaratorContext::CXXNewContext:
case DeclaratorContext::AliasDeclContext:
case DeclaratorContext::AliasTemplateContext:
case DeclaratorContext::MemberContext:
case DeclaratorContext::BlockContext:
case DeclaratorContext::ForContext:
case DeclaratorContext::InitStmtContext:
case DeclaratorContext::ConditionContext:
case DeclaratorContext::CXXCatchContext:
case DeclaratorContext::ObjCCatchContext:
case DeclaratorContext::BlockLiteralContext:
case DeclaratorContext::LambdaExprContext:
case DeclaratorContext::ConversionIdContext:
case DeclaratorContext::TrailingReturnContext:
case DeclaratorContext::TrailingReturnVarContext:
case DeclaratorContext::TemplateArgContext:
case DeclaratorContext::TemplateTypeArgContext:
// FIXME: We may want to allow parameter packs in block-literal contexts
// in the future.
S.Diag(D.getEllipsisLoc(),
diag::err_ellipsis_in_declarator_not_parameter);
D.setEllipsisLoc(SourceLocation());
break;
}
}
assert(!T.isNull() && "T must not be null at the end of this function");
if (D.isInvalidType())
return Context.getTrivialTypeSourceInfo(T);
return GetTypeSourceInfoForDeclarator(state, T, TInfo);
}
/// GetTypeForDeclarator - Convert the type for the specified
/// declarator to Type instances.
///
/// The result of this call will never be null, but the associated
/// type may be a null type if there's an unrecoverable error.
TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S) {
// Determine the type of the declarator. Not all forms of declarator
// have a type.
TypeProcessingState state(*this, D);
TypeSourceInfo *ReturnTypeInfo = nullptr;
QualType T = GetDeclSpecTypeForDeclarator(state, ReturnTypeInfo);
if (D.isPrototypeContext() && getLangOpts().ObjCAutoRefCount)
inferARCWriteback(state, T);
return GetFullTypeForDeclarator(state, T, ReturnTypeInfo);
}
static void transferARCOwnershipToDeclSpec(Sema &S,
QualType &declSpecTy,
Qualifiers::ObjCLifetime ownership) {
if (declSpecTy->isObjCRetainableType() &&
declSpecTy.getObjCLifetime() == Qualifiers::OCL_None) {
Qualifiers qs;
qs.addObjCLifetime(ownership);
declSpecTy = S.Context.getQualifiedType(declSpecTy, qs);
}
}
static void transferARCOwnershipToDeclaratorChunk(TypeProcessingState &state,
Qualifiers::ObjCLifetime ownership,
unsigned chunkIndex) {
Sema &S = state.getSema();
Declarator &D = state.getDeclarator();
// Look for an explicit lifetime attribute.
DeclaratorChunk &chunk = D.getTypeObject(chunkIndex);
if (chunk.getAttrs().hasAttribute(ParsedAttr::AT_ObjCOwnership))
return;
const char *attrStr = nullptr;
switch (ownership) {
case Qualifiers::OCL_None: llvm_unreachable("no ownership!");
case Qualifiers::OCL_ExplicitNone: attrStr = "none"; break;
case Qualifiers::OCL_Strong: attrStr = "strong"; break;
case Qualifiers::OCL_Weak: attrStr = "weak"; break;
case Qualifiers::OCL_Autoreleasing: attrStr = "autoreleasing"; break;
}
IdentifierLoc *Arg = new (S.Context) IdentifierLoc;
Arg->Ident = &S.Context.Idents.get(attrStr);
Arg->Loc = SourceLocation();
ArgsUnion Args(Arg);
// If there wasn't one, add one (with an invalid source location
// so that we don't make an AttributedType for it).
ParsedAttr *attr = D.getAttributePool().create(
&S.Context.Idents.get("objc_ownership"), SourceLocation(),
/*scope*/ nullptr, SourceLocation(),
/*args*/ &Args, 1, ParsedAttr::AS_GNU);
chunk.getAttrs().addAtEnd(attr);
// TODO: mark whether we did this inference?
}
/// Used for transferring ownership in casts resulting in l-values.
static void transferARCOwnership(TypeProcessingState &state,
QualType &declSpecTy,
Qualifiers::ObjCLifetime ownership) {
Sema &S = state.getSema();
Declarator &D = state.getDeclarator();
int inner = -1;
bool hasIndirection = false;
for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) {
DeclaratorChunk &chunk = D.getTypeObject(i);
switch (chunk.Kind) {
case DeclaratorChunk::Paren:
// Ignore parens.
break;
case DeclaratorChunk::Array:
case DeclaratorChunk::Reference:
case DeclaratorChunk::Pointer:
if (inner != -1)
hasIndirection = true;
inner = i;
break;
case DeclaratorChunk::BlockPointer:
if (inner != -1)
transferARCOwnershipToDeclaratorChunk(state, ownership, i);
return;
case DeclaratorChunk::Function:
case DeclaratorChunk::MemberPointer:
case DeclaratorChunk::Pipe:
return;
}
}
if (inner == -1)
return;
DeclaratorChunk &chunk = D.getTypeObject(inner);
if (chunk.Kind == DeclaratorChunk::Pointer) {
if (declSpecTy->isObjCRetainableType())
return transferARCOwnershipToDeclSpec(S, declSpecTy, ownership);
if (declSpecTy->isObjCObjectType() && hasIndirection)
return transferARCOwnershipToDeclaratorChunk(state, ownership, inner);
} else {
assert(chunk.Kind == DeclaratorChunk::Array ||
chunk.Kind == DeclaratorChunk::Reference);
return transferARCOwnershipToDeclSpec(S, declSpecTy, ownership);
}
}
TypeSourceInfo *Sema::GetTypeForDeclaratorCast(Declarator &D, QualType FromTy) {
TypeProcessingState state(*this, D);
TypeSourceInfo *ReturnTypeInfo = nullptr;
QualType declSpecTy = GetDeclSpecTypeForDeclarator(state, ReturnTypeInfo);
if (getLangOpts().ObjC) {
Qualifiers::ObjCLifetime ownership = Context.getInnerObjCOwnership(FromTy);
if (ownership != Qualifiers::OCL_None)
transferARCOwnership(state, declSpecTy, ownership);
}
return GetFullTypeForDeclarator(state, declSpecTy, ReturnTypeInfo);
}
static void fillAttributedTypeLoc(AttributedTypeLoc TL,
TypeProcessingState &State) {
TL.setAttr(State.takeAttrForAttributedType(TL.getTypePtr()));
}
namespace {
class TypeSpecLocFiller : public TypeLocVisitor<TypeSpecLocFiller> {
ASTContext &Context;
TypeProcessingState &State;
const DeclSpec &DS;
public:
TypeSpecLocFiller(ASTContext &Context, TypeProcessingState &State,
const DeclSpec &DS)
: Context(Context), State(State), DS(DS) {}
void VisitAttributedTypeLoc(AttributedTypeLoc TL) {
Visit(TL.getModifiedLoc());
fillAttributedTypeLoc(TL, State);
}
void VisitMacroQualifiedTypeLoc(MacroQualifiedTypeLoc TL) {
Visit(TL.getInnerLoc());
TL.setExpansionLoc(
State.getExpansionLocForMacroQualifiedType(TL.getTypePtr()));
}
void VisitQualifiedTypeLoc(QualifiedTypeLoc TL) {
Visit(TL.getUnqualifiedLoc());
}
void VisitTypedefTypeLoc(TypedefTypeLoc TL) {
TL.setNameLoc(DS.getTypeSpecTypeLoc());
}
void VisitObjCInterfaceTypeLoc(ObjCInterfaceTypeLoc TL) {
TL.setNameLoc(DS.getTypeSpecTypeLoc());
// FIXME: We should have DS.getTypeSpecTypeEndLoc(), but it requires an
// additional field. What we have is good enough for display of the
// location of the 'fixit' on the interface name.
TL.setNameEndLoc(DS.getEndLoc());
}
void VisitObjCObjectTypeLoc(ObjCObjectTypeLoc TL) {
TypeSourceInfo *RepTInfo = nullptr;
Sema::GetTypeFromParser(DS.getRepAsType(), &RepTInfo);
TL.copy(RepTInfo->getTypeLoc());
}
void VisitObjCObjectPointerTypeLoc(ObjCObjectPointerTypeLoc TL) {
TypeSourceInfo *RepTInfo = nullptr;
Sema::GetTypeFromParser(DS.getRepAsType(), &RepTInfo);
TL.copy(RepTInfo->getTypeLoc());
}
void VisitTemplateSpecializationTypeLoc(TemplateSpecializationTypeLoc TL) {
TypeSourceInfo *TInfo = nullptr;
Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
// If we got no declarator info from previous Sema routines,
// just fill with the typespec loc.
if (!TInfo) {
TL.initialize(Context, DS.getTypeSpecTypeNameLoc());
return;
}
TypeLoc OldTL = TInfo->getTypeLoc();
if (TInfo->getType()->getAs<ElaboratedType>()) {
ElaboratedTypeLoc ElabTL = OldTL.castAs<ElaboratedTypeLoc>();
TemplateSpecializationTypeLoc NamedTL = ElabTL.getNamedTypeLoc()
.castAs<TemplateSpecializationTypeLoc>();
TL.copy(NamedTL);
} else {
TL.copy(OldTL.castAs<TemplateSpecializationTypeLoc>());
assert(TL.getRAngleLoc() == OldTL.castAs<TemplateSpecializationTypeLoc>().getRAngleLoc());
}
}
void VisitTypeOfExprTypeLoc(TypeOfExprTypeLoc TL) {
assert(DS.getTypeSpecType() == DeclSpec::TST_typeofExpr);
TL.setTypeofLoc(DS.getTypeSpecTypeLoc());
TL.setParensRange(DS.getTypeofParensRange());
}
void VisitTypeOfTypeLoc(TypeOfTypeLoc TL) {
assert(DS.getTypeSpecType() == DeclSpec::TST_typeofType);
TL.setTypeofLoc(DS.getTypeSpecTypeLoc());
TL.setParensRange(DS.getTypeofParensRange());
assert(DS.getRepAsType());
TypeSourceInfo *TInfo = nullptr;
Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
TL.setUnderlyingTInfo(TInfo);
}
void VisitUnaryTransformTypeLoc(UnaryTransformTypeLoc TL) {
// FIXME: This holds only because we only have one unary transform.
assert(DS.getTypeSpecType() == DeclSpec::TST_underlyingType);
TL.setKWLoc(DS.getTypeSpecTypeLoc());
TL.setParensRange(DS.getTypeofParensRange());
assert(DS.getRepAsType());
TypeSourceInfo *TInfo = nullptr;
Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
TL.setUnderlyingTInfo(TInfo);
}
void VisitBuiltinTypeLoc(BuiltinTypeLoc TL) {
// By default, use the source location of the type specifier.
TL.setBuiltinLoc(DS.getTypeSpecTypeLoc());
if (TL.needsExtraLocalData()) {
// Set info for the written builtin specifiers.
TL.getWrittenBuiltinSpecs() = DS.getWrittenBuiltinSpecs();
// Try to have a meaningful source location.
if (TL.getWrittenSignSpec() != TSS_unspecified)
TL.expandBuiltinRange(DS.getTypeSpecSignLoc());
if (TL.getWrittenWidthSpec() != TSW_unspecified)
TL.expandBuiltinRange(DS.getTypeSpecWidthRange());
}
}
void VisitElaboratedTypeLoc(ElaboratedTypeLoc TL) {
ElaboratedTypeKeyword Keyword
= TypeWithKeyword::getKeywordForTypeSpec(DS.getTypeSpecType());
if (DS.getTypeSpecType() == TST_typename) {
TypeSourceInfo *TInfo = nullptr;
Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
if (TInfo) {
TL.copy(TInfo->getTypeLoc().castAs<ElaboratedTypeLoc>());
return;
}
}
TL.setElaboratedKeywordLoc(Keyword != ETK_None
? DS.getTypeSpecTypeLoc()
: SourceLocation());
const CXXScopeSpec& SS = DS.getTypeSpecScope();
TL.setQualifierLoc(SS.getWithLocInContext(Context));
Visit(TL.getNextTypeLoc().getUnqualifiedLoc());
}
void VisitDependentNameTypeLoc(DependentNameTypeLoc TL) {
assert(DS.getTypeSpecType() == TST_typename);
TypeSourceInfo *TInfo = nullptr;
Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
assert(TInfo);
TL.copy(TInfo->getTypeLoc().castAs<DependentNameTypeLoc>());
}
void VisitDependentTemplateSpecializationTypeLoc(
DependentTemplateSpecializationTypeLoc TL) {
assert(DS.getTypeSpecType() == TST_typename);
TypeSourceInfo *TInfo = nullptr;
Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
assert(TInfo);
TL.copy(
TInfo->getTypeLoc().castAs<DependentTemplateSpecializationTypeLoc>());
}
void VisitTagTypeLoc(TagTypeLoc TL) {
TL.setNameLoc(DS.getTypeSpecTypeNameLoc());
}
void VisitAtomicTypeLoc(AtomicTypeLoc TL) {
// An AtomicTypeLoc can come from either an _Atomic(...) type specifier
// or an _Atomic qualifier.
if (DS.getTypeSpecType() == DeclSpec::TST_atomic) {
TL.setKWLoc(DS.getTypeSpecTypeLoc());
TL.setParensRange(DS.getTypeofParensRange());
TypeSourceInfo *TInfo = nullptr;
Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
assert(TInfo);
TL.getValueLoc().initializeFullCopy(TInfo->getTypeLoc());
} else {
TL.setKWLoc(DS.getAtomicSpecLoc());
// No parens, to indicate this was spelled as an _Atomic qualifier.
TL.setParensRange(SourceRange());
Visit(TL.getValueLoc());
}
}
void VisitPipeTypeLoc(PipeTypeLoc TL) {
TL.setKWLoc(DS.getTypeSpecTypeLoc());
TypeSourceInfo *TInfo = nullptr;
Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
TL.getValueLoc().initializeFullCopy(TInfo->getTypeLoc());
}
void VisitTypeLoc(TypeLoc TL) {
// FIXME: add other typespec types and change this to an assert.
TL.initialize(Context, DS.getTypeSpecTypeLoc());
}
};
class DeclaratorLocFiller : public TypeLocVisitor<DeclaratorLocFiller> {
ASTContext &Context;
TypeProcessingState &State;
const DeclaratorChunk &Chunk;
public:
DeclaratorLocFiller(ASTContext &Context, TypeProcessingState &State,
const DeclaratorChunk &Chunk)
: Context(Context), State(State), Chunk(Chunk) {}
void VisitQualifiedTypeLoc(QualifiedTypeLoc TL) {
llvm_unreachable("qualified type locs not expected here!");
}
void VisitDecayedTypeLoc(DecayedTypeLoc TL) {
llvm_unreachable("decayed type locs not expected here!");
}
void VisitAttributedTypeLoc(AttributedTypeLoc TL) {
fillAttributedTypeLoc(TL, State);
}
void VisitAdjustedTypeLoc(AdjustedTypeLoc TL) {
// nothing
}
void VisitBlockPointerTypeLoc(BlockPointerTypeLoc TL) {
assert(Chunk.Kind == DeclaratorChunk::BlockPointer);
TL.setCaretLoc(Chunk.Loc);
}
void VisitPointerTypeLoc(PointerTypeLoc TL) {
assert(Chunk.Kind == DeclaratorChunk::Pointer);
TL.setStarLoc(Chunk.Loc);
}
void VisitObjCObjectPointerTypeLoc(ObjCObjectPointerTypeLoc TL) {
assert(Chunk.Kind == DeclaratorChunk::Pointer);
TL.setStarLoc(Chunk.Loc);
}
void VisitMemberPointerTypeLoc(MemberPointerTypeLoc TL) {
assert(Chunk.Kind == DeclaratorChunk::MemberPointer);
const CXXScopeSpec& SS = Chunk.Mem.Scope();
NestedNameSpecifierLoc NNSLoc = SS.getWithLocInContext(Context);
const Type* ClsTy = TL.getClass();
QualType ClsQT = QualType(ClsTy, 0);
TypeSourceInfo *ClsTInfo = Context.CreateTypeSourceInfo(ClsQT, 0);
// Now copy source location info into the type loc component.
TypeLoc ClsTL = ClsTInfo->getTypeLoc();
switch (NNSLoc.getNestedNameSpecifier()->getKind()) {
case NestedNameSpecifier::Identifier:
assert(isa<DependentNameType>(ClsTy) && "Unexpected TypeLoc");
{
DependentNameTypeLoc DNTLoc = ClsTL.castAs<DependentNameTypeLoc>();
DNTLoc.setElaboratedKeywordLoc(SourceLocation());
DNTLoc.setQualifierLoc(NNSLoc.getPrefix());
DNTLoc.setNameLoc(NNSLoc.getLocalBeginLoc());
}
break;
case NestedNameSpecifier::TypeSpec:
case NestedNameSpecifier::TypeSpecWithTemplate:
if (isa<ElaboratedType>(ClsTy)) {
ElaboratedTypeLoc ETLoc = ClsTL.castAs<ElaboratedTypeLoc>();
ETLoc.setElaboratedKeywordLoc(SourceLocation());
ETLoc.setQualifierLoc(NNSLoc.getPrefix());
TypeLoc NamedTL = ETLoc.getNamedTypeLoc();
NamedTL.initializeFullCopy(NNSLoc.getTypeLoc());
} else {
ClsTL.initializeFullCopy(NNSLoc.getTypeLoc());
}
break;
case NestedNameSpecifier::Namespace:
case NestedNameSpecifier::NamespaceAlias:
case NestedNameSpecifier::Global:
case NestedNameSpecifier::Super:
llvm_unreachable("Nested-name-specifier must name a type");
}
// Finally fill in MemberPointerLocInfo fields.
TL.setStarLoc(Chunk.Loc);
TL.setClassTInfo(ClsTInfo);
}
void VisitLValueReferenceTypeLoc(LValueReferenceTypeLoc TL) {
assert(Chunk.Kind == DeclaratorChunk::Reference);
// 'Amp' is misleading: this might have been originally
// spelled with AmpAmp.
TL.setAmpLoc(Chunk.Loc);
}
void VisitRValueReferenceTypeLoc(RValueReferenceTypeLoc TL) {
assert(Chunk.Kind == DeclaratorChunk::Reference);
assert(!Chunk.Ref.LValueRef);
TL.setAmpAmpLoc(Chunk.Loc);
}
void VisitArrayTypeLoc(ArrayTypeLoc TL) {
assert(Chunk.Kind == DeclaratorChunk::Array);
TL.setLBracketLoc(Chunk.Loc);
TL.setRBracketLoc(Chunk.EndLoc);
TL.setSizeExpr(static_cast<Expr*>(Chunk.Arr.NumElts));
}
void VisitFunctionTypeLoc(FunctionTypeLoc TL) {
assert(Chunk.Kind == DeclaratorChunk::Function);
TL.setLocalRangeBegin(Chunk.Loc);
TL.setLocalRangeEnd(Chunk.EndLoc);
const DeclaratorChunk::FunctionTypeInfo &FTI = Chunk.Fun;
TL.setLParenLoc(FTI.getLParenLoc());
TL.setRParenLoc(FTI.getRParenLoc());
for (unsigned i = 0, e = TL.getNumParams(), tpi = 0; i != e; ++i) {
ParmVarDecl *Param = cast<ParmVarDecl>(FTI.Params[i].Param);
TL.setParam(tpi++, Param);
}
TL.setExceptionSpecRange(FTI.getExceptionSpecRange());
}
void VisitParenTypeLoc(ParenTypeLoc TL) {
assert(Chunk.Kind == DeclaratorChunk::Paren);
TL.setLParenLoc(Chunk.Loc);
TL.setRParenLoc(Chunk.EndLoc);
}
void VisitPipeTypeLoc(PipeTypeLoc TL) {
assert(Chunk.Kind == DeclaratorChunk::Pipe);
TL.setKWLoc(Chunk.Loc);
}
void VisitMacroQualifiedTypeLoc(MacroQualifiedTypeLoc TL) {
TL.setExpansionLoc(Chunk.Loc);
}
void VisitTypeLoc(TypeLoc TL) {
llvm_unreachable("unsupported TypeLoc kind in declarator!");
}
};
} // end anonymous namespace
static void fillAtomicQualLoc(AtomicTypeLoc ATL, const DeclaratorChunk &Chunk) {
SourceLocation Loc;
switch (Chunk.Kind) {
case DeclaratorChunk::Function:
case DeclaratorChunk::Array:
case DeclaratorChunk::Paren:
case DeclaratorChunk::Pipe:
llvm_unreachable("cannot be _Atomic qualified");
case DeclaratorChunk::Pointer:
Loc = SourceLocation::getFromRawEncoding(Chunk.Ptr.AtomicQualLoc);
break;
case DeclaratorChunk::BlockPointer:
case DeclaratorChunk::Reference:
case DeclaratorChunk::MemberPointer:
// FIXME: Provide a source location for the _Atomic keyword.
break;
}
ATL.setKWLoc(Loc);
ATL.setParensRange(SourceRange());
}
static void
fillDependentAddressSpaceTypeLoc(DependentAddressSpaceTypeLoc DASTL,
const ParsedAttributesView &Attrs) {
for (const ParsedAttr &AL : Attrs) {
if (AL.getKind() == ParsedAttr::AT_AddressSpace) {
DASTL.setAttrNameLoc(AL.getLoc());
DASTL.setAttrExprOperand(AL.getArgAsExpr(0));
DASTL.setAttrOperandParensRange(SourceRange());
return;
}
}
llvm_unreachable(
"no address_space attribute found at the expected location!");
}
/// Create and instantiate a TypeSourceInfo with type source information.
///
/// \param T QualType referring to the type as written in source code.
///
/// \param ReturnTypeInfo For declarators whose return type does not show
/// up in the normal place in the declaration specifiers (such as a C++
/// conversion function), this pointer will refer to a type source information
/// for that return type.
static TypeSourceInfo *
GetTypeSourceInfoForDeclarator(TypeProcessingState &State,
QualType T, TypeSourceInfo *ReturnTypeInfo) {
Sema &S = State.getSema();
Declarator &D = State.getDeclarator();
TypeSourceInfo *TInfo = S.Context.CreateTypeSourceInfo(T);
UnqualTypeLoc CurrTL = TInfo->getTypeLoc().getUnqualifiedLoc();
// Handle parameter packs whose type is a pack expansion.
if (isa<PackExpansionType>(T)) {
CurrTL.castAs<PackExpansionTypeLoc>().setEllipsisLoc(D.getEllipsisLoc());
CurrTL = CurrTL.getNextTypeLoc().getUnqualifiedLoc();
}
for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) {
// An AtomicTypeLoc might be produced by an atomic qualifier in this
// declarator chunk.
if (AtomicTypeLoc ATL = CurrTL.getAs<AtomicTypeLoc>()) {
fillAtomicQualLoc(ATL, D.getTypeObject(i));
CurrTL = ATL.getValueLoc().getUnqualifiedLoc();
}
while (MacroQualifiedTypeLoc TL = CurrTL.getAs<MacroQualifiedTypeLoc>()) {
TL.setExpansionLoc(
State.getExpansionLocForMacroQualifiedType(TL.getTypePtr()));
CurrTL = TL.getNextTypeLoc().getUnqualifiedLoc();
}
while (AttributedTypeLoc TL = CurrTL.getAs<AttributedTypeLoc>()) {
fillAttributedTypeLoc(TL, State);
CurrTL = TL.getNextTypeLoc().getUnqualifiedLoc();
}
while (DependentAddressSpaceTypeLoc TL =
CurrTL.getAs<DependentAddressSpaceTypeLoc>()) {
fillDependentAddressSpaceTypeLoc(TL, D.getTypeObject(i).getAttrs());
CurrTL = TL.getPointeeTypeLoc().getUnqualifiedLoc();
}
// FIXME: Ordering here?
while (AdjustedTypeLoc TL = CurrTL.getAs<AdjustedTypeLoc>())
CurrTL = TL.getNextTypeLoc().getUnqualifiedLoc();
DeclaratorLocFiller(S.Context, State, D.getTypeObject(i)).Visit(CurrTL);
CurrTL = CurrTL.getNextTypeLoc().getUnqualifiedLoc();
}
// If we have different source information for the return type, use
// that. This really only applies to C++ conversion functions.
if (ReturnTypeInfo) {
TypeLoc TL = ReturnTypeInfo->getTypeLoc();
assert(TL.getFullDataSize() == CurrTL.getFullDataSize());
memcpy(CurrTL.getOpaqueData(), TL.getOpaqueData(), TL.getFullDataSize());
} else {
TypeSpecLocFiller(S.Context, State, D.getDeclSpec()).Visit(CurrTL);
}
return TInfo;
}
/// Create a LocInfoType to hold the given QualType and TypeSourceInfo.
ParsedType Sema::CreateParsedType(QualType T, TypeSourceInfo *TInfo) {
// FIXME: LocInfoTypes are "transient", only needed for passing to/from Parser
// and Sema during declaration parsing. Try deallocating/caching them when
// it's appropriate, instead of allocating them and keeping them around.
LocInfoType *LocT = (LocInfoType*)BumpAlloc.Allocate(sizeof(LocInfoType),
TypeAlignment);
new (LocT) LocInfoType(T, TInfo);
assert(LocT->getTypeClass() != T->getTypeClass() &&
"LocInfoType's TypeClass conflicts with an existing Type class");
return ParsedType::make(QualType(LocT, 0));
}
void LocInfoType::getAsStringInternal(std::string &Str,
const PrintingPolicy &Policy) const {
llvm_unreachable("LocInfoType leaked into the type system; an opaque TypeTy*"
" was used directly instead of getting the QualType through"
" GetTypeFromParser");
}
TypeResult Sema::ActOnTypeName(Scope *S, Declarator &D) {
// C99 6.7.6: Type names have no identifier. This is already validated by
// the parser.
assert(D.getIdentifier() == nullptr &&
"Type name should have no identifier!");
TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
QualType T = TInfo->getType();
if (D.isInvalidType())
return true;
// Make sure there are no unused decl attributes on the declarator.
// We don't want to do this for ObjC parameters because we're going
// to apply them to the actual parameter declaration.
// Likewise, we don't want to do this for alias declarations, because
// we are actually going to build a declaration from this eventually.
if (D.getContext() != DeclaratorContext::ObjCParameterContext &&
D.getContext() != DeclaratorContext::AliasDeclContext &&
D.getContext() != DeclaratorContext::AliasTemplateContext)
checkUnusedDeclAttributes(D);
if (getLangOpts().CPlusPlus) {
// Check that there are no default arguments (C++ only).
CheckExtraCXXDefaultArguments(D);
}
return CreateParsedType(T, TInfo);
}
ParsedType Sema::ActOnObjCInstanceType(SourceLocation Loc) {
QualType T = Context.getObjCInstanceType();
TypeSourceInfo *TInfo = Context.getTrivialTypeSourceInfo(T, Loc);
return CreateParsedType(T, TInfo);
}
//===----------------------------------------------------------------------===//
// Type Attribute Processing
//===----------------------------------------------------------------------===//
/// Build an AddressSpace index from a constant expression and diagnose any
/// errors related to invalid address_spaces. Returns true on successfully
/// building an AddressSpace index.
static bool BuildAddressSpaceIndex(Sema &S, LangAS &ASIdx,
const Expr *AddrSpace,
SourceLocation AttrLoc) {
if (!AddrSpace->isValueDependent()) {
llvm::APSInt addrSpace(32);
if (!AddrSpace->isIntegerConstantExpr(addrSpace, S.Context)) {
S.Diag(AttrLoc, diag::err_attribute_argument_type)
<< "'address_space'" << AANT_ArgumentIntegerConstant
<< AddrSpace->getSourceRange();
return false;
}
// Bounds checking.
if (addrSpace.isSigned()) {
if (addrSpace.isNegative()) {
S.Diag(AttrLoc, diag::err_attribute_address_space_negative)
<< AddrSpace->getSourceRange();
return false;
}
addrSpace.setIsSigned(false);
}
llvm::APSInt max(addrSpace.getBitWidth());
max =
Qualifiers::MaxAddressSpace - (unsigned)LangAS::FirstTargetAddressSpace;
if (addrSpace > max) {
S.Diag(AttrLoc, diag::err_attribute_address_space_too_high)
<< (unsigned)max.getZExtValue() << AddrSpace->getSourceRange();
return false;
}
ASIdx =
getLangASFromTargetAS(static_cast<unsigned>(addrSpace.getZExtValue()));
return true;
}
// Default value for DependentAddressSpaceTypes
ASIdx = LangAS::Default;
return true;
}
/// BuildAddressSpaceAttr - Builds a DependentAddressSpaceType if an expression
/// is uninstantiated. If instantiated it will apply the appropriate address
/// space to the type. This function allows dependent template variables to be
/// used in conjunction with the address_space attribute.
QualType Sema::BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc) {
if (!AddrSpace->isValueDependent()) {
if (DiagnoseMultipleAddrSpaceAttributes(*this, T.getAddressSpace(), ASIdx,
AttrLoc))
return QualType();
return Context.getAddrSpaceQualType(T, ASIdx);
}
// This mirrors the check for a type that already has an address space,
// but for dependent types: if the current type is already a
// DependentAddressSpaceType, it is already lined up to receive another
// address space, and we cannot have multiple address spaces on the same
// pointer indirection.
if (T->getAs<DependentAddressSpaceType>()) {
Diag(AttrLoc, diag::err_attribute_address_multiple_qualifiers);
return QualType();
}
return Context.getDependentAddressSpaceType(T, AddrSpace, AttrLoc);
}
QualType Sema::BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc) {
LangAS ASIdx;
if (!BuildAddressSpaceIndex(*this, ASIdx, AddrSpace, AttrLoc))
return QualType();
return BuildAddressSpaceAttr(T, ASIdx, AddrSpace, AttrLoc);
}
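// Typical (hypothetical) uses from source code:
//   __attribute__((address_space(1))) int *p;   // concrete: the qualifier
//                                                // is applied directly
//   template <int N> struct X {
//     __attribute__((address_space(N))) int *q; // dependent: builds a
//   };                                           // DependentAddressSpaceType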
/// HandleAddressSpaceTypeAttribute - Process an address_space attribute on the
/// specified type. The attribute contains 1 argument, the id of the address
/// space for the type.
static void HandleAddressSpaceTypeAttribute(QualType &Type,
const ParsedAttr &Attr,
TypeProcessingState &State) {
Sema &S = State.getSema();
// ISO/IEC TR 18037 S5.3 (amending C99 6.7.3): "A function type shall not be
// qualified by an address-space qualifier."
if (Type->isFunctionType()) {
S.Diag(Attr.getLoc(), diag::err_attribute_address_function_type);
Attr.setInvalid();
return;
}
LangAS ASIdx;
if (Attr.getKind() == ParsedAttr::AT_AddressSpace) {
// Check the attribute arguments.
if (Attr.getNumArgs() != 1) {
S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << Attr
<< 1;
Attr.setInvalid();
return;
}
Expr *ASArgExpr;
if (Attr.isArgIdent(0)) {
// Special case where the argument is a template id.
CXXScopeSpec SS;
SourceLocation TemplateKWLoc;
UnqualifiedId id;
id.setIdentifier(Attr.getArgAsIdent(0)->Ident, Attr.getLoc());
ExprResult AddrSpace = S.ActOnIdExpression(
S.getCurScope(), SS, TemplateKWLoc, id, /*HasTrailingLParen=*/false,
/*IsAddressOfOperand=*/false);
if (AddrSpace.isInvalid())
return;
ASArgExpr = static_cast<Expr *>(AddrSpace.get());
} else {
ASArgExpr = static_cast<Expr *>(Attr.getArgAsExpr(0));
}
LangAS ASIdx;
if (!BuildAddressSpaceIndex(S, ASIdx, ASArgExpr, Attr.getLoc())) {
Attr.setInvalid();
return;
}
ASTContext &Ctx = S.Context;
auto *ASAttr = ::new (Ctx) AddressSpaceAttr(
Attr.getRange(), Ctx, Attr.getAttributeSpellingListIndex(),
static_cast<unsigned>(ASIdx));
// If the expression is not value dependent (not templated), then we can
// apply the address space qualifiers just to the equivalent type.
// Otherwise, we make an AttributedType with the modified and equivalent
// type the same, and wrap it in a DependentAddressSpaceType. When this
// dependent type is resolved, the qualifier is added to the equivalent type
// later.
QualType T;
if (!ASArgExpr->isValueDependent()) {
QualType EquivType =
S.BuildAddressSpaceAttr(Type, ASIdx, ASArgExpr, Attr.getLoc());
if (EquivType.isNull()) {
Attr.setInvalid();
return;
}
T = State.getAttributedType(ASAttr, Type, EquivType);
} else {
T = State.getAttributedType(ASAttr, Type, Type);
T = S.BuildAddressSpaceAttr(T, ASIdx, ASArgExpr, Attr.getLoc());
}
if (!T.isNull())
Type = T;
else
Attr.setInvalid();
} else {
// The keyword-based type attributes imply which address space to use.
ASIdx = Attr.asOpenCLLangAS();
if (ASIdx == LangAS::Default)
llvm_unreachable("Invalid address space");
if (DiagnoseMultipleAddrSpaceAttributes(S, Type.getAddressSpace(), ASIdx,
Attr.getLoc())) {
Attr.setInvalid();
return;
}
Type = S.Context.getAddrSpaceQualType(Type, ASIdx);
}
}
/// Does this type have a "direct" ownership qualifier? That is,
/// is it written like "__strong id", as opposed to something like
/// "typeof(foo)", where that happens to be strong?
static bool hasDirectOwnershipQualifier(QualType type) {
// Fast path: no qualifier at all.
assert(type.getQualifiers().hasObjCLifetime());
while (true) {
// __strong id
if (const AttributedType *attr = dyn_cast<AttributedType>(type)) {
if (attr->getAttrKind() == attr::ObjCOwnership)
return true;
type = attr->getModifiedType();
// X *__strong (...)
} else if (const ParenType *paren = dyn_cast<ParenType>(type)) {
type = paren->getInnerType();
// That's it for things we want to complain about. In particular,
// we do not want to look through typedefs, typeof(expr),
// typeof(type), or any other way that the type is somehow
// abstracted.
} else {
return false;
}
}
}
/// handleObjCOwnershipTypeAttr - Process an objc_ownership
/// attribute on the specified type.
///
/// Returns 'true' if the attribute was handled.
static bool handleObjCOwnershipTypeAttr(TypeProcessingState &state,
ParsedAttr &attr, QualType &type) {
bool NonObjCPointer = false;
if (!type->isDependentType() && !type->isUndeducedType()) {
if (const PointerType *ptr = type->getAs<PointerType>()) {
QualType pointee = ptr->getPointeeType();
if (pointee->isObjCRetainableType() || pointee->isPointerType())
return false;
// It is important not to lose the source info that there was an attribute
// applied to a non-ObjC pointer. We will create an attributed type but
// its type will be the same as the original type.
NonObjCPointer = true;
} else if (!type->isObjCRetainableType()) {
return false;
}
// Don't accept an ownership attribute in the declspec if it would
// just be the return type of a block pointer.
if (state.isProcessingDeclSpec()) {
Declarator &D = state.getDeclarator();
if (maybeMovePastReturnType(D, D.getNumTypeObjects(),
/*onlyBlockPointers=*/true))
return false;
}
}
Sema &S = state.getSema();
SourceLocation AttrLoc = attr.getLoc();
if (AttrLoc.isMacroID())
AttrLoc =
S.getSourceManager().getImmediateExpansionRange(AttrLoc).getBegin();
if (!attr.isArgIdent(0)) {
S.Diag(AttrLoc, diag::err_attribute_argument_type) << attr
<< AANT_ArgumentString;
attr.setInvalid();
return true;
}
IdentifierInfo *II = attr.getArgAsIdent(0)->Ident;
Qualifiers::ObjCLifetime lifetime;
if (II->isStr("none"))
lifetime = Qualifiers::OCL_ExplicitNone;
else if (II->isStr("strong"))
lifetime = Qualifiers::OCL_Strong;
else if (II->isStr("weak"))
lifetime = Qualifiers::OCL_Weak;
else if (II->isStr("autoreleasing"))
lifetime = Qualifiers::OCL_Autoreleasing;
else {
S.Diag(AttrLoc, diag::warn_attribute_type_not_supported)
<< attr.getName() << II;
attr.setInvalid();
return true;
}
// Just ignore lifetime attributes other than __weak and __unsafe_unretained
// outside of ARC mode.
if (!S.getLangOpts().ObjCAutoRefCount &&
lifetime != Qualifiers::OCL_Weak &&
lifetime != Qualifiers::OCL_ExplicitNone) {
return true;
}
SplitQualType underlyingType = type.split();
// Check for redundant/conflicting ownership qualifiers.
if (Qualifiers::ObjCLifetime previousLifetime
= type.getQualifiers().getObjCLifetime()) {
// If it's written directly, that's an error.
if (hasDirectOwnershipQualifier(type)) {
S.Diag(AttrLoc, diag::err_attr_objc_ownership_redundant)
<< type;
return true;
}
// Otherwise, if the qualifiers actually conflict, pull sugar off
// and remove the ObjCLifetime qualifiers.
if (previousLifetime != lifetime) {
// It's possible to have multiple local ObjCLifetime qualifiers. We
// can't stop after we reach a type that is directly qualified.
const Type *prevTy = nullptr;
while (!prevTy || prevTy != underlyingType.Ty) {
prevTy = underlyingType.Ty;
underlyingType = underlyingType.getSingleStepDesugaredType();
}
underlyingType.Quals.removeObjCLifetime();
}
}
underlyingType.Quals.addObjCLifetime(lifetime);
if (NonObjCPointer) {
StringRef name = attr.getName()->getName();
switch (lifetime) {
case Qualifiers::OCL_None:
case Qualifiers::OCL_ExplicitNone:
break;
case Qualifiers::OCL_Strong: name = "__strong"; break;
case Qualifiers::OCL_Weak: name = "__weak"; break;
case Qualifiers::OCL_Autoreleasing: name = "__autoreleasing"; break;
}
S.Diag(AttrLoc, diag::warn_type_attribute_wrong_type) << name
<< TDS_ObjCObjOrBlock << type;
}
// Don't actually add the __unsafe_unretained qualifier in non-ARC files,
// because having both 'T' and '__unsafe_unretained T' exist in the type
// system causes unfortunate widespread consistency problems. (For example,
// they're not considered compatible types, and we mangle them identically
// as template arguments.) These problems are all individually fixable,
// but it's easier to just not add the qualifier and instead sniff it out
// in specific places using isObjCInertUnsafeUnretainedType().
//
// Doing this does mean we miss some trivial consistency checks that
// would've triggered in ARC, but that's better than trying to solve all
// the coexistence problems with __unsafe_unretained.
if (!S.getLangOpts().ObjCAutoRefCount &&
lifetime == Qualifiers::OCL_ExplicitNone) {
type = state.getAttributedType(
createSimpleAttr<ObjCInertUnsafeUnretainedAttr>(S.Context, attr),
type, type);
return true;
}
QualType origType = type;
if (!NonObjCPointer)
type = S.Context.getQualifiedType(underlyingType);
// If we have a valid source location for the attribute, use an
// AttributedType instead.
if (AttrLoc.isValid()) {
type = state.getAttributedType(::new (S.Context) ObjCOwnershipAttr(
attr.getRange(), S.Context, II,
attr.getAttributeSpellingListIndex()),
origType, type);
}
auto diagnoseOrDelay = [](Sema &S, SourceLocation loc,
unsigned diagnostic, QualType type) {
if (S.DelayedDiagnostics.shouldDelayDiagnostics()) {
S.DelayedDiagnostics.add(
sema::DelayedDiagnostic::makeForbiddenType(
S.getSourceManager().getExpansionLoc(loc),
diagnostic, type, /*ignored*/ 0));
} else {
S.Diag(loc, diagnostic);
}
};
// Sometimes, __weak isn't allowed.
if (lifetime == Qualifiers::OCL_Weak &&
!S.getLangOpts().ObjCWeak && !NonObjCPointer) {
// Use a specialized diagnostic if the runtime just doesn't support them.
unsigned diagnostic =
(S.getLangOpts().ObjCWeakRuntime ? diag::err_arc_weak_disabled
: diag::err_arc_weak_no_runtime);
// In any case, delay the diagnostic until we know what we're parsing.
diagnoseOrDelay(S, AttrLoc, diagnostic, type);
attr.setInvalid();
return true;
}
// Forbid __weak for class objects marked as
// objc_arc_weak_reference_unavailable
if (lifetime == Qualifiers::OCL_Weak) {
if (const ObjCObjectPointerType *ObjT =
type->getAs<ObjCObjectPointerType>()) {
if (ObjCInterfaceDecl *Class = ObjT->getInterfaceDecl()) {
if (Class->isArcWeakrefUnavailable()) {
S.Diag(AttrLoc, diag::err_arc_unsupported_weak_class);
S.Diag(ObjT->getInterfaceDecl()->getLocation(),
diag::note_class_declared);
}
}
}
}
return true;
}
/// handleObjCGCTypeAttr - Process the __attribute__((objc_gc)) type
/// attribute on the specified type. Returns true to indicate that
/// the attribute was handled, false to indicate that the type does
/// not permit the attribute.
static bool handleObjCGCTypeAttr(TypeProcessingState &state, ParsedAttr &attr,
QualType &type) {
Sema &S = state.getSema();
// Delay if this isn't some kind of pointer.
if (!type->isPointerType() &&
!type->isObjCObjectPointerType() &&
!type->isBlockPointerType())
return false;
if (type.getObjCGCAttr() != Qualifiers::GCNone) {
S.Diag(attr.getLoc(), diag::err_attribute_multiple_objc_gc);
attr.setInvalid();
return true;
}
// Check the attribute arguments.
if (!attr.isArgIdent(0)) {
S.Diag(attr.getLoc(), diag::err_attribute_argument_type)
<< attr << AANT_ArgumentString;
attr.setInvalid();
return true;
}
Qualifiers::GC GCAttr;
if (attr.getNumArgs() > 1) {
S.Diag(attr.getLoc(), diag::err_attribute_wrong_number_arguments) << attr
<< 1;
attr.setInvalid();
return true;
}
IdentifierInfo *II = attr.getArgAsIdent(0)->Ident;
if (II->isStr("weak"))
GCAttr = Qualifiers::Weak;
else if (II->isStr("strong"))
GCAttr = Qualifiers::Strong;
else {
S.Diag(attr.getLoc(), diag::warn_attribute_type_not_supported)
<< attr.getName() << II;
attr.setInvalid();
return true;
}
QualType origType = type;
type = S.Context.getObjCGCQualType(origType, GCAttr);
// Make an attributed type to preserve the source information.
if (attr.getLoc().isValid())
type = state.getAttributedType(
::new (S.Context) ObjCGCAttr(attr.getRange(), S.Context, II,
attr.getAttributeSpellingListIndex()),
origType, type);
return true;
}
namespace {
/// A helper class to unwrap a type down to a function for the
/// purposes of applying attributes there.
///
/// Use:
/// FunctionTypeUnwrapper unwrapped(SemaRef, T);
/// if (unwrapped.isFunctionType()) {
/// const FunctionType *fn = unwrapped.get();
/// // change fn somehow
/// T = unwrapped.wrap(fn);
/// }
struct FunctionTypeUnwrapper {
enum WrapKind {
Desugar,
Attributed,
Parens,
Pointer,
BlockPointer,
Reference,
- MemberPointer
+ MemberPointer,
+ MacroQualified,
};
QualType Original;
const FunctionType *Fn;
SmallVector<unsigned char /*WrapKind*/, 8> Stack;
FunctionTypeUnwrapper(Sema &S, QualType T) : Original(T) {
while (true) {
const Type *Ty = T.getTypePtr();
if (isa<FunctionType>(Ty)) {
Fn = cast<FunctionType>(Ty);
return;
} else if (isa<ParenType>(Ty)) {
T = cast<ParenType>(Ty)->getInnerType();
Stack.push_back(Parens);
} else if (isa<PointerType>(Ty)) {
T = cast<PointerType>(Ty)->getPointeeType();
Stack.push_back(Pointer);
} else if (isa<BlockPointerType>(Ty)) {
T = cast<BlockPointerType>(Ty)->getPointeeType();
Stack.push_back(BlockPointer);
} else if (isa<MemberPointerType>(Ty)) {
T = cast<MemberPointerType>(Ty)->getPointeeType();
Stack.push_back(MemberPointer);
} else if (isa<ReferenceType>(Ty)) {
T = cast<ReferenceType>(Ty)->getPointeeType();
Stack.push_back(Reference);
} else if (isa<AttributedType>(Ty)) {
T = cast<AttributedType>(Ty)->getEquivalentType();
Stack.push_back(Attributed);
+ } else if (isa<MacroQualifiedType>(Ty)) {
+ T = cast<MacroQualifiedType>(Ty)->getUnderlyingType();
+ Stack.push_back(MacroQualified);
} else {
const Type *DTy = Ty->getUnqualifiedDesugaredType();
if (Ty == DTy) {
Fn = nullptr;
return;
}
T = QualType(DTy, 0);
Stack.push_back(Desugar);
}
}
}
bool isFunctionType() const { return (Fn != nullptr); }
const FunctionType *get() const { return Fn; }
QualType wrap(Sema &S, const FunctionType *New) {
// If T wasn't modified from the unwrapped type, do nothing.
if (New == get()) return Original;
Fn = New;
return wrap(S.Context, Original, 0);
}
private:
QualType wrap(ASTContext &C, QualType Old, unsigned I) {
if (I == Stack.size())
return C.getQualifiedType(Fn, Old.getQualifiers());
// Build up the inner type, applying the qualifiers from the old
// type to the new type.
SplitQualType SplitOld = Old.split();
// As a special case, tail-recurse if there are no qualifiers.
if (SplitOld.Quals.empty())
return wrap(C, SplitOld.Ty, I);
return C.getQualifiedType(wrap(C, SplitOld.Ty, I), SplitOld.Quals);
}
QualType wrap(ASTContext &C, const Type *Old, unsigned I) {
if (I == Stack.size()) return QualType(Fn, 0);
switch (static_cast<WrapKind>(Stack[I++])) {
case Desugar:
// This is the point at which we potentially lose source
// information.
return wrap(C, Old->getUnqualifiedDesugaredType(), I);
case Attributed:
return wrap(C, cast<AttributedType>(Old)->getEquivalentType(), I);
case Parens: {
QualType New = wrap(C, cast<ParenType>(Old)->getInnerType(), I);
return C.getParenType(New);
}
+
+ case MacroQualified:
+ return wrap(C, cast<MacroQualifiedType>(Old)->getUnderlyingType(), I);
case Pointer: {
QualType New = wrap(C, cast<PointerType>(Old)->getPointeeType(), I);
return C.getPointerType(New);
}
case BlockPointer: {
QualType New = wrap(C, cast<BlockPointerType>(Old)->getPointeeType(),I);
return C.getBlockPointerType(New);
}
case MemberPointer: {
const MemberPointerType *OldMPT = cast<MemberPointerType>(Old);
QualType New = wrap(C, OldMPT->getPointeeType(), I);
return C.getMemberPointerType(New, OldMPT->getClass());
}
case Reference: {
const ReferenceType *OldRef = cast<ReferenceType>(Old);
QualType New = wrap(C, OldRef->getPointeeType(), I);
if (isa<LValueReferenceType>(OldRef))
return C.getLValueReferenceType(New, OldRef->isSpelledAsLValue());
else
return C.getRValueReferenceType(New);
}
}
llvm_unreachable("unknown wrapping kind");
}
};
} // end anonymous namespace
static bool handleMSPointerTypeQualifierAttr(TypeProcessingState &State,
ParsedAttr &PAttr, QualType &Type) {
Sema &S = State.getSema();
Attr *A;
switch (PAttr.getKind()) {
default: llvm_unreachable("Unknown attribute kind");
case ParsedAttr::AT_Ptr32:
A = createSimpleAttr<Ptr32Attr>(S.Context, PAttr);
break;
case ParsedAttr::AT_Ptr64:
A = createSimpleAttr<Ptr64Attr>(S.Context, PAttr);
break;
case ParsedAttr::AT_SPtr:
A = createSimpleAttr<SPtrAttr>(S.Context, PAttr);
break;
case ParsedAttr::AT_UPtr:
A = createSimpleAttr<UPtrAttr>(S.Context, PAttr);
break;
}
attr::Kind NewAttrKind = A->getKind();
QualType Desugared = Type;
const AttributedType *AT = dyn_cast<AttributedType>(Type);
while (AT) {
attr::Kind CurAttrKind = AT->getAttrKind();
// You cannot specify duplicate type attributes, so if the attribute has
// already been applied, flag it.
if (NewAttrKind == CurAttrKind) {
S.Diag(PAttr.getLoc(), diag::warn_duplicate_attribute_exact)
<< PAttr.getName();
return true;
}
// You cannot have both __sptr and __uptr on the same type, nor can you
// have __ptr32 and __ptr64.
if ((CurAttrKind == attr::Ptr32 && NewAttrKind == attr::Ptr64) ||
(CurAttrKind == attr::Ptr64 && NewAttrKind == attr::Ptr32)) {
S.Diag(PAttr.getLoc(), diag::err_attributes_are_not_compatible)
<< "'__ptr32'" << "'__ptr64'";
return true;
} else if ((CurAttrKind == attr::SPtr && NewAttrKind == attr::UPtr) ||
(CurAttrKind == attr::UPtr && NewAttrKind == attr::SPtr)) {
S.Diag(PAttr.getLoc(), diag::err_attributes_are_not_compatible)
<< "'__sptr'" << "'__uptr'";
return true;
}
Desugared = AT->getEquivalentType();
AT = dyn_cast<AttributedType>(Desugared);
}
// Pointer type qualifiers can only operate on pointer types, but not
// pointer-to-member types.
//
// FIXME: Should we really be disallowing this attribute if there is any
// type sugar between it and the pointer (other than attributes)? Eg, this
// disallows the attribute on a parenthesized pointer.
// And if so, should we really allow *any* type attribute?
if (!isa<PointerType>(Desugared)) {
if (Type->isMemberPointerType())
S.Diag(PAttr.getLoc(), diag::err_attribute_no_member_pointers) << PAttr;
else
S.Diag(PAttr.getLoc(), diag::err_attribute_pointers_only) << PAttr << 0;
return true;
}
Type = State.getAttributedType(A, Type, Type);
return false;
}
/// Map a nullability attribute kind to a nullability kind.
static NullabilityKind mapNullabilityAttrKind(ParsedAttr::Kind kind) {
switch (kind) {
case ParsedAttr::AT_TypeNonNull:
return NullabilityKind::NonNull;
case ParsedAttr::AT_TypeNullable:
return NullabilityKind::Nullable;
case ParsedAttr::AT_TypeNullUnspecified:
return NullabilityKind::Unspecified;
default:
llvm_unreachable("not a nullability attribute kind");
}
}
/// Applies a nullability type specifier to the given type, if possible.
///
/// \param state The type processing state.
///
/// \param type The type to which the nullability specifier will be
/// added. On success, this type will be updated appropriately.
///
/// \param attr The attribute as written on the type.
///
/// \param allowOnArrayType Whether to accept nullability specifiers on an
/// array type (e.g., because it will decay to a pointer).
///
/// \returns true if a problem has been diagnosed, false on success.
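///
/// Illustrative example (not from the original source) of what this check
/// accepts and rejects:
/// \code
///   int * _Nonnull p;            // OK: nullability on a pointer type
///   int _Nonnull i;              // error: nullability on a non-pointer type
///   int * _Nonnull _Nullable q;  // error: conflicting nullability specifiers
/// \endcode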
static bool checkNullabilityTypeSpecifier(TypeProcessingState &state,
QualType &type,
ParsedAttr &attr,
bool allowOnArrayType) {
Sema &S = state.getSema();
NullabilityKind nullability = mapNullabilityAttrKind(attr.getKind());
SourceLocation nullabilityLoc = attr.getLoc();
bool isContextSensitive = attr.isContextSensitiveKeywordAttribute();
recordNullabilitySeen(S, nullabilityLoc);
// Check for existing nullability attributes on the type.
QualType desugared = type;
while (auto attributed = dyn_cast<AttributedType>(desugared.getTypePtr())) {
// Check whether there is already a nullability attribute.
if (auto existingNullability = attributed->getImmediateNullability()) {
// Duplicated nullability.
if (nullability == *existingNullability) {
S.Diag(nullabilityLoc, diag::warn_nullability_duplicate)
<< DiagNullabilityKind(nullability, isContextSensitive)
<< FixItHint::CreateRemoval(nullabilityLoc);
break;
}
// Conflicting nullability.
S.Diag(nullabilityLoc, diag::err_nullability_conflicting)
<< DiagNullabilityKind(nullability, isContextSensitive)
<< DiagNullabilityKind(*existingNullability, false);
return true;
}
desugared = attributed->getModifiedType();
}
// If there is already a different nullability specifier, complain.
// This (unlike the code above) looks through typedefs that might
// have nullability specifiers on them, which means we cannot
// provide a useful Fix-It.
if (auto existingNullability = desugared->getNullability(S.Context)) {
if (nullability != *existingNullability) {
S.Diag(nullabilityLoc, diag::err_nullability_conflicting)
<< DiagNullabilityKind(nullability, isContextSensitive)
<< DiagNullabilityKind(*existingNullability, false);
// Try to find the typedef with the existing nullability specifier.
if (auto typedefType = desugared->getAs<TypedefType>()) {
TypedefNameDecl *typedefDecl = typedefType->getDecl();
QualType underlyingType = typedefDecl->getUnderlyingType();
if (auto typedefNullability
= AttributedType::stripOuterNullability(underlyingType)) {
if (*typedefNullability == *existingNullability) {
S.Diag(typedefDecl->getLocation(), diag::note_nullability_here)
<< DiagNullabilityKind(*existingNullability, false);
}
}
}
return true;
}
}
// If this definitely isn't a pointer type, reject the specifier.
if (!desugared->canHaveNullability() &&
!(allowOnArrayType && desugared->isArrayType())) {
S.Diag(nullabilityLoc, diag::err_nullability_nonpointer)
<< DiagNullabilityKind(nullability, isContextSensitive) << type;
return true;
}
// For the context-sensitive keywords/Objective-C property
// attributes, require that the type be a single-level pointer.
if (isContextSensitive) {
// Make sure that the pointee isn't itself a pointer type.
const Type *pointeeType;
if (desugared->isArrayType())
pointeeType = desugared->getArrayElementTypeNoTypeQual();
else
pointeeType = desugared->getPointeeType().getTypePtr();
if (pointeeType->isAnyPointerType() ||
pointeeType->isObjCObjectPointerType() ||
pointeeType->isMemberPointerType()) {
S.Diag(nullabilityLoc, diag::err_nullability_cs_multilevel)
<< DiagNullabilityKind(nullability, true)
<< type;
S.Diag(nullabilityLoc, diag::note_nullability_type_specifier)
<< DiagNullabilityKind(nullability, false)
<< type
<< FixItHint::CreateReplacement(nullabilityLoc,
getNullabilitySpelling(nullability));
return true;
}
}
// Form the attributed type.
type = state.getAttributedType(
createNullabilityAttr(S.Context, attr, nullability), type, type);
return false;
}
/// Check the application of the Objective-C '__kindof' qualifier to
/// the given type.
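///
/// Illustrative example (not from the original source):
/// \code
///   __kindof NSView *view;  // OK: NSView or any of its subclasses
///   __kindof int x;         // error: '__kindof' on a non-object type
/// \endcode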
static bool checkObjCKindOfType(TypeProcessingState &state, QualType &type,
ParsedAttr &attr) {
Sema &S = state.getSema();
if (isa<ObjCTypeParamType>(type)) {
// Build the attributed type to record where __kindof occurred.
type = state.getAttributedType(
createSimpleAttr<ObjCKindOfAttr>(S.Context, attr), type, type);
return false;
}
// Find out if it's an Objective-C object or object pointer type.
const ObjCObjectPointerType *ptrType = type->getAs<ObjCObjectPointerType>();
const ObjCObjectType *objType = ptrType ? ptrType->getObjectType()
: type->getAs<ObjCObjectType>();
// If not, we can't apply __kindof.
if (!objType) {
// FIXME: Handle dependent types that aren't yet object types.
S.Diag(attr.getLoc(), diag::err_objc_kindof_nonobject)
<< type;
return true;
}
// Rebuild the "equivalent" type, which pushes __kindof down into
// the object type.
// There is no need to apply '__kindof' to an unqualified 'id' type.
QualType equivType = S.Context.getObjCObjectType(
objType->getBaseType(), objType->getTypeArgsAsWritten(),
objType->getProtocols(),
/*isKindOf=*/!objType->isObjCUnqualifiedId());
// If we started with an object pointer type, rebuild it.
if (ptrType) {
equivType = S.Context.getObjCObjectPointerType(equivType);
if (auto nullability = type->getNullability(S.Context)) {
// We create a nullability attribute from the __kindof attribute.
// Make sure that will make sense.
assert(attr.getAttributeSpellingListIndex() == 0 &&
"multiple spellings for __kindof?");
Attr *A = createNullabilityAttr(S.Context, attr, *nullability);
A->setImplicit(true);
equivType = state.getAttributedType(A, equivType, equivType);
}
}
// Build the attributed type to record where __kindof occurred.
type = state.getAttributedType(
createSimpleAttr<ObjCKindOfAttr>(S.Context, attr), type, equivType);
return false;
}
/// Distribute a nullability type attribute that cannot be applied to
/// the type specifier to a pointer, block pointer, or member pointer
/// declarator, complaining if necessary.
///
/// \returns true if the nullability annotation was distributed, false
/// otherwise.
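///
/// Illustrative example (not from the original source): in
/// \code
///   _Nonnull int *ptr;
/// \endcode
/// the specifier cannot apply to 'int', so it is moved to the pointer
/// declarator chunk and a warning with a fix-it suggesting
/// 'int * _Nonnull ptr;' is emitted.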
static bool distributeNullabilityTypeAttr(TypeProcessingState &state,
QualType type, ParsedAttr &attr) {
Declarator &declarator = state.getDeclarator();
/// Attempt to move the attribute to the specified chunk.
auto moveToChunk = [&](DeclaratorChunk &chunk, bool inFunction) -> bool {
// If there is already a nullability attribute there, don't add
// one.
if (hasNullabilityAttr(chunk.getAttrs()))
return false;
// Complain about the nullability qualifier being in the wrong
// place.
enum {
PK_Pointer,
PK_BlockPointer,
PK_MemberPointer,
PK_FunctionPointer,
PK_MemberFunctionPointer,
} pointerKind
= chunk.Kind == DeclaratorChunk::Pointer ? (inFunction ? PK_FunctionPointer
: PK_Pointer)
: chunk.Kind == DeclaratorChunk::BlockPointer ? PK_BlockPointer
: inFunction? PK_MemberFunctionPointer : PK_MemberPointer;
auto diag = state.getSema().Diag(attr.getLoc(),
diag::warn_nullability_declspec)
<< DiagNullabilityKind(mapNullabilityAttrKind(attr.getKind()),
attr.isContextSensitiveKeywordAttribute())
<< type
<< static_cast<unsigned>(pointerKind);
// FIXME: MemberPointer chunks don't carry the location of the *.
if (chunk.Kind != DeclaratorChunk::MemberPointer) {
diag << FixItHint::CreateRemoval(attr.getLoc())
<< FixItHint::CreateInsertion(
state.getSema().getPreprocessor()
.getLocForEndOfToken(chunk.Loc),
" " + attr.getName()->getName().str() + " ");
}
moveAttrFromListToList(attr, state.getCurrentAttributes(),
chunk.getAttrs());
return true;
};
// Move it to the outermost pointer, member pointer, or block
// pointer declarator.
for (unsigned i = state.getCurrentChunkIndex(); i != 0; --i) {
DeclaratorChunk &chunk = declarator.getTypeObject(i-1);
switch (chunk.Kind) {
case DeclaratorChunk::Pointer:
case DeclaratorChunk::BlockPointer:
case DeclaratorChunk::MemberPointer:
return moveToChunk(chunk, false);
case DeclaratorChunk::Paren:
case DeclaratorChunk::Array:
continue;
case DeclaratorChunk::Function:
// Try to move past the return type to a function/block/member
// function pointer.
if (DeclaratorChunk *dest = maybeMovePastReturnType(
declarator, i,
/*onlyBlockPointers=*/false)) {
return moveToChunk(*dest, true);
}
return false;
// Don't walk through these.
case DeclaratorChunk::Reference:
case DeclaratorChunk::Pipe:
return false;
}
}
return false;
}
static Attr *getCCTypeAttr(ASTContext &Ctx, ParsedAttr &Attr) {
assert(!Attr.isInvalid());
switch (Attr.getKind()) {
default:
llvm_unreachable("not a calling convention attribute");
case ParsedAttr::AT_CDecl:
return createSimpleAttr<CDeclAttr>(Ctx, Attr);
case ParsedAttr::AT_FastCall:
return createSimpleAttr<FastCallAttr>(Ctx, Attr);
case ParsedAttr::AT_StdCall:
return createSimpleAttr<StdCallAttr>(Ctx, Attr);
case ParsedAttr::AT_ThisCall:
return createSimpleAttr<ThisCallAttr>(Ctx, Attr);
case ParsedAttr::AT_RegCall:
return createSimpleAttr<RegCallAttr>(Ctx, Attr);
case ParsedAttr::AT_Pascal:
return createSimpleAttr<PascalAttr>(Ctx, Attr);
case ParsedAttr::AT_SwiftCall:
return createSimpleAttr<SwiftCallAttr>(Ctx, Attr);
case ParsedAttr::AT_VectorCall:
return createSimpleAttr<VectorCallAttr>(Ctx, Attr);
case ParsedAttr::AT_AArch64VectorPcs:
return createSimpleAttr<AArch64VectorPcsAttr>(Ctx, Attr);
case ParsedAttr::AT_Pcs: {
// The attribute may have had a fixit applied where we treated an
// identifier as a string literal. The contents of the string are valid,
// but the form may not be.
StringRef Str;
if (Attr.isArgExpr(0))
Str = cast<StringLiteral>(Attr.getArgAsExpr(0))->getString();
else
Str = Attr.getArgAsIdent(0)->Ident->getName();
PcsAttr::PCSType Type;
if (!PcsAttr::ConvertStrToPCSType(Str, Type))
llvm_unreachable("already validated the attribute");
return ::new (Ctx) PcsAttr(Attr.getRange(), Ctx, Type,
Attr.getAttributeSpellingListIndex());
}
case ParsedAttr::AT_IntelOclBicc:
return createSimpleAttr<IntelOclBiccAttr>(Ctx, Attr);
case ParsedAttr::AT_MSABI:
return createSimpleAttr<MSABIAttr>(Ctx, Attr);
case ParsedAttr::AT_SysVABI:
return createSimpleAttr<SysVABIAttr>(Ctx, Attr);
case ParsedAttr::AT_PreserveMost:
return createSimpleAttr<PreserveMostAttr>(Ctx, Attr);
case ParsedAttr::AT_PreserveAll:
return createSimpleAttr<PreserveAllAttr>(Ctx, Attr);
}
llvm_unreachable("unexpected attribute kind!");
}
/// Process an individual function attribute. Returns true to
/// indicate that the attribute was handled, false if it wasn't.
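///
/// Illustrative example (not from the original source): attributes handled
/// here appertain to the function type itself, e.g.
/// \code
///   void fatal(const char *msg) __attribute__((noreturn));
///   int sum(int a, int b) __attribute__((stdcall));
/// \endcode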
static bool handleFunctionTypeAttr(TypeProcessingState &state, ParsedAttr &attr,
QualType &type) {
Sema &S = state.getSema();
FunctionTypeUnwrapper unwrapped(S, type);
if (attr.getKind() == ParsedAttr::AT_NoReturn) {
if (S.CheckAttrNoArgs(attr))
return true;
// Delay if this is not a function type.
if (!unwrapped.isFunctionType())
return false;
// Otherwise we can process right away.
FunctionType::ExtInfo EI = unwrapped.get()->getExtInfo().withNoReturn(true);
type = unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI));
return true;
}
// ns_returns_retained is not always a type attribute, but if we got
// here, we're treating it as one right now.
if (attr.getKind() == ParsedAttr::AT_NSReturnsRetained) {
if (attr.getNumArgs()) return true;
// Delay if this is not a function type.
if (!unwrapped.isFunctionType())
return false;
// Check whether the return type is reasonable.
if (S.checkNSReturnsRetainedReturnType(attr.getLoc(),
unwrapped.get()->getReturnType()))
return true;
// Only actually change the underlying type in ARC builds.
QualType origType = type;
if (state.getSema().getLangOpts().ObjCAutoRefCount) {
FunctionType::ExtInfo EI
= unwrapped.get()->getExtInfo().withProducesResult(true);
type = unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI));
}
type = state.getAttributedType(
createSimpleAttr<NSReturnsRetainedAttr>(S.Context, attr),
origType, type);
return true;
}
if (attr.getKind() == ParsedAttr::AT_AnyX86NoCallerSavedRegisters) {
if (S.CheckAttrTarget(attr) || S.CheckAttrNoArgs(attr))
return true;
// Delay if this is not a function type.
if (!unwrapped.isFunctionType())
return false;
FunctionType::ExtInfo EI =
unwrapped.get()->getExtInfo().withNoCallerSavedRegs(true);
type = unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI));
return true;
}
if (attr.getKind() == ParsedAttr::AT_AnyX86NoCfCheck) {
if (!S.getLangOpts().CFProtectionBranch) {
S.Diag(attr.getLoc(), diag::warn_nocf_check_attribute_ignored);
attr.setInvalid();
return true;
}
if (S.CheckAttrTarget(attr) || S.CheckAttrNoArgs(attr))
return true;
// If this is not a function type, the warning will be emitted by the
// subject check.
if (!unwrapped.isFunctionType())
return true;
FunctionType::ExtInfo EI =
unwrapped.get()->getExtInfo().withNoCfCheck(true);
type = unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI));
return true;
}
if (attr.getKind() == ParsedAttr::AT_Regparm) {
unsigned value;
if (S.CheckRegparmAttr(attr, value))
return true;
// Delay if this is not a function type.
if (!unwrapped.isFunctionType())
return false;
// Diagnose regparm with fastcall.
const FunctionType *fn = unwrapped.get();
CallingConv CC = fn->getCallConv();
if (CC == CC_X86FastCall) {
S.Diag(attr.getLoc(), diag::err_attributes_are_not_compatible)
<< FunctionType::getNameForCallConv(CC)
<< "regparm";
attr.setInvalid();
return true;
}
FunctionType::ExtInfo EI =
unwrapped.get()->getExtInfo().withRegParm(value);
type = unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI));
return true;
}
if (attr.getKind() == ParsedAttr::AT_NoThrow) {
// Delay if this is not a function type.
if (!unwrapped.isFunctionType())
return false;
if (S.CheckAttrNoArgs(attr)) {
attr.setInvalid();
return true;
}
// Otherwise we can process right away.
auto *Proto = unwrapped.get()->castAs<FunctionProtoType>();
// MSVC ignores nothrow if it is in conflict with an explicit exception
// specification.
if (Proto->hasExceptionSpec()) {
switch (Proto->getExceptionSpecType()) {
case EST_None:
llvm_unreachable("This doesn't have an exception spec!");
case EST_DynamicNone:
case EST_BasicNoexcept:
case EST_NoexceptTrue:
case EST_NoThrow:
// Exception spec doesn't conflict with nothrow, so don't warn.
LLVM_FALLTHROUGH;
case EST_Unparsed:
case EST_Uninstantiated:
case EST_DependentNoexcept:
case EST_Unevaluated:
// We don't have enough information to properly determine if there is a
// conflict, so suppress the warning.
break;
case EST_Dynamic:
case EST_MSAny:
case EST_NoexceptFalse:
S.Diag(attr.getLoc(), diag::warn_nothrow_attribute_ignored);
break;
}
return true;
}
type = unwrapped.wrap(
S, S.Context
.getFunctionTypeWithExceptionSpec(
QualType{Proto, 0},
FunctionProtoType::ExceptionSpecInfo{EST_NoThrow})
->getAs<FunctionType>());
return true;
}
// Delay if the type didn't work out to a function.
if (!unwrapped.isFunctionType()) return false;
// Otherwise, a calling convention.
CallingConv CC;
if (S.CheckCallingConvAttr(attr, CC))
return true;
const FunctionType *fn = unwrapped.get();
CallingConv CCOld = fn->getCallConv();
Attr *CCAttr = getCCTypeAttr(S.Context, attr);
if (CCOld != CC) {
// Error out when there's already an attribute on the type
// and the CCs don't match.
if (S.getCallingConvAttributedType(type)) {
S.Diag(attr.getLoc(), diag::err_attributes_are_not_compatible)
<< FunctionType::getNameForCallConv(CC)
<< FunctionType::getNameForCallConv(CCOld);
attr.setInvalid();
return true;
}
}
// Diagnose use of variadic functions with calling conventions that
// don't support them (e.g. because they're callee-cleanup).
// We delay warning about this on unprototyped function declarations
// until after redeclaration checking, just in case we pick up a
// prototype that way. And apparently we also "delay" warning about
// unprototyped function types in general, despite not necessarily having
// much ability to diagnose it later.
if (!supportsVariadicCall(CC)) {
const FunctionProtoType *FnP = dyn_cast<FunctionProtoType>(fn);
if (FnP && FnP->isVariadic()) {
// stdcall and fastcall are ignored with a warning for GCC and MS
// compatibility.
if (CC == CC_X86StdCall || CC == CC_X86FastCall)
return S.Diag(attr.getLoc(), diag::warn_cconv_unsupported)
<< FunctionType::getNameForCallConv(CC)
<< (int)Sema::CallingConventionIgnoredReason::VariadicFunction;
attr.setInvalid();
return S.Diag(attr.getLoc(), diag::err_cconv_varargs)
<< FunctionType::getNameForCallConv(CC);
}
}
// Also diagnose fastcall with regparm.
if (CC == CC_X86FastCall && fn->getHasRegParm()) {
S.Diag(attr.getLoc(), diag::err_attributes_are_not_compatible)
<< "regparm" << FunctionType::getNameForCallConv(CC_X86FastCall);
attr.setInvalid();
return true;
}
// Modify the CC from the wrapped function type, wrap it all back, and then
// wrap the whole thing in an AttributedType as written. The modified type
// might have a different CC if we ignored the attribute.
QualType Equivalent;
if (CCOld == CC) {
Equivalent = type;
} else {
auto EI = unwrapped.get()->getExtInfo().withCallingConv(CC);
Equivalent =
unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI));
}
type = state.getAttributedType(CCAttr, type, Equivalent);
return true;
}
bool Sema::hasExplicitCallingConv(QualType T) {
const AttributedType *AT;
// Stop if we'd be stripping off a typedef sugar node to reach the
// AttributedType.
while ((AT = T->getAs<AttributedType>()) &&
AT->getAs<TypedefType>() == T->getAs<TypedefType>()) {
if (AT->isCallingConv())
return true;
T = AT->getModifiedType();
}
return false;
}
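// Illustrative example (not from the original source): on a 32-bit Windows
// target, the default convention differs for instance and static members, so
// the member types below are adjusted by the routine that follows:
//
//   struct S {
//     void f();         // adjusted to __thiscall
//     static void g();  // stays __cdecl
//   };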
void Sema::adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc) {
FunctionTypeUnwrapper Unwrapped(*this, T);
const FunctionType *FT = Unwrapped.get();
bool IsVariadic = (isa<FunctionProtoType>(FT) &&
cast<FunctionProtoType>(FT)->isVariadic());
CallingConv CurCC = FT->getCallConv();
CallingConv ToCC = Context.getDefaultCallingConvention(IsVariadic, !IsStatic);
if (CurCC == ToCC)
return;
// The MS compiler ignores explicit calling convention attributes on
// structors. We should do the same.
if (Context.getTargetInfo().getCXXABI().isMicrosoft() && IsCtorOrDtor) {
// Issue a warning on ignored calling convention -- except for __stdcall.
// Again, this is what the MS compiler does.
if (CurCC != CC_X86StdCall)
Diag(Loc, diag::warn_cconv_unsupported)
<< FunctionType::getNameForCallConv(CurCC)
<< (int)Sema::CallingConventionIgnoredReason::ConstructorDestructor;
// Default adjustment.
} else {
// Only adjust types with the default convention. For example, on Windows
// we should adjust a __cdecl type to __thiscall for instance methods, and a
// __thiscall type to __cdecl for static methods.
CallingConv DefaultCC =
Context.getDefaultCallingConvention(IsVariadic, IsStatic);
if (CurCC != DefaultCC || DefaultCC == ToCC)
return;
if (hasExplicitCallingConv(T))
return;
}
FT = Context.adjustFunctionType(FT, FT->getExtInfo().withCallingConv(ToCC));
QualType Wrapped = Unwrapped.wrap(*this, FT);
T = Context.getAdjustedType(T, Wrapped);
}
/// HandleVectorSizeAttribute - this attribute is only applicable to integral
/// and float scalars, although arrays, pointers, and function return values are
/// allowed in conjunction with this construct. Aggregates with this attribute
/// are invalid, even if they are of the same size as a corresponding scalar.
/// The raw attribute should contain precisely 1 argument, the vector size for
/// the variable, measured in bytes. If curType and rawAttr are well formed,
/// this routine will return a new vector type.
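///
/// Illustrative example (not from the original source); note that the
/// argument is a size in bytes:
/// \code
///   typedef int v4si __attribute__((vector_size(16)));  // vector of 4 ints
/// \endcode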
static void HandleVectorSizeAttr(QualType &CurType, const ParsedAttr &Attr,
Sema &S) {
// Check the attribute arguments.
if (Attr.getNumArgs() != 1) {
S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << Attr
<< 1;
Attr.setInvalid();
return;
}
Expr *SizeExpr;
// Special case where the argument is a template id.
if (Attr.isArgIdent(0)) {
CXXScopeSpec SS;
SourceLocation TemplateKWLoc;
UnqualifiedId Id;
Id.setIdentifier(Attr.getArgAsIdent(0)->Ident, Attr.getLoc());
ExprResult Size = S.ActOnIdExpression(S.getCurScope(), SS, TemplateKWLoc,
Id, /*HasTrailingLParen=*/false,
/*IsAddressOfOperand=*/false);
if (Size.isInvalid())
return;
SizeExpr = Size.get();
} else {
SizeExpr = Attr.getArgAsExpr(0);
}
QualType T = S.BuildVectorType(CurType, SizeExpr, Attr.getLoc());
if (!T.isNull())
CurType = T;
else
Attr.setInvalid();
}
/// Process the OpenCL-like ext_vector_type attribute when it occurs on
/// a type.
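///
/// Illustrative example (not from the original source); unlike 'vector_size',
/// the argument is an element count:
/// \code
///   typedef float float4 __attribute__((ext_vector_type(4)));
/// \endcode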
static void HandleExtVectorTypeAttr(QualType &CurType, const ParsedAttr &Attr,
Sema &S) {
// check the attribute arguments.
if (Attr.getNumArgs() != 1) {
S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << Attr
<< 1;
return;
}
Expr *sizeExpr;
// Special case where the argument is a template id.
if (Attr.isArgIdent(0)) {
CXXScopeSpec SS;
SourceLocation TemplateKWLoc;
UnqualifiedId id;
id.setIdentifier(Attr.getArgAsIdent(0)->Ident, Attr.getLoc());
ExprResult Size = S.ActOnIdExpression(S.getCurScope(), SS, TemplateKWLoc,
id, /*HasTrailingLParen=*/false,
/*IsAddressOfOperand=*/false);
if (Size.isInvalid())
return;
sizeExpr = Size.get();
} else {
sizeExpr = Attr.getArgAsExpr(0);
}
// Create the vector type.
QualType T = S.BuildExtVectorType(CurType, sizeExpr, Attr.getLoc());
if (!T.isNull())
CurType = T;
}
static bool isPermittedNeonBaseType(QualType &Ty,
VectorType::VectorKind VecKind, Sema &S) {
const BuiltinType *BTy = Ty->getAs<BuiltinType>();
if (!BTy)
return false;
llvm::Triple Triple = S.Context.getTargetInfo().getTriple();
// Signed poly is mathematically wrong, but has been baked into some ABIs by
// now.
bool IsPolyUnsigned = Triple.getArch() == llvm::Triple::aarch64 ||
Triple.getArch() == llvm::Triple::aarch64_be;
if (VecKind == VectorType::NeonPolyVector) {
if (IsPolyUnsigned) {
// AArch64 polynomial vectors are unsigned and support poly64.
return BTy->getKind() == BuiltinType::UChar ||
BTy->getKind() == BuiltinType::UShort ||
BTy->getKind() == BuiltinType::ULong ||
BTy->getKind() == BuiltinType::ULongLong;
} else {
// AArch32 polynomial vectors are signed.
return BTy->getKind() == BuiltinType::SChar ||
BTy->getKind() == BuiltinType::Short;
}
}
// Non-polynomial vector types: the usual suspects are allowed, as well as
// float64_t on AArch64.
bool Is64Bit = Triple.getArch() == llvm::Triple::aarch64 ||
Triple.getArch() == llvm::Triple::aarch64_be;
if (Is64Bit && BTy->getKind() == BuiltinType::Double)
return true;
return BTy->getKind() == BuiltinType::SChar ||
BTy->getKind() == BuiltinType::UChar ||
BTy->getKind() == BuiltinType::Short ||
BTy->getKind() == BuiltinType::UShort ||
BTy->getKind() == BuiltinType::Int ||
BTy->getKind() == BuiltinType::UInt ||
BTy->getKind() == BuiltinType::Long ||
BTy->getKind() == BuiltinType::ULong ||
BTy->getKind() == BuiltinType::LongLong ||
BTy->getKind() == BuiltinType::ULongLong ||
BTy->getKind() == BuiltinType::Float ||
BTy->getKind() == BuiltinType::Half;
}
/// HandleNeonVectorTypeAttr - The "neon_vector_type" and
/// "neon_polyvector_type" attributes are used to create vector types that
/// are mangled according to ARM's ABI. Otherwise, these types are identical
/// to those created with the "vector_size" attribute. Unlike "vector_size"
/// the argument to these Neon attributes is the number of vector elements,
/// not the vector size in bytes. The vector width and element type must
/// match one of the standard Neon vector types.
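///
/// Illustrative example (not from the original source): a 128-bit vector of
/// four 32-bit elements:
/// \code
///   typedef int myvec __attribute__((neon_vector_type(4)));
/// \endcode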
static void HandleNeonVectorTypeAttr(QualType &CurType, const ParsedAttr &Attr,
Sema &S, VectorType::VectorKind VecKind) {
// Target must have NEON
if (!S.Context.getTargetInfo().hasFeature("neon")) {
S.Diag(Attr.getLoc(), diag::err_attribute_unsupported) << Attr;
Attr.setInvalid();
return;
}
// Check the attribute arguments.
if (Attr.getNumArgs() != 1) {
S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << Attr
<< 1;
Attr.setInvalid();
return;
}
// The number of elements must be an ICE.
Expr *numEltsExpr = static_cast<Expr *>(Attr.getArgAsExpr(0));
llvm::APSInt numEltsInt(32);
if (numEltsExpr->isTypeDependent() || numEltsExpr->isValueDependent() ||
!numEltsExpr->isIntegerConstantExpr(numEltsInt, S.Context)) {
S.Diag(Attr.getLoc(), diag::err_attribute_argument_type)
<< Attr << AANT_ArgumentIntegerConstant
<< numEltsExpr->getSourceRange();
Attr.setInvalid();
return;
}
// Only certain element types are supported for Neon vectors.
if (!isPermittedNeonBaseType(CurType, VecKind, S)) {
S.Diag(Attr.getLoc(), diag::err_attribute_invalid_vector_type) << CurType;
Attr.setInvalid();
return;
}
// The total size of the vector must be 64 or 128 bits.
unsigned typeSize = static_cast<unsigned>(S.Context.getTypeSize(CurType));
unsigned numElts = static_cast<unsigned>(numEltsInt.getZExtValue());
unsigned vecSize = typeSize * numElts;
if (vecSize != 64 && vecSize != 128) {
S.Diag(Attr.getLoc(), diag::err_attribute_bad_neon_vector_size) << CurType;
Attr.setInvalid();
return;
}
CurType = S.Context.getVectorType(CurType, numElts, VecKind);
}
/// Handle OpenCL Access Qualifier Attribute.
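///
/// Illustrative example (not from the original source), in OpenCL:
/// \code
///   kernel void copy(read_only image2d_t src, write_only image2d_t dst);
/// \endcode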
static void HandleOpenCLAccessAttr(QualType &CurType, const ParsedAttr &Attr,
Sema &S) {
// OpenCL v2.0 s6.6 - Access qualifiers can be used only for image and pipe types.
if (!(CurType->isImageType() || CurType->isPipeType())) {
S.Diag(Attr.getLoc(), diag::err_opencl_invalid_access_qualifier);
Attr.setInvalid();
return;
}
if (const TypedefType* TypedefTy = CurType->getAs<TypedefType>()) {
QualType BaseTy = TypedefTy->desugar();
std::string PrevAccessQual;
if (BaseTy->isPipeType()) {
if (TypedefTy->getDecl()->hasAttr<OpenCLAccessAttr>()) {
OpenCLAccessAttr *Attr =
TypedefTy->getDecl()->getAttr<OpenCLAccessAttr>();
PrevAccessQual = Attr->getSpelling();
} else {
PrevAccessQual = "read_only";
}
} else if (const BuiltinType* ImgType = BaseTy->getAs<BuiltinType>()) {
switch (ImgType->getKind()) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
case BuiltinType::Id: \
PrevAccessQual = #Access; \
break;
#include "clang/Basic/OpenCLImageTypes.def"
default:
llvm_unreachable("Unable to find corresponding image type.");
}
} else {
llvm_unreachable("unexpected type");
}
StringRef AttrName = Attr.getName()->getName();
if (PrevAccessQual == AttrName.ltrim("_")) {
// Duplicated qualifiers
S.Diag(Attr.getLoc(), diag::warn_duplicate_declspec)
<< AttrName << Attr.getRange();
} else {
// Contradicting qualifiers
S.Diag(Attr.getLoc(), diag::err_opencl_multiple_access_qualifiers);
}
S.Diag(TypedefTy->getDecl()->getBeginLoc(),
diag::note_opencl_typedef_access_qualifier) << PrevAccessQual;
} else if (CurType->isPipeType()) {
if (Attr.getSemanticSpelling() == OpenCLAccessAttr::Keyword_write_only) {
QualType ElemType = CurType->getAs<PipeType>()->getElementType();
CurType = S.Context.getWritePipeType(ElemType);
}
}
}
static void deduceOpenCLImplicitAddrSpace(TypeProcessingState &State,
QualType &T, TypeAttrLocation TAL) {
Declarator &D = State.getDeclarator();
// Handle the cases where address space should not be deduced.
//
// The pointee type of a pointer type is always deduced since a pointer always
// points to some memory location which should have an address space.
//
// There are situations where, at the point of certain declarations, the
// address space may be unknown and is better left as the default. For
// example, a typedef or struct type definition is not associated with any
// specific address space; later on, it may be used with any address space to
// declare a variable.
//
// The return value of a function is an r-value and therefore should not have
// an address space.
//
// The void type does not occupy memory and therefore should not have an
// address space, except when it is used as a pointee type.
//
// Since LLVM assumes function types are in the default address space, they
// should not have an address space.
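//
// Illustrative example (not from the original source): under OpenCL 2.0, a
// program-scope declaration 'int *p;' is deduced roughly as
// '__generic int *__global p': the pointee gets the generic address space
// and the variable itself the global one.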
auto ChunkIndex = State.getCurrentChunkIndex();
bool IsPointee =
ChunkIndex > 0 &&
(D.getTypeObject(ChunkIndex - 1).Kind == DeclaratorChunk::Pointer ||
D.getTypeObject(ChunkIndex - 1).Kind == DeclaratorChunk::Reference ||
D.getTypeObject(ChunkIndex - 1).Kind == DeclaratorChunk::BlockPointer);
// For pointers/references to arrays the next chunk is always an array
// followed by any number of parentheses.
if (!IsPointee && ChunkIndex > 1) {
auto AdjustedCI = ChunkIndex - 1;
if (D.getTypeObject(AdjustedCI).Kind == DeclaratorChunk::Array)
AdjustedCI--;
// Skip over all parentheses.
while (AdjustedCI > 0 &&
D.getTypeObject(AdjustedCI).Kind == DeclaratorChunk::Paren)
AdjustedCI--;
if (D.getTypeObject(AdjustedCI).Kind == DeclaratorChunk::Pointer ||
D.getTypeObject(AdjustedCI).Kind == DeclaratorChunk::Reference)
IsPointee = true;
}
bool IsFuncReturnType =
ChunkIndex > 0 &&
D.getTypeObject(ChunkIndex - 1).Kind == DeclaratorChunk::Function;
bool IsFuncType =
ChunkIndex < D.getNumTypeObjects() &&
D.getTypeObject(ChunkIndex).Kind == DeclaratorChunk::Function;
if ( // Do not deduce addr space for function return type and function type,
// otherwise it will fail some sema check.
IsFuncReturnType || IsFuncType ||
// Do not deduce addr space for member types of struct, except the pointee
// type of a pointer member type or static data members.
(D.getContext() == DeclaratorContext::MemberContext &&
(!IsPointee &&
D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_static)) ||
// Do not deduce addr space of non-pointee in type alias because it
// doesn't define any object.
(D.getContext() == DeclaratorContext::AliasDeclContext && !IsPointee) ||
// Do not deduce addr space for types used to define a typedef and the
// typedef itself, except the pointee type of a pointer type which is used
// to define the typedef.
(D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef &&
!IsPointee) ||
// Do not deduce addr space of the void type, e.g. in f(void), otherwise
// it will fail some sema check.
(T->isVoidType() && !IsPointee) ||
// Do not deduce addr spaces for dependent types because they might end
// up instantiating to a type with an explicit address space qualifier.
// Except for pointer or reference types because the addr space in
// template argument can only belong to a pointee.
(T->isDependentType() && !T->isPointerType() && !T->isReferenceType()) ||
// Do not deduce addr space of decltype because it will be taken from
// its argument.
T->isDecltypeType() ||
// OpenCL spec v2.0 s6.9.b:
// The sampler type cannot be used with the __local and __global address
// space qualifiers.
// OpenCL spec v2.0 s6.13.14:
// Samplers can also be declared as global constants in the program
// source using the following syntax.
// const sampler_t <sampler name> = <value>
// In codegen, a file-scope sampler type variable has special handling and
// does not rely on an address space qualifier. On the other hand, deducing
// the address space of a const sampler file-scope variable as the global
// address space causes a spurious diagnostic about the __global address
// space qualifier, so do not deduce the address space of a file-scope
// sampler type variable.
(D.getContext() == DeclaratorContext::FileContext && T->isSamplerT()))
return;
LangAS ImpAddr = LangAS::Default;
// Put OpenCL automatic variable in private address space.
// OpenCL v1.2 s6.5:
// The default address space name for arguments to a function in a
// program, or local variables of a function is __private. All function
// arguments shall be in the __private address space.
if (State.getSema().getLangOpts().OpenCLVersion <= 120 &&
!State.getSema().getLangOpts().OpenCLCPlusPlus) {
ImpAddr = LangAS::opencl_private;
} else {
// If the address space is not set, OpenCL 2.0 defines non-private default
// address spaces for some cases:
// OpenCL 2.0, section 6.5:
// The address space for a variable at program scope or a static variable
// inside a function can either be __global or __constant, but defaults to
// __global if not specified.
// (...)
// Pointers that are declared without pointing to a named address space
// point to the generic address space.
if (IsPointee) {
ImpAddr = LangAS::opencl_generic;
} else {
if (D.getContext() == DeclaratorContext::TemplateArgContext) {
// Do not deduce address space for non-pointee type in template arg.
} else if (D.getContext() == DeclaratorContext::FileContext) {
ImpAddr = LangAS::opencl_global;
} else {
if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static ||
D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_extern) {
ImpAddr = LangAS::opencl_global;
} else {
ImpAddr = LangAS::opencl_private;
}
}
}
}
T = State.getSema().Context.getAddrSpaceQualType(T, ImpAddr);
}
static void HandleLifetimeBoundAttr(TypeProcessingState &State,
QualType &CurType,
ParsedAttr &Attr) {
if (State.getDeclarator().isDeclarationOfFunction()) {
CurType = State.getAttributedType(
createSimpleAttr<LifetimeBoundAttr>(State.getSema().Context, Attr),
CurType, CurType);
} else {
Attr.diagnoseAppertainsTo(State.getSema(), nullptr);
}
}
static void processTypeAttrs(TypeProcessingState &state, QualType &type,
TypeAttrLocation TAL,
ParsedAttributesView &attrs) {
// Scan through and apply attributes to this type where it makes sense. Some
// attributes (such as __address_space__, __vector_size__, etc) apply to the
// type, but others can be present in the type specifiers even though they
// apply to the decl. Here we apply type attributes and ignore the rest.
// This loop modifies the list pretty frequently, but we still need to make
// sure we visit every element once. Copy the attributes list, and iterate
// over that.
ParsedAttributesView AttrsCopy{attrs};
state.setParsedNoDeref(false);
for (ParsedAttr &attr : AttrsCopy) {
// Skip attributes that were marked to be invalid.
if (attr.isInvalid())
continue;
if (attr.isCXX11Attribute()) {
// [[gnu::...]] attributes are treated as declaration attributes, so may
// not appertain to a DeclaratorChunk. If we handle them as type
// attributes, accept them in that position and diagnose the GCC
// incompatibility.
if (attr.isGNUScope()) {
bool IsTypeAttr = attr.isTypeAttr();
if (TAL == TAL_DeclChunk) {
state.getSema().Diag(attr.getLoc(),
IsTypeAttr
? diag::warn_gcc_ignores_type_attr
: diag::warn_cxx11_gnu_attribute_on_type)
<< attr.getName();
if (!IsTypeAttr)
continue;
}
} else if (TAL != TAL_DeclChunk &&
attr.getKind() != ParsedAttr::AT_AddressSpace) {
// Otherwise, only consider type processing for a C++11 attribute if
// it's actually been applied to a type.
// We also allow C++11 address_space attributes to pass through.
continue;
}
}
// If this is an attribute we can handle, do so now,
// otherwise, add it to the FnAttrs list for rechaining.
switch (attr.getKind()) {
default:
// A C++11 attribute on a declarator chunk must appertain to a type.
if (attr.isCXX11Attribute() && TAL == TAL_DeclChunk) {
state.getSema().Diag(attr.getLoc(), diag::err_attribute_not_type_attr)
<< attr;
attr.setUsedAsTypeAttr();
}
break;
case ParsedAttr::UnknownAttribute:
if (attr.isCXX11Attribute() && TAL == TAL_DeclChunk)
state.getSema().Diag(attr.getLoc(),
diag::warn_unknown_attribute_ignored)
<< attr.getName();
break;
case ParsedAttr::IgnoredAttribute:
break;
case ParsedAttr::AT_MayAlias:
// FIXME: This attribute needs to actually be handled, but if we ignore
// it, it breaks large amounts of Linux software.
attr.setUsedAsTypeAttr();
break;
case ParsedAttr::AT_OpenCLPrivateAddressSpace:
case ParsedAttr::AT_OpenCLGlobalAddressSpace:
case ParsedAttr::AT_OpenCLLocalAddressSpace:
case ParsedAttr::AT_OpenCLConstantAddressSpace:
case ParsedAttr::AT_OpenCLGenericAddressSpace:
case ParsedAttr::AT_AddressSpace:
HandleAddressSpaceTypeAttribute(type, attr, state);
attr.setUsedAsTypeAttr();
break;
OBJC_POINTER_TYPE_ATTRS_CASELIST:
if (!handleObjCPointerTypeAttr(state, attr, type))
distributeObjCPointerTypeAttr(state, attr, type);
attr.setUsedAsTypeAttr();
break;
case ParsedAttr::AT_VectorSize:
HandleVectorSizeAttr(type, attr, state.getSema());
attr.setUsedAsTypeAttr();
break;
case ParsedAttr::AT_ExtVectorType:
HandleExtVectorTypeAttr(type, attr, state.getSema());
attr.setUsedAsTypeAttr();
break;
case ParsedAttr::AT_NeonVectorType:
HandleNeonVectorTypeAttr(type, attr, state.getSema(),
VectorType::NeonVector);
attr.setUsedAsTypeAttr();
break;
case ParsedAttr::AT_NeonPolyVectorType:
HandleNeonVectorTypeAttr(type, attr, state.getSema(),
VectorType::NeonPolyVector);
attr.setUsedAsTypeAttr();
break;
case ParsedAttr::AT_OpenCLAccess:
HandleOpenCLAccessAttr(type, attr, state.getSema());
attr.setUsedAsTypeAttr();
break;
case ParsedAttr::AT_LifetimeBound:
if (TAL == TAL_DeclChunk)
HandleLifetimeBoundAttr(state, type, attr);
break;
case ParsedAttr::AT_NoDeref: {
ASTContext &Ctx = state.getSema().Context;
type = state.getAttributedType(createSimpleAttr<NoDerefAttr>(Ctx, attr),
type, type);
attr.setUsedAsTypeAttr();
state.setParsedNoDeref(true);
break;
}
MS_TYPE_ATTRS_CASELIST:
if (!handleMSPointerTypeQualifierAttr(state, attr, type))
attr.setUsedAsTypeAttr();
break;
NULLABILITY_TYPE_ATTRS_CASELIST:
// Either add nullability here or try to distribute it. We
// don't want to distribute the nullability specifier past any
// dependent type, because that complicates the user model.
if (type->canHaveNullability() || type->isDependentType() ||
type->isArrayType() ||
!distributeNullabilityTypeAttr(state, type, attr)) {
unsigned endIndex;
if (TAL == TAL_DeclChunk)
endIndex = state.getCurrentChunkIndex();
else
endIndex = state.getDeclarator().getNumTypeObjects();
bool allowOnArrayType =
state.getDeclarator().isPrototypeContext() &&
!hasOuterPointerLikeChunk(state.getDeclarator(), endIndex);
if (checkNullabilityTypeSpecifier(
state,
type,
attr,
allowOnArrayType)) {
attr.setInvalid();
}
attr.setUsedAsTypeAttr();
}
break;
case ParsedAttr::AT_ObjCKindOf:
// '__kindof' must be part of the decl-specifiers.
switch (TAL) {
case TAL_DeclSpec:
break;
case TAL_DeclChunk:
case TAL_DeclName:
state.getSema().Diag(attr.getLoc(),
diag::err_objc_kindof_wrong_position)
<< FixItHint::CreateRemoval(attr.getLoc())
<< FixItHint::CreateInsertion(
state.getDeclarator().getDeclSpec().getBeginLoc(),
"__kindof ");
break;
}
// Apply it regardless.
if (checkObjCKindOfType(state, type, attr))
attr.setInvalid();
break;
case ParsedAttr::AT_NoThrow:
// Exception Specifications aren't generally supported in C mode throughout
// clang, so revert to attribute-based handling for C.
if (!state.getSema().getLangOpts().CPlusPlus)
break;
LLVM_FALLTHROUGH;
FUNCTION_TYPE_ATTRS_CASELIST:
attr.setUsedAsTypeAttr();
// Never process function type attributes as part of the
// declaration-specifiers.
if (TAL == TAL_DeclSpec)
distributeFunctionTypeAttrFromDeclSpec(state, attr, type);
// Otherwise, handle the possible delays.
else if (!handleFunctionTypeAttr(state, attr, type))
distributeFunctionTypeAttr(state, attr, type);
break;
}
// Handle attributes that are defined in a macro. We do not want this to be
// applied to ObjC builtin attributes.
if (isa<AttributedType>(type) && attr.hasMacroIdentifier() &&
!type.getQualifiers().hasObjCLifetime() &&
!type.getQualifiers().hasObjCGCAttr() &&
attr.getKind() != ParsedAttr::AT_ObjCGC &&
attr.getKind() != ParsedAttr::AT_ObjCOwnership) {
const IdentifierInfo *MacroII = attr.getMacroIdentifier();
type = state.getSema().Context.getMacroQualifiedType(type, MacroII);
state.setExpansionLocForMacroQualifiedType(
cast<MacroQualifiedType>(type.getTypePtr()),
attr.getMacroExpansionLoc());
}
}
if (!state.getSema().getLangOpts().OpenCL ||
type.getAddressSpace() != LangAS::Default)
return;
deduceOpenCLImplicitAddrSpace(state, type, TAL);
}
void Sema::completeExprArrayBound(Expr *E) {
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->IgnoreParens())) {
if (VarDecl *Var = dyn_cast<VarDecl>(DRE->getDecl())) {
if (isTemplateInstantiation(Var->getTemplateSpecializationKind())) {
auto *Def = Var->getDefinition();
if (!Def) {
SourceLocation PointOfInstantiation = E->getExprLoc();
InstantiateVariableDefinition(PointOfInstantiation, Var);
Def = Var->getDefinition();
// If we don't already have a point of instantiation, and we managed
// to instantiate a definition, this is the point of instantiation.
// Otherwise, we don't request an end-of-TU instantiation, so this is
// not a point of instantiation.
// FIXME: Is this really the right behavior?
if (Var->getPointOfInstantiation().isInvalid() && Def) {
assert(Var->getTemplateSpecializationKind() ==
TSK_ImplicitInstantiation &&
"explicit instantiation with no point of instantiation");
Var->setTemplateSpecializationKind(
Var->getTemplateSpecializationKind(), PointOfInstantiation);
}
}
// Update the type to the definition's type both here and within the
// expression.
if (Def) {
DRE->setDecl(Def);
QualType T = Def->getType();
DRE->setType(T);
// FIXME: Update the type on all intervening expressions.
E->setType(T);
}
// We still go on to try to complete the type independently, as it
// may also require instantiations or diagnostics if it remains
// incomplete.
}
}
}
}
/// Ensure that the type of the given expression is complete.
///
/// This routine checks whether the expression \p E has a complete type. If the
/// expression refers to an instantiable construct, that instantiation is
/// performed as needed to complete its type. Furthermore,
/// Sema::RequireCompleteType is called for the expression's type (or in the
/// case of a reference type, the referred-to type).
///
/// \param E The expression whose type is required to be complete.
/// \param Diagnoser The object that will emit a diagnostic if the type is
/// incomplete.
///
/// \returns \c true if the type of \p E is incomplete and diagnosed, \c false
/// otherwise.
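///
/// Illustrative example (not from the original source) of the
/// array-bound-completing case handled below:
/// \code
///   template<typename T> struct X { static int arr[]; };
///   template<typename T> int X<T>::arr[] = {1, 2, 3};
///   int n = sizeof(X<int>::arr);  // instantiates the definition of 'arr'
/// \endcode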
bool Sema::RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser) {
QualType T = E->getType();
// Incomplete array types may be completed by the initializer attached to
// their definitions. For static data members of class templates and for
// variable templates, we need to instantiate the definition to get this
// initializer and complete the type.
if (T->isIncompleteArrayType()) {
completeExprArrayBound(E);
T = E->getType();
}
// FIXME: Are there other cases which require instantiating something other
// than the type to complete the type of an expression?
return RequireCompleteType(E->getExprLoc(), T, Diagnoser);
}
bool Sema::RequireCompleteExprType(Expr *E, unsigned DiagID) {
BoundTypeDiagnoser<> Diagnoser(DiagID);
return RequireCompleteExprType(E, Diagnoser);
}
/// Ensure that the type T is a complete type.
///
/// This routine checks whether the type @p T is complete in any
/// context where a complete type is required. If @p T is a complete
/// type, returns false. If @p T is a class template specialization,
/// this routine then attempts to perform class template
/// instantiation. If instantiation fails, or if @p T is incomplete
/// and cannot be completed, issues the diagnostic @p diag (giving it
/// the type @p T) and returns true.
///
/// @param Loc The location in the source that the incomplete type
/// diagnostic should refer to.
///
/// @param T The type that this routine is examining for completeness.
///
/// @returns @c true if @p T is incomplete and a diagnostic was emitted,
/// @c false otherwise.
bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser) {
if (RequireCompleteTypeImpl(Loc, T, &Diagnoser))
return true;
if (const TagType *Tag = T->getAs<TagType>()) {
if (!Tag->getDecl()->isCompleteDefinitionRequired()) {
Tag->getDecl()->setCompleteDefinitionRequired();
Consumer.HandleTagDeclRequiredDefinition(Tag->getDecl());
}
}
return false;
}
bool Sema::hasStructuralCompatLayout(Decl *D, Decl *Suggested) {
llvm::DenseSet<std::pair<Decl *, Decl *>> NonEquivalentDecls;
if (!Suggested)
return false;
// FIXME: Add a specific mode for C11 6.2.7/1 in StructuralEquivalenceContext
// and isolate from other C++ specific checks.
StructuralEquivalenceContext Ctx(
D->getASTContext(), Suggested->getASTContext(), NonEquivalentDecls,
StructuralEquivalenceKind::Default,
false /*StrictTypeSpelling*/, true /*Complain*/,
true /*ErrorOnTagTypeMismatch*/);
return Ctx.IsEquivalent(D, Suggested);
}
/// Determine whether there is any declaration of \p D that was ever a
/// definition (perhaps before module merging) and is currently visible.
/// \param D The definition of the entity.
/// \param Suggested Filled in with the declaration that should be made visible
/// in order to provide a definition of this entity.
/// \param OnlyNeedComplete If \c true, we only need the type to be complete,
/// not defined. This only matters for enums with a fixed underlying
/// type, since in all other cases, a type is complete if and only if it
/// is defined.
bool Sema::hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete) {
// Easy case: if we don't have modules, all declarations are visible.
if (!getLangOpts().Modules && !getLangOpts().ModulesLocalVisibility)
return true;
// If this definition was instantiated from a template, map back to the
// pattern from which it was instantiated.
if (isa<TagDecl>(D) && cast<TagDecl>(D)->isBeingDefined()) {
// We're in the middle of defining it; this definition should be treated
// as visible.
return true;
} else if (auto *RD = dyn_cast<CXXRecordDecl>(D)) {
if (auto *Pattern = RD->getTemplateInstantiationPattern())
RD = Pattern;
D = RD->getDefinition();
} else if (auto *ED = dyn_cast<EnumDecl>(D)) {
if (auto *Pattern = ED->getTemplateInstantiationPattern())
ED = Pattern;
if (OnlyNeedComplete && ED->isFixed()) {
// If the enum has a fixed underlying type, and we're only looking for a
// complete type (not a definition), any visible declaration of it will
// do.
*Suggested = nullptr;
for (auto *Redecl : ED->redecls()) {
if (isVisible(Redecl))
return true;
if (Redecl->isThisDeclarationADefinition() ||
(Redecl->isCanonicalDecl() && !*Suggested))
*Suggested = Redecl;
}
return false;
}
D = ED->getDefinition();
} else if (auto *FD = dyn_cast<FunctionDecl>(D)) {
if (auto *Pattern = FD->getTemplateInstantiationPattern())
FD = Pattern;
D = FD->getDefinition();
} else if (auto *VD = dyn_cast<VarDecl>(D)) {
if (auto *Pattern = VD->getTemplateInstantiationPattern())
VD = Pattern;
D = VD->getDefinition();
}
assert(D && "missing definition for pattern of instantiated definition");
*Suggested = D;
auto DefinitionIsVisible = [&] {
// The (primary) definition might be in a visible module.
if (isVisible(D))
return true;
// A visible module might have a merged definition instead.
if (D->isModulePrivate() ? hasMergedDefinitionInCurrentModule(D)
: hasVisibleMergedDefinition(D)) {
if (CodeSynthesisContexts.empty() &&
!getLangOpts().ModulesLocalVisibility) {
// Cache the fact that this definition is implicitly visible because
// there is a visible merged definition.
D->setVisibleDespiteOwningModule();
}
return true;
}
return false;
};
if (DefinitionIsVisible())
return true;
// The external source may have additional definitions of this entity that are
// visible, so complete the redeclaration chain now and ask again.
if (auto *Source = Context.getExternalSource()) {
Source->CompleteRedeclChain(D);
return DefinitionIsVisible();
}
return false;
}
/// Locks in the inheritance model for the given class and all of its bases.
static void assignInheritanceModel(Sema &S, CXXRecordDecl *RD) {
RD = RD->getMostRecentNonInjectedDecl();
if (!RD->hasAttr<MSInheritanceAttr>()) {
MSInheritanceAttr::Spelling IM;
switch (S.MSPointerToMemberRepresentationMethod) {
case LangOptions::PPTMK_BestCase:
IM = RD->calculateInheritanceModel();
break;
case LangOptions::PPTMK_FullGeneralitySingleInheritance:
IM = MSInheritanceAttr::Keyword_single_inheritance;
break;
case LangOptions::PPTMK_FullGeneralityMultipleInheritance:
IM = MSInheritanceAttr::Keyword_multiple_inheritance;
break;
case LangOptions::PPTMK_FullGeneralityVirtualInheritance:
IM = MSInheritanceAttr::Keyword_unspecified_inheritance;
break;
}
RD->addAttr(MSInheritanceAttr::CreateImplicit(
S.getASTContext(), IM,
/*BestCase=*/S.MSPointerToMemberRepresentationMethod ==
LangOptions::PPTMK_BestCase,
S.ImplicitMSInheritanceAttrLoc.isValid()
? S.ImplicitMSInheritanceAttrLoc
: RD->getSourceRange()));
S.Consumer.AssignInheritanceModel(RD);
}
}
/// The implementation of RequireCompleteType
bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
TypeDiagnoser *Diagnoser) {
// FIXME: Add this assertion to make sure we always get instantiation points.
// assert(!Loc.isInvalid() && "Invalid location in RequireCompleteType");
// FIXME: Add this assertion to help us flush out problems with
// checking for dependent types and type-dependent expressions.
//
// assert(!T->isDependentType() &&
// "Can't ask whether a dependent type is complete");
if (const MemberPointerType *MPTy = T->getAs<MemberPointerType>()) {
if (!MPTy->getClass()->isDependentType()) {
if (getLangOpts().CompleteMemberPointers &&
!MPTy->getClass()->getAsCXXRecordDecl()->isBeingDefined() &&
RequireCompleteType(Loc, QualType(MPTy->getClass(), 0),
diag::err_memptr_incomplete))
return true;
// We lock in the inheritance model once somebody has asked us to ensure
// that a pointer-to-member type is complete.
if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
(void)isCompleteType(Loc, QualType(MPTy->getClass(), 0));
assignInheritanceModel(*this, MPTy->getMostRecentCXXRecordDecl());
}
}
}
NamedDecl *Def = nullptr;
bool Incomplete = T->isIncompleteType(&Def);
// Check that any necessary explicit specializations are visible. For an
// enum, we just need the declaration, so don't check this.
if (Def && !isa<EnumDecl>(Def))
checkSpecializationVisibility(Loc, Def);
// If we have a complete type, we're done.
if (!Incomplete) {
// If we know about the definition but it is not visible, complain.
NamedDecl *SuggestedDef = nullptr;
if (Def &&
!hasVisibleDefinition(Def, &SuggestedDef, /*OnlyNeedComplete*/true)) {
// If the user is going to see an error here, recover by making the
// definition visible.
bool TreatAsComplete = Diagnoser && !isSFINAEContext();
if (Diagnoser && SuggestedDef)
diagnoseMissingImport(Loc, SuggestedDef, MissingImportKind::Definition,
/*Recover*/TreatAsComplete);
return !TreatAsComplete;
} else if (Def && !TemplateInstCallbacks.empty()) {
CodeSynthesisContext TempInst;
TempInst.Kind = CodeSynthesisContext::Memoization;
TempInst.Template = Def;
TempInst.Entity = Def;
TempInst.PointOfInstantiation = Loc;
atTemplateBegin(TemplateInstCallbacks, *this, TempInst);
atTemplateEnd(TemplateInstCallbacks, *this, TempInst);
}
return false;
}
TagDecl *Tag = dyn_cast_or_null<TagDecl>(Def);
ObjCInterfaceDecl *IFace = dyn_cast_or_null<ObjCInterfaceDecl>(Def);
// Give the external source a chance to provide a definition of the type.
// This is kept separate from completing the redeclaration chain so that
// external sources such as LLDB can avoid synthesizing a type definition
// unless it's actually needed.
if (Tag || IFace) {
// Avoid diagnosing invalid decls as incomplete.
if (Def->isInvalidDecl())
return true;
// Give the external AST source a chance to complete the type.
if (auto *Source = Context.getExternalSource()) {
if (Tag && Tag->hasExternalLexicalStorage())
Source->CompleteType(Tag);
if (IFace && IFace->hasExternalLexicalStorage())
Source->CompleteType(IFace);
// If the external source completed the type, go through the motions
// again to ensure we're allowed to use the completed type.
if (!T->isIncompleteType())
return RequireCompleteTypeImpl(Loc, T, Diagnoser);
}
}
// If we have a class template specialization or a class member of a
// class template specialization, or an array with known size of such,
// try to instantiate it.
if (auto *RD = dyn_cast_or_null<CXXRecordDecl>(Tag)) {
bool Instantiated = false;
bool Diagnosed = false;
if (RD->isDependentContext()) {
// Don't try to instantiate a dependent class (e.g., a member template of
// an instantiated class template specialization).
// FIXME: Can this ever happen?
} else if (auto *ClassTemplateSpec =
dyn_cast<ClassTemplateSpecializationDecl>(RD)) {
if (ClassTemplateSpec->getSpecializationKind() == TSK_Undeclared) {
Diagnosed = InstantiateClassTemplateSpecialization(
Loc, ClassTemplateSpec, TSK_ImplicitInstantiation,
/*Complain=*/Diagnoser);
Instantiated = true;
}
} else {
CXXRecordDecl *Pattern = RD->getInstantiatedFromMemberClass();
if (!RD->isBeingDefined() && Pattern) {
MemberSpecializationInfo *MSI = RD->getMemberSpecializationInfo();
assert(MSI && "Missing member specialization information?");
// This record was instantiated from a class within a template.
if (MSI->getTemplateSpecializationKind() !=
TSK_ExplicitSpecialization) {
Diagnosed = InstantiateClass(Loc, RD, Pattern,
getTemplateInstantiationArgs(RD),
TSK_ImplicitInstantiation,
/*Complain=*/Diagnoser);
Instantiated = true;
}
}
}
if (Instantiated) {
// Instantiate* might have already complained that the template is not
// defined, if we asked it to.
if (Diagnoser && Diagnosed)
return true;
// If we instantiated a definition, check that it's usable, even if
// instantiation produced an error, so that repeated calls to this
// function give consistent answers.
if (!T->isIncompleteType())
return RequireCompleteTypeImpl(Loc, T, Diagnoser);
}
}
// FIXME: If we didn't instantiate a definition because of an explicit
// specialization declaration, check that it's visible.
if (!Diagnoser)
return true;
Diagnoser->diagnose(*this, Loc, T);
// If the type was a forward declaration of a class/struct/union
// type, produce a note.
if (Tag && !Tag->isInvalidDecl())
Diag(Tag->getLocation(),
Tag->isBeingDefined() ? diag::note_type_being_defined
: diag::note_forward_declaration)
<< Context.getTagDeclType(Tag);
// If the Objective-C class was a forward declaration, produce a note.
if (IFace && !IFace->isInvalidDecl())
Diag(IFace->getLocation(), diag::note_forward_class);
// If we have external information that we can use to suggest a fix,
// produce a note.
if (ExternalSource)
ExternalSource->MaybeDiagnoseMissingCompleteType(Loc, T);
return true;
}
bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID) {
BoundTypeDiagnoser<> Diagnoser(DiagID);
return RequireCompleteType(Loc, T, Diagnoser);
}
/// Get diagnostic %select index for tag kind for
/// literal type diagnostic message.
/// WARNING: Indexes apply to particular diagnostics only!
///
/// \returns diagnostic %select index.
static unsigned getLiteralDiagFromTagKind(TagTypeKind Tag) {
switch (Tag) {
case TTK_Struct: return 0;
case TTK_Interface: return 1;
case TTK_Class: return 2;
default: llvm_unreachable("Invalid tag kind for literal type diagnostic!");
}
}
/// Ensure that the type T is a literal type.
///
/// This routine checks whether the type @p T is a literal type. If @p T is an
/// incomplete type, an attempt is made to complete it. If @p T is a literal
/// type, or @p AllowIncompleteType is true and @p T is an incomplete type,
/// returns false. Otherwise, this routine issues the diagnostic @p PD (giving
/// it the type @p T), along with notes explaining why the type is not a
/// literal type, and returns true.
///
/// @param Loc The location in the source that the non-literal type
/// diagnostic should refer to.
///
/// @param T The type that this routine is examining for literalness.
///
/// @param Diagnoser Emits a diagnostic if T is not a literal type.
///
/// @returns @c true if @p T is not a literal type and a diagnostic was emitted,
/// @c false otherwise.
bool Sema::RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser) {
assert(!T->isDependentType() && "type should not be dependent");
QualType ElemType = Context.getBaseElementType(T);
if ((isCompleteType(Loc, ElemType) || ElemType->isVoidType()) &&
T->isLiteralType(Context))
return false;
Diagnoser.diagnose(*this, Loc, T);
if (T->isVariableArrayType())
return true;
const RecordType *RT = ElemType->getAs<RecordType>();
if (!RT)
return true;
const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
// A partially-defined class type can't be a literal type, because a literal
// class type must have a trivial destructor (which can't be checked until
// the class definition is complete).
if (RequireCompleteType(Loc, ElemType, diag::note_non_literal_incomplete, T))
return true;
// [expr.prim.lambda]p3:
// This class type is [not] a literal type.
if (RD->isLambda() && !getLangOpts().CPlusPlus17) {
Diag(RD->getLocation(), diag::note_non_literal_lambda);
return true;
}
// If the class has virtual base classes, then it's not an aggregate, and
// cannot have any constexpr constructors or a trivial default constructor,
// so is non-literal. This is better to diagnose than the resulting absence
// of constexpr constructors.
if (RD->getNumVBases()) {
Diag(RD->getLocation(), diag::note_non_literal_virtual_base)
<< getLiteralDiagFromTagKind(RD->getTagKind()) << RD->getNumVBases();
for (const auto &I : RD->vbases())
Diag(I.getBeginLoc(), diag::note_constexpr_virtual_base_here)
<< I.getSourceRange();
} else if (!RD->isAggregate() && !RD->hasConstexprNonCopyMoveConstructor() &&
!RD->hasTrivialDefaultConstructor()) {
Diag(RD->getLocation(), diag::note_non_literal_no_constexpr_ctors) << RD;
} else if (RD->hasNonLiteralTypeFieldsOrBases()) {
for (const auto &I : RD->bases()) {
if (!I.getType()->isLiteralType(Context)) {
Diag(I.getBeginLoc(), diag::note_non_literal_base_class)
<< RD << I.getType() << I.getSourceRange();
return true;
}
}
for (const auto *I : RD->fields()) {
if (!I->getType()->isLiteralType(Context) ||
I->getType().isVolatileQualified()) {
Diag(I->getLocation(), diag::note_non_literal_field)
<< RD << I << I->getType()
<< I->getType().isVolatileQualified();
return true;
}
}
} else if (!RD->hasTrivialDestructor()) {
// All fields and bases are of literal types, so have trivial destructors.
// If this class's destructor is non-trivial it must be user-declared.
CXXDestructorDecl *Dtor = RD->getDestructor();
assert(Dtor && "class has literal fields and bases but no dtor?");
if (!Dtor)
return true;
Diag(Dtor->getLocation(), Dtor->isUserProvided() ?
diag::note_non_literal_user_provided_dtor :
diag::note_non_literal_nontrivial_dtor) << RD;
if (!Dtor->isUserProvided())
SpecialMemberIsTrivial(Dtor, CXXDestructor, TAH_IgnoreTrivialABI,
/*Diagnose*/true);
}
return true;
}
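// A guarded sketch (hypothetical types, not part of this file) of inputs
// that reach the different diagnostic branches above:
#if 0
namespace literal_example {
struct B {};
struct VBase : virtual B {};         // virtual base -> note_non_literal_virtual_base
struct NoCtor { NoCtor(int); };      // not an aggregate, no constexpr ctor
                                     //   -> note_non_literal_no_constexpr_ctors
struct VolField { volatile int v; }; // volatile field -> note_non_literal_field
struct Dtor { ~Dtor() {} };          // user-provided dtor
                                     //   -> note_non_literal_user_provided_dtor
} // namespace literal_example
#endif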
bool Sema::RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID) {
BoundTypeDiagnoser<> Diagnoser(DiagID);
return RequireLiteralType(Loc, T, Diagnoser);
}
/// Retrieve a version of the type 'T' that is elaborated by Keyword, qualified
/// by the nested-name-specifier contained in SS, and that is (re)declared by
/// OwnedTagDecl, which is nullptr if this is not a (re)declaration.
QualType Sema::getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl) {
if (T.isNull())
return T;
NestedNameSpecifier *NNS;
if (SS.isValid())
NNS = SS.getScopeRep();
else {
if (Keyword == ETK_None)
return T;
NNS = nullptr;
}
return Context.getElaboratedType(Keyword, NNS, T, OwnedTagDecl);
}
QualType Sema::BuildTypeofExprType(Expr *E, SourceLocation Loc) {
assert(!E->hasPlaceholderType() && "unexpected placeholder");
if (!getLangOpts().CPlusPlus && E->refersToBitField())
Diag(E->getExprLoc(), diag::err_sizeof_alignof_typeof_bitfield) << 2;
if (!E->isTypeDependent()) {
QualType T = E->getType();
if (const TagType *TT = T->getAs<TagType>())
DiagnoseUseOfDecl(TT->getDecl(), E->getExprLoc());
}
return Context.getTypeOfExprType(E);
}
/// getDecltypeForExpr - Given an expr, will return the decltype for
/// that expression, according to the rules in C++11
/// [dcl.type.simple]p4 and C++11 [expr.lambda.prim]p18.
static QualType getDecltypeForExpr(Sema &S, Expr *E) {
if (E->isTypeDependent())
return S.Context.DependentTy;
// C++11 [dcl.type.simple]p4:
// The type denoted by decltype(e) is defined as follows:
//
// - if e is an unparenthesized id-expression or an unparenthesized class
// member access (5.2.5), decltype(e) is the type of the entity named
// by e. If there is no such entity, or if e names a set of overloaded
// functions, the program is ill-formed;
//
// We apply the same rules for Objective-C ivar and property references.
if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
const ValueDecl *VD = DRE->getDecl();
return VD->getType();
} else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
if (const ValueDecl *VD = ME->getMemberDecl())
if (isa<FieldDecl>(VD) || isa<VarDecl>(VD))
return VD->getType();
} else if (const ObjCIvarRefExpr *IR = dyn_cast<ObjCIvarRefExpr>(E)) {
return IR->getDecl()->getType();
} else if (const ObjCPropertyRefExpr *PR = dyn_cast<ObjCPropertyRefExpr>(E)) {
if (PR->isExplicitProperty())
return PR->getExplicitProperty()->getType();
} else if (auto *PE = dyn_cast<PredefinedExpr>(E)) {
return PE->getType();
}
// C++11 [expr.lambda.prim]p18:
// Every occurrence of decltype((x)) where x is a possibly
// parenthesized id-expression that names an entity of automatic
// storage duration is treated as if x were transformed into an
// access to a corresponding data member of the closure type that
// would have been declared if x were an odr-use of the denoted
// entity.
using namespace sema;
if (S.getCurLambda()) {
if (isa<ParenExpr>(E)) {
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->IgnoreParens())) {
if (VarDecl *Var = dyn_cast<VarDecl>(DRE->getDecl())) {
QualType T = S.getCapturedDeclRefType(Var, DRE->getLocation());
if (!T.isNull())
return S.Context.getLValueReferenceType(T);
}
}
}
}
// C++11 [dcl.type.simple]p4:
// [...]
QualType T = E->getType();
switch (E->getValueKind()) {
// - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the
// type of e;
case VK_XValue: T = S.Context.getRValueReferenceType(T); break;
// - otherwise, if e is an lvalue, decltype(e) is T&, where T is the
// type of e;
case VK_LValue: T = S.Context.getLValueReferenceType(T); break;
// - otherwise, decltype(e) is the type of e.
case VK_RValue: break;
}
return T;
}
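// A guarded, standalone sketch (hypothetical names, not part of this file) of
// how the rules implemented above behave on concrete expressions:
#if 0
#include <type_traits>
#include <utility>
namespace decltype_example {
int n = 0;
int &ref = n;
// Unparenthesized id-expression: the declared type of the entity.
static_assert(std::is_same<decltype(n), int>::value, "");
static_assert(std::is_same<decltype(ref), int &>::value, "");
// Parenthesized lvalue: T& per C++11 [dcl.type.simple]p4.
static_assert(std::is_same<decltype((n)), int &>::value, "");
// xvalue: T&&.
static_assert(std::is_same<decltype(std::move(n)), int &&>::value, "");
} // namespace decltype_example
#endif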
QualType Sema::BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated) {
assert(!E->hasPlaceholderType() && "unexpected placeholder");
if (AsUnevaluated && CodeSynthesisContexts.empty() &&
E->HasSideEffects(Context, false)) {
// The expression operand for decltype is in an unevaluated expression
// context, so side effects could result in unintended consequences.
Diag(E->getExprLoc(), diag::warn_side_effects_unevaluated_context);
}
return Context.getDecltypeType(E, getDecltypeForExpr(*this, E));
}
QualType Sema::BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc) {
switch (UKind) {
case UnaryTransformType::EnumUnderlyingType:
if (!BaseType->isDependentType() && !BaseType->isEnumeralType()) {
Diag(Loc, diag::err_only_enums_have_underlying_types);
return QualType();
} else {
QualType Underlying = BaseType;
if (!BaseType->isDependentType()) {
// The enum could be incomplete if we're parsing its definition or
// recovering from an error.
NamedDecl *FwdDecl = nullptr;
if (BaseType->isIncompleteType(&FwdDecl)) {
Diag(Loc, diag::err_underlying_type_of_incomplete_enum) << BaseType;
Diag(FwdDecl->getLocation(), diag::note_forward_declaration) << FwdDecl;
return QualType();
}
EnumDecl *ED = BaseType->getAs<EnumType>()->getDecl();
assert(ED && "EnumType has no EnumDecl");
DiagnoseUseOfDecl(ED, Loc);
Underlying = ED->getIntegerType();
assert(!Underlying.isNull());
}
return Context.getUnaryTransformType(BaseType, Underlying,
UnaryTransformType::EnumUnderlyingType);
}
}
llvm_unreachable("unknown unary transform type");
}
QualType Sema::BuildAtomicType(QualType T, SourceLocation Loc) {
if (!T->isDependentType()) {
// FIXME: It isn't entirely clear whether incomplete atomic types
// are allowed or not; for simplicity, ban them for the moment.
if (RequireCompleteType(Loc, T, diag::err_atomic_specifier_bad_type, 0))
return QualType();
int DisallowedKind = -1;
if (T->isArrayType())
DisallowedKind = 1;
else if (T->isFunctionType())
DisallowedKind = 2;
else if (T->isReferenceType())
DisallowedKind = 3;
else if (T->isAtomicType())
DisallowedKind = 4;
else if (T.hasQualifiers())
DisallowedKind = 5;
else if (!T.isTriviallyCopyableType(Context))
// Some other non-trivially-copyable type (probably a C++ class)
DisallowedKind = 6;
if (DisallowedKind != -1) {
Diag(Loc, diag::err_atomic_specifier_bad_type) << DisallowedKind << T;
return QualType();
}
// FIXME: Do we need any handling for ARC here?
}
// Build the atomic type.
return Context.getAtomicType(T);
}
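// Guarded examples (hypothetical declarations, C11 syntax) of the
// classifications above:
#if 0
_Atomic(int) ok;                  // complete, trivially copyable
_Atomic(int[4]) bad_array;        // DisallowedKind 1
_Atomic(int(void)) bad_function;  // DisallowedKind 2
_Atomic(int &) bad_reference;     // DisallowedKind 3 (C++ only)
_Atomic(_Atomic(int)) bad_nested; // DisallowedKind 4
_Atomic(const int) bad_qualified; // DisallowedKind 5
#endif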
Index: stable/12/contrib/llvm-project/clang
===================================================================
--- stable/12/contrib/llvm-project/clang (revision 356464)
+++ stable/12/contrib/llvm-project/clang (revision 356465)
Property changes on: stable/12/contrib/llvm-project/clang
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,2 ##
Merged /vendor/llvm-project/release-9.x/clang:r355957-355987
Merged /head/contrib/llvm-project/clang:r356004
Index: stable/12/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingUtil.c
===================================================================
--- stable/12/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingUtil.c (revision 356464)
+++ stable/12/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingUtil.c (revision 356465)
@@ -1,309 +1,314 @@
/*===- InstrProfilingUtil.c - Support library for PGO instrumentation -----===*\
|*
|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|* See https://llvm.org/LICENSE.txt for license information.
|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|*
\*===----------------------------------------------------------------------===*/
#ifdef _WIN32
#include <direct.h>
#include <process.h>
#include <windows.h>
#include "WindowsMMap.h"
#else
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#endif
#ifdef COMPILER_RT_HAS_UNAME
#include <sys/utsname.h>
#endif
#include <stdlib.h>
#include <string.h>
#if defined(__linux__)
#include <signal.h>
#include <sys/prctl.h>
#endif
#include "InstrProfiling.h"
#include "InstrProfilingUtil.h"
COMPILER_RT_WEAK unsigned lprofDirMode = 0755;
COMPILER_RT_VISIBILITY
void __llvm_profile_recursive_mkdir(char *path) {
int i;
for (i = 1; path[i] != '\0'; ++i) {
char save = path[i];
if (!IS_DIR_SEPARATOR(path[i]))
continue;
path[i] = '\0';
#ifdef _WIN32
_mkdir(path);
#else
/* Some of these calls will fail; ignore the failures. */
mkdir(path, __llvm_profile_get_dir_mode());
#endif
path[i] = save;
}
}
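/* A guarded usage sketch (hypothetical path): each missing directory
 * component of the path below is created in turn, like `mkdir -p` applied
 * to the directory portion; the final file name itself is not created. */
#if 0
char path[] = "/tmp/prof/app/default.profraw";
__llvm_profile_recursive_mkdir(path); /* makes /tmp, /tmp/prof, /tmp/prof/app */
#endif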
COMPILER_RT_VISIBILITY
void __llvm_profile_set_dir_mode(unsigned Mode) { lprofDirMode = Mode; }
COMPILER_RT_VISIBILITY
unsigned __llvm_profile_get_dir_mode(void) { return lprofDirMode; }
#if COMPILER_RT_HAS_ATOMICS != 1
COMPILER_RT_VISIBILITY
uint32_t lprofBoolCmpXchg(void **Ptr, void *OldV, void *NewV) {
void *R = *Ptr;
if (R == OldV) {
*Ptr = NewV;
return 1;
}
return 0;
}
COMPILER_RT_VISIBILITY
void *lprofPtrFetchAdd(void **Mem, long ByteIncr) {
void *Old = *Mem;
*((char **)Mem) += ByteIncr;
return Old;
}
#endif
#ifdef _WIN32
COMPILER_RT_VISIBILITY int lprofGetHostName(char *Name, int Len) {
WCHAR Buffer[COMPILER_RT_MAX_HOSTLEN];
DWORD BufferSize = sizeof(Buffer);
BOOL Result =
GetComputerNameExW(ComputerNameDnsFullyQualified, Buffer, &BufferSize);
if (!Result)
return -1;
if (WideCharToMultiByte(CP_UTF8, 0, Buffer, -1, Name, Len, NULL, NULL) == 0)
return -1;
return 0;
}
#elif defined(COMPILER_RT_HAS_UNAME)
COMPILER_RT_VISIBILITY int lprofGetHostName(char *Name, int Len) {
struct utsname N;
int R = uname(&N);
if (R >= 0) {
strncpy(Name, N.nodename, Len);
return 0;
}
return R;
}
#endif
COMPILER_RT_VISIBILITY int lprofLockFd(int fd) {
#ifdef COMPILER_RT_HAS_FCNTL_LCK
struct flock s_flock;
s_flock.l_whence = SEEK_SET;
s_flock.l_start = 0;
s_flock.l_len = 0; /* Until EOF. */
s_flock.l_pid = getpid();
s_flock.l_type = F_WRLCK;
while (fcntl(fd, F_SETLKW, &s_flock) == -1) {
if (errno != EINTR) {
if (errno == ENOLCK) {
return -1;
}
break;
}
}
return 0;
#else
flock(fd, LOCK_EX);
return 0;
#endif
}
COMPILER_RT_VISIBILITY int lprofUnlockFd(int fd) {
#ifdef COMPILER_RT_HAS_FCNTL_LCK
struct flock s_flock;
s_flock.l_whence = SEEK_SET;
s_flock.l_start = 0;
s_flock.l_len = 0; /* Until EOF. */
s_flock.l_pid = getpid();
s_flock.l_type = F_UNLCK;
while (fcntl(fd, F_SETLKW, &s_flock) == -1) {
if (errno != EINTR) {
if (errno == ENOLCK) {
return -1;
}
break;
}
}
return 0;
#else
flock(fd, LOCK_UN);
return 0;
#endif
}
COMPILER_RT_VISIBILITY int lprofLockFileHandle(FILE *F) {
int fd;
#if defined(_WIN32)
fd = _fileno(F);
#else
fd = fileno(F);
#endif
return lprofLockFd(fd);
}
COMPILER_RT_VISIBILITY int lprofUnlockFileHandle(FILE *F) {
int fd;
#if defined(_WIN32)
fd = _fileno(F);
#else
fd = fileno(F);
#endif
return lprofUnlockFd(fd);
}
COMPILER_RT_VISIBILITY FILE *lprofOpenFileEx(const char *ProfileName) {
FILE *f;
int fd;
#ifdef COMPILER_RT_HAS_FCNTL_LCK
fd = open(ProfileName, O_RDWR | O_CREAT, 0666);
if (fd < 0)
return NULL;
if (lprofLockFd(fd) != 0)
PROF_WARN("Data may be corrupted during profile merging : %s\n",
"Fail to obtain file lock due to system limit.");
f = fdopen(fd, "r+b");
#elif defined(_WIN32)
// FIXME: Use the wide variants to handle Unicode filenames.
- HANDLE h = CreateFileA(ProfileName, GENERIC_READ | GENERIC_WRITE, 0, 0,
- OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, 0);
+ HANDLE h = CreateFileA(ProfileName, GENERIC_READ | GENERIC_WRITE,
+ FILE_SHARE_READ | FILE_SHARE_WRITE, 0, OPEN_ALWAYS,
+ FILE_ATTRIBUTE_NORMAL, 0);
if (h == INVALID_HANDLE_VALUE)
return NULL;
fd = _open_osfhandle((intptr_t)h, 0);
if (fd == -1) {
CloseHandle(h);
return NULL;
}
+
+ if (lprofLockFd(fd) != 0)
+ PROF_WARN("Data may be corrupted during profile merging : %s\n",
+ "Fail to obtain file lock due to system limit.");
f = _fdopen(fd, "r+b");
if (f == 0) {
CloseHandle(h);
return NULL;
}
#else
/* Worst case: no locking applied. */
PROF_WARN("Concurrent file access is not supported : %s\n",
"lack file locking");
fd = open(ProfileName, O_RDWR | O_CREAT, 0666);
if (fd < 0)
return NULL;
f = fdopen(fd, "r+b");
#endif
return f;
}
COMPILER_RT_VISIBILITY const char *lprofGetPathPrefix(int *PrefixStrip,
size_t *PrefixLen) {
const char *Prefix = getenv("GCOV_PREFIX");
const char *PrefixStripStr = getenv("GCOV_PREFIX_STRIP");
*PrefixLen = 0;
*PrefixStrip = 0;
if (Prefix == NULL || Prefix[0] == '\0')
return NULL;
if (PrefixStripStr) {
*PrefixStrip = atoi(PrefixStripStr);
/* Negative GCOV_PREFIX_STRIP values are ignored */
if (*PrefixStrip < 0)
*PrefixStrip = 0;
} else {
*PrefixStrip = 0;
}
*PrefixLen = strlen(Prefix);
return Prefix;
}
COMPILER_RT_VISIBILITY void
lprofApplyPathPrefix(char *Dest, const char *PathStr, const char *Prefix,
size_t PrefixLen, int PrefixStrip) {
const char *Ptr;
int Level;
const char *StrippedPathStr = PathStr;
for (Level = 0, Ptr = PathStr + 1; Level < PrefixStrip; ++Ptr) {
if (*Ptr == '\0')
break;
if (!IS_DIR_SEPARATOR(*Ptr))
continue;
StrippedPathStr = Ptr;
++Level;
}
memcpy(Dest, Prefix, PrefixLen);
if (!IS_DIR_SEPARATOR(Prefix[PrefixLen - 1]))
Dest[PrefixLen++] = DIR_SEPARATOR;
memcpy(Dest + PrefixLen, StrippedPathStr, strlen(StrippedPathStr) + 1);
}
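/* A guarded sketch (hypothetical values) tying the two helpers above
 * together: with GCOV_PREFIX=/tmp/gcov and no GCOV_PREFIX_STRIP, a path
 * "obj/foo.gcda" is rewritten to "/tmp/gcov/obj/foo.gcda"; with
 * GCOV_PREFIX_STRIP=1 and "/usr/obj/foo.gcda", the first path component
 * is dropped before the prefix is applied. */
#if 0
int strip;
size_t plen;
const char *prefix = lprofGetPathPrefix(&strip, &plen);
char dest[4096];
if (prefix)
  lprofApplyPathPrefix(dest, "obj/foo.gcda", prefix, plen, strip);
#endif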
COMPILER_RT_VISIBILITY const char *
lprofFindFirstDirSeparator(const char *Path) {
const char *Sep = strchr(Path, DIR_SEPARATOR);
#if defined(DIR_SEPARATOR_2)
const char *Sep2 = strchr(Path, DIR_SEPARATOR_2);
if (Sep2 && (!Sep || Sep2 < Sep))
Sep = Sep2;
#endif
return Sep;
}
COMPILER_RT_VISIBILITY const char *lprofFindLastDirSeparator(const char *Path) {
const char *Sep = strrchr(Path, DIR_SEPARATOR);
#if defined(DIR_SEPARATOR_2)
const char *Sep2 = strrchr(Path, DIR_SEPARATOR_2);
if (Sep2 && (!Sep || Sep2 > Sep))
Sep = Sep2;
#endif
return Sep;
}
COMPILER_RT_VISIBILITY int lprofSuspendSigKill() {
#if defined(__linux__)
int PDeachSig = 0;
/* Temporarily suspend getting SIGKILL upon exit of the parent process. */
if (prctl(PR_GET_PDEATHSIG, &PDeachSig) == 0 && PDeachSig == SIGKILL)
prctl(PR_SET_PDEATHSIG, 0);
return (PDeachSig == SIGKILL);
#else
return 0;
#endif
}
COMPILER_RT_VISIBILITY void lprofRestoreSigKill() {
#if defined(__linux__)
prctl(PR_SET_PDEATHSIG, SIGKILL);
#endif
}
Index: stable/12/contrib/llvm-project/compiler-rt
===================================================================
--- stable/12/contrib/llvm-project/compiler-rt (revision 356464)
+++ stable/12/contrib/llvm-project/compiler-rt (revision 356465)
Property changes on: stable/12/contrib/llvm-project/compiler-rt
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,2 ##
Merged /vendor/llvm-project/release-9.x/compiler-rt:r355951-355987
Merged /head/contrib/llvm-project/compiler-rt:r356004
Index: stable/12/contrib/llvm-project/lld/COFF/Driver.cpp
===================================================================
--- stable/12/contrib/llvm-project/lld/COFF/Driver.cpp (revision 356464)
+++ stable/12/contrib/llvm-project/lld/COFF/Driver.cpp (revision 356465)
@@ -1,1917 +1,1917 @@
//===- Driver.cpp ---------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "Driver.h"
#include "Config.h"
#include "DebugTypes.h"
#include "ICF.h"
#include "InputFiles.h"
#include "MarkLive.h"
#include "MinGW.h"
#include "SymbolTable.h"
#include "Symbols.h"
#include "Writer.h"
#include "lld/Common/Args.h"
#include "lld/Common/Driver.h"
#include "lld/Common/ErrorHandler.h"
#include "lld/Common/Filesystem.h"
#include "lld/Common/Memory.h"
#include "lld/Common/Threads.h"
#include "lld/Common/Timer.h"
#include "lld/Common/Version.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/BinaryFormat/Magic.h"
#include "llvm/Object/ArchiveWriter.h"
#include "llvm/Object/COFFImportFile.h"
#include "llvm/Object/COFFModuleDefinition.h"
#include "llvm/Object/WindowsMachineFlag.h"
#include "llvm/Option/Arg.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Option/Option.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/TarWriter.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ToolDrivers/llvm-lib/LibDriver.h"
#include <algorithm>
#include <future>
#include <memory>
using namespace llvm;
using namespace llvm::object;
using namespace llvm::COFF;
using llvm::sys::Process;
namespace lld {
namespace coff {
static Timer inputFileTimer("Input File Reading", Timer::root());
Configuration *config;
LinkerDriver *driver;
bool link(ArrayRef<const char *> args, bool canExitEarly, raw_ostream &diag) {
errorHandler().logName = args::getFilenameWithoutExe(args[0]);
errorHandler().errorOS = &diag;
errorHandler().colorDiagnostics = diag.has_colors();
errorHandler().errorLimitExceededMsg =
"too many errors emitted, stopping now"
" (use /errorlimit:0 to see all errors)";
errorHandler().exitEarly = canExitEarly;
config = make<Configuration>();
symtab = make<SymbolTable>();
driver = make<LinkerDriver>();
driver->link(args);
// Call exit() if we can to avoid calling destructors.
if (canExitEarly)
exitLld(errorCount() ? 1 : 0);
freeArena();
ObjFile::instances.clear();
ImportFile::instances.clear();
BitcodeFile::instances.clear();
memset(MergeChunk::instances, 0, sizeof(MergeChunk::instances));
return !errorCount();
}
// Parse options of the form "old;new".
static std::pair<StringRef, StringRef> getOldNewOptions(opt::InputArgList &args,
unsigned id) {
auto *arg = args.getLastArg(id);
if (!arg)
return {"", ""};
StringRef s = arg->getValue();
std::pair<StringRef, StringRef> ret = s.split(';');
if (ret.second.empty())
error(arg->getSpelling() + " expects 'old;new' format, but got " + s);
return ret;
}
// Drop directory components and replace extension with ".exe" or ".dll".
static std::string getOutputPath(StringRef path) {
auto p = path.find_last_of("\\/");
StringRef s = (p == StringRef::npos) ? path : path.substr(p + 1);
const char* e = config->dll ? ".dll" : ".exe";
return (s.substr(0, s.rfind('.')) + e).str();
}
// Returns true if S matches /crtend.?\.o$/.
static bool isCrtend(StringRef s) {
if (!s.endswith(".o"))
return false;
s = s.drop_back(2);
if (s.endswith("crtend"))
return true;
return !s.empty() && s.drop_back().endswith("crtend");
}
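// Guarded examples of names the predicate above accepts and rejects:
#if 0
// isCrtend("crtend.o")   -> true   (exact match)
// isCrtend("crtend2.o")  -> true   (one extra character before ".o")
// isCrtend("crtend12.o") -> false  (two extra characters)
// isCrtend("crtend.obj") -> false  (must end in ".o")
#endif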
// ErrorOr is not default constructible, so it cannot be used as the type
// parameter of a future.
// FIXME: We could open the file in createFutureForFile and avoid needing to
// return an error here, but for the moment that would cost us a file descriptor
// (a limited resource on Windows) for the duration that the future is pending.
using MBErrPair = std::pair<std::unique_ptr<MemoryBuffer>, std::error_code>;
// Create a std::future that opens and maps a file using the best strategy for
// the host platform.
static std::future<MBErrPair> createFutureForFile(std::string path) {
#if _WIN32
// On Windows, file I/O is relatively slow so it is best to do this
// asynchronously.
auto strategy = std::launch::async;
#else
auto strategy = std::launch::deferred;
#endif
return std::async(strategy, [=]() {
auto mbOrErr = MemoryBuffer::getFile(path,
/*FileSize*/ -1,
/*RequiresNullTerminator*/ false);
if (!mbOrErr)
return MBErrPair{nullptr, mbOrErr.getError()};
return MBErrPair{std::move(*mbOrErr), std::error_code()};
});
}
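// A guarded usage sketch (hypothetical path): the future resolves to either
// a mapped buffer or an error code; on Windows the open and map run on a
// worker thread, elsewhere the work is deferred until get() is called.
#if 0
std::future<MBErrPair> f = createFutureForFile("input.obj");
MBErrPair p = f.get();
if (p.second)
  error("could not open 'input.obj': " + p.second.message());
#endif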
// Symbol names are mangled by prepending "_" on x86.
static StringRef mangle(StringRef sym) {
assert(config->machine != IMAGE_FILE_MACHINE_UNKNOWN);
if (config->machine == I386)
return saver.save("_" + sym);
return sym;
}
static bool findUnderscoreMangle(StringRef sym) {
Symbol *s = symtab->findMangle(mangle(sym));
return s && !isa<Undefined>(s);
}
MemoryBufferRef LinkerDriver::takeBuffer(std::unique_ptr<MemoryBuffer> mb) {
MemoryBufferRef mbref = *mb;
make<std::unique_ptr<MemoryBuffer>>(std::move(mb)); // take ownership
if (driver->tar)
driver->tar->append(relativeToRoot(mbref.getBufferIdentifier()),
mbref.getBuffer());
return mbref;
}
void LinkerDriver::addBuffer(std::unique_ptr<MemoryBuffer> mb,
bool wholeArchive) {
StringRef filename = mb->getBufferIdentifier();
MemoryBufferRef mbref = takeBuffer(std::move(mb));
filePaths.push_back(filename);
// File type is detected by contents, not by file extension.
switch (identify_magic(mbref.getBuffer())) {
case file_magic::windows_resource:
resources.push_back(mbref);
break;
case file_magic::archive:
if (wholeArchive) {
std::unique_ptr<Archive> file =
CHECK(Archive::create(mbref), filename + ": failed to parse archive");
Archive *archive = file.get();
make<std::unique_ptr<Archive>>(std::move(file)); // take ownership
for (MemoryBufferRef m : getArchiveMembers(archive))
addArchiveBuffer(m, "<whole-archive>", filename, 0);
return;
}
symtab->addFile(make<ArchiveFile>(mbref));
break;
case file_magic::bitcode:
symtab->addFile(make<BitcodeFile>(mbref, "", 0));
break;
case file_magic::coff_object:
case file_magic::coff_import_library:
symtab->addFile(make<ObjFile>(mbref));
break;
case file_magic::pdb:
loadTypeServerSource(mbref);
break;
case file_magic::coff_cl_gl_object:
error(filename + ": is not a native COFF file. Recompile without /GL");
break;
case file_magic::pecoff_executable:
if (filename.endswith_lower(".dll")) {
error(filename + ": bad file type. Did you specify a DLL instead of an "
"import library?");
break;
}
LLVM_FALLTHROUGH;
default:
error(mbref.getBufferIdentifier() + ": unknown file type");
break;
}
}
void LinkerDriver::enqueuePath(StringRef path, bool wholeArchive) {
auto future =
std::make_shared<std::future<MBErrPair>>(createFutureForFile(path));
std::string pathStr = path;
enqueueTask([=]() {
auto mbOrErr = future->get();
if (mbOrErr.second) {
std::string msg =
"could not open '" + pathStr + "': " + mbOrErr.second.message();
// Check if the filename is a typo for an option flag. OptTable thinks
// that all args that are not known options and that start with / are
// filenames, but e.g. `/nodefaultlibs` is more likely a typo for
// the option `/nodefaultlib` than a reference to a file in the root
// directory.
std::string nearest;
if (COFFOptTable().findNearest(pathStr, nearest) > 1)
error(msg);
else
error(msg + "; did you mean '" + nearest + "'");
} else
driver->addBuffer(std::move(mbOrErr.first), wholeArchive);
});
}
void LinkerDriver::addArchiveBuffer(MemoryBufferRef mb, StringRef symName,
StringRef parentName,
uint64_t offsetInArchive) {
file_magic magic = identify_magic(mb.getBuffer());
if (magic == file_magic::coff_import_library) {
InputFile *imp = make<ImportFile>(mb);
imp->parentName = parentName;
symtab->addFile(imp);
return;
}
InputFile *obj;
if (magic == file_magic::coff_object) {
obj = make<ObjFile>(mb);
} else if (magic == file_magic::bitcode) {
obj = make<BitcodeFile>(mb, parentName, offsetInArchive);
} else {
error("unknown file type: " + mb.getBufferIdentifier());
return;
}
obj->parentName = parentName;
symtab->addFile(obj);
log("Loaded " + toString(obj) + " for " + symName);
}
void LinkerDriver::enqueueArchiveMember(const Archive::Child &c,
const Archive::Symbol &sym,
StringRef parentName) {
auto reportBufferError = [=](Error &&e, StringRef childName) {
fatal("could not get the buffer for the member defining symbol " +
toCOFFString(sym) + ": " + parentName + "(" + childName + "): " +
toString(std::move(e)));
};
if (!c.getParent()->isThin()) {
uint64_t offsetInArchive = c.getChildOffset();
Expected<MemoryBufferRef> mbOrErr = c.getMemoryBufferRef();
if (!mbOrErr)
reportBufferError(mbOrErr.takeError(), check(c.getFullName()));
MemoryBufferRef mb = mbOrErr.get();
enqueueTask([=]() {
driver->addArchiveBuffer(mb, toCOFFString(sym), parentName,
offsetInArchive);
});
return;
}
std::string childName = CHECK(
c.getFullName(),
"could not get the filename for the member defining symbol " +
toCOFFString(sym));
auto future = std::make_shared<std::future<MBErrPair>>(
createFutureForFile(childName));
enqueueTask([=]() {
auto mbOrErr = future->get();
if (mbOrErr.second)
reportBufferError(errorCodeToError(mbOrErr.second), childName);
driver->addArchiveBuffer(takeBuffer(std::move(mbOrErr.first)),
toCOFFString(sym), parentName,
/*OffsetInArchive=*/0);
});
}
static bool isDecorated(StringRef sym) {
return sym.startswith("@") || sym.contains("@@") || sym.startswith("?") ||
(!config->mingw && sym.contains('@'));
}
// Parses .drectve section contents and returns a list of files
// specified by /defaultlib.
void LinkerDriver::parseDirectives(InputFile *file) {
StringRef s = file->getDirectives();
if (s.empty())
return;
log("Directives: " + toString(file) + ": " + s);
ArgParser parser;
// .drectve is always tokenized using Windows shell rules.
// The /EXPORT: option can appear a great many times, so it is processed in
// a fast path.
opt::InputArgList args;
std::vector<StringRef> exports;
std::tie(args, exports) = parser.parseDirectives(s);
for (StringRef e : exports) {
// If a common header file contains dllexported function
// declarations, many object files may end up with having the
// same /EXPORT options. In order to save cost of parsing them,
// we dedup them first.
if (!directivesExports.insert(e).second)
continue;
Export exp = parseExport(e);
if (config->machine == I386 && config->mingw) {
if (!isDecorated(exp.name))
exp.name = saver.save("_" + exp.name);
if (!exp.extName.empty() && !isDecorated(exp.extName))
exp.extName = saver.save("_" + exp.extName);
}
exp.directives = true;
config->exports.push_back(exp);
}
for (auto *arg : args) {
switch (arg->getOption().getID()) {
case OPT_aligncomm:
parseAligncomm(arg->getValue());
break;
case OPT_alternatename:
parseAlternateName(arg->getValue());
break;
case OPT_defaultlib:
if (Optional<StringRef> path = findLib(arg->getValue()))
enqueuePath(*path, false);
break;
case OPT_entry:
config->entry = addUndefined(mangle(arg->getValue()));
break;
case OPT_failifmismatch:
checkFailIfMismatch(arg->getValue(), file);
break;
case OPT_incl:
addUndefined(arg->getValue());
break;
case OPT_merge:
parseMerge(arg->getValue());
break;
case OPT_nodefaultlib:
config->noDefaultLibs.insert(doFindLib(arg->getValue()).lower());
break;
case OPT_section:
parseSection(arg->getValue());
break;
case OPT_subsystem:
parseSubsystem(arg->getValue(), &config->subsystem,
&config->majorOSVersion, &config->minorOSVersion);
break;
// Only add flags here that link.exe accepts in
// `#pragma comment(linker, "/flag")`-generated sections.
case OPT_editandcontinue:
case OPT_guardsym:
case OPT_throwingnew:
break;
default:
error(arg->getSpelling() + " is not allowed in .drectve");
}
}
}
// Find a file from the search paths. You can omit ".obj"; this function
// takes care of that. Note that the returned path is not guaranteed to exist.
StringRef LinkerDriver::doFindFile(StringRef filename) {
bool hasPathSep = (filename.find_first_of("/\\") != StringRef::npos);
if (hasPathSep)
return filename;
bool hasExt = filename.contains('.');
for (StringRef dir : searchPaths) {
SmallString<128> path = dir;
sys::path::append(path, filename);
if (sys::fs::exists(path.str()))
return saver.save(path.str());
if (!hasExt) {
path.append(".obj");
if (sys::fs::exists(path.str()))
return saver.save(path.str());
}
}
return filename;
}
static Optional<sys::fs::UniqueID> getUniqueID(StringRef path) {
sys::fs::UniqueID ret;
if (sys::fs::getUniqueID(path, ret))
return None;
return ret;
}
// Resolves a file path. This never returns the same path twice; if a
// path has already been visited, it returns None.
Optional<StringRef> LinkerDriver::findFile(StringRef filename) {
StringRef path = doFindFile(filename);
if (Optional<sys::fs::UniqueID> id = getUniqueID(path)) {
bool seen = !visitedFiles.insert(*id).second;
if (seen)
return None;
}
if (path.endswith_lower(".lib"))
visitedLibs.insert(sys::path::filename(path));
return path;
}
// MinGW specific. If an embedded directive specifies linking to foo.lib
// but it isn't found, try libfoo.a instead.
StringRef LinkerDriver::doFindLibMinGW(StringRef filename) {
if (filename.contains('/') || filename.contains('\\'))
return filename;
SmallString<128> s = filename;
sys::path::replace_extension(s, ".a");
StringRef libName = saver.save("lib" + s.str());
return doFindFile(libName);
}
// Find library file from search path.
StringRef LinkerDriver::doFindLib(StringRef filename) {
// Add ".lib" to Filename if that has no file extension.
bool hasExt = filename.contains('.');
if (!hasExt)
filename = saver.save(filename + ".lib");
StringRef ret = doFindFile(filename);
// For MinGW, if the find above didn't turn up anything, try
// looking for a MinGW formatted library name.
if (config->mingw && ret == filename)
return doFindLibMinGW(filename);
return ret;
}
// Resolves a library path. /nodefaultlib options are taken into
// consideration. This never returns the same path twice; if a path has
// already been visited, it returns None.
Optional<StringRef> LinkerDriver::findLib(StringRef filename) {
if (config->noDefaultLibAll)
return None;
if (!visitedLibs.insert(filename.lower()).second)
return None;
StringRef path = doFindLib(filename);
if (config->noDefaultLibs.count(path.lower()))
return None;
if (Optional<sys::fs::UniqueID> id = getUniqueID(path))
if (!visitedFiles.insert(*id).second)
return None;
return path;
}
// Parses LIB environment which contains a list of search paths.
void LinkerDriver::addLibSearchPaths() {
Optional<std::string> envOpt = Process::GetEnv("LIB");
if (!envOpt.hasValue())
return;
StringRef env = saver.save(*envOpt);
while (!env.empty()) {
StringRef path;
std::tie(path, env) = env.split(';');
searchPaths.push_back(path);
}
}
Symbol *LinkerDriver::addUndefined(StringRef name) {
Symbol *b = symtab->addUndefined(name);
if (!b->isGCRoot) {
b->isGCRoot = true;
config->gcroot.push_back(b);
}
return b;
}
StringRef LinkerDriver::mangleMaybe(Symbol *s) {
// If the plain symbol name has already been resolved, do nothing.
Undefined *unmangled = dyn_cast<Undefined>(s);
if (!unmangled)
return "";
// Otherwise, see if a similar, mangled symbol exists in the symbol table.
Symbol *mangled = symtab->findMangle(unmangled->getName());
if (!mangled)
return "";
// If we find a similar mangled symbol, make this an alias to it and return
// its name.
log(unmangled->getName() + " aliased to " + mangled->getName());
unmangled->weakAlias = symtab->addUndefined(mangled->getName());
return mangled->getName();
}
// Windows specific -- find default entry point name.
//
// There are four different entry point functions for Windows executables,
// each of which corresponds to a user-defined "main" function. This function
// infers an entry point from a user-defined "main" function.
StringRef LinkerDriver::findDefaultEntry() {
assert(config->subsystem != IMAGE_SUBSYSTEM_UNKNOWN &&
"must handle /subsystem before calling this");
if (config->mingw)
return mangle(config->subsystem == IMAGE_SUBSYSTEM_WINDOWS_GUI
? "WinMainCRTStartup"
: "mainCRTStartup");
if (config->subsystem == IMAGE_SUBSYSTEM_WINDOWS_GUI) {
if (findUnderscoreMangle("wWinMain")) {
if (!findUnderscoreMangle("WinMain"))
return mangle("wWinMainCRTStartup");
warn("found both wWinMain and WinMain; using latter");
}
return mangle("WinMainCRTStartup");
}
if (findUnderscoreMangle("wmain")) {
if (!findUnderscoreMangle("main"))
return mangle("wmainCRTStartup");
warn("found both wmain and main; using latter");
}
return mangle("mainCRTStartup");
}
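// Guarded summary of the inference above (x86 additionally prepends "_"):
#if 0
// console subsystem, user defines main  -> mainCRTStartup
// console subsystem, user defines wmain -> wmainCRTStartup
// GUI subsystem, user defines WinMain   -> WinMainCRTStartup
// GUI subsystem, user defines wWinMain  -> wWinMainCRTStartup
// both narrow and wide variants present -> warns, prefers the narrow one
#endif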
WindowsSubsystem LinkerDriver::inferSubsystem() {
if (config->dll)
return IMAGE_SUBSYSTEM_WINDOWS_GUI;
if (config->mingw)
return IMAGE_SUBSYSTEM_WINDOWS_CUI;
// Note that link.exe infers the subsystem from the presence of these
// functions even if /entry: or /nodefaultlib are passed, which causes them
// not to be called.
bool haveMain = findUnderscoreMangle("main");
bool haveWMain = findUnderscoreMangle("wmain");
bool haveWinMain = findUnderscoreMangle("WinMain");
bool haveWWinMain = findUnderscoreMangle("wWinMain");
if (haveMain || haveWMain) {
if (haveWinMain || haveWWinMain) {
warn(std::string("found ") + (haveMain ? "main" : "wmain") + " and " +
(haveWinMain ? "WinMain" : "wWinMain") +
"; defaulting to /subsystem:console");
}
return IMAGE_SUBSYSTEM_WINDOWS_CUI;
}
if (haveWinMain || haveWWinMain)
return IMAGE_SUBSYSTEM_WINDOWS_GUI;
return IMAGE_SUBSYSTEM_UNKNOWN;
}
static uint64_t getDefaultImageBase() {
if (config->is64())
return config->dll ? 0x180000000 : 0x140000000;
return config->dll ? 0x10000000 : 0x400000;
}
static std::string createResponseFile(const opt::InputArgList &args,
ArrayRef<StringRef> filePaths,
ArrayRef<StringRef> searchPaths) {
SmallString<0> data;
raw_svector_ostream os(data);
for (auto *arg : args) {
switch (arg->getOption().getID()) {
case OPT_linkrepro:
case OPT_INPUT:
case OPT_defaultlib:
case OPT_libpath:
case OPT_manifest:
case OPT_manifest_colon:
case OPT_manifestdependency:
case OPT_manifestfile:
case OPT_manifestinput:
case OPT_manifestuac:
break;
case OPT_implib:
case OPT_pdb:
case OPT_out:
os << arg->getSpelling() << sys::path::filename(arg->getValue()) << "\n";
break;
default:
os << toString(*arg) << "\n";
}
}
for (StringRef path : searchPaths) {
std::string relPath = relativeToRoot(path);
os << "/libpath:" << quote(relPath) << "\n";
}
for (StringRef path : filePaths)
os << quote(relativeToRoot(path)) << "\n";
return data.str();
}
enum class DebugKind { Unknown, None, Full, FastLink, GHash, Dwarf, Symtab };
static DebugKind parseDebugKind(const opt::InputArgList &args) {
auto *a = args.getLastArg(OPT_debug, OPT_debug_opt);
if (!a)
return DebugKind::None;
if (a->getNumValues() == 0)
return DebugKind::Full;
DebugKind debug = StringSwitch<DebugKind>(a->getValue())
.CaseLower("none", DebugKind::None)
.CaseLower("full", DebugKind::Full)
.CaseLower("fastlink", DebugKind::FastLink)
// LLD extensions
.CaseLower("ghash", DebugKind::GHash)
.CaseLower("dwarf", DebugKind::Dwarf)
.CaseLower("symtab", DebugKind::Symtab)
.Default(DebugKind::Unknown);
if (debug == DebugKind::FastLink) {
warn("/debug:fastlink unsupported; using /debug:full");
return DebugKind::Full;
}
if (debug == DebugKind::Unknown) {
error("/debug: unknown option: " + Twine(a->getValue()));
return DebugKind::None;
}
return debug;
}
static unsigned parseDebugTypes(const opt::InputArgList &args) {
unsigned debugTypes = static_cast<unsigned>(DebugType::None);
if (auto *a = args.getLastArg(OPT_debugtype)) {
SmallVector<StringRef, 3> types;
StringRef(a->getValue())
.split(types, ',', /*MaxSplit=*/-1, /*KeepEmpty=*/false);
for (StringRef type : types) {
unsigned v = StringSwitch<unsigned>(type.lower())
.Case("cv", static_cast<unsigned>(DebugType::CV))
.Case("pdata", static_cast<unsigned>(DebugType::PData))
.Case("fixup", static_cast<unsigned>(DebugType::Fixup))
.Default(0);
if (v == 0) {
warn("/debugtype: unknown option '" + type + "'");
continue;
}
debugTypes |= v;
}
return debugTypes;
}
// Default debug types
debugTypes = static_cast<unsigned>(DebugType::CV);
if (args.hasArg(OPT_driver))
debugTypes |= static_cast<unsigned>(DebugType::PData);
if (args.hasArg(OPT_profile))
debugTypes |= static_cast<unsigned>(DebugType::Fixup);
return debugTypes;
}
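// Guarded examples of the /debugtype handling above:
#if 0
// /debugtype:cv,pdata       -> DebugType::CV | DebugType::PData
// /debugtype:cv,bogus       -> warns on "bogus", keeps DebugType::CV
// (no /debugtype), /driver  -> DebugType::CV | DebugType::PData
// (no /debugtype), /profile -> DebugType::CV | DebugType::Fixup
#endif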
static std::string getMapFile(const opt::InputArgList &args) {
auto *arg = args.getLastArg(OPT_lldmap, OPT_lldmap_file);
if (!arg)
return "";
if (arg->getOption().getID() == OPT_lldmap_file)
return arg->getValue();
assert(arg->getOption().getID() == OPT_lldmap);
StringRef outFile = config->outputFile;
return (outFile.substr(0, outFile.rfind('.')) + ".map").str();
}
static std::string getImplibPath() {
if (!config->implib.empty())
return config->implib;
SmallString<128> out = StringRef(config->outputFile);
sys::path::replace_extension(out, ".lib");
return out.str();
}
//
// The import name is calculated as follows:
//
// | LIBRARY w/ ext | LIBRARY w/o ext | no LIBRARY
// -----+----------------+---------------------+------------------
// LINK | {value} | {value}.{.dll/.exe} | {output name}
// LIB | {value} | {value}.dll | {output name}.dll
//
static std::string getImportName(bool asLib) {
SmallString<128> out;
if (config->importName.empty()) {
out.assign(sys::path::filename(config->outputFile));
if (asLib)
sys::path::replace_extension(out, ".dll");
} else {
out.assign(config->importName);
if (!sys::path::has_extension(out))
sys::path::replace_extension(out,
(config->dll || asLib) ? ".dll" : ".exe");
}
return out.str();
}
static void createImportLibrary(bool asLib) {
std::vector<COFFShortExport> exports;
for (Export &e1 : config->exports) {
COFFShortExport e2;
e2.Name = e1.name;
e2.SymbolName = e1.symbolName;
e2.ExtName = e1.extName;
e2.Ordinal = e1.ordinal;
e2.Noname = e1.noname;
e2.Data = e1.data;
e2.Private = e1.isPrivate;
e2.Constant = e1.constant;
exports.push_back(e2);
}
auto handleError = [](Error &&e) {
handleAllErrors(std::move(e),
[](ErrorInfoBase &eib) { error(eib.message()); });
};
std::string libName = getImportName(asLib);
std::string path = getImplibPath();
if (!config->incremental) {
handleError(writeImportLibrary(libName, path, exports, config->machine,
config->mingw));
return;
}
// If the import library already exists, replace it only if the contents
// have changed.
ErrorOr<std::unique_ptr<MemoryBuffer>> oldBuf = MemoryBuffer::getFile(
path, /*FileSize*/ -1, /*RequiresNullTerminator*/ false);
if (!oldBuf) {
handleError(writeImportLibrary(libName, path, exports, config->machine,
config->mingw));
return;
}
SmallString<128> tmpName;
if (std::error_code ec =
sys::fs::createUniqueFile(path + ".tmp-%%%%%%%%.lib", tmpName))
fatal("cannot create temporary file for import library " + path + ": " +
ec.message());
if (Error e = writeImportLibrary(libName, tmpName, exports, config->machine,
config->mingw)) {
handleError(std::move(e));
return;
}
std::unique_ptr<MemoryBuffer> newBuf = check(MemoryBuffer::getFile(
tmpName, /*FileSize*/ -1, /*RequiresNullTerminator*/ false));
if ((*oldBuf)->getBuffer() != newBuf->getBuffer()) {
oldBuf->reset();
handleError(errorCodeToError(sys::fs::rename(tmpName, path)));
} else {
sys::fs::remove(tmpName);
}
}
static void parseModuleDefs(StringRef path) {
std::unique_ptr<MemoryBuffer> mb = CHECK(
MemoryBuffer::getFile(path, -1, false, true), "could not open " + path);
COFFModuleDefinition m = check(parseCOFFModuleDefinition(
mb->getMemBufferRef(), config->machine, config->mingw));
if (config->outputFile.empty())
config->outputFile = saver.save(m.OutputFile);
config->importName = saver.save(m.ImportName);
if (m.ImageBase)
config->imageBase = m.ImageBase;
if (m.StackReserve)
config->stackReserve = m.StackReserve;
if (m.StackCommit)
config->stackCommit = m.StackCommit;
if (m.HeapReserve)
config->heapReserve = m.HeapReserve;
if (m.HeapCommit)
config->heapCommit = m.HeapCommit;
if (m.MajorImageVersion)
config->majorImageVersion = m.MajorImageVersion;
if (m.MinorImageVersion)
config->minorImageVersion = m.MinorImageVersion;
if (m.MajorOSVersion)
config->majorOSVersion = m.MajorOSVersion;
if (m.MinorOSVersion)
config->minorOSVersion = m.MinorOSVersion;
for (COFFShortExport e1 : m.Exports) {
Export e2;
// In simple cases, only Name is set. Renamed exports are parsed
// and set as "ExtName = Name". If Name has the form "OtherDll.Func",
// it shouldn't be a normal exported function but a forward to another
// DLL instead. This is supported by both MS and GNU linkers.
if (e1.ExtName != e1.Name && StringRef(e1.Name).contains('.')) {
e2.name = saver.save(e1.ExtName);
e2.forwardTo = saver.save(e1.Name);
config->exports.push_back(e2);
continue;
}
e2.name = saver.save(e1.Name);
e2.extName = saver.save(e1.ExtName);
e2.ordinal = e1.Ordinal;
e2.noname = e1.Noname;
e2.data = e1.Data;
e2.isPrivate = e1.Private;
e2.constant = e1.Constant;
config->exports.push_back(e2);
}
}
void LinkerDriver::enqueueTask(std::function<void()> task) {
taskQueue.push_back(std::move(task));
}
bool LinkerDriver::run() {
ScopedTimer t(inputFileTimer);
bool didWork = !taskQueue.empty();
while (!taskQueue.empty()) {
taskQueue.front()();
taskQueue.pop_front();
}
return didWork;
}
// Parse an /order file. If the option is given, the linker places
// COMDAT sections in the same order as their names appear in the
// given file.
static void parseOrderFile(StringRef arg) {
// For some reason, the MSVC linker requires a filename to be
// preceded by "@".
if (!arg.startswith("@")) {
error("malformed /order option: '@' missing");
return;
}
// Get a list of all comdat sections for error checking.
DenseSet<StringRef> set;
for (Chunk *c : symtab->getChunks())
if (auto *sec = dyn_cast<SectionChunk>(c))
if (sec->sym)
set.insert(sec->sym->getName());
// Open a file.
StringRef path = arg.substr(1);
std::unique_ptr<MemoryBuffer> mb = CHECK(
MemoryBuffer::getFile(path, -1, false, true), "could not open " + path);
// Parse a file. An order file contains one symbol per line.
// All symbols that are not present in the order file are considered to
// have the lowest priority 0 and are placed at the end of an output
// section.
for (std::string s : args::getLines(mb->getMemBufferRef())) {
if (config->machine == I386 && !isDecorated(s))
s = "_" + s;
if (set.count(s) == 0) {
if (config->warnMissingOrderSymbol)
warn("/order:" + arg + ": missing symbol: " + s + " [LNK4037]");
}
else
config->order[s] = INT_MIN + config->order.size();
}
}
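// A guarded sketch of an /order response file (hypothetical symbols): one
// symbol per line; listed COMDAT sections are laid out first, in file order.
#if 0
// lld-link /order:@order.txt foo.obj ...
// order.txt:
//   ?hot@@YAXXZ   (already decorated C++ name, used as-is)
//   main          (on x86, "_" is prepended automatically)
#endif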
static void markAddrsig(Symbol *s) {
if (auto *d = dyn_cast_or_null<Defined>(s))
if (SectionChunk *c = dyn_cast_or_null<SectionChunk>(d->getChunk()))
c->keepUnique = true;
}
static void findKeepUniqueSections() {
// Exported symbols could be address-significant in other executables or DSOs,
// so we conservatively mark them as address-significant.
for (Export &r : config->exports)
markAddrsig(r.sym);
// Visit the address-significance table in each object file and mark each
// referenced symbol as address-significant.
for (ObjFile *obj : ObjFile::instances) {
ArrayRef<Symbol *> syms = obj->getSymbols();
if (obj->addrsigSec) {
ArrayRef<uint8_t> contents;
cantFail(
obj->getCOFFObj()->getSectionContents(obj->addrsigSec, contents));
const uint8_t *cur = contents.begin();
while (cur != contents.end()) {
unsigned size;
const char *err;
uint64_t symIndex = decodeULEB128(cur, &size, contents.end(), &err);
if (err)
fatal(toString(obj) + ": could not decode addrsig section: " + err);
if (symIndex >= syms.size())
fatal(toString(obj) + ": invalid symbol index in addrsig section");
markAddrsig(syms[symIndex]);
cur += size;
}
} else {
// If an object file does not have an address-significance table,
// conservatively mark all of its symbols as address-significant.
for (Symbol *s : syms)
markAddrsig(s);
}
}
}
// link.exe replaces each %foo% in altPath with the contents of environment
// variable foo, and adds the two magic env vars _PDB (expands to the basename
// of pdb's output path) and _EXT (expands to the extension of the output
// binary).
// lld only supports %_PDB% and %_EXT% and warns on references to all other env
// vars.
static void parsePDBAltPath(StringRef altPath) {
SmallString<128> buf;
StringRef pdbBasename =
sys::path::filename(config->pdbPath, sys::path::Style::windows);
StringRef binaryExtension =
sys::path::extension(config->outputFile, sys::path::Style::windows);
if (!binaryExtension.empty())
binaryExtension = binaryExtension.substr(1); // %_EXT% does not include '.'.
// Invariant:
// +--------- cursor ('a...' might be the empty string).
// | +----- firstMark
// | | +- secondMark
// v v v
// a...%...%...
size_t cursor = 0;
while (cursor < altPath.size()) {
size_t firstMark, secondMark;
if ((firstMark = altPath.find('%', cursor)) == StringRef::npos ||
(secondMark = altPath.find('%', firstMark + 1)) == StringRef::npos) {
// Didn't find another full fragment, treat rest of string as literal.
buf.append(altPath.substr(cursor));
break;
}
// Found a full fragment. Append text in front of first %, and interpret
// text between first and second % as variable name.
buf.append(altPath.substr(cursor, firstMark - cursor));
StringRef var = altPath.substr(firstMark, secondMark - firstMark + 1);
if (var.equals_lower("%_pdb%"))
buf.append(pdbBasename);
else if (var.equals_lower("%_ext%"))
buf.append(binaryExtension);
else {
warn("only %_PDB% and %_EXT% supported in /pdbaltpath:, keeping " +
var + " as literal");
buf.append(var);
}
cursor = secondMark + 1;
}
config->pdbAltPath = buf;
}
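// Guarded expansion examples (hypothetical paths) for the loop above:
#if 0
// /out:bin\app.exe /pdb:bin\app.pdb /pdbaltpath:symbols\%_PDB%
//   -> pdbAltPath = "symbols\app.pdb"  (%_PDB% -> pdb basename)
// /pdbaltpath:srv\%_EXT%\app.pdb
//   -> pdbAltPath = "srv\exe\app.pdb"  (%_EXT% -> "exe", no leading '.')
// /pdbaltpath:%FOO%\app.pdb
//   -> warns and keeps "%FOO%" as a literal
#endif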
/// Check that at most one resource obj file was used.
/// Call after ObjFile::instances is complete.
static void diagnoseMultipleResourceObjFiles() {
// The .rsrc$01 section in a resource obj file contains a tree description
// of resources. Merging multiple resource obj files would require merging
// the trees instead of using usual linker section merging semantics.
// Since link.exe disallows linking more than one resource obj file with
// LNK4078, mirror that. The normal use of resource files is to give the
// linker many .res files, which are then converted to a single resource obj
// file internally, so this is not a big restriction in practice.
ObjFile *resourceObjFile = nullptr;
for (ObjFile *f : ObjFile::instances) {
if (!f->isResourceObjFile)
continue;
if (!resourceObjFile) {
resourceObjFile = f;
continue;
}
error(toString(f) +
": more than one resource obj file not allowed, already got " +
toString(resourceObjFile));
}
}
// In MinGW, if no symbols are chosen to be exported, then all symbols are
// automatically exported by default. This behavior can be forced by the
// -export-all-symbols option, so that it happens even when exports are
// explicitly specified. The automatic behavior can be disabled using the
// -exclude-all-symbols option, so that lld-link behaves like link.exe rather
// than MinGW in the case that nothing is explicitly exported.
void LinkerDriver::maybeExportMinGWSymbols(const opt::InputArgList &args) {
if (!config->dll)
return;
if (!args.hasArg(OPT_export_all_symbols)) {
if (!config->exports.empty())
return;
if (args.hasArg(OPT_exclude_all_symbols))
return;
}
AutoExporter exporter;
for (auto *arg : args.filtered(OPT_wholearchive_file))
if (Optional<StringRef> path = doFindFile(arg->getValue()))
exporter.addWholeArchive(*path);
symtab->forEachSymbol([&](Symbol *s) {
auto *def = dyn_cast<Defined>(s);
if (!exporter.shouldExport(def))
return;
Export e;
e.name = def->getName();
e.sym = def;
if (Chunk *c = def->getChunk())
if (!(c->getOutputCharacteristics() & IMAGE_SCN_MEM_EXECUTE))
e.data = true;
config->exports.push_back(e);
});
}
static const char *libcallRoutineNames[] = {
#define HANDLE_LIBCALL(code, name) name,
#include "llvm/IR/RuntimeLibcalls.def"
#undef HANDLE_LIBCALL
};
void LinkerDriver::link(ArrayRef<const char *> argsArr) {
// Needed for LTO.
InitializeAllTargetInfos();
InitializeAllTargets();
InitializeAllTargetMCs();
InitializeAllAsmParsers();
InitializeAllAsmPrinters();
// If the first command line argument is "/lib", link.exe acts like lib.exe.
// We call our own implementation of lib.exe that understands bitcode files.
if (argsArr.size() > 1 && StringRef(argsArr[1]).equals_lower("/lib")) {
if (llvm::libDriverMain(argsArr.slice(1)) != 0)
fatal("lib failed");
return;
}
// Parse command line options.
ArgParser parser;
opt::InputArgList args = parser.parseLINK(argsArr);
// Parse and evaluate -mllvm options.
std::vector<const char *> v;
v.push_back("lld-link (LLVM option parsing)");
for (auto *arg : args.filtered(OPT_mllvm))
v.push_back(arg->getValue());
cl::ParseCommandLineOptions(v.size(), v.data());
// Handle /errorlimit early, because error() depends on it.
if (auto *arg = args.getLastArg(OPT_errorlimit)) {
int n = 20;
StringRef s = arg->getValue();
if (s.getAsInteger(10, n))
error(arg->getSpelling() + " number expected, but got " + s);
errorHandler().errorLimit = n;
}
// Handle /help
if (args.hasArg(OPT_help)) {
printHelp(argsArr[0]);
return;
}
lld::threadsEnabled = args.hasFlag(OPT_threads, OPT_threads_no, true);
if (args.hasArg(OPT_show_timing))
config->showTiming = true;
config->showSummary = args.hasArg(OPT_summary);
ScopedTimer t(Timer::root());
// Handle --version, which is an lld extension. This option is a bit odd
// because it doesn't start with "/", but we deliberately chose "--" to
// avoid conflict with /version and for compatibility with clang-cl.
if (args.hasArg(OPT_dash_dash_version)) {
outs() << getLLDVersion() << "\n";
return;
}
// Handle /lldmingw early, since it can potentially affect how other
// options are handled.
config->mingw = args.hasArg(OPT_lldmingw);
if (auto *arg = args.getLastArg(OPT_linkrepro)) {
SmallString<64> path = StringRef(arg->getValue());
sys::path::append(path, "repro.tar");
Expected<std::unique_ptr<TarWriter>> errOrWriter =
TarWriter::create(path, "repro");
if (errOrWriter) {
tar = std::move(*errOrWriter);
} else {
error("/linkrepro: failed to open " + path + ": " +
toString(errOrWriter.takeError()));
}
}
- if (!args.hasArg(OPT_INPUT)) {
+ if (!args.hasArg(OPT_INPUT, OPT_wholearchive_file)) {
if (args.hasArg(OPT_deffile))
config->noEntry = true;
else
fatal("no input files");
}
// Construct search path list.
searchPaths.push_back("");
for (auto *arg : args.filtered(OPT_libpath))
searchPaths.push_back(arg->getValue());
addLibSearchPaths();
// Handle /ignore
for (auto *arg : args.filtered(OPT_ignore)) {
SmallVector<StringRef, 8> vec;
StringRef(arg->getValue()).split(vec, ',');
for (StringRef s : vec) {
if (s == "4037")
config->warnMissingOrderSymbol = false;
else if (s == "4099")
config->warnDebugInfoUnusable = false;
else if (s == "4217")
config->warnLocallyDefinedImported = false;
// Other warning numbers are ignored.
}
}
// Handle /out
if (auto *arg = args.getLastArg(OPT_out))
config->outputFile = arg->getValue();
// Handle /verbose
if (args.hasArg(OPT_verbose))
config->verbose = true;
errorHandler().verbose = config->verbose;
// Handle /force or /force:unresolved
if (args.hasArg(OPT_force, OPT_force_unresolved))
config->forceUnresolved = true;
// Handle /force or /force:multiple
if (args.hasArg(OPT_force, OPT_force_multiple))
config->forceMultiple = true;
// Handle /force or /force:multipleres
if (args.hasArg(OPT_force, OPT_force_multipleres))
config->forceMultipleRes = true;
// Handle /debug
DebugKind debug = parseDebugKind(args);
if (debug == DebugKind::Full || debug == DebugKind::Dwarf ||
debug == DebugKind::GHash) {
config->debug = true;
config->incremental = true;
}
// Handle /demangle
config->demangle = args.hasFlag(OPT_demangle, OPT_demangle_no);
// Handle /debugtype
config->debugTypes = parseDebugTypes(args);
// Handle /pdb
bool shouldCreatePDB =
(debug == DebugKind::Full || debug == DebugKind::GHash);
if (shouldCreatePDB) {
if (auto *arg = args.getLastArg(OPT_pdb))
config->pdbPath = arg->getValue();
if (auto *arg = args.getLastArg(OPT_pdbaltpath))
config->pdbAltPath = arg->getValue();
if (args.hasArg(OPT_natvis))
config->natvisFiles = args.getAllArgValues(OPT_natvis);
if (auto *arg = args.getLastArg(OPT_pdb_source_path))
config->pdbSourcePath = arg->getValue();
}
// Handle /noentry
if (args.hasArg(OPT_noentry)) {
if (args.hasArg(OPT_dll))
config->noEntry = true;
else
error("/noentry must be specified with /dll");
}
// Handle /dll
if (args.hasArg(OPT_dll)) {
config->dll = true;
config->manifestID = 2;
}
// Handle /dynamicbase and /fixed. We can't use hasFlag for /dynamicbase
// because we need to explicitly check whether that option or its inverse was
// present in the argument list in order to handle /fixed.
auto *dynamicBaseArg = args.getLastArg(OPT_dynamicbase, OPT_dynamicbase_no);
if (dynamicBaseArg &&
dynamicBaseArg->getOption().getID() == OPT_dynamicbase_no)
config->dynamicBase = false;
// MSDN claims "/FIXED:NO is the default setting for a DLL, and /FIXED is the
// default setting for any other project type.", but link.exe defaults to
// /FIXED:NO for exe outputs as well. Match behavior, not docs.
bool fixed = args.hasFlag(OPT_fixed, OPT_fixed_no, false);
if (fixed) {
if (dynamicBaseArg &&
dynamicBaseArg->getOption().getID() == OPT_dynamicbase) {
error("/fixed must not be specified with /dynamicbase");
} else {
config->relocatable = false;
config->dynamicBase = false;
}
}
// Handle /appcontainer
config->appContainer =
args.hasFlag(OPT_appcontainer, OPT_appcontainer_no, false);
// Handle /machine
if (auto *arg = args.getLastArg(OPT_machine)) {
config->machine = getMachineType(arg->getValue());
if (config->machine == IMAGE_FILE_MACHINE_UNKNOWN)
fatal(Twine("unknown /machine argument: ") + arg->getValue());
}
// Handle /nodefaultlib:<filename>
for (auto *arg : args.filtered(OPT_nodefaultlib))
config->noDefaultLibs.insert(doFindLib(arg->getValue()).lower());
// Handle /nodefaultlib
if (args.hasArg(OPT_nodefaultlib_all))
config->noDefaultLibAll = true;
// Handle /base
if (auto *arg = args.getLastArg(OPT_base))
parseNumbers(arg->getValue(), &config->imageBase);
// Handle /filealign
if (auto *arg = args.getLastArg(OPT_filealign)) {
parseNumbers(arg->getValue(), &config->fileAlign);
if (!isPowerOf2_64(config->fileAlign))
error("/filealign: not a power of two: " + Twine(config->fileAlign));
}
// Handle /stack
if (auto *arg = args.getLastArg(OPT_stack))
parseNumbers(arg->getValue(), &config->stackReserve, &config->stackCommit);
// Handle /guard:cf
if (auto *arg = args.getLastArg(OPT_guard))
parseGuard(arg->getValue());
// Handle /heap
if (auto *arg = args.getLastArg(OPT_heap))
parseNumbers(arg->getValue(), &config->heapReserve, &config->heapCommit);
// Handle /version
if (auto *arg = args.getLastArg(OPT_version))
parseVersion(arg->getValue(), &config->majorImageVersion,
&config->minorImageVersion);
// Handle /subsystem
if (auto *arg = args.getLastArg(OPT_subsystem))
parseSubsystem(arg->getValue(), &config->subsystem, &config->majorOSVersion,
&config->minorOSVersion);
// Handle /timestamp
if (llvm::opt::Arg *arg = args.getLastArg(OPT_timestamp, OPT_repro)) {
if (arg->getOption().getID() == OPT_repro) {
config->timestamp = 0;
config->repro = true;
} else {
config->repro = false;
StringRef value(arg->getValue());
if (value.getAsInteger(0, config->timestamp))
fatal(Twine("invalid timestamp: ") + value +
". Expected 32-bit integer");
}
} else {
config->repro = false;
config->timestamp = time(nullptr);
}
// Handle /alternatename
for (auto *arg : args.filtered(OPT_alternatename))
parseAlternateName(arg->getValue());
// Handle /include
for (auto *arg : args.filtered(OPT_incl))
addUndefined(arg->getValue());
// Handle /implib
if (auto *arg = args.getLastArg(OPT_implib))
config->implib = arg->getValue();
// Handle /opt.
bool doGC = debug == DebugKind::None || args.hasArg(OPT_profile);
unsigned icfLevel =
args.hasArg(OPT_profile) ? 0 : 1; // 0: off, 1: limited, 2: on
unsigned tailMerge = 1;
for (auto *arg : args.filtered(OPT_opt)) {
std::string str = StringRef(arg->getValue()).lower();
SmallVector<StringRef, 1> vec;
StringRef(str).split(vec, ',');
for (StringRef s : vec) {
if (s == "ref") {
doGC = true;
} else if (s == "noref") {
doGC = false;
} else if (s == "icf" || s.startswith("icf=")) {
icfLevel = 2;
} else if (s == "noicf") {
icfLevel = 0;
} else if (s == "lldtailmerge") {
tailMerge = 2;
} else if (s == "nolldtailmerge") {
tailMerge = 0;
} else if (s.startswith("lldlto=")) {
StringRef optLevel = s.substr(7);
if (optLevel.getAsInteger(10, config->ltoo) || config->ltoo > 3)
error("/opt:lldlto: invalid optimization level: " + optLevel);
} else if (s.startswith("lldltojobs=")) {
StringRef jobs = s.substr(11);
if (jobs.getAsInteger(10, config->thinLTOJobs) ||
config->thinLTOJobs == 0)
error("/opt:lldltojobs: invalid job count: " + jobs);
} else if (s.startswith("lldltopartitions=")) {
StringRef n = s.substr(17);
if (n.getAsInteger(10, config->ltoPartitions) ||
config->ltoPartitions == 0)
error("/opt:lldltopartitions: invalid partition count: " + n);
} else if (s != "lbr" && s != "nolbr")
error("/opt: unknown option: " + s);
}
}
// Limited ICF is enabled if GC is enabled and ICF was never mentioned
// explicitly.
// FIXME: LLD only implements "limited" ICF, i.e. it only merges identical
// code. If the user passes /OPT:ICF explicitly, LLD should merge identical
// comdat readonly data.
if (icfLevel == 1 && !doGC)
icfLevel = 0;
config->doGC = doGC;
config->doICF = icfLevel > 0;
config->tailMerge = (tailMerge == 1 && config->doICF) || tailMerge == 2;
// Handle /lldsavetemps
if (args.hasArg(OPT_lldsavetemps))
config->saveTemps = true;
// Handle /kill-at
if (args.hasArg(OPT_kill_at))
config->killAt = true;
// Handle /lldltocache
if (auto *arg = args.getLastArg(OPT_lldltocache))
config->ltoCache = arg->getValue();
// Handle /lldsavecachepolicy
if (auto *arg = args.getLastArg(OPT_lldltocachepolicy))
config->ltoCachePolicy = CHECK(
parseCachePruningPolicy(arg->getValue()),
Twine("/lldltocachepolicy: invalid cache policy: ") + arg->getValue());
// Handle /failifmismatch
for (auto *arg : args.filtered(OPT_failifmismatch))
checkFailIfMismatch(arg->getValue(), nullptr);
// Handle /merge
for (auto *arg : args.filtered(OPT_merge))
parseMerge(arg->getValue());
// Add default section merging rules after user rules. User rules take
// precedence, but we will emit a warning if there is a conflict.
parseMerge(".idata=.rdata");
parseMerge(".didat=.rdata");
parseMerge(".edata=.rdata");
parseMerge(".xdata=.rdata");
parseMerge(".bss=.data");
if (config->mingw) {
parseMerge(".ctors=.rdata");
parseMerge(".dtors=.rdata");
parseMerge(".CRT=.rdata");
}
// Handle /section
for (auto *arg : args.filtered(OPT_section))
parseSection(arg->getValue());
// Handle /align
if (auto *arg = args.getLastArg(OPT_align)) {
parseNumbers(arg->getValue(), &config->align);
if (!isPowerOf2_64(config->align))
error("/align: not a power of two: " + StringRef(arg->getValue()));
}
// Handle /aligncomm
for (auto *arg : args.filtered(OPT_aligncomm))
parseAligncomm(arg->getValue());
// Handle /manifestdependency. This enables /manifest unless /manifest:no is
// also passed.
if (auto *arg = args.getLastArg(OPT_manifestdependency)) {
config->manifestDependency = arg->getValue();
config->manifest = Configuration::SideBySide;
}
// Handle /manifest and /manifest:
if (auto *arg = args.getLastArg(OPT_manifest, OPT_manifest_colon)) {
if (arg->getOption().getID() == OPT_manifest)
config->manifest = Configuration::SideBySide;
else
parseManifest(arg->getValue());
}
// Handle /manifestuac
if (auto *arg = args.getLastArg(OPT_manifestuac))
parseManifestUAC(arg->getValue());
// Handle /manifestfile
if (auto *arg = args.getLastArg(OPT_manifestfile))
config->manifestFile = arg->getValue();
// Handle /manifestinput
for (auto *arg : args.filtered(OPT_manifestinput))
config->manifestInput.push_back(arg->getValue());
if (!config->manifestInput.empty() &&
config->manifest != Configuration::Embed) {
fatal("/manifestinput: requires /manifest:embed");
}
config->thinLTOEmitImportsFiles = args.hasArg(OPT_thinlto_emit_imports_files);
config->thinLTOIndexOnly = args.hasArg(OPT_thinlto_index_only) ||
args.hasArg(OPT_thinlto_index_only_arg);
config->thinLTOIndexOnlyArg =
args.getLastArgValue(OPT_thinlto_index_only_arg);
config->thinLTOPrefixReplace =
getOldNewOptions(args, OPT_thinlto_prefix_replace);
config->thinLTOObjectSuffixReplace =
getOldNewOptions(args, OPT_thinlto_object_suffix_replace);
// Handle miscellaneous boolean flags.
config->allowBind = args.hasFlag(OPT_allowbind, OPT_allowbind_no, true);
config->allowIsolation =
args.hasFlag(OPT_allowisolation, OPT_allowisolation_no, true);
config->incremental =
args.hasFlag(OPT_incremental, OPT_incremental_no,
!config->doGC && !config->doICF && !args.hasArg(OPT_order) &&
!args.hasArg(OPT_profile));
config->integrityCheck =
args.hasFlag(OPT_integritycheck, OPT_integritycheck_no, false);
config->nxCompat = args.hasFlag(OPT_nxcompat, OPT_nxcompat_no, true);
for (auto *arg : args.filtered(OPT_swaprun))
parseSwaprun(arg->getValue());
config->terminalServerAware =
!config->dll && args.hasFlag(OPT_tsaware, OPT_tsaware_no, true);
config->debugDwarf = debug == DebugKind::Dwarf;
config->debugGHashes = debug == DebugKind::GHash;
config->debugSymtab = debug == DebugKind::Symtab;
config->mapFile = getMapFile(args);
if (config->incremental && args.hasArg(OPT_profile)) {
warn("ignoring '/incremental' due to '/profile' specification");
config->incremental = false;
}
if (config->incremental && args.hasArg(OPT_order)) {
warn("ignoring '/incremental' due to '/order' specification");
config->incremental = false;
}
if (config->incremental && config->doGC) {
warn("ignoring '/incremental' because REF is enabled; use '/opt:noref' to "
"disable");
config->incremental = false;
}
if (config->incremental && config->doICF) {
warn("ignoring '/incremental' because ICF is enabled; use '/opt:noicf' to "
"disable");
config->incremental = false;
}
if (errorCount())
return;
std::set<sys::fs::UniqueID> wholeArchives;
for (auto *arg : args.filtered(OPT_wholearchive_file))
if (Optional<StringRef> path = doFindFile(arg->getValue()))
if (Optional<sys::fs::UniqueID> id = getUniqueID(*path))
wholeArchives.insert(*id);
// A predicate returning true if a given path is an argument for
// /wholearchive:, or /wholearchive is enabled globally.
// This function is a bit tricky because "foo.obj /wholearchive:././foo.obj"
// needs to be handled as "/wholearchive:foo.obj foo.obj".
auto isWholeArchive = [&](StringRef path) -> bool {
if (args.hasArg(OPT_wholearchive_flag))
return true;
if (Optional<sys::fs::UniqueID> id = getUniqueID(path))
return wholeArchives.count(*id);
return false;
};
// Create a list of input files. Files can be given as arguments
// for the /defaultlib option.
for (auto *arg : args.filtered(OPT_INPUT, OPT_wholearchive_file))
if (Optional<StringRef> path = findFile(arg->getValue()))
enqueuePath(*path, isWholeArchive(*path));
for (auto *arg : args.filtered(OPT_defaultlib))
if (Optional<StringRef> path = findLib(arg->getValue()))
enqueuePath(*path, false);
// Windows specific -- Create a resource file containing a manifest file.
if (config->manifest == Configuration::Embed)
addBuffer(createManifestRes(), false);
// Read all input files given via the command line.
run();
if (errorCount())
return;
// We should have inferred a machine type by now from the input files, but if
// not we assume x64.
if (config->machine == IMAGE_FILE_MACHINE_UNKNOWN) {
warn("/machine is not specified. x64 is assumed");
config->machine = AMD64;
}
config->wordsize = config->is64() ? 8 : 4;
// Handle /safeseh, x86 only, on by default, except for mingw.
if (config->machine == I386 &&
args.hasFlag(OPT_safeseh, OPT_safeseh_no, !config->mingw))
config->safeSEH = true;
// Handle /functionpadmin
for (auto *arg : args.filtered(OPT_functionpadmin, OPT_functionpadmin_opt))
parseFunctionPadMin(arg, config->machine);
// Input files can be Windows resource files (.res files). We use
// WindowsResource to convert resource files to a regular COFF file,
// then link the resulting file normally.
if (!resources.empty())
symtab->addFile(make<ObjFile>(convertResToCOFF(resources)));
if (tar)
tar->append("response.txt",
createResponseFile(args, filePaths,
ArrayRef<StringRef>(searchPaths).slice(1)));
// Handle /largeaddressaware
config->largeAddressAware = args.hasFlag(
OPT_largeaddressaware, OPT_largeaddressaware_no, config->is64());
// Handle /highentropyva
config->highEntropyVA =
config->is64() &&
args.hasFlag(OPT_highentropyva, OPT_highentropyva_no, true);
if (!config->dynamicBase &&
(config->machine == ARMNT || config->machine == ARM64))
error("/dynamicbase:no is not compatible with " +
machineToStr(config->machine));
// Handle /export
for (auto *arg : args.filtered(OPT_export)) {
Export e = parseExport(arg->getValue());
if (config->machine == I386) {
if (!isDecorated(e.name))
e.name = saver.save("_" + e.name);
if (!e.extName.empty() && !isDecorated(e.extName))
e.extName = saver.save("_" + e.extName);
}
config->exports.push_back(e);
}
// Handle /def
if (auto *arg = args.getLastArg(OPT_deffile)) {
// parseModuleDefs mutates the Config object.
parseModuleDefs(arg->getValue());
}
// Handle generation of import library from a def file.
- if (!args.hasArg(OPT_INPUT)) {
+ if (!args.hasArg(OPT_INPUT, OPT_wholearchive_file)) {
fixupExports();
createImportLibrary(/*asLib=*/true);
return;
}
// Windows specific -- if no /subsystem is given, we need to infer
// that from entry point name. Must happen before /entry handling,
// and after the early return when just writing an import library.
if (config->subsystem == IMAGE_SUBSYSTEM_UNKNOWN) {
config->subsystem = inferSubsystem();
if (config->subsystem == IMAGE_SUBSYSTEM_UNKNOWN)
fatal("subsystem must be defined");
}
// Handle /entry and /dll
if (auto *arg = args.getLastArg(OPT_entry)) {
config->entry = addUndefined(mangle(arg->getValue()));
} else if (!config->entry && !config->noEntry) {
if (args.hasArg(OPT_dll)) {
StringRef s = (config->machine == I386) ? "__DllMainCRTStartup@12"
: "_DllMainCRTStartup";
config->entry = addUndefined(s);
} else {
// Windows specific -- If an entry point name is not given, we need to
// infer it from a user-defined entry name.
StringRef s = findDefaultEntry();
if (s.empty())
fatal("entry point must be defined");
config->entry = addUndefined(s);
log("Entry name inferred: " + s);
}
}
// Handle /delayload
for (auto *arg : args.filtered(OPT_delayload)) {
config->delayLoads.insert(StringRef(arg->getValue()).lower());
if (config->machine == I386) {
config->delayLoadHelper = addUndefined("___delayLoadHelper2@8");
} else {
config->delayLoadHelper = addUndefined("__delayLoadHelper2");
}
}
// Set default image name if neither /out nor /def set it.
if (config->outputFile.empty()) {
- config->outputFile =
- getOutputPath((*args.filtered(OPT_INPUT).begin())->getValue());
+ config->outputFile = getOutputPath(
+ (*args.filtered(OPT_INPUT, OPT_wholearchive_file).begin())->getValue());
}
// Fail early if an output file is not writable.
if (auto e = tryCreateFile(config->outputFile)) {
error("cannot open output file " + config->outputFile + ": " + e.message());
return;
}
if (shouldCreatePDB) {
// Put the PDB next to the image if no /pdb flag was passed.
if (config->pdbPath.empty()) {
config->pdbPath = config->outputFile;
sys::path::replace_extension(config->pdbPath, ".pdb");
}
// The embedded PDB path should be the absolute path to the PDB if no
// /pdbaltpath flag was passed.
if (config->pdbAltPath.empty()) {
config->pdbAltPath = config->pdbPath;
// It's important to make the path absolute and remove dots. This path
// will eventually be written into the PE header, and certain Microsoft
// tools won't work correctly if these assumptions do not hold.
sys::fs::make_absolute(config->pdbAltPath);
sys::path::remove_dots(config->pdbAltPath);
} else {
// Don't do this earlier, so that Config->OutputFile is ready.
parsePDBAltPath(config->pdbAltPath);
}
}
// Set default image base if /base is not given.
if (config->imageBase == uint64_t(-1))
config->imageBase = getDefaultImageBase();
symtab->addSynthetic(mangle("__ImageBase"), nullptr);
if (config->machine == I386) {
symtab->addAbsolute("___safe_se_handler_table", 0);
symtab->addAbsolute("___safe_se_handler_count", 0);
}
symtab->addAbsolute(mangle("__guard_fids_count"), 0);
symtab->addAbsolute(mangle("__guard_fids_table"), 0);
symtab->addAbsolute(mangle("__guard_flags"), 0);
symtab->addAbsolute(mangle("__guard_iat_count"), 0);
symtab->addAbsolute(mangle("__guard_iat_table"), 0);
symtab->addAbsolute(mangle("__guard_longjmp_count"), 0);
symtab->addAbsolute(mangle("__guard_longjmp_table"), 0);
// Needed for MSVC 2017 15.5 CRT.
symtab->addAbsolute(mangle("__enclave_config"), 0);
if (config->mingw) {
symtab->addAbsolute(mangle("__RUNTIME_PSEUDO_RELOC_LIST__"), 0);
symtab->addAbsolute(mangle("__RUNTIME_PSEUDO_RELOC_LIST_END__"), 0);
symtab->addAbsolute(mangle("__CTOR_LIST__"), 0);
symtab->addAbsolute(mangle("__DTOR_LIST__"), 0);
}
// This code may add new undefined symbols to the link, which may enqueue more
// symbol resolution tasks, so we need to continue executing tasks until we
// converge.
do {
// Windows specific -- if entry point is not found,
// search for its mangled names.
if (config->entry)
mangleMaybe(config->entry);
// Windows specific -- Make sure we resolve all dllexported symbols.
for (Export &e : config->exports) {
if (!e.forwardTo.empty())
continue;
e.sym = addUndefined(e.name);
if (!e.directives)
e.symbolName = mangleMaybe(e.sym);
}
// Add weak aliases. Weak aliases are a mechanism to give remaining
// undefined symbols a final chance to be resolved successfully.
for (auto pair : config->alternateNames) {
StringRef from = pair.first;
StringRef to = pair.second;
Symbol *sym = symtab->find(from);
if (!sym)
continue;
if (auto *u = dyn_cast<Undefined>(sym))
if (!u->weakAlias)
u->weakAlias = symtab->addUndefined(to);
}
// If any inputs are bitcode files, the LTO code generator may create
// references to library functions that are not explicit in the bitcode
// file's symbol table. If any of those library functions are defined in a
// bitcode file in an archive member, we need to arrange to use LTO to
// compile those archive members by adding them to the link beforehand.
if (!BitcodeFile::instances.empty())
for (const char *s : libcallRoutineNames)
symtab->addLibcall(s);
// Windows specific -- if __load_config_used can be resolved, resolve it.
if (symtab->findUnderscore("_load_config_used"))
addUndefined(mangle("_load_config_used"));
} while (run());
if (errorCount())
return;
// Do LTO by compiling bitcode input files to a set of native COFF files then
// link those files (unless -thinlto-index-only was given, in which case we
// resolve symbols and write indices, but don't generate native code or link).
symtab->addCombinedLTOObjects();
// If -thinlto-index-only is given, we should create only "index
// files" and not object files. Index file creation is already done
// in addCombinedLTOObject, so we are done if that's the case.
if (config->thinLTOIndexOnly)
return;
// If we generated native object files from bitcode files, this resolves
// references to the symbols we use from them.
run();
if (args.hasArg(OPT_include_optional)) {
// Handle /includeoptional
for (auto *arg : args.filtered(OPT_include_optional))
if (dyn_cast_or_null<Lazy>(symtab->find(arg->getValue())))
addUndefined(arg->getValue());
while (run());
}
if (config->mingw) {
// Load any further object files that might be needed for doing automatic
// imports.
//
// For cases with no automatically imported symbols, this iterates once
// over the symbol table and doesn't do anything.
//
// For the normal case with a few automatically imported symbols, this
// should only need to be run once, since each new object file imported
// is an import library and wouldn't add any new undefined references,
// but there's nothing stopping the __imp_ symbols from coming from a
// normal object file as well (although that won't be used for the
// actual autoimport later on). If this pass adds new undefined references,
// we won't iterate further to resolve them.
symtab->loadMinGWAutomaticImports();
run();
}
// Make sure we have resolved all symbols.
symtab->reportRemainingUndefines();
if (errorCount())
return;
if (config->mingw) {
// In MinGW, all symbols are automatically exported if no symbols
// are chosen to be exported.
maybeExportMinGWSymbols(args);
// Make sure the crtend.o object is the last object file. This object
// file can contain terminating section chunks that need to be placed
// last. GNU ld processes files and static libraries explicitly in the
// order provided on the command line, while lld will pull in needed
// files from static libraries only after the last object file on the
// command line.
for (auto i = ObjFile::instances.begin(), e = ObjFile::instances.end();
i != e; i++) {
ObjFile *file = *i;
if (isCrtend(file->getName())) {
ObjFile::instances.erase(i);
ObjFile::instances.push_back(file);
break;
}
}
}
// Windows specific -- when we are creating a .dll file, we also
// need to create a .lib file.
if (!config->exports.empty() || config->dll) {
fixupExports();
createImportLibrary(/*asLib=*/false);
assignExportOrdinals();
}
// Handle /output-def (MinGW specific).
if (auto *arg = args.getLastArg(OPT_output_def))
writeDefFile(arg->getValue());
// Set extra alignment for .comm symbols
for (auto pair : config->alignComm) {
StringRef name = pair.first;
uint32_t alignment = pair.second;
Symbol *sym = symtab->find(name);
if (!sym) {
warn("/aligncomm symbol " + name + " not found");
continue;
}
// If the symbol isn't common, it must have been replaced with a regular
// symbol, which will carry its own alignment.
auto *dc = dyn_cast<DefinedCommon>(sym);
if (!dc)
continue;
CommonChunk *c = dc->getChunk();
c->setAlignment(std::max(c->getAlignment(), alignment));
}
// Windows specific -- Create a side-by-side manifest file.
if (config->manifest == Configuration::SideBySide)
createSideBySideManifest();
// Handle /order. We want to do this at this moment because we
// need a complete list of comdat sections to warn on nonexistent
// functions.
if (auto *arg = args.getLastArg(OPT_order))
parseOrderFile(arg->getValue());
// Identify unreferenced COMDAT sections.
if (config->doGC)
markLive(symtab->getChunks());
// Needs to happen after the last call to addFile().
diagnoseMultipleResourceObjFiles();
// Identify identical COMDAT sections to merge them.
if (config->doICF) {
findKeepUniqueSections();
doICF(symtab->getChunks());
}
// Write the result.
writeResult();
// Stop early so we can print the results.
Timer::root().stop();
if (config->showTiming)
Timer::root().print();
}
} // namespace coff
} // namespace lld
Index: stable/12/contrib/llvm-project/lld/ELF/Symbols.h
===================================================================
--- stable/12/contrib/llvm-project/lld/ELF/Symbols.h (revision 356464)
+++ stable/12/contrib/llvm-project/lld/ELF/Symbols.h (revision 356465)
@@ -1,558 +1,558 @@
//===- Symbols.h ------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines various types of Symbols.
//
//===----------------------------------------------------------------------===//
#ifndef LLD_ELF_SYMBOLS_H
#define LLD_ELF_SYMBOLS_H
#include "InputFiles.h"
#include "InputSection.h"
#include "lld/Common/LLVM.h"
#include "lld/Common/Strings.h"
#include "llvm/Object/Archive.h"
#include "llvm/Object/ELF.h"
namespace lld {
namespace elf {
class CommonSymbol;
class Defined;
class InputFile;
class LazyArchive;
class LazyObject;
class SharedSymbol;
class Symbol;
class Undefined;
} // namespace elf
std::string toString(const elf::Symbol &);
// There are two different ways to convert an Archive::Symbol to a string:
// One for Microsoft name mangling and one for Itanium name mangling.
// Call the functions toCOFFString and toELFString, not just toString.
std::string toELFString(const elf::Archive::Symbol &);
namespace elf {
// This is a StringRef-like container that doesn't run strlen().
//
// ELF string tables contain a lot of null-terminated strings. Most of them
// are not necessary for the linker because they are names of local symbols,
// and the linker doesn't use local symbol names for name resolution. So, we
// use this class to represent strings read from string tables.
struct StringRefZ {
StringRefZ(const char *s) : data(s), size(-1) {}
StringRefZ(StringRef s) : data(s.data()), size(s.size()) {}
const char *data;
const uint32_t size;
};
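// The same lazy-length idea in isolation (a hypothetical, self-contained
// sketch, not lld code; lld keeps the cached length in Symbol instead):
#include <cstdint>
#include <cstring>
namespace stringrefz_example {
struct LazyStr {
  const char *data;
  mutable uint32_t size; // (uint32_t)-1 means "length not computed yet"
  LazyStr(const char *s) : data(s), size(uint32_t(-1)) {}
  uint32_t length() const {
    if (size == uint32_t(-1)) // pay for strlen() only on first use
      size = static_cast<uint32_t>(std::strlen(data));
    return size;
  }
};
} // namespace stringrefz_example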
// The base class for real symbol classes.
class Symbol {
public:
enum Kind {
PlaceholderKind,
DefinedKind,
CommonKind,
SharedKind,
UndefinedKind,
LazyArchiveKind,
LazyObjectKind,
};
Kind kind() const { return static_cast<Kind>(symbolKind); }
// The file from which this symbol was created.
InputFile *file;
protected:
const char *nameData;
mutable uint32_t nameSize;
public:
uint32_t dynsymIndex = 0;
uint32_t gotIndex = -1;
uint32_t pltIndex = -1;
uint32_t globalDynIndex = -1;
// This field is an index to the symbol's version definition.
uint32_t verdefIndex = -1;
// Version definition index.
uint16_t versionId;
// An index into the .branch_lt section on PPC64.
uint16_t ppc64BranchltIndex = -1;
// Symbol binding. This is not overwritten by replace() to track
// changes during resolution. In particular:
// - An undefined weak is still weak when it resolves to a shared library.
// - An undefined weak will not fetch archive members, but we have to
// remember it is weak.
uint8_t binding;
// The following fields have the same meaning as the ELF symbol attributes.
uint8_t type; // symbol type
uint8_t stOther; // st_other field value
uint8_t symbolKind;
// Symbol visibility. This is the computed minimum visibility of all
// observed non-DSO symbols.
- unsigned visibility : 2;
+ uint8_t visibility : 2;
// True if the symbol was used for linking and thus needs to be added to the
// output file's symbol table. This is true for all symbols except for
// unreferenced DSO symbols, lazy (archive) symbols, and bitcode symbols that
// are unreferenced except by other bitcode objects.
- unsigned isUsedInRegularObj : 1;
+ uint8_t isUsedInRegularObj : 1;
// If this flag is true and the symbol has protected or default visibility, it
// will appear in .dynsym. This flag is set by interposable DSO symbols in
// executables, by most symbols in DSOs and executables built with
// --export-dynamic, and by dynamic lists.
- unsigned exportDynamic : 1;
+ uint8_t exportDynamic : 1;
// False if LTO shouldn't inline whatever this symbol points to. If a symbol
// is overwritten after LTO, LTO shouldn't inline the symbol because it
// doesn't know the final contents of the symbol.
- unsigned canInline : 1;
+ uint8_t canInline : 1;
// True if this symbol is specified by --trace-symbol option.
- unsigned traced : 1;
+ uint8_t traced : 1;
inline void replace(const Symbol &New);
bool includeInDynsym() const;
uint8_t computeBinding() const;
bool isWeak() const { return binding == llvm::ELF::STB_WEAK; }
bool isUndefined() const { return symbolKind == UndefinedKind; }
bool isCommon() const { return symbolKind == CommonKind; }
bool isDefined() const { return symbolKind == DefinedKind; }
bool isShared() const { return symbolKind == SharedKind; }
bool isPlaceholder() const { return symbolKind == PlaceholderKind; }
bool isLocal() const { return binding == llvm::ELF::STB_LOCAL; }
bool isLazy() const {
return symbolKind == LazyArchiveKind || symbolKind == LazyObjectKind;
}
// True if this is an undefined weak symbol. This only works once
// all input files have been added.
bool isUndefWeak() const {
// See comment on lazy symbols for details.
return isWeak() && (isUndefined() || isLazy());
}
StringRef getName() const {
if (nameSize == (uint32_t)-1)
nameSize = strlen(nameData);
return {nameData, nameSize};
}
void setName(StringRef s) {
nameData = s.data();
nameSize = s.size();
}
void parseSymbolVersion();
bool isInGot() const { return gotIndex != -1U; }
bool isInPlt() const { return pltIndex != -1U; }
bool isInPPC64Branchlt() const { return ppc64BranchltIndex != 0xffff; }
uint64_t getVA(int64_t addend = 0) const;
uint64_t getGotOffset() const;
uint64_t getGotVA() const;
uint64_t getGotPltOffset() const;
uint64_t getGotPltVA() const;
uint64_t getPltVA() const;
uint64_t getPPC64LongBranchTableVA() const;
uint64_t getPPC64LongBranchOffset() const;
uint64_t getSize() const;
OutputSection *getOutputSection() const;
// The following two functions are used for symbol resolution.
//
// You are expected to call mergeProperties for all symbols in input
// files so that attributes that are attached to names rather than
// individual symbols (such as visibility) are merged together.
//
// Every time you read a new symbol from an input, you are supposed
// to call resolve() with the new symbol. That function replaces
// "this" object as a result of name resolution if the new symbol is
// more appropriate to be included in the output.
//
// For example, if "this" is an undefined symbol and a new symbol is
// a defined symbol, "this" is replaced with the new symbol.
void mergeProperties(const Symbol &other);
void resolve(const Symbol &other);
// If this is a lazy symbol, fetch an input file and add the symbol
// in the file to the symbol table. Calling this function on
// a non-lazy object causes a runtime error.
void fetch() const;
private:
static bool isExportDynamic(Kind k, uint8_t visibility) {
if (k == SharedKind)
return visibility == llvm::ELF::STV_DEFAULT;
return config->shared || config->exportDynamic;
}
void resolveUndefined(const Undefined &other);
void resolveCommon(const CommonSymbol &other);
void resolveDefined(const Defined &other);
template <class LazyT> void resolveLazy(const LazyT &other);
void resolveShared(const SharedSymbol &other);
int compare(const Symbol *other) const;
inline size_t getSymbolSize() const;
protected:
Symbol(Kind k, InputFile *file, StringRefZ name, uint8_t binding,
uint8_t stOther, uint8_t type)
: file(file), nameData(name.data), nameSize(name.size), binding(binding),
type(type), stOther(stOther), symbolKind(k), visibility(stOther & 3),
isUsedInRegularObj(!file || file->kind() == InputFile::ObjKind),
exportDynamic(isExportDynamic(k, visibility)), canInline(false),
traced(false), needsPltAddr(false), isInIplt(false), gotInIgot(false),
isPreemptible(false), used(!config->gcSections), needsTocRestore(false),
scriptDefined(false) {}
public:
// True if the symbol should point to its PLT entry.
// For SharedSymbol only.
- unsigned needsPltAddr : 1;
+ uint8_t needsPltAddr : 1;
// True if this symbol is in the Iplt sub-section of the Plt and the Igot
// sub-section of the .got.plt or .got.
- unsigned isInIplt : 1;
+ uint8_t isInIplt : 1;
// True if this symbol needs a GOT entry and its GOT entry is actually in
// Igot. This will be true only for certain non-preemptible ifuncs.
- unsigned gotInIgot : 1;
+ uint8_t gotInIgot : 1;
// True if this symbol is preemptible at load time.
- unsigned isPreemptible : 1;
+ uint8_t isPreemptible : 1;
// True if an undefined or shared symbol is used from a live section.
- unsigned used : 1;
+ uint8_t used : 1;
// True if a call to this symbol needs to be followed by a restore of the
// PPC64 toc pointer.
- unsigned needsTocRestore : 1;
+ uint8_t needsTocRestore : 1;
// True if this symbol is defined by a linker script.
- unsigned scriptDefined : 1;
+ uint8_t scriptDefined : 1;
// The partition whose dynamic symbol table contains this symbol's definition.
uint8_t partition = 1;
bool isSection() const { return type == llvm::ELF::STT_SECTION; }
bool isTls() const { return type == llvm::ELF::STT_TLS; }
bool isFunc() const { return type == llvm::ELF::STT_FUNC; }
bool isGnuIFunc() const { return type == llvm::ELF::STT_GNU_IFUNC; }
bool isObject() const { return type == llvm::ELF::STT_OBJECT; }
bool isFile() const { return type == llvm::ELF::STT_FILE; }
};
// Represents a symbol that is defined in the current output file.
class Defined : public Symbol {
public:
Defined(InputFile *file, StringRefZ name, uint8_t binding, uint8_t stOther,
uint8_t type, uint64_t value, uint64_t size, SectionBase *section)
: Symbol(DefinedKind, file, name, binding, stOther, type), value(value),
size(size), section(section) {}
static bool classof(const Symbol *s) { return s->isDefined(); }
uint64_t value;
uint64_t size;
SectionBase *section;
};
// Represents a common symbol.
//
// On Unix, it is traditionally allowed to write variable definitions
// without initialization expressions (such as "int foo;") to header
// files. Such a definition is called a "tentative definition".
//
// Using tentative definitions is usually considered bad practice
// because you should write only declarations (such as "extern int
// foo;") to header files. Nevertheless, the linker and the compiler
// have to do something to support bad code by allowing duplicate
// definitions for this particular case.
//
// Common symbols represent variable definitions without initializations.
// The compiler creates common symbols when it sees variable definitions
// without initialization (you can suppress this behavior and let the
// compiler create a regular defined symbol by -fno-common).
//
// The linker allows common symbols to be replaced by regular defined
// symbols. If there are remaining common symbols after name resolution is
// complete, they are converted to regular defined symbols in a .bss
// section. (Therefore, the later passes don't see any CommonSymbols.)
class CommonSymbol : public Symbol {
public:
CommonSymbol(InputFile *file, StringRefZ name, uint8_t binding,
uint8_t stOther, uint8_t type, uint64_t alignment, uint64_t size)
: Symbol(CommonKind, file, name, binding, stOther, type),
alignment(alignment), size(size) {}
static bool classof(const Symbol *s) { return s->isCommon(); }
uint32_t alignment;
uint64_t size;
};
class Undefined : public Symbol {
public:
Undefined(InputFile *file, StringRefZ name, uint8_t binding, uint8_t stOther,
uint8_t type, uint32_t discardedSecIdx = 0)
: Symbol(UndefinedKind, file, name, binding, stOther, type),
discardedSecIdx(discardedSecIdx) {}
static bool classof(const Symbol *s) { return s->kind() == UndefinedKind; }
// The section index if in a discarded section, 0 otherwise.
uint32_t discardedSecIdx;
};
class SharedSymbol : public Symbol {
public:
static bool classof(const Symbol *s) { return s->kind() == SharedKind; }
SharedSymbol(InputFile &file, StringRef name, uint8_t binding,
uint8_t stOther, uint8_t type, uint64_t value, uint64_t size,
uint32_t alignment, uint32_t verdefIndex)
: Symbol(SharedKind, &file, name, binding, stOther, type), value(value),
size(size), alignment(alignment) {
this->verdefIndex = verdefIndex;
// GNU ifunc is a mechanism to allow user-supplied functions to
// resolve PLT slot values at load-time. This is contrary to the
// regular symbol resolution scheme in which symbols are resolved just
// by name. Using this hook, you can program how symbols are resolved
// for your program. For example, you can make "memcpy" resolve
// to an SSE-enabled version of memcpy only when the machine running the
// program supports the SSE instruction set.
//
// Naturally, such symbols should always be called through their PLT
// slots. What GNU ifunc symbols point to are resolver functions, and
// calling them directly doesn't make sense (unless you are writing a
// loader).
//
// For DSO symbols, we always call them through PLT slots anyway.
// So there's no difference between GNU ifunc and regular function
// symbols if they are in DSOs. So we can handle GNU_IFUNC as FUNC.
if (this->type == llvm::ELF::STT_GNU_IFUNC)
this->type = llvm::ELF::STT_FUNC;
}
SharedFile &getFile() const { return *cast<SharedFile>(file); }
uint64_t value; // st_value
uint64_t size; // st_size
uint32_t alignment;
// This is true if there has been at least one undefined reference to the
// symbol. The binding may change to STB_WEAK if the first undefined reference
// is weak.
bool referenced = false;
};
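// The GNU ifunc mechanism described in the constructor comment above, in
// isolation (a hypothetical, self-contained sketch, not lld code; requires
// GCC or Clang on an ELF target). The resolver runs at load time, and its
// return value becomes the address that calls to my_impl() dispatch to.
extern "C" {
static int impl_generic(void) { return 1; } // portable fallback
static int impl_fast(void) { return 2; }    // stand-in for a tuned version
static int use_fast = 1; // a real resolver would query CPU features instead
static int (*resolve_my_impl(void))(void) {
  return use_fast ? impl_fast : impl_generic;
}
}
extern "C" int my_impl(void) __attribute__((ifunc("resolve_my_impl")));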
// LazyArchive and LazyObject represent symbols that are not yet in the link,
// but we know where to find it if needed. If the resolver finds both Undefined
// and Lazy for the same name, it will ask the Lazy to load a file.
//
// A special complication is the handling of weak undefined symbols. They should
// not load a file, but we have to remember we have seen both the weak undefined
// and the lazy. We represent that with a lazy symbol with a weak binding. This
// means that code looking for undefined symbols normally also has to take lazy
// symbols into consideration.
// This class represents a symbol defined in an archive file. It is
// created from an archive file header, and it knows how to load an
// object file from an archive to replace itself with a defined
// symbol.
class LazyArchive : public Symbol {
public:
LazyArchive(InputFile &file, const llvm::object::Archive::Symbol s)
: Symbol(LazyArchiveKind, &file, s.getName(), llvm::ELF::STB_GLOBAL,
llvm::ELF::STV_DEFAULT, llvm::ELF::STT_NOTYPE),
sym(s) {}
static bool classof(const Symbol *s) { return s->kind() == LazyArchiveKind; }
MemoryBufferRef getMemberBuffer();
const llvm::object::Archive::Symbol sym;
};
// LazyObject symbols represent symbols in object files between
// --start-lib and --end-lib options.
class LazyObject : public Symbol {
public:
LazyObject(InputFile &file, StringRef name)
: Symbol(LazyObjectKind, &file, name, llvm::ELF::STB_GLOBAL,
llvm::ELF::STV_DEFAULT, llvm::ELF::STT_NOTYPE) {}
static bool classof(const Symbol *s) { return s->kind() == LazyObjectKind; }
};
// Some linker-generated symbols need to be created as
// Defined symbols.
struct ElfSym {
// __bss_start
static Defined *bss;
// etext and _etext
static Defined *etext1;
static Defined *etext2;
// edata and _edata
static Defined *edata1;
static Defined *edata2;
// end and _end
static Defined *end1;
static Defined *end2;
// The _GLOBAL_OFFSET_TABLE_ symbol is defined by target convention to
// be at some offset from the base of the .got section, usually 0 or
// the end of the .got.
static Defined *globalOffsetTable;
// _gp, _gp_disp and __gnu_local_gp symbols. Only for MIPS.
static Defined *mipsGp;
static Defined *mipsGpDisp;
static Defined *mipsLocalGp;
// __rel{,a}_iplt_{start,end} symbols.
static Defined *relaIpltStart;
static Defined *relaIpltEnd;
// __global_pointer$ for RISC-V.
static Defined *riscvGlobalPointer;
// _TLS_MODULE_BASE_ on targets that support TLSDESC.
static Defined *tlsModuleBase;
};
// A buffer class that is large enough to hold any Symbol-derived
// object. We allocate memory using this class and instantiate a symbol
// using placement new.
union SymbolUnion {
alignas(Defined) char a[sizeof(Defined)];
alignas(CommonSymbol) char b[sizeof(CommonSymbol)];
alignas(Undefined) char c[sizeof(Undefined)];
alignas(SharedSymbol) char d[sizeof(SharedSymbol)];
alignas(LazyArchive) char e[sizeof(LazyArchive)];
alignas(LazyObject) char f[sizeof(LazyObject)];
};
// It is important to keep the size of SymbolUnion small for performance and
// memory usage reasons. 80 bytes is a soft limit based on the size of Defined
// on a 64-bit system.
static_assert(sizeof(SymbolUnion) <= 80, "SymbolUnion too large");
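// The union-plus-placement-new pattern in isolation (a hypothetical,
// self-contained sketch, not lld code; both types must be trivially
// destructible so the storage can be reused without running destructors):
#include <new>
namespace symbolunion_example {
struct A { int x; };
struct B { double y; long z; };
union Slot {
  alignas(A) char a[sizeof(A)];
  alignas(B) char b[sizeof(B)];
};
inline int demo() {
  Slot s;
  A *p = new (&s) A{42}; // construct A in place; no heap allocation
  int v = p->x;
  new (&s) B{1.0, 2};    // reuse the same storage, cf. Symbol::replace()
  return v;
}
} // namespace symbolunion_example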
template <typename T> struct AssertSymbol {
static_assert(std::is_trivially_destructible<T>(),
"Symbol types must be trivially destructible");
static_assert(sizeof(T) <= sizeof(SymbolUnion), "SymbolUnion too small");
static_assert(alignof(T) <= alignof(SymbolUnion),
"SymbolUnion not aligned enough");
};
static inline void assertSymbols() {
AssertSymbol<Defined>();
AssertSymbol<CommonSymbol>();
AssertSymbol<Undefined>();
AssertSymbol<SharedSymbol>();
AssertSymbol<LazyArchive>();
AssertSymbol<LazyObject>();
}
void printTraceSymbol(const Symbol *sym);
size_t Symbol::getSymbolSize() const {
switch (kind()) {
case CommonKind:
return sizeof(CommonSymbol);
case DefinedKind:
return sizeof(Defined);
case LazyArchiveKind:
return sizeof(LazyArchive);
case LazyObjectKind:
return sizeof(LazyObject);
case SharedKind:
return sizeof(SharedSymbol);
case UndefinedKind:
return sizeof(Undefined);
case PlaceholderKind:
return sizeof(Symbol);
}
llvm_unreachable("unknown symbol kind");
}
// replace() replaces "this" object with a given symbol by memcpy'ing
// it over to "this". This function is called as a result of name
// resolution, e.g. to replace an undefined symbol with a defined symbol.
void Symbol::replace(const Symbol &New) {
using llvm::ELF::STT_TLS;
// Symbols representing thread-local variables must be referenced by
// TLS-aware relocations, and non-TLS symbols must be referenced by
// non-TLS relocations, so there's a clear distinction between TLS
// and non-TLS symbols. It is an error if the same symbol is defined
// as a TLS symbol in one file and as a non-TLS symbol in another file.
if (symbolKind != PlaceholderKind && !isLazy() && !New.isLazy()) {
bool tlsMismatch = (type == STT_TLS && New.type != STT_TLS) ||
(type != STT_TLS && New.type == STT_TLS);
if (tlsMismatch)
error("TLS attribute mismatch: " + toString(*this) + "\n>>> defined in " +
toString(New.file) + "\n>>> defined in " + toString(file));
}
Symbol old = *this;
memcpy(this, &New, New.getSymbolSize());
versionId = old.versionId;
visibility = old.visibility;
isUsedInRegularObj = old.isUsedInRegularObj;
exportDynamic = old.exportDynamic;
canInline = old.canInline;
traced = old.traced;
isPreemptible = old.isPreemptible;
scriptDefined = old.scriptDefined;
partition = old.partition;
// Symbol length is computed lazily. If we already know a symbol length,
// propagate it.
if (nameData == old.nameData && nameSize == (uint32_t)-1 && old.nameSize != (uint32_t)-1)
nameSize = old.nameSize;
// Print out a log message if --trace-symbol was specified.
// This is for debugging.
if (traced)
printTraceSymbol(this);
}
void maybeWarnUnorderableSymbol(const Symbol *sym);
} // namespace elf
} // namespace lld
#endif
Index: stable/12/contrib/llvm-project/lld/docs/ReleaseNotes.rst
===================================================================
--- stable/12/contrib/llvm-project/lld/docs/ReleaseNotes.rst (revision 356464)
+++ stable/12/contrib/llvm-project/lld/docs/ReleaseNotes.rst (revision 356465)
@@ -1,231 +1,231 @@
=======================
lld 9.0.0 Release Notes
=======================
.. contents::
:local:
Introduction
============
lld is a high-performance linker that supports ELF (Unix), COFF
(Windows), Mach-O (macOS), MinGW and WebAssembly. lld is
command-line-compatible with GNU linkers and Microsoft link.exe and is
significantly faster than the system default linkers.
lld 9 has lots of feature improvements and bug fixes.
Non-comprehensive list of changes in this release
=================================================
ELF Improvements
----------------
* ld.lld now has typo suggestions for flags:
``$ ld.lld --call-shared`` now prints
``unknown argument '--call-shared', did you mean '--call_shared'``.
(`r361518 <https://reviews.llvm.org/rL361518>`_)
* ``--allow-shlib-undefined`` and ``--no-allow-shlib-undefined``
options are added. ``--no-allow-shlib-undefined`` is the default for
executables.
(`r352826 <https://reviews.llvm.org/rL352826>`_)
* ``-nmagic`` and ``-omagic`` options are fully supported.
(`r360593 <https://reviews.llvm.org/rL360593>`_)
* Segment layout has changed. PT_GNU_RELRO, which was previously
placed in the middle of readable/writable PT_LOAD segments, is now
placed at the beginning of them. This change permits lld-produced
ELF files to be read correctly by GNU strip older than 2.31, which
has a bug that discards PT_GNU_RELRO segments in the former layout.
* ``-z common-page-size`` is supported.
(`r360593 <https://reviews.llvm.org/rL360593>`_)
* Diagnostic messages have improved. A new flag ``--vs-diagnostics``
alters the format of diagnostic output to enable source hyperlinks
in Microsoft Visual Studio IDE.
* Linker script compatibility with the GNU BFD linker has generally improved.
* The clang ``--dependent-library`` form of autolinking is supported.
This feature is added to implement the Windows-style autolinking for
Unix. On Unix, in order to use a library, you usually have to
include a header file provided by the library and then explicitly
link the library with the linker ``-l`` option. On Windows, header
files usually contain pragmas that list needed libraries. Compilers
copy that information to object files, so that linkers can
automatically link needed libraries. ``--dependent-library`` was
added to implement those Windows semantics on Unix.
(`r360984 <https://reviews.llvm.org/rL360984>`_)
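For example, a header can name the library its clients need (a hypothetical
sketch; clang records the name in the object file, and the linker then adds
the library automatically, without an explicit ``-lmylib``)::
  // mylib.h (hypothetical)
  #pragma comment(lib, "mylib")
  int mylib_func(int x);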
* AArch64 BTI and PAC are supported.
(`r362793 <https://reviews.llvm.org/rL362793>`_)
* lld now supports replacing ``JAL`` with ``JALX`` instructions in case
of MIPS-microMIPS cross-mode jumps.
(`r354311 <https://reviews.llvm.org/rL354311>`_)
* lld now creates LA25 thunks for MIPS R6 code.
(`r354312 <https://reviews.llvm.org/rL354312>`_)
* Put MIPS-specific .reginfo, .MIPS.options, and .MIPS.abiflags sections
into corresponding PT_MIPS_REGINFO, PT_MIPS_OPTIONS, and PT_MIPS_ABIFLAGS
segments.
* The quality of the RISC-V and PowerPC ports has greatly improved. Many
applications can now be linked by lld. PowerPC64 is now almost
production ready.
* The Linux kernel for arm32_7, arm64, ppc64le and x86_64 can now be
linked by lld.
* x86-64 TLSDESC is supported.
(`r361911 <https://reviews.llvm.org/rL361911>`_,
`r362078 <https://reviews.llvm.org/rL362078>`_)
* DF_STATIC_TLS flag is set for i386 and x86-64 when needed.
(`r353293 <https://reviews.llvm.org/rL353293>`_,
`r353378 <https://reviews.llvm.org/rL353378>`_)
* The experimental partitioning feature is added to allow a program to
be split into multiple pieces.
The feature allows you to semi-automatically split a single program
into multiple ELF files called "partitions". Since all partitions
share the same memory address space and don't use PLT/GOT, split
programs run as fast as regular programs.
With this mechanism, you can start a program with only a "main"
partition and load remaining partitions on-demand. For example, you
can split a web browser into a main partition and a PDF reader
sub-partition and load the PDF reader partition only when a user
tries to open a PDF file.
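Partitions are assigned when a program is compiled, for example by building
the PDF reader's sources with clang's ``-fsymbol-partition=`` option (a
hypothetical build setup).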
See `the documentation <Partitions.html>`_ for more information.
* If "-" is given as an output filename, lld writes the final result
to the standard output. Previously, it created a file "-" in the
current directory.
(`r351852 <https://reviews.llvm.org/rL351852>`_)
* ``-z ifunc-noplt`` option is added to reduce IFunc function call
overhead in a freestanding environment such as the OS kernel.
Functions resolved by the IFunc mechanism are usually dispatched via
PLT and thus slower than regular functions because of the cost of
indirection. With ``-z ifunc-noplt``, you can eliminate it by doing
text relocations at load-time. You need a special loader to utilize
this feature. This feature is added for the FreeBSD kernel but can
be used by any operating system.
(`r360685 <https://reviews.llvm.org/rL360685>`_)
* ``--undefined-glob`` option is added. The new option is an extension
to ``--undefined`` to take a glob pattern instead of a single symbol
name.
(`r363396 <https://reviews.llvm.org/rL363396>`_)
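For example, ``ld.lld --undefined-glob '__foo_*'`` (a hypothetical pattern)
acts like ``--undefined`` for every symbol matching the glob, fetching the
archive members that define them.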
COFF Improvements
-----------------
* Like the ELF driver, lld-link now has typo suggestions for flags.
(`r361518 <https://reviews.llvm.org/rL361518>`_)
* lld-link now correctly reports duplicate symbol errors for object
files that were compiled with ``/Gy``.
(`r352590 <https://reviews.llvm.org/rL352590>`_)
* lld-link now correctly reports duplicate symbol errors when several
resource (.res) input files define resources with the same type,
name and language. This can be demoted to a warning using
``/force:multipleres``.
(`r359829 <https://reviews.llvm.org/rL359829>`_)
* lld-link now rejects more than one resource object input file,
matching link.exe. Previously, lld-link would silently ignore all
but one. If you hit this, don't pass resource object files to the
linker; instead, pass .res files to the linker directly. Don't put
resource files in static libraries; pass them on the command line.
(`r359749 <https://reviews.llvm.org/rL359749>`_)
* Having more than two ``/natvis:`` arguments now works correctly; it
previously failed for larger binaries.
- (`r327895 <https://reviews.llvm.org/rL327895>`_)
+ (`r359515 <https://reviews.llvm.org/rL359515>`_)
* Undefined symbols are now printed only in demangled form. Pass
``/demangle:no`` to see raw symbol names instead.
(`r355878 <https://reviews.llvm.org/rL355878>`_)
* Several speed and memory usage improvements.
* lld-link now supports resource object files created by GNU windres and
MS cvtres, not only llvm-cvtres.
* The generated thunks for delayimports now share the majority of code
among thunks, significantly reducing the overhead of using delayimport.
(`r365823 <https://reviews.llvm.org/rL365823>`_)
* ``IMAGE_REL_ARM{,64}_REL32`` relocations are supported.
(`r352325 <https://reviews.llvm.org/rL352325>`_)
* Range extension thunks for AArch64 are now supported, so lld can
create large executables for Windows/ARM64.
(`r352929 <https://reviews.llvm.org/rL352929>`_)
* The following flags have been added:
``/functionpadmin`` (`r354716 <https://reviews.llvm.org/rL354716>`_),
``/swaprun:`` (`r359192 <https://reviews.llvm.org/rL359192>`_),
``/threads:no`` (`r355029 <https://reviews.llvm.org/rL355029>`_),
``/filealign`` (`r361634 <https://reviews.llvm.org/rL361634>`_)
WebAssembly Improvements
------------------------
* Imports from custom module names are supported.
(`r352828 <https://reviews.llvm.org/rL352828>`_)
* Symbols that are in llvm.used are now exported by default.
(`r353364 <https://reviews.llvm.org/rL353364>`_)
* Initial support for PIC and dynamic linking has landed.
(`r357022 <https://reviews.llvm.org/rL357022>`_)
* wasm-ld now adds ``__start_``/``__stop_`` symbols for data sections.
(`r361236 <https://reviews.llvm.org/rL361236>`_)
* wasm-ld no longer reports an error on archives without a symbol index.
(`r364338 <https://reviews.llvm.org/rL364338>`_)
* The following flags have been added:
``--emit-relocs`` (`r361635 <https://reviews.llvm.org/rL361635>`_),
``--wrap`` (`r361639 <https://reviews.llvm.org/rL361639>`_),
``--trace`` and ``--trace-symbol``
(`r353264 <https://reviews.llvm.org/rL353264>`_).
MinGW Improvements
------------------
* lld now correctly links crtend.o as the last object file, handling
terminators for sections such as .eh_frame properly, fixing
DWARF exception handling with libgcc and gcc's crtend.o.
* lld now also handles DWARF unwind info generated by GCC, when linking
with libgcc.
* PDB output can be requested without manually specifying the PDB file
name, by passing the new option ``-pdb=`` with an empty value. (The
existing syntax ``-pdb <filename>`` was more cumbersome to use with an
empty parameter value.)
* ``--no-insert-timestamp`` option is added as an alias to ``/timestamp:0``.
(`r353145 <https://reviews.llvm.org/rL353145>`_)
* Many more GNU ld options are now supported, which e.g. allows the lld
MinGW frontend to be called by GCC.
* The following options are added: ``--exclude-all-symbols``,
``--appcontainer``, ``--undefined``
Index: stable/12/contrib/llvm-project/lld
===================================================================
--- stable/12/contrib/llvm-project/lld (revision 356464)
+++ stable/12/contrib/llvm-project/lld (revision 356465)
Property changes on: stable/12/contrib/llvm-project/lld
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,2 ##
Merged /head/contrib/llvm-project/lld:r356004
Merged /vendor/llvm-project/release-9.x/lld:r355948-355987
Index: stable/12/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/PythonReadline.cpp
===================================================================
--- stable/12/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/PythonReadline.cpp (nonexistent)
+++ stable/12/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/PythonReadline.cpp (revision 356465)
@@ -0,0 +1,88 @@
+#include "PythonReadline.h"
+
+#ifdef LLDB_USE_LIBEDIT_READLINE_COMPAT_MODULE
+
+#include <stdio.h>
+
+#include <editline/readline.h>
+
+// Simple implementation of the Python readline module using libedit.
+// In the event that libedit is excluded from the build, this turns
+// back into a null implementation that blocks the module from pulling
+// in the GNU readline shared lib, which causes linkage confusion when
+// both readline and libedit's readline compatibility symbols collide.
+//
+// Currently it only installs a PyOS_ReadlineFunctionPointer, without
+// implementing any of the readline module methods. This is meant to
+// work around LLVM pr18841 to avoid seg faults in the stock Python
+// readline.so linked against GNU readline.
+//
+// Bug on the cpython side: https://bugs.python.org/issue38634
+
+PyDoc_STRVAR(moduleDocumentation,
+ "Simple readline module implementation based on libedit.");
+
+#if PY_MAJOR_VERSION >= 3
+static struct PyModuleDef readline_module = {
+ PyModuleDef_HEAD_INIT, // m_base
+ "lldb_editline", // m_name
+ moduleDocumentation, // m_doc
+ -1, // m_size
+ nullptr, // m_methods
+ nullptr, // m_reload
+ nullptr, // m_traverse
+ nullptr, // m_clear
+ nullptr, // m_free
+};
+#else
+static struct PyMethodDef moduleMethods[] = {{nullptr, nullptr, 0, nullptr}};
+#endif
+
+static char *
+#if PY_MAJOR_VERSION >= 3
+simple_readline(FILE *stdin, FILE *stdout, const char *prompt)
+#else
+simple_readline(FILE *stdin, FILE *stdout, char *prompt)
+#endif
+{
+ rl_instream = stdin;
+ rl_outstream = stdout;
+ char *line = readline(prompt);
+ if (!line) {
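+ // EOF: Python's readline hooks signal end-of-input by returning an
+ // empty, allocator-owned string rather than NULL.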
+#if PY_MAJOR_VERSION >= 3
+ char *ret = (char *)PyMem_RawMalloc(1);
+#else
+ char *ret = (char *)PyMem_Malloc(1);
+#endif
+ if (ret != NULL)
+ *ret = '\0';
+ return ret;
+ }
+ if (*line)
+ add_history(line);
+ int n = strlen(line);
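+ // Allocate n + 2 bytes: the copied line, the trailing '\n' that Python
+ // expects, and the NUL terminator.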
+#if PY_MAJOR_VERSION >= 3
+ char *ret = (char *)PyMem_RawMalloc(n + 2);
+#else
+ char *ret = (char *)PyMem_Malloc(n + 2);
+#endif
+ if (ret) {
+ strncpy(ret, line, n);
+ free(line);
+ ret[n] = '\n';
+ ret[n + 1] = '\0';
+ }
+ return ret;
+}
+
+PyMODINIT_FUNC initlldb_readline(void) {
+ PyOS_ReadlineFunctionPointer = simple_readline;
+
+#if PY_MAJOR_VERSION >= 3
+ return PyModule_Create(&readline_module);
+#else
+ Py_InitModule4("readline", moduleMethods, moduleDocumentation,
+ static_cast<PyObject *>(NULL), PYTHON_API_VERSION);
+#endif
+}
+#endif
Property changes on: stable/12/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/PythonReadline.cpp
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+FreeBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: stable/12/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/PythonReadline.h
===================================================================
--- stable/12/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/PythonReadline.h (nonexistent)
+++ stable/12/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/PythonReadline.h (revision 356465)
@@ -0,0 +1,26 @@
+//===-- PythonReadline.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLDB_PLUGINS_SCRIPTINTERPRETER_PYTHON_PYTHONREADLINE_H
+#define LLDB_PLUGINS_SCRIPTINTERPRETER_PYTHON_PYTHONREADLINE_H
+
+#if !defined(LLDB_DISABLE_LIBEDIT) && defined(__linux__)
+// NOTE: Since Python may define some pre-processor definitions which affect the
+// standard headers on some systems, you must include Python.h before any
+// standard headers are included.
+#include "Python.h"
+
+// no need to hack into Python's readline module if libedit isn't used.
+//
+#define LLDB_USE_LIBEDIT_READLINE_COMPAT_MODULE 1
+
+extern "C" PyMODINIT_FUNC initlldb_readline(void);
+
+#endif
+
+#endif // LLDB_PLUGINS_SCRIPTINTERPRETER_PYTHON_PYTHONREADLINE_H
Property changes on: stable/12/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/PythonReadline.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+FreeBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: stable/12/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp
===================================================================
--- stable/12/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp (revision 356464)
+++ stable/12/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp (revision 356465)
@@ -1,3259 +1,3276 @@
//===-- ScriptInterpreterPython.cpp -----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifdef LLDB_DISABLE_PYTHON
// Python is disabled in this build
#else
// LLDB Python header must be included first
#include "lldb-python.h"
#include "PythonDataObjects.h"
#include "PythonExceptionState.h"
+#include "PythonReadline.h"
#include "ScriptInterpreterPythonImpl.h"
#include "lldb/API/SBFrame.h"
#include "lldb/API/SBValue.h"
#include "lldb/Breakpoint/StoppointCallbackContext.h"
#include "lldb/Breakpoint/WatchpointOptions.h"
#include "lldb/Core/Communication.h"
#include "lldb/Core/Debugger.h"
#include "lldb/Core/PluginManager.h"
#include "lldb/Core/ValueObject.h"
#include "lldb/DataFormatters/TypeSummary.h"
#include "lldb/Host/ConnectionFileDescriptor.h"
#include "lldb/Host/FileSystem.h"
#include "lldb/Host/HostInfo.h"
#include "lldb/Host/Pipe.h"
#include "lldb/Interpreter/CommandInterpreter.h"
#include "lldb/Interpreter/CommandReturnObject.h"
#include "lldb/Target/Thread.h"
#include "lldb/Target/ThreadPlan.h"
#include "lldb/Utility/Timer.h"
#if defined(_WIN32)
#include "lldb/Host/windows/ConnectionGenericFileWindows.h"
#endif
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/FileSystem.h"
#include <memory>
#include <mutex>
#include <stdio.h>
#include <stdlib.h>
#include <string>
using namespace lldb;
using namespace lldb_private;
// Defined in the SWIG source file
#if PY_MAJOR_VERSION >= 3
extern "C" PyObject *PyInit__lldb(void);
#define LLDBSwigPyInit PyInit__lldb
#else
extern "C" void init_lldb(void);
#define LLDBSwigPyInit init_lldb
#endif
// These prototypes are the Pythonic implementations of the required callbacks.
// Although these are scripting-language specific, their definition depends on
// the public API.
extern "C" bool LLDBSwigPythonBreakpointCallbackFunction(
const char *python_function_name, const char *session_dictionary_name,
const lldb::StackFrameSP &sb_frame,
const lldb::BreakpointLocationSP &sb_bp_loc);
extern "C" bool LLDBSwigPythonWatchpointCallbackFunction(
const char *python_function_name, const char *session_dictionary_name,
const lldb::StackFrameSP &sb_frame, const lldb::WatchpointSP &sb_wp);
extern "C" bool LLDBSwigPythonCallTypeScript(
const char *python_function_name, void *session_dictionary,
const lldb::ValueObjectSP &valobj_sp, void **pyfunct_wrapper,
const lldb::TypeSummaryOptionsSP &options_sp, std::string &retval);
extern "C" void *
LLDBSwigPythonCreateSyntheticProvider(const char *python_class_name,
const char *session_dictionary_name,
const lldb::ValueObjectSP &valobj_sp);
extern "C" void *
LLDBSwigPythonCreateCommandObject(const char *python_class_name,
const char *session_dictionary_name,
const lldb::DebuggerSP debugger_sp);
extern "C" void *LLDBSwigPythonCreateScriptedThreadPlan(
const char *python_class_name, const char *session_dictionary_name,
const lldb::ThreadPlanSP &thread_plan_sp);
extern "C" bool LLDBSWIGPythonCallThreadPlan(void *implementor,
const char *method_name,
Event *event_sp, bool &got_error);
extern "C" void *LLDBSwigPythonCreateScriptedBreakpointResolver(
const char *python_class_name, const char *session_dictionary_name,
lldb_private::StructuredDataImpl *args, lldb::BreakpointSP &bkpt_sp);
extern "C" unsigned int
LLDBSwigPythonCallBreakpointResolver(void *implementor, const char *method_name,
lldb_private::SymbolContext *sym_ctx);
extern "C" size_t LLDBSwigPython_CalculateNumChildren(void *implementor,
uint32_t max);
extern "C" void *LLDBSwigPython_GetChildAtIndex(void *implementor,
uint32_t idx);
extern "C" int LLDBSwigPython_GetIndexOfChildWithName(void *implementor,
const char *child_name);
extern "C" void *LLDBSWIGPython_CastPyObjectToSBValue(void *data);
extern lldb::ValueObjectSP
LLDBSWIGPython_GetValueObjectSPFromSBValue(void *data);
extern "C" bool LLDBSwigPython_UpdateSynthProviderInstance(void *implementor);
extern "C" bool
LLDBSwigPython_MightHaveChildrenSynthProviderInstance(void *implementor);
extern "C" void *
LLDBSwigPython_GetValueSynthProviderInstance(void *implementor);
extern "C" bool
LLDBSwigPythonCallCommand(const char *python_function_name,
const char *session_dictionary_name,
lldb::DebuggerSP &debugger, const char *args,
lldb_private::CommandReturnObject &cmd_retobj,
lldb::ExecutionContextRefSP exe_ctx_ref_sp);
extern "C" bool
LLDBSwigPythonCallCommandObject(void *implementor, lldb::DebuggerSP &debugger,
const char *args,
lldb_private::CommandReturnObject &cmd_retobj,
lldb::ExecutionContextRefSP exe_ctx_ref_sp);
extern "C" bool
LLDBSwigPythonCallModuleInit(const char *python_module_name,
const char *session_dictionary_name,
lldb::DebuggerSP &debugger);
extern "C" void *
LLDBSWIGPythonCreateOSPlugin(const char *python_class_name,
const char *session_dictionary_name,
const lldb::ProcessSP &process_sp);
extern "C" void *
LLDBSWIGPython_CreateFrameRecognizer(const char *python_class_name,
const char *session_dictionary_name);
extern "C" void *
LLDBSwigPython_GetRecognizedArguments(void *implementor,
const lldb::StackFrameSP &frame_sp);
extern "C" bool LLDBSWIGPythonRunScriptKeywordProcess(
const char *python_function_name, const char *session_dictionary_name,
lldb::ProcessSP &process, std::string &output);
extern "C" bool LLDBSWIGPythonRunScriptKeywordThread(
const char *python_function_name, const char *session_dictionary_name,
lldb::ThreadSP &thread, std::string &output);
extern "C" bool LLDBSWIGPythonRunScriptKeywordTarget(
const char *python_function_name, const char *session_dictionary_name,
lldb::TargetSP &target, std::string &output);
extern "C" bool LLDBSWIGPythonRunScriptKeywordFrame(
const char *python_function_name, const char *session_dictionary_name,
lldb::StackFrameSP &frame, std::string &output);
extern "C" bool LLDBSWIGPythonRunScriptKeywordValue(
const char *python_function_name, const char *session_dictionary_name,
lldb::ValueObjectSP &value, std::string &output);
extern "C" void *
LLDBSWIGPython_GetDynamicSetting(void *module, const char *setting,
const lldb::TargetSP &target_sp);
static bool g_initialized = false;
namespace {
// Initializing Python is not a straightforward process. We cannot control
// what external code may have done before getting to this point in LLDB,
// including potentially having already initialized Python, so we need to do a
// lot of work to ensure that the existing state of the system is maintained
// across our initialization. We do this by using an RAII pattern where we
// save off the initial state at the beginning, and restore it at the end.
struct InitializePythonRAII {
public:
InitializePythonRAII()
: m_gil_state(PyGILState_UNLOCKED), m_was_already_initialized(false) {
// Python will muck with STDIN terminal state, so save off any current TTY
// settings so we can restore them.
m_stdin_tty_state.Save(STDIN_FILENO, false);
InitializePythonHome();
+
+#ifdef LLDB_USE_LIBEDIT_READLINE_COMPAT_MODULE
+ // Python's readline is incompatible with libedit being linked into lldb.
+ // Provide a patched version local to the embedded interpreter.
+  bool ReadlinePatched = false;
+  for (auto *p = PyImport_Inittab; p->name != NULL; p++) {
+    if (strcmp(p->name, "readline") == 0) {
+      p->initfunc = initlldb_readline;
+      ReadlinePatched = true;
+      break;
+    }
+  }
+  if (!ReadlinePatched)
+    PyImport_AppendInittab("readline", initlldb_readline);
+#endif
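+  // For reference: PyImport_Inittab is CPython's NULL-terminated table of
+  // {name, initfunc} built-in module entries, which the import machinery
+  // consults before searching sys.path, so rewriting (or appending) the
+  // "readline" slot guarantees the libedit-compatible module wins over the
+  // stock extension. Inittab changes must be made before Py_Initialize,
+  // which is why this runs here, ahead of Py_InitializeEx below.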
// Register _lldb as a built-in module.
PyImport_AppendInittab("_lldb", LLDBSwigPyInit);
// Python < 3.2 and Python >= 3.2 reversed the ordering requirements for
// calling `Py_Initialize` and `PyEval_InitThreads`. < 3.2 requires that you
// call `PyEval_InitThreads` first, and >= 3.2 requires that you call it last.
#if (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 2) || (PY_MAJOR_VERSION > 3)
Py_InitializeEx(0);
InitializeThreadsPrivate();
#else
InitializeThreadsPrivate();
Py_InitializeEx(0);
#endif
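// Note: the 0 passed to Py_InitializeEx tells CPython to skip installing
// its own signal handlers (the lone difference from plain Py_Initialize),
// leaving the debugger's SIGINT handling intact.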
}
~InitializePythonRAII() {
if (m_was_already_initialized) {
Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_SCRIPT));
LLDB_LOGV(log, "Releasing PyGILState. Returning to state = {0}locked",
m_gil_state == PyGILState_UNLOCKED ? "un" : "");
PyGILState_Release(m_gil_state);
} else {
// We initialized the threads in this function, just unlock the GIL.
PyEval_SaveThread();
}
m_stdin_tty_state.Restore();
}
private:
void InitializePythonHome() {
#if defined(LLDB_PYTHON_HOME)
#if PY_MAJOR_VERSION >= 3
size_t size = 0;
static wchar_t *g_python_home = Py_DecodeLocale(LLDB_PYTHON_HOME, &size);
#else
static char g_python_home[] = LLDB_PYTHON_HOME;
#endif
Py_SetPythonHome(g_python_home);
#else
#if defined(__APPLE__) && PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION == 7
// For Darwin, the only Python version supported is the one shipped in the
// OS and linked with lldb. Other installations of Python may have higher
// priorities in the path, overriding PYTHONHOME and causing
// problems/incompatibilities. In order to avoid confusion, always hardcode
// the PythonHome to be right, as it's not going to change.
static char path[] =
"/System/Library/Frameworks/Python.framework/Versions/2.7";
Py_SetPythonHome(path);
#endif
#endif
}
void InitializeThreadsPrivate() {
// Since Python 3.7, `Py_Initialize` calls `PyEval_InitThreads` itself, so
// there is no way to determine whether the embedded interpreter was already
// initialized by some external code: `PyEval_ThreadsInitialized` would
// always return `true`, and the `PyGILState_Ensure/Release` flow would be
// executed instead of unlocking the GIL with `PyEval_SaveThread`. When
// another thread then calls `PyGILState_Ensure`, it would deadlock.
#if (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 7) || (PY_MAJOR_VERSION > 3)
// The only case we should go further and acquire the GIL: it is unlocked.
if (PyGILState_Check())
return;
#endif
if (PyEval_ThreadsInitialized()) {
Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_SCRIPT));
m_was_already_initialized = true;
m_gil_state = PyGILState_Ensure();
LLDB_LOGV(log, "Ensured PyGILState. Previous state = {0}locked\n",
m_gil_state == PyGILState_UNLOCKED ? "un" : "");
return;
}
// InitThreads acquires the GIL if it hasn't been called before.
PyEval_InitThreads();
}
TerminalState m_stdin_tty_state;
PyGILState_STATE m_gil_state;
bool m_was_already_initialized;
};
} // namespace
void ScriptInterpreterPython::ComputePythonDirForApple(
llvm::SmallVectorImpl<char> &path) {
auto style = llvm::sys::path::Style::posix;
llvm::StringRef path_ref(path.begin(), path.size());
auto rbegin = llvm::sys::path::rbegin(path_ref, style);
auto rend = llvm::sys::path::rend(path_ref);
auto framework = std::find(rbegin, rend, "LLDB.framework");
if (framework == rend) {
ComputePythonDirForPosix(path);
return;
}
path.resize(framework - rend);
llvm::sys::path::append(path, style, "LLDB.framework", "Resources", "Python");
}
void ScriptInterpreterPython::ComputePythonDirForPosix(
llvm::SmallVectorImpl<char> &path) {
auto style = llvm::sys::path::Style::posix;
#if defined(LLDB_PYTHON_RELATIVE_LIBDIR)
// Build the path by backing out of the lib dir, then building with whatever
// the real python interpreter uses. (e.g. lib for most, lib64 on RHEL
// x86_64).
llvm::sys::path::remove_filename(path, style);
llvm::sys::path::append(path, style, LLDB_PYTHON_RELATIVE_LIBDIR);
#else
llvm::sys::path::append(path, style,
"python" + llvm::Twine(PY_MAJOR_VERSION) + "." +
llvm::Twine(PY_MINOR_VERSION),
"site-packages");
#endif
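  // e.g., starting from a shlib dir of /usr/lib with CPython 3.6, the
  // fallback branch above produces /usr/lib/python3.6/site-packages
  // (illustrative paths only).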
}
void ScriptInterpreterPython::ComputePythonDirForWindows(
llvm::SmallVectorImpl<char> &path) {
auto style = llvm::sys::path::Style::windows;
llvm::sys::path::remove_filename(path, style);
llvm::sys::path::append(path, style, "lib", "site-packages");
// This will be injected directly through FileSpec.GetDirectory().SetString(),
// so we need to normalize manually.
std::replace(path.begin(), path.end(), '\\', '/');
}
FileSpec ScriptInterpreterPython::GetPythonDir() {
static FileSpec g_spec = []() {
FileSpec spec = HostInfo::GetShlibDir();
if (!spec)
return FileSpec();
llvm::SmallString<64> path;
spec.GetPath(path);
#if defined(__APPLE__)
ComputePythonDirForApple(path);
#elif defined(_WIN32)
ComputePythonDirForWindows(path);
#else
ComputePythonDirForPosix(path);
#endif
spec.GetDirectory().SetString(path);
return spec;
}();
return g_spec;
}
lldb_private::ConstString ScriptInterpreterPython::GetPluginNameStatic() {
static ConstString g_name("script-python");
return g_name;
}
const char *ScriptInterpreterPython::GetPluginDescriptionStatic() {
return "Embedded Python interpreter";
}
void ScriptInterpreterPython::Initialize() {
static llvm::once_flag g_once_flag;
llvm::call_once(g_once_flag, []() {
PluginManager::RegisterPlugin(GetPluginNameStatic(),
GetPluginDescriptionStatic(),
lldb::eScriptLanguagePython,
ScriptInterpreterPythonImpl::CreateInstance);
});
}
void ScriptInterpreterPython::Terminate() {}
ScriptInterpreterPythonImpl::Locker::Locker(
ScriptInterpreterPythonImpl *py_interpreter, uint16_t on_entry,
uint16_t on_leave, FILE *in, FILE *out, FILE *err)
: ScriptInterpreterLocker(),
m_teardown_session((on_leave & TearDownSession) == TearDownSession),
m_python_interpreter(py_interpreter) {
DoAcquireLock();
if ((on_entry & InitSession) == InitSession) {
if (!DoInitSession(on_entry, in, out, err)) {
// Don't teardown the session if we didn't init it.
m_teardown_session = false;
}
}
}
bool ScriptInterpreterPythonImpl::Locker::DoAcquireLock() {
Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_SCRIPT));
m_GILState = PyGILState_Ensure();
LLDB_LOGV(log, "Ensured PyGILState. Previous state = {0}locked",
m_GILState == PyGILState_UNLOCKED ? "un" : "");
// We need to save the thread state when we first start the command, because
// we might decide to interrupt it while some action is taking place outside
// of Python (e.g. printing to screen, waiting for the network, ...). In that
// case, _PyThreadState_Current will be NULL, and we would be unable to set
// the asynchronous exception - not a desirable situation.
m_python_interpreter->SetThreadState(PyThreadState_Get());
m_python_interpreter->IncrementLockCount();
return true;
}
bool ScriptInterpreterPythonImpl::Locker::DoInitSession(uint16_t on_entry_flags,
FILE *in, FILE *out,
FILE *err) {
if (!m_python_interpreter)
return false;
return m_python_interpreter->EnterSession(on_entry_flags, in, out, err);
}
bool ScriptInterpreterPythonImpl::Locker::DoFreeLock() {
Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_SCRIPT));
LLDB_LOGV(log, "Releasing PyGILState. Returning to state = {0}locked",
m_GILState == PyGILState_UNLOCKED ? "un" : "");
PyGILState_Release(m_GILState);
m_python_interpreter->DecrementLockCount();
return true;
}
bool ScriptInterpreterPythonImpl::Locker::DoTearDownSession() {
if (!m_python_interpreter)
return false;
m_python_interpreter->LeaveSession();
return true;
}
ScriptInterpreterPythonImpl::Locker::~Locker() {
if (m_teardown_session)
DoTearDownSession();
DoFreeLock();
}
ScriptInterpreterPythonImpl::ScriptInterpreterPythonImpl(Debugger &debugger)
: ScriptInterpreterPython(debugger), m_saved_stdin(), m_saved_stdout(),
m_saved_stderr(), m_main_module(),
m_session_dict(PyInitialValue::Invalid),
m_sys_module_dict(PyInitialValue::Invalid), m_run_one_line_function(),
m_run_one_line_str_global(),
m_dictionary_name(m_debugger.GetInstanceName().AsCString()),
m_terminal_state(), m_active_io_handler(eIOHandlerNone),
m_session_is_active(false), m_pty_slave_is_open(false),
m_valid_session(true), m_lock_count(0), m_command_thread_state(nullptr) {
InitializePrivate();
m_dictionary_name.append("_dict");
StreamString run_string;
run_string.Printf("%s = dict()", m_dictionary_name.c_str());
Locker locker(this, Locker::AcquireLock, Locker::FreeAcquiredLock);
PyRun_SimpleString(run_string.GetData());
run_string.Clear();
run_string.Printf(
"run_one_line (%s, 'import copy, keyword, os, re, sys, uuid, lldb')",
m_dictionary_name.c_str());
PyRun_SimpleString(run_string.GetData());
// Reloading modules requires a different syntax in Python 2 and Python 3.
// This provides a consistent syntax no matter what version of Python.
run_string.Clear();
run_string.Printf("run_one_line (%s, 'from six.moves import reload_module')",
m_dictionary_name.c_str());
PyRun_SimpleString(run_string.GetData());
// WARNING: temporary code that loads Cocoa formatters - this should be done
// on a per-platform basis rather than loading the whole set and letting the
// individual formatter classes exploit APIs to check whether they can/cannot
// do their task
run_string.Clear();
run_string.Printf(
"run_one_line (%s, 'import lldb.formatters, lldb.formatters.cpp, pydoc')",
m_dictionary_name.c_str());
PyRun_SimpleString(run_string.GetData());
run_string.Clear();
run_string.Printf("run_one_line (%s, 'import lldb.embedded_interpreter; from "
"lldb.embedded_interpreter import run_python_interpreter; "
"from lldb.embedded_interpreter import run_one_line')",
m_dictionary_name.c_str());
PyRun_SimpleString(run_string.GetData());
run_string.Clear();
run_string.Printf("run_one_line (%s, 'lldb.debugger_unique_id = %" PRIu64
"; pydoc.pager = pydoc.plainpager')",
m_dictionary_name.c_str(), m_debugger.GetID());
PyRun_SimpleString(run_string.GetData());
}
ScriptInterpreterPythonImpl::~ScriptInterpreterPythonImpl() {
// The session dictionary may hold objects with complex state, which means
// they may need to be torn down with some level of smarts; that, in turn,
// requires a valid thread state. Force Python to procure itself such a
// thread state, nuke the session dictionary, and then release it for others
// to use before proceeding with the rest of the shutdown.
auto gil_state = PyGILState_Ensure();
m_session_dict.Reset();
PyGILState_Release(gil_state);
}
lldb_private::ConstString ScriptInterpreterPythonImpl::GetPluginName() {
return GetPluginNameStatic();
}
uint32_t ScriptInterpreterPythonImpl::GetPluginVersion() { return 1; }
void ScriptInterpreterPythonImpl::IOHandlerActivated(IOHandler &io_handler,
bool interactive) {
const char *instructions = nullptr;
switch (m_active_io_handler) {
case eIOHandlerNone:
break;
case eIOHandlerBreakpoint:
instructions = R"(Enter your Python command(s). Type 'DONE' to end.
def function (frame, bp_loc, internal_dict):
"""frame: the lldb.SBFrame for the location at which you stopped
bp_loc: an lldb.SBBreakpointLocation for the breakpoint location information
internal_dict: an LLDB support object not to be used"""
)";
break;
case eIOHandlerWatchpoint:
instructions = "Enter your Python command(s). Type 'DONE' to end.\n";
break;
}
if (instructions) {
StreamFileSP output_sp(io_handler.GetOutputStreamFile());
if (output_sp && interactive) {
output_sp->PutCString(instructions);
output_sp->Flush();
}
}
}
void ScriptInterpreterPythonImpl::IOHandlerInputComplete(IOHandler &io_handler,
std::string &data) {
io_handler.SetIsDone(true);
bool batch_mode = m_debugger.GetCommandInterpreter().GetBatchCommandMode();
switch (m_active_io_handler) {
case eIOHandlerNone:
break;
case eIOHandlerBreakpoint: {
std::vector<BreakpointOptions *> *bp_options_vec =
(std::vector<BreakpointOptions *> *)io_handler.GetUserData();
for (auto bp_options : *bp_options_vec) {
if (!bp_options)
continue;
auto data_up = llvm::make_unique<CommandDataPython>();
if (!data_up)
break;
data_up->user_source.SplitIntoLines(data);
if (GenerateBreakpointCommandCallbackData(data_up->user_source,
data_up->script_source)
.Success()) {
auto baton_sp = std::make_shared<BreakpointOptions::CommandBaton>(
std::move(data_up));
bp_options->SetCallback(
ScriptInterpreterPythonImpl::BreakpointCallbackFunction, baton_sp);
} else if (!batch_mode) {
StreamFileSP error_sp = io_handler.GetErrorStreamFile();
if (error_sp) {
error_sp->Printf("Warning: No command attached to breakpoint.\n");
error_sp->Flush();
}
}
}
m_active_io_handler = eIOHandlerNone;
} break;
case eIOHandlerWatchpoint: {
WatchpointOptions *wp_options =
(WatchpointOptions *)io_handler.GetUserData();
auto data_up = llvm::make_unique<WatchpointOptions::CommandData>();
data_up->user_source.SplitIntoLines(data);
if (GenerateWatchpointCommandCallbackData(data_up->user_source,
data_up->script_source)) {
auto baton_sp =
std::make_shared<WatchpointOptions::CommandBaton>(std::move(data_up));
wp_options->SetCallback(
ScriptInterpreterPythonImpl::WatchpointCallbackFunction, baton_sp);
} else if (!batch_mode) {
StreamFileSP error_sp = io_handler.GetErrorStreamFile();
if (error_sp) {
error_sp->Printf("Warning: No command attached to breakpoint.\n");
error_sp->Flush();
}
}
m_active_io_handler = eIOHandlerNone;
} break;
}
}
lldb::ScriptInterpreterSP
ScriptInterpreterPythonImpl::CreateInstance(Debugger &debugger) {
return std::make_shared<ScriptInterpreterPythonImpl>(debugger);
}
void ScriptInterpreterPythonImpl::ResetOutputFileHandle(FILE *fh) {}
void ScriptInterpreterPythonImpl::SaveTerminalState(int fd) {
// Python mucks with the terminal state of STDIN. If we can possibly avoid
// this by setting the file handles up correctly prior to entering the
// interpreter we should. For now we save and restore the terminal state on
// the input file handle.
m_terminal_state.Save(fd, false);
}
void ScriptInterpreterPythonImpl::RestoreTerminalState() {
// Python mucks with the terminal state of STDIN. If we can possibly avoid
// this by setting the file handles up correctly prior to entering the
// interpreter we should. For now we save and restore the terminal state on
// the input file handle.
m_terminal_state.Restore();
}
void ScriptInterpreterPythonImpl::LeaveSession() {
Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_SCRIPT));
if (log)
log->PutCString("ScriptInterpreterPythonImpl::LeaveSession()");
// Check that we have a valid thread state: since we use our own threading
// and locking, in some (rare) cases during cleanup Python may end up
// believing we have no thread state, and PyImport_AddModule will crash if
// that is the case. Since that seems to only happen when destroying the
// SBDebugger, we can make do without cleaning up stdout and stderr.
// rdar://problem/11292882
// When the current thread state is NULL, PyThreadState_Get() issues a fatal
// error.
if (PyThreadState_GetDict()) {
PythonDictionary &sys_module_dict = GetSysModuleDictionary();
if (sys_module_dict.IsValid()) {
if (m_saved_stdin.IsValid()) {
sys_module_dict.SetItemForKey(PythonString("stdin"), m_saved_stdin);
m_saved_stdin.Reset();
}
if (m_saved_stdout.IsValid()) {
sys_module_dict.SetItemForKey(PythonString("stdout"), m_saved_stdout);
m_saved_stdout.Reset();
}
if (m_saved_stderr.IsValid()) {
sys_module_dict.SetItemForKey(PythonString("stderr"), m_saved_stderr);
m_saved_stderr.Reset();
}
}
}
m_session_is_active = false;
}
bool ScriptInterpreterPythonImpl::SetStdHandle(File &file, const char *py_name,
PythonFile &save_file,
const char *mode) {
if (file.IsValid()) {
// Flush the file before giving it to python to avoid interleaved output.
file.Flush();
PythonDictionary &sys_module_dict = GetSysModuleDictionary();
save_file = sys_module_dict.GetItemForKey(PythonString(py_name))
.AsType<PythonFile>();
PythonFile new_file(file, mode);
sys_module_dict.SetItemForKey(PythonString(py_name), new_file);
return true;
} else
save_file.Reset();
return false;
}
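// In Python terms, a successful SetStdHandle call above amounts to roughly:
//   import sys
//   save_file = sys.stdout      # stash the old object (stdin/stderr alike)
//   sys.stdout = <PythonFile wrapping LLDB's File>
// so that LeaveSession can later put the saved object back. (Illustrative
// sketch only; the real work goes through the PythonFile wrappers.)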
bool ScriptInterpreterPythonImpl::EnterSession(uint16_t on_entry_flags,
FILE *in, FILE *out, FILE *err) {
// If we have already entered the session, without having officially 'left'
// it, then there is no need to 'enter' it again.
Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_SCRIPT));
if (m_session_is_active) {
if (log)
log->Printf(
"ScriptInterpreterPythonImpl::EnterSession(on_entry_flags=0x%" PRIx16
") session is already active, returning without doing anything",
on_entry_flags);
return false;
}
if (log)
log->Printf(
"ScriptInterpreterPythonImpl::EnterSession(on_entry_flags=0x%" PRIx16
")",
on_entry_flags);
m_session_is_active = true;
StreamString run_string;
if (on_entry_flags & Locker::InitGlobals) {
run_string.Printf("run_one_line (%s, 'lldb.debugger_unique_id = %" PRIu64,
m_dictionary_name.c_str(), m_debugger.GetID());
run_string.Printf(
"; lldb.debugger = lldb.SBDebugger.FindDebuggerWithID (%" PRIu64 ")",
m_debugger.GetID());
run_string.PutCString("; lldb.target = lldb.debugger.GetSelectedTarget()");
run_string.PutCString("; lldb.process = lldb.target.GetProcess()");
run_string.PutCString("; lldb.thread = lldb.process.GetSelectedThread ()");
run_string.PutCString("; lldb.frame = lldb.thread.GetSelectedFrame ()");
run_string.PutCString("')");
} else {
// If we aren't initing the globals, we should still always set the
// debugger (since that is always unique).
run_string.Printf("run_one_line (%s, 'lldb.debugger_unique_id = %" PRIu64,
m_dictionary_name.c_str(), m_debugger.GetID());
run_string.Printf(
"; lldb.debugger = lldb.SBDebugger.FindDebuggerWithID (%" PRIu64 ")",
m_debugger.GetID());
run_string.PutCString("')");
}
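  // For a debugger ID of, say, 1 (illustrative), the InitGlobals branch
  // above builds a one-liner roughly like:
  //   run_one_line (<session_dict>, 'lldb.debugger_unique_id = 1;
  //     lldb.debugger = lldb.SBDebugger.FindDebuggerWithID (1);
  //     lldb.target = lldb.debugger.GetSelectedTarget(); ...')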
PyRun_SimpleString(run_string.GetData());
run_string.Clear();
PythonDictionary &sys_module_dict = GetSysModuleDictionary();
if (sys_module_dict.IsValid()) {
File in_file(in, false);
File out_file(out, false);
File err_file(err, false);
lldb::StreamFileSP in_sp;
lldb::StreamFileSP out_sp;
lldb::StreamFileSP err_sp;
if (!in_file.IsValid() || !out_file.IsValid() || !err_file.IsValid())
m_debugger.AdoptTopIOHandlerFilesIfInvalid(in_sp, out_sp, err_sp);
if (on_entry_flags & Locker::NoSTDIN) {
m_saved_stdin.Reset();
} else {
if (!SetStdHandle(in_file, "stdin", m_saved_stdin, "r")) {
if (in_sp)
SetStdHandle(in_sp->GetFile(), "stdin", m_saved_stdin, "r");
}
}
if (!SetStdHandle(out_file, "stdout", m_saved_stdout, "w")) {
if (out_sp)
SetStdHandle(out_sp->GetFile(), "stdout", m_saved_stdout, "w");
}
if (!SetStdHandle(err_file, "stderr", m_saved_stderr, "w")) {
if (err_sp)
SetStdHandle(err_sp->GetFile(), "stderr", m_saved_stderr, "w");
}
}
if (PyErr_Occurred())
PyErr_Clear();
return true;
}
PythonObject &ScriptInterpreterPythonImpl::GetMainModule() {
if (!m_main_module.IsValid())
m_main_module.Reset(PyRefType::Borrowed, PyImport_AddModule("__main__"));
return m_main_module;
}
PythonDictionary &ScriptInterpreterPythonImpl::GetSessionDictionary() {
if (m_session_dict.IsValid())
return m_session_dict;
PythonObject &main_module = GetMainModule();
if (!main_module.IsValid())
return m_session_dict;
PythonDictionary main_dict(PyRefType::Borrowed,
PyModule_GetDict(main_module.get()));
if (!main_dict.IsValid())
return m_session_dict;
PythonObject item = main_dict.GetItemForKey(PythonString(m_dictionary_name));
m_session_dict.Reset(PyRefType::Borrowed, item.get());
return m_session_dict;
}
PythonDictionary &ScriptInterpreterPythonImpl::GetSysModuleDictionary() {
if (m_sys_module_dict.IsValid())
return m_sys_module_dict;
PythonObject sys_module(PyRefType::Borrowed, PyImport_AddModule("sys"));
if (sys_module.IsValid())
m_sys_module_dict.Reset(PyRefType::Borrowed,
PyModule_GetDict(sys_module.get()));
return m_sys_module_dict;
}
static std::string GenerateUniqueName(const char *base_name_wanted,
uint32_t &functions_counter,
const void *name_token = nullptr) {
StreamString sstr;
if (!base_name_wanted)
return std::string();
if (!name_token)
sstr.Printf("%s_%d", base_name_wanted, functions_counter++);
else
sstr.Printf("%s_%p", base_name_wanted, name_token);
return sstr.GetString();
}
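// Usage sketch for GenerateUniqueName above: GenerateUniqueName("foo",
// counter) yields "foo_0", "foo_1", ... on successive calls, while passing
// a name_token pins the suffix to that pointer value ("foo_0x7f..."),
// making regeneration for the same token stable. ("foo" is hypothetical.)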
bool ScriptInterpreterPythonImpl::GetEmbeddedInterpreterModuleObjects() {
if (m_run_one_line_function.IsValid())
return true;
PythonObject module(PyRefType::Borrowed,
PyImport_AddModule("lldb.embedded_interpreter"));
if (!module.IsValid())
return false;
PythonDictionary module_dict(PyRefType::Borrowed,
PyModule_GetDict(module.get()));
if (!module_dict.IsValid())
return false;
m_run_one_line_function =
module_dict.GetItemForKey(PythonString("run_one_line"));
m_run_one_line_str_global =
module_dict.GetItemForKey(PythonString("g_run_one_line_str"));
return m_run_one_line_function.IsValid();
}
static void ReadThreadBytesReceived(void *baton, const void *src,
size_t src_len) {
if (src && src_len) {
Stream *strm = (Stream *)baton;
strm->Write(src, src_len);
strm->Flush();
}
}
bool ScriptInterpreterPythonImpl::ExecuteOneLine(
llvm::StringRef command, CommandReturnObject *result,
const ExecuteScriptOptions &options) {
std::string command_str = command.str();
if (!m_valid_session)
return false;
if (!command.empty()) {
// We want to call run_one_line, passing in the dictionary and the command
// string. We cannot do this through PyRun_SimpleString here because the
// command string may contain escaped characters, and putting it inside
// another string to pass to PyRun_SimpleString messes up the escaping. So
// we use the following more complicated method to pass the command string
// directly down to Python.
Debugger &debugger = m_debugger;
StreamFileSP input_file_sp;
StreamFileSP output_file_sp;
StreamFileSP error_file_sp;
Communication output_comm(
"lldb.ScriptInterpreterPythonImpl.ExecuteOneLine.comm");
bool join_read_thread = false;
if (options.GetEnableIO()) {
if (result) {
input_file_sp = debugger.GetInputFile();
// Set output to a temporary file so we can forward the results on to
// the result object
Pipe pipe;
Status pipe_result = pipe.CreateNew(false);
if (pipe_result.Success()) {
#if defined(_WIN32)
lldb::file_t read_file = pipe.GetReadNativeHandle();
pipe.ReleaseReadFileDescriptor();
std::unique_ptr<ConnectionGenericFile> conn_up(
new ConnectionGenericFile(read_file, true));
#else
std::unique_ptr<ConnectionFileDescriptor> conn_up(
new ConnectionFileDescriptor(pipe.ReleaseReadFileDescriptor(),
true));
#endif
if (conn_up->IsConnected()) {
output_comm.SetConnection(conn_up.release());
output_comm.SetReadThreadBytesReceivedCallback(
ReadThreadBytesReceived, &result->GetOutputStream());
output_comm.StartReadThread();
join_read_thread = true;
FILE *outfile_handle =
fdopen(pipe.ReleaseWriteFileDescriptor(), "w");
output_file_sp = std::make_shared<StreamFile>(outfile_handle, true);
error_file_sp = output_file_sp;
if (outfile_handle)
::setbuf(outfile_handle, nullptr);
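// Passing nullptr to ::setbuf disables stdio buffering on the write
// end, so script output reaches the read thread without waiting for
// a flush.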
result->SetImmediateOutputFile(
debugger.GetOutputFile()->GetFile().GetStream());
result->SetImmediateErrorFile(
debugger.GetErrorFile()->GetFile().GetStream());
}
}
}
if (!input_file_sp || !output_file_sp || !error_file_sp)
debugger.AdoptTopIOHandlerFilesIfInvalid(input_file_sp, output_file_sp,
error_file_sp);
} else {
input_file_sp = std::make_shared<StreamFile>();
FileSystem::Instance().Open(input_file_sp->GetFile(),
FileSpec(FileSystem::DEV_NULL),
File::eOpenOptionRead);
output_file_sp = std::make_shared<StreamFile>();
FileSystem::Instance().Open(output_file_sp->GetFile(),
FileSpec(FileSystem::DEV_NULL),
File::eOpenOptionWrite);
error_file_sp = output_file_sp;
}
FILE *in_file = input_file_sp->GetFile().GetStream();
FILE *out_file = output_file_sp->GetFile().GetStream();
FILE *err_file = error_file_sp->GetFile().GetStream();
bool success = false;
{
// WARNING! It's imperative that this RAII scope be as tight as
// possible. In particular, the scope must end *before* we try to join
// the read thread. The reason for this is that a pre-requisite for
// joining the read thread is that we close the write handle (to break
// the pipe and cause it to wake up and exit). But acquiring the GIL as
// below will redirect Python's stdio to use this same handle. If we
// close the handle while Python is still using it, bad things will
// happen.
Locker locker(
this,
Locker::AcquireLock | Locker::InitSession |
(options.GetSetLLDBGlobals() ? Locker::InitGlobals : 0) |
((result && result->GetInteractive()) ? 0 : Locker::NoSTDIN),
Locker::FreeAcquiredLock | Locker::TearDownSession, in_file, out_file,
err_file);
// Find the correct script interpreter dictionary in the main module.
PythonDictionary &session_dict = GetSessionDictionary();
if (session_dict.IsValid()) {
if (GetEmbeddedInterpreterModuleObjects()) {
if (PyCallable_Check(m_run_one_line_function.get())) {
PythonObject pargs(
PyRefType::Owned,
Py_BuildValue("(Os)", session_dict.get(), command_str.c_str()));
if (pargs.IsValid()) {
PythonObject return_value(
PyRefType::Owned,
PyObject_CallObject(m_run_one_line_function.get(),
pargs.get()));
if (return_value.IsValid())
success = true;
else if (options.GetMaskoutErrors() && PyErr_Occurred()) {
PyErr_Print();
PyErr_Clear();
}
}
}
}
}
// Flush our output and error file handles
::fflush(out_file);
if (out_file != err_file)
::fflush(err_file);
}
if (join_read_thread) {
// Close the write end of the pipe since we are done with our one line
// script. This should cause the read thread that output_comm is using to
// exit
output_file_sp->GetFile().Close();
// The close above should cause this thread to exit when it gets to the
// end of file, so let it get all its data
output_comm.JoinReadThread();
// Now we can close the read end of the pipe
output_comm.Disconnect();
}
if (success)
return true;
// The one-liner failed. Append the error message.
if (result) {
result->AppendErrorWithFormat(
"python failed attempting to evaluate '%s'\n", command_str.c_str());
}
return false;
}
if (result)
result->AppendError("empty command passed to python\n");
return false;
}
void ScriptInterpreterPythonImpl::ExecuteInterpreterLoop() {
static Timer::Category func_cat(LLVM_PRETTY_FUNCTION);
Timer scoped_timer(func_cat, LLVM_PRETTY_FUNCTION);
Debugger &debugger = m_debugger;
// At the moment, the only time the debugger does not have an input file
// handle is when this is called directly from Python, in which case it is
// both dangerous and unnecessary (not to mention confusing) to try to embed
// a running interpreter loop inside the already running Python interpreter
// loop, so we won't do it.
if (!debugger.GetInputFile()->GetFile().IsValid())
return;
IOHandlerSP io_handler_sp(new IOHandlerPythonInterpreter(debugger, this));
if (io_handler_sp) {
debugger.PushIOHandler(io_handler_sp);
}
}
bool ScriptInterpreterPythonImpl::Interrupt() {
Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_SCRIPT));
if (IsExecutingPython()) {
PyThreadState *state = PyThreadState_GET();
if (!state)
state = GetThreadState();
if (state) {
long tid = state->thread_id;
PyThreadState_Swap(state);
int num_threads = PyThreadState_SetAsyncExc(tid, PyExc_KeyboardInterrupt);
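// PyThreadState_SetAsyncExc returns the number of thread states that were
// modified; per the CPython docs this should be 0 (tid not found) or 1,
// and a caller seeing > 1 is expected to call it again with a NULL
// exception to undo the damage. Here the count is only logged.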
if (log)
log->Printf("ScriptInterpreterPythonImpl::Interrupt() sending "
"PyExc_KeyboardInterrupt (tid = %li, num_threads = %i)...",
tid, num_threads);
return true;
}
}
if (log)
log->Printf(
"ScriptInterpreterPythonImpl::Interrupt() python code not running, "
"can't interrupt");
return false;
}
bool ScriptInterpreterPythonImpl::ExecuteOneLineWithReturn(
llvm::StringRef in_string, ScriptInterpreter::ScriptReturnType return_type,
void *ret_value, const ExecuteScriptOptions &options) {
Locker locker(this,
Locker::AcquireLock | Locker::InitSession |
(options.GetSetLLDBGlobals() ? Locker::InitGlobals : 0) |
Locker::NoSTDIN,
Locker::FreeAcquiredLock | Locker::TearDownSession);
PythonObject py_return;
PythonObject &main_module = GetMainModule();
PythonDictionary globals(PyRefType::Borrowed,
PyModule_GetDict(main_module.get()));
PythonObject py_error;
bool ret_success = false;
int success;
PythonDictionary locals = GetSessionDictionary();
if (!locals.IsValid()) {
locals.Reset(
PyRefType::Owned,
PyObject_GetAttrString(globals.get(), m_dictionary_name.c_str()));
}
if (!locals.IsValid())
locals = globals;
py_error.Reset(PyRefType::Borrowed, PyErr_Occurred());
if (py_error.IsValid())
PyErr_Clear();
std::string as_string = in_string.str();
{ // scope for PythonInputReaderManager
// PythonInputReaderManager py_input(options.GetEnableIO() ? this : NULL);
py_return.Reset(PyRefType::Owned,
PyRun_String(as_string.c_str(), Py_eval_input,
globals.get(), locals.get()));
if (!py_return.IsValid()) {
py_error.Reset(PyRefType::Borrowed, PyErr_Occurred());
if (py_error.IsValid())
PyErr_Clear();
py_return.Reset(PyRefType::Owned,
PyRun_String(as_string.c_str(), Py_single_input,
globals.get(), locals.get()));
}
}
if (py_return.IsValid()) {
switch (return_type) {
case eScriptReturnTypeCharPtr: // "char *"
{
const char format[3] = "s#";
success = PyArg_Parse(py_return.get(), format, (char **)ret_value);
break;
}
case eScriptReturnTypeCharStrOrNone: // char* or NULL if py_return ==
// Py_None
{
const char format[3] = "z";
success = PyArg_Parse(py_return.get(), format, (char **)ret_value);
break;
}
case eScriptReturnTypeBool: {
const char format[2] = "b";
success = PyArg_Parse(py_return.get(), format, (bool *)ret_value);
break;
}
case eScriptReturnTypeShortInt: {
const char format[2] = "h";
success = PyArg_Parse(py_return.get(), format, (short *)ret_value);
break;
}
case eScriptReturnTypeShortIntUnsigned: {
const char format[2] = "H";
success =
PyArg_Parse(py_return.get(), format, (unsigned short *)ret_value);
break;
}
case eScriptReturnTypeInt: {
const char format[2] = "i";
success = PyArg_Parse(py_return.get(), format, (int *)ret_value);
break;
}
case eScriptReturnTypeIntUnsigned: {
const char format[2] = "I";
success = PyArg_Parse(py_return.get(), format, (unsigned int *)ret_value);
break;
}
case eScriptReturnTypeLongInt: {
const char format[2] = "l";
success = PyArg_Parse(py_return.get(), format, (long *)ret_value);
break;
}
case eScriptReturnTypeLongIntUnsigned: {
const char format[2] = "k";
success =
PyArg_Parse(py_return.get(), format, (unsigned long *)ret_value);
break;
}
case eScriptReturnTypeLongLong: {
const char format[2] = "L";
success = PyArg_Parse(py_return.get(), format, (long long *)ret_value);
break;
}
case eScriptReturnTypeLongLongUnsigned: {
const char format[2] = "K";
success =
PyArg_Parse(py_return.get(), format, (unsigned long long *)ret_value);
break;
}
case eScriptReturnTypeFloat: {
const char format[2] = "f";
success = PyArg_Parse(py_return.get(), format, (float *)ret_value);
break;
}
case eScriptReturnTypeDouble: {
const char format[2] = "d";
success = PyArg_Parse(py_return.get(), format, (double *)ret_value);
break;
}
case eScriptReturnTypeChar: {
const char format[2] = "c";
success = PyArg_Parse(py_return.get(), format, (char *)ret_value);
break;
}
case eScriptReturnTypeOpaqueObject: {
success = true;
PyObject *saved_value = py_return.get();
Py_XINCREF(saved_value);
*((PyObject **)ret_value) = saved_value;
break;
}
}
ret_success = success;
}
py_error.Reset(PyRefType::Borrowed, PyErr_Occurred());
if (py_error.IsValid()) {
ret_success = false;
if (options.GetMaskoutErrors()) {
if (PyErr_GivenExceptionMatches(py_error.get(), PyExc_SyntaxError))
PyErr_Print();
PyErr_Clear();
}
}
return ret_success;
}
Status ScriptInterpreterPythonImpl::ExecuteMultipleLines(
const char *in_string, const ExecuteScriptOptions &options) {
Status error;
Locker locker(this,
Locker::AcquireLock | Locker::InitSession |
(options.GetSetLLDBGlobals() ? Locker::InitGlobals : 0) |
Locker::NoSTDIN,
Locker::FreeAcquiredLock | Locker::TearDownSession);
PythonObject return_value;
PythonObject &main_module = GetMainModule();
PythonDictionary globals(PyRefType::Borrowed,
PyModule_GetDict(main_module.get()));
PythonObject py_error;
PythonDictionary locals = GetSessionDictionary();
if (!locals.IsValid())
locals.Reset(
PyRefType::Owned,
PyObject_GetAttrString(globals.get(), m_dictionary_name.c_str()));
if (!locals.IsValid())
locals = globals;
py_error.Reset(PyRefType::Borrowed, PyErr_Occurred());
if (py_error.IsValid())
PyErr_Clear();
if (in_string != nullptr) {
PythonObject code_object;
code_object.Reset(PyRefType::Owned,
Py_CompileString(in_string, "temp.py", Py_file_input));
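// Py_CompileString is the C-level analogue of Python's built-in
// compile(in_string, "temp.py", "exec"); PyEval_EvalCode below then plays
// the role of exec() against the given globals/locals.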
if (code_object.IsValid()) {
// In Python 2.x, PyEval_EvalCode takes a PyCodeObject, but in Python 3.x, it
// takes a PyObject. They are convertible (hence the function
// PyCode_Check(PyObject*)), so we have to do the cast for Python 2.x.
#if PY_MAJOR_VERSION >= 3
PyObject *py_code_obj = code_object.get();
#else
PyCodeObject *py_code_obj =
reinterpret_cast<PyCodeObject *>(code_object.get());
#endif
return_value.Reset(
PyRefType::Owned,
PyEval_EvalCode(py_code_obj, globals.get(), locals.get()));
}
}
PythonExceptionState exception_state(!options.GetMaskoutErrors());
if (exception_state.IsError())
error.SetErrorString(exception_state.Format().c_str());
return error;
}
void ScriptInterpreterPythonImpl::CollectDataForBreakpointCommandCallback(
std::vector<BreakpointOptions *> &bp_options_vec,
CommandReturnObject &result) {
m_active_io_handler = eIOHandlerBreakpoint;
m_debugger.GetCommandInterpreter().GetPythonCommandsFromIOHandler(
" ", *this, true, &bp_options_vec);
}
void ScriptInterpreterPythonImpl::CollectDataForWatchpointCommandCallback(
WatchpointOptions *wp_options, CommandReturnObject &result) {
m_active_io_handler = eIOHandlerWatchpoint;
m_debugger.GetCommandInterpreter().GetPythonCommandsFromIOHandler(
" ", *this, true, wp_options);
}
void ScriptInterpreterPythonImpl::SetBreakpointCommandCallbackFunction(
BreakpointOptions *bp_options, const char *function_name) {
// For now just cons up a one-liner that calls the provided function.
std::string oneliner("return ");
oneliner += function_name;
oneliner += "(frame, bp_loc, internal_dict)";
m_debugger.GetScriptInterpreter()->SetBreakpointCommandCallback(
bp_options, oneliner.c_str());
}
Status ScriptInterpreterPythonImpl::SetBreakpointCommandCallback(
BreakpointOptions *bp_options,
std::unique_ptr<BreakpointOptions::CommandData> &cmd_data_up) {
Status error;
error = GenerateBreakpointCommandCallbackData(cmd_data_up->user_source,
cmd_data_up->script_source);
if (error.Fail()) {
return error;
}
auto baton_sp =
std::make_shared<BreakpointOptions::CommandBaton>(std::move(cmd_data_up));
bp_options->SetCallback(
ScriptInterpreterPythonImpl::BreakpointCallbackFunction, baton_sp);
return error;
}
// Set a Python one-liner as the callback for the breakpoint.
Status ScriptInterpreterPythonImpl::SetBreakpointCommandCallback(
BreakpointOptions *bp_options, const char *command_body_text) {
auto data_up = llvm::make_unique<CommandDataPython>();
// Split the command_body_text into lines, and pass that to
// GenerateBreakpointCommandCallbackData. That will wrap the body in an
// auto-generated function, and return the function name in script_source.
// That is what the callback will actually invoke.
data_up->user_source.SplitIntoLines(command_body_text);
Status error = GenerateBreakpointCommandCallbackData(data_up->user_source,
data_up->script_source);
if (error.Success()) {
auto baton_sp =
std::make_shared<BreakpointOptions::CommandBaton>(std::move(data_up));
bp_options->SetCallback(
ScriptInterpreterPythonImpl::BreakpointCallbackFunction, baton_sp);
return error;
} else
return error;
}
// Set a Python one-liner as the callback for the watchpoint.
void ScriptInterpreterPythonImpl::SetWatchpointCommandCallback(
WatchpointOptions *wp_options, const char *oneliner) {
auto data_up = llvm::make_unique<WatchpointOptions::CommandData>();
// It's necessary to set both user_source and script_source to the oneliner.
// The former is used to generate callback description (as in watchpoint
// command list) while the latter is used for Python to interpret during the
// actual callback.
data_up->user_source.AppendString(oneliner);
data_up->script_source.assign(oneliner);
if (GenerateWatchpointCommandCallbackData(data_up->user_source,
data_up->script_source)) {
auto baton_sp =
std::make_shared<WatchpointOptions::CommandBaton>(std::move(data_up));
wp_options->SetCallback(
ScriptInterpreterPythonImpl::WatchpointCallbackFunction, baton_sp);
}
return;
}
Status ScriptInterpreterPythonImpl::ExportFunctionDefinitionToInterpreter(
StringList &function_def) {
// Convert StringList to one long, newline delimited, const char *.
std::string function_def_string(function_def.CopyList());
Status error = ExecuteMultipleLines(
function_def_string.c_str(),
ScriptInterpreter::ExecuteScriptOptions().SetEnableIO(false));
return error;
}
Status ScriptInterpreterPythonImpl::GenerateFunction(const char *signature,
const StringList &input) {
Status error;
int num_lines = input.GetSize();
if (num_lines == 0) {
error.SetErrorString("No input data.");
return error;
}
if (!signature || *signature == 0) {
error.SetErrorString("No output function name.");
return error;
}
StreamString sstr;
StringList auto_generated_function;
auto_generated_function.AppendString(signature);
auto_generated_function.AppendString(
" global_dict = globals()"); // Grab the global dictionary
auto_generated_function.AppendString(
" new_keys = internal_dict.keys()"); // Make a list of keys in the
// session dict
auto_generated_function.AppendString(
" old_keys = global_dict.keys()"); // Save list of keys in global dict
auto_generated_function.AppendString(
" global_dict.update (internal_dict)"); // Add the session dictionary
// to the
// global dictionary.
// Wrap everything up inside the function, increasing the indentation.
auto_generated_function.AppendString(" if True:");
for (int i = 0; i < num_lines; ++i) {
sstr.Clear();
sstr.Printf(" %s", input.GetStringAtIndex(i));
auto_generated_function.AppendString(sstr.GetData());
}
auto_generated_function.AppendString(
" for key in new_keys:"); // Iterate over all the keys from session
// dict
auto_generated_function.AppendString(
" internal_dict[key] = global_dict[key]"); // Update session dict
// values
auto_generated_function.AppendString(
" if key not in old_keys:"); // If key was not originally in
// global dict
auto_generated_function.AppendString(
" del global_dict[key]"); // ...then remove key/value from
// global dict
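  // Putting it together: for a hypothetical signature
  // "def f (frame, bp_loc, internal_dict):" and one user line "print(42)",
  // the generated wrapper looks roughly like:
  //   def f (frame, bp_loc, internal_dict):
  //       global_dict = globals()
  //       new_keys = internal_dict.keys()
  //       old_keys = global_dict.keys()
  //       global_dict.update (internal_dict)
  //       if True:
  //           print(42)
  //       for key in new_keys:
  //           internal_dict[key] = global_dict[key]
  //           if key not in old_keys:
  //               del global_dict[key]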
// Verify that the results are valid Python.
error = ExportFunctionDefinitionToInterpreter(auto_generated_function);
return error;
}
bool ScriptInterpreterPythonImpl::GenerateTypeScriptFunction(
StringList &user_input, std::string &output, const void *name_token) {
static uint32_t num_created_functions = 0;
user_input.RemoveBlankLines();
StreamString sstr;
// Check to see if we have any data; if not, just return.
if (user_input.GetSize() == 0)
return false;
// Take what the user wrote, wrap it all up inside one big auto-generated
// Python function, passing in the ValueObject as parameter to the function.
std::string auto_generated_function_name(
GenerateUniqueName("lldb_autogen_python_type_print_func",
num_created_functions, name_token));
sstr.Printf("def %s (valobj, internal_dict):",
auto_generated_function_name.c_str());
if (!GenerateFunction(sstr.GetData(), user_input).Success())
return false;
// Store the name of the auto-generated function to be called.
output.assign(auto_generated_function_name);
return true;
}
bool ScriptInterpreterPythonImpl::GenerateScriptAliasFunction(
StringList &user_input, std::string &output) {
static uint32_t num_created_functions = 0;
user_input.RemoveBlankLines();
StreamString sstr;
// Check to see if we have any data; if not, just return.
if (user_input.GetSize() == 0)
return false;
std::string auto_generated_function_name(GenerateUniqueName(
"lldb_autogen_python_cmd_alias_func", num_created_functions));
sstr.Printf("def %s (debugger, args, result, internal_dict):",
auto_generated_function_name.c_str());
if (!GenerateFunction(sstr.GetData(), user_input).Success())
return false;
// Store the name of the auto-generated function to be called.
output.assign(auto_generated_function_name);
return true;
}
bool ScriptInterpreterPythonImpl::GenerateTypeSynthClass(
StringList &user_input, std::string &output, const void *name_token) {
static uint32_t num_created_classes = 0;
user_input.RemoveBlankLines();
int num_lines = user_input.GetSize();
StreamString sstr;
// Check to see if we have any data; if not, just return.
if (user_input.GetSize() == 0)
return false;
// Wrap all user input into a Python class
std::string auto_generated_class_name(GenerateUniqueName(
"lldb_autogen_python_type_synth_class", num_created_classes, name_token));
StringList auto_generated_class;
// Create the function name & definition string.
sstr.Printf("class %s:", auto_generated_class_name.c_str());
auto_generated_class.AppendString(sstr.GetString());
// Wrap everything up inside the class, increasing the indentation. We don't
// need to play any fancy indentation tricks here because there is no
// surrounding code whose indentation we need to honor.
for (int i = 0; i < num_lines; ++i) {
sstr.Clear();
sstr.Printf(" %s", user_input.GetStringAtIndex(i));
auto_generated_class.AppendString(sstr.GetString());
}
// Verify that the results are valid Python. (even though the method is
// ExportFunctionDefinitionToInterpreter, a class will actually be exported)
// (TODO: rename that method to ExportDefinitionToInterpreter)
if (!ExportFunctionDefinitionToInterpreter(auto_generated_class).Success())
return false;
// Store the name of the auto-generated class
output.assign(auto_generated_class_name);
return true;
}
StructuredData::GenericSP
ScriptInterpreterPythonImpl::CreateFrameRecognizer(const char *class_name) {
if (class_name == nullptr || class_name[0] == '\0')
return StructuredData::GenericSP();
void *ret_val;
{
Locker py_lock(this, Locker::AcquireLock | Locker::NoSTDIN,
Locker::FreeLock);
ret_val = LLDBSWIGPython_CreateFrameRecognizer(class_name,
m_dictionary_name.c_str());
}
return StructuredData::GenericSP(new StructuredPythonObject(ret_val));
}
lldb::ValueObjectListSP ScriptInterpreterPythonImpl::GetRecognizedArguments(
const StructuredData::ObjectSP &os_plugin_object_sp,
lldb::StackFrameSP frame_sp) {
Locker py_lock(this, Locker::AcquireLock | Locker::NoSTDIN, Locker::FreeLock);
if (!os_plugin_object_sp)
return ValueObjectListSP();
StructuredData::Generic *generic = os_plugin_object_sp->GetAsGeneric();
if (!generic)
return nullptr;
PythonObject implementor(PyRefType::Borrowed,
(PyObject *)generic->GetValue());
if (!implementor.IsAllocated())
return ValueObjectListSP();
PythonObject py_return(PyRefType::Owned,
(PyObject *)LLDBSwigPython_GetRecognizedArguments(
implementor.get(), frame_sp));
// if it fails, print the error but otherwise go on
if (PyErr_Occurred()) {
PyErr_Print();
PyErr_Clear();
}
if (py_return.get()) {
PythonList result_list(PyRefType::Borrowed, py_return.get());
ValueObjectListSP result = ValueObjectListSP(new ValueObjectList());
for (size_t i = 0; i < result_list.GetSize(); i++) {
PyObject *item = result_list.GetItemAtIndex(i).get();
lldb::SBValue *sb_value_ptr =
(lldb::SBValue *)LLDBSWIGPython_CastPyObjectToSBValue(item);
auto valobj_sp = LLDBSWIGPython_GetValueObjectSPFromSBValue(sb_value_ptr);
if (valobj_sp)
result->Append(valobj_sp);
}
return result;
}
return ValueObjectListSP();
}
StructuredData::GenericSP
ScriptInterpreterPythonImpl::OSPlugin_CreatePluginObject(
const char *class_name, lldb::ProcessSP process_sp) {
if (class_name == nullptr || class_name[0] == '\0')
return StructuredData::GenericSP();
if (!process_sp)
return StructuredData::GenericSP();
void *ret_val;
{
Locker py_lock(this, Locker::AcquireLock | Locker::NoSTDIN,
Locker::FreeLock);
ret_val = LLDBSWIGPythonCreateOSPlugin(
class_name, m_dictionary_name.c_str(), process_sp);
}
return StructuredData::GenericSP(new StructuredPythonObject(ret_val));
}
StructuredData::DictionarySP ScriptInterpreterPythonImpl::OSPlugin_RegisterInfo(
StructuredData::ObjectSP os_plugin_object_sp) {
Locker py_lock(this, Locker::AcquireLock | Locker::NoSTDIN, Locker::FreeLock);
static char callee_name[] = "get_register_info";
if (!os_plugin_object_sp)
return StructuredData::DictionarySP();
StructuredData::Generic *generic = os_plugin_object_sp->GetAsGeneric();
if (!generic)
return nullptr;
PythonObject implementor(PyRefType::Borrowed,
(PyObject *)generic->GetValue());
if (!implementor.IsAllocated())
return StructuredData::DictionarySP();
PythonObject pmeth(PyRefType::Owned,
PyObject_GetAttrString(implementor.get(), callee_name));
if (PyErr_Occurred())
PyErr_Clear();
if (!pmeth.IsAllocated())
return StructuredData::DictionarySP();
if (PyCallable_Check(pmeth.get()) == 0) {
if (PyErr_Occurred())
PyErr_Clear();
return StructuredData::DictionarySP();
}
if (PyErr_Occurred())
PyErr_Clear();
// Right now we know this function exists and is callable.
PythonObject py_return(
PyRefType::Owned,
PyObject_CallMethod(implementor.get(), callee_name, nullptr));
// if it fails, print the error but otherwise go on
if (PyErr_Occurred()) {
PyErr_Print();
PyErr_Clear();
}
if (py_return.get()) {
PythonDictionary result_dict(PyRefType::Borrowed, py_return.get());
return result_dict.CreateStructuredDictionary();
}
return StructuredData::DictionarySP();
}
StructuredData::ArraySP ScriptInterpreterPythonImpl::OSPlugin_ThreadsInfo(
StructuredData::ObjectSP os_plugin_object_sp) {
Locker py_lock(this, Locker::AcquireLock | Locker::NoSTDIN, Locker::FreeLock);
static char callee_name[] = "get_thread_info";
if (!os_plugin_object_sp)
return StructuredData::ArraySP();
StructuredData::Generic *generic = os_plugin_object_sp->GetAsGeneric();
if (!generic)
return nullptr;
PythonObject implementor(PyRefType::Borrowed,
(PyObject *)generic->GetValue());
if (!implementor.IsAllocated())
return StructuredData::ArraySP();
PythonObject pmeth(PyRefType::Owned,
PyObject_GetAttrString(implementor.get(), callee_name));
if (PyErr_Occurred())
PyErr_Clear();
if (!pmeth.IsAllocated())
return StructuredData::ArraySP();
if (PyCallable_Check(pmeth.get()) == 0) {
if (PyErr_Occurred())
PyErr_Clear();
return StructuredData::ArraySP();
}
if (PyErr_Occurred())
PyErr_Clear();
// Right now we know this function exists and is callable.
PythonObject py_return(
PyRefType::Owned,
PyObject_CallMethod(implementor.get(), callee_name, nullptr));
// if it fails, print the error but otherwise go on
if (PyErr_Occurred()) {
PyErr_Print();
PyErr_Clear();
}
if (py_return.get()) {
PythonList result_list(PyRefType::Borrowed, py_return.get());
return result_list.CreateStructuredArray();
}
return StructuredData::ArraySP();
}
// GetPythonValueFormatString provides a system independent type safe way to
// convert a variable's type into a python value format. Python value formats
// are defined in terms of builtin C types and could change from system to
// system as the underlying typedefs for uint* types, size_t, off_t and
// other values change.
template <typename T> const char *GetPythonValueFormatString(T t);
template <> const char *GetPythonValueFormatString(char *) { return "s"; }
template <> const char *GetPythonValueFormatString(char) { return "b"; }
template <> const char *GetPythonValueFormatString(unsigned char) {
return "B";
}
template <> const char *GetPythonValueFormatString(short) { return "h"; }
template <> const char *GetPythonValueFormatString(unsigned short) {
return "H";
}
template <> const char *GetPythonValueFormatString(int) { return "i"; }
template <> const char *GetPythonValueFormatString(unsigned int) { return "I"; }
template <> const char *GetPythonValueFormatString(long) { return "l"; }
template <> const char *GetPythonValueFormatString(unsigned long) {
return "k";
}
template <> const char *GetPythonValueFormatString(long long) { return "L"; }
template <> const char *GetPythonValueFormatString(unsigned long long) {
return "K";
}
template <> const char *GetPythonValueFormatString(float t) { return "f"; }
template <> const char *GetPythonValueFormatString(double t) { return "d"; }
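// Usage sketch: GetPythonValueFormatString(lldb::tid_t(0)) resolves through
// the unsigned 64-bit overload ("k" on LP64 systems, "K" where the typedef
// is unsigned long long), which is exactly the portability point the
// comment above makes.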
StructuredData::StringSP
ScriptInterpreterPythonImpl::OSPlugin_RegisterContextData(
StructuredData::ObjectSP os_plugin_object_sp, lldb::tid_t tid) {
Locker py_lock(this, Locker::AcquireLock | Locker::NoSTDIN, Locker::FreeLock);
static char callee_name[] = "get_register_data";
static char *param_format =
const_cast<char *>(GetPythonValueFormatString(tid));
if (!os_plugin_object_sp)
return StructuredData::StringSP();
StructuredData::Generic *generic = os_plugin_object_sp->GetAsGeneric();
if (!generic)
return nullptr;
PythonObject implementor(PyRefType::Borrowed,
(PyObject *)generic->GetValue());
if (!implementor.IsAllocated())
return StructuredData::StringSP();
PythonObject pmeth(PyRefType::Owned,
PyObject_GetAttrString(implementor.get(), callee_name));
if (PyErr_Occurred())
PyErr_Clear();
if (!pmeth.IsAllocated())
return StructuredData::StringSP();
if (PyCallable_Check(pmeth.get()) == 0) {
if (PyErr_Occurred())
PyErr_Clear();
return StructuredData::StringSP();
}
if (PyErr_Occurred())
PyErr_Clear();
// Right now we know this function exists and is callable.
PythonObject py_return(
PyRefType::Owned,
PyObject_CallMethod(implementor.get(), callee_name, param_format, tid));
// if it fails, print the error but otherwise go on
if (PyErr_Occurred()) {
PyErr_Print();
PyErr_Clear();
}
if (py_return.get()) {
PythonBytes result(PyRefType::Borrowed, py_return.get());
return result.CreateStructuredString();
}
return StructuredData::StringSP();
}
StructuredData::DictionarySP ScriptInterpreterPythonImpl::OSPlugin_CreateThread(
StructuredData::ObjectSP os_plugin_object_sp, lldb::tid_t tid,
lldb::addr_t context) {
Locker py_lock(this, Locker::AcquireLock | Locker::NoSTDIN, Locker::FreeLock);
static char callee_name[] = "create_thread";
std::string param_format;
param_format += GetPythonValueFormatString(tid);
param_format += GetPythonValueFormatString(context);
if (!os_plugin_object_sp)
return StructuredData::DictionarySP();
StructuredData::Generic *generic = os_plugin_object_sp->GetAsGeneric();
if (!generic)
return nullptr;
PythonObject implementor(PyRefType::Borrowed,
(PyObject *)generic->GetValue());
if (!implementor.IsAllocated())
return StructuredData::DictionarySP();
PythonObject pmeth(PyRefType::Owned,
PyObject_GetAttrString(implementor.get(), callee_name));
if (PyErr_Occurred())
PyErr_Clear();
if (!pmeth.IsAllocated())
return StructuredData::DictionarySP();
if (PyCallable_Check(pmeth.get()) == 0) {
if (PyErr_Occurred())
PyErr_Clear();
return StructuredData::DictionarySP();
}
if (PyErr_Occurred())
PyErr_Clear();
// Right now we know this function exists and is callable.
PythonObject py_return(PyRefType::Owned,
PyObject_CallMethod(implementor.get(), callee_name,
&param_format[0], tid, context));
// if it fails, print the error but otherwise go on
if (PyErr_Occurred()) {
PyErr_Print();
PyErr_Clear();
}
if (py_return.get()) {
PythonDictionary result_dict(PyRefType::Borrowed, py_return.get());
return result_dict.CreateStructuredDictionary();
}
return StructuredData::DictionarySP();
}
StructuredData::ObjectSP ScriptInterpreterPythonImpl::CreateScriptedThreadPlan(
const char *class_name, lldb::ThreadPlanSP thread_plan_sp) {
if (class_name == nullptr || class_name[0] == '\0')
return StructuredData::ObjectSP();
if (!thread_plan_sp.get())
return StructuredData::ObjectSP();
Debugger &debugger = thread_plan_sp->GetTarget().GetDebugger();
ScriptInterpreter *script_interpreter = debugger.GetScriptInterpreter();
ScriptInterpreterPythonImpl *python_interpreter =
static_cast<ScriptInterpreterPythonImpl *>(script_interpreter);
if (!script_interpreter)
return StructuredData::ObjectSP();
void *ret_val;
{
Locker py_lock(this,
Locker::AcquireLock | Locker::InitSession | Locker::NoSTDIN);
ret_val = LLDBSwigPythonCreateScriptedThreadPlan(
class_name, python_interpreter->m_dictionary_name.c_str(),
thread_plan_sp);
}
return StructuredData::ObjectSP(new StructuredPythonObject(ret_val));
}
bool ScriptInterpreterPythonImpl::ScriptedThreadPlanExplainsStop(
StructuredData::ObjectSP implementor_sp, Event *event, bool &script_error) {
bool explains_stop = true;
StructuredData::Generic *generic = nullptr;
if (implementor_sp)
generic = implementor_sp->GetAsGeneric();
if (generic) {
Locker py_lock(this,
Locker::AcquireLock | Locker::InitSession | Locker::NoSTDIN);
explains_stop = LLDBSWIGPythonCallThreadPlan(
generic->GetValue(), "explains_stop", event, script_error);
if (script_error)
return true;
}
return explains_stop;
}
bool ScriptInterpreterPythonImpl::ScriptedThreadPlanShouldStop(
StructuredData::ObjectSP implementor_sp, Event *event, bool &script_error) {
bool should_stop = true;
StructuredData::Generic *generic = nullptr;
if (implementor_sp)
generic = implementor_sp->GetAsGeneric();
if (generic) {
Locker py_lock(this,
Locker::AcquireLock | Locker::InitSession | Locker::NoSTDIN);
should_stop = LLDBSWIGPythonCallThreadPlan(
generic->GetValue(), "should_stop", event, script_error);
if (script_error)
return true;
}
return should_stop;
}
bool ScriptInterpreterPythonImpl::ScriptedThreadPlanIsStale(
StructuredData::ObjectSP implementor_sp, bool &script_error) {
bool is_stale = true;
StructuredData::Generic *generic = nullptr;
if (implementor_sp)
generic = implementor_sp->GetAsGeneric();
if (generic) {
Locker py_lock(this,
Locker::AcquireLock | Locker::InitSession | Locker::NoSTDIN);
is_stale = LLDBSWIGPythonCallThreadPlan(generic->GetValue(), "is_stale",
nullptr, script_error);
if (script_error)
return true;
}
return is_stale;
}
lldb::StateType ScriptInterpreterPythonImpl::ScriptedThreadPlanGetRunState(
StructuredData::ObjectSP implementor_sp, bool &script_error) {
bool should_step = false;
StructuredData::Generic *generic = nullptr;
if (implementor_sp)
generic = implementor_sp->GetAsGeneric();
if (generic) {
Locker py_lock(this,
Locker::AcquireLock | Locker::InitSession | Locker::NoSTDIN);
should_step = LLDBSWIGPythonCallThreadPlan(
generic->GetValue(), "should_step", nullptr, script_error);
if (script_error)
should_step = true;
}
if (should_step)
return lldb::eStateStepping;
else
return lldb::eStateRunning;
}
StructuredData::GenericSP
ScriptInterpreterPythonImpl::CreateScriptedBreakpointResolver(
const char *class_name, StructuredDataImpl *args_data,
lldb::BreakpointSP &bkpt_sp) {
if (class_name == nullptr || class_name[0] == '\0')
return StructuredData::GenericSP();
if (!bkpt_sp.get())
return StructuredData::GenericSP();
Debugger &debugger = bkpt_sp->GetTarget().GetDebugger();
ScriptInterpreter *script_interpreter = debugger.GetScriptInterpreter();
ScriptInterpreterPythonImpl *python_interpreter =
static_cast<ScriptInterpreterPythonImpl *>(script_interpreter);
if (!script_interpreter)
return StructuredData::GenericSP();
void *ret_val;
{
Locker py_lock(this,
Locker::AcquireLock | Locker::InitSession | Locker::NoSTDIN);
ret_val = LLDBSwigPythonCreateScriptedBreakpointResolver(
class_name, python_interpreter->m_dictionary_name.c_str(), args_data,
bkpt_sp);
}
return StructuredData::GenericSP(new StructuredPythonObject(ret_val));
}
bool ScriptInterpreterPythonImpl::ScriptedBreakpointResolverSearchCallback(
StructuredData::GenericSP implementor_sp, SymbolContext *sym_ctx) {
bool should_continue = false;
if (implementor_sp) {
Locker py_lock(this,
Locker::AcquireLock | Locker::InitSession | Locker::NoSTDIN);
should_continue = LLDBSwigPythonCallBreakpointResolver(
implementor_sp->GetValue(), "__callback__", sym_ctx);
if (PyErr_Occurred()) {
PyErr_Print();
PyErr_Clear();
}
}
return should_continue;
}
lldb::SearchDepth
ScriptInterpreterPythonImpl::ScriptedBreakpointResolverSearchDepth(
StructuredData::GenericSP implementor_sp) {
int depth_as_int = lldb::eSearchDepthModule;
if (implementor_sp) {
Locker py_lock(this,
Locker::AcquireLock | Locker::InitSession | Locker::NoSTDIN);
depth_as_int = LLDBSwigPythonCallBreakpointResolver(
implementor_sp->GetValue(), "__get_depth__", nullptr);
if (PyErr_Occurred()) {
PyErr_Print();
PyErr_Clear();
}
}
if (depth_as_int == lldb::eSearchDepthInvalid)
return lldb::eSearchDepthModule;
if (depth_as_int <= lldb::kLastSearchDepthKind)
return (lldb::SearchDepth)depth_as_int;
else
return lldb::eSearchDepthModule;
}
StructuredData::ObjectSP
ScriptInterpreterPythonImpl::LoadPluginModule(const FileSpec &file_spec,
lldb_private::Status &error) {
if (!FileSystem::Instance().Exists(file_spec)) {
error.SetErrorString("no such file");
return StructuredData::ObjectSP();
}
StructuredData::ObjectSP module_sp;
if (LoadScriptingModule(file_spec.GetPath().c_str(), true, true, error,
&module_sp))
return module_sp;
return StructuredData::ObjectSP();
}
StructuredData::DictionarySP ScriptInterpreterPythonImpl::GetDynamicSettings(
StructuredData::ObjectSP plugin_module_sp, Target *target,
const char *setting_name, lldb_private::Status &error) {
if (!plugin_module_sp || !target || !setting_name || !setting_name[0])
return StructuredData::DictionarySP();
StructuredData::Generic *generic = plugin_module_sp->GetAsGeneric();
if (!generic)
return StructuredData::DictionarySP();
PythonObject reply_pyobj;
Locker py_lock(this,
Locker::AcquireLock | Locker::InitSession | Locker::NoSTDIN);
TargetSP target_sp(target->shared_from_this());
reply_pyobj.Reset(PyRefType::Owned,
(PyObject *)LLDBSWIGPython_GetDynamicSetting(
generic->GetValue(), setting_name, target_sp));
PythonDictionary py_dict(PyRefType::Borrowed, reply_pyobj.get());
return py_dict.CreateStructuredDictionary();
}
StructuredData::ObjectSP
ScriptInterpreterPythonImpl::CreateSyntheticScriptedProvider(
const char *class_name, lldb::ValueObjectSP valobj) {
if (class_name == nullptr || class_name[0] == '\0')
return StructuredData::ObjectSP();
if (!valobj.get())
return StructuredData::ObjectSP();
ExecutionContext exe_ctx(valobj->GetExecutionContextRef());
Target *target = exe_ctx.GetTargetPtr();
if (!target)
return StructuredData::ObjectSP();
Debugger &debugger = target->GetDebugger();
ScriptInterpreter *script_interpreter = debugger.GetScriptInterpreter();
ScriptInterpreterPythonImpl *python_interpreter =
(ScriptInterpreterPythonImpl *)script_interpreter;
if (!script_interpreter)
return StructuredData::ObjectSP();
void *ret_val = nullptr;
{
Locker py_lock(this,
Locker::AcquireLock | Locker::InitSession | Locker::NoSTDIN);
ret_val = LLDBSwigPythonCreateSyntheticProvider(
class_name, python_interpreter->m_dictionary_name.c_str(), valobj);
}
return StructuredData::ObjectSP(new StructuredPythonObject(ret_val));
}
StructuredData::GenericSP
ScriptInterpreterPythonImpl::CreateScriptCommandObject(const char *class_name) {
DebuggerSP debugger_sp(m_debugger.shared_from_this());
if (class_name == nullptr || class_name[0] == '\0')
return StructuredData::GenericSP();
if (!debugger_sp.get())
return StructuredData::GenericSP();
void *ret_val;
{
Locker py_lock(this,
Locker::AcquireLock | Locker::InitSession | Locker::NoSTDIN);
ret_val = LLDBSwigPythonCreateCommandObject(
class_name, m_dictionary_name.c_str(), debugger_sp);
}
return StructuredData::GenericSP(new StructuredPythonObject(ret_val));
}
bool ScriptInterpreterPythonImpl::GenerateTypeScriptFunction(
const char *oneliner, std::string &output, const void *name_token) {
StringList input;
input.SplitIntoLines(oneliner, strlen(oneliner));
return GenerateTypeScriptFunction(input, output, name_token);
}
bool ScriptInterpreterPythonImpl::GenerateTypeSynthClass(
const char *oneliner, std::string &output, const void *name_token) {
StringList input;
input.SplitIntoLines(oneliner, strlen(oneliner));
return GenerateTypeSynthClass(input, output, name_token);
}
Status ScriptInterpreterPythonImpl::GenerateBreakpointCommandCallbackData(
StringList &user_input, std::string &output) {
static uint32_t num_created_functions = 0;
user_input.RemoveBlankLines();
StreamString sstr;
Status error;
if (user_input.GetSize() == 0) {
error.SetErrorString("No input data.");
return error;
}
std::string auto_generated_function_name(GenerateUniqueName(
"lldb_autogen_python_bp_callback_func_", num_created_functions));
sstr.Printf("def %s (frame, bp_loc, internal_dict):",
auto_generated_function_name.c_str());
error = GenerateFunction(sstr.GetData(), user_input);
if (!error.Success())
return error;
// Store the name of the auto-generated function to be called.
output.assign(auto_generated_function_name);
return error;
}
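// Editor's note (illustrative sketch, not part of the original source): for
// the first callback generated above, the Python source handed to
// GenerateFunction() would begin with the header
//
//   def lldb_autogen_python_bp_callback_func_0 (frame, bp_loc, internal_dict):
//
// with the user's command lines forming the function body (GenerateFunction()
// is assumed to handle the indentation); the generated function name is what
// gets stored into 'output' for later invocation.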
bool ScriptInterpreterPythonImpl::GenerateWatchpointCommandCallbackData(
StringList &user_input, std::string &output) {
static uint32_t num_created_functions = 0;
user_input.RemoveBlankLines();
StreamString sstr;
if (user_input.GetSize() == 0)
return false;
std::string auto_generated_function_name(GenerateUniqueName(
"lldb_autogen_python_wp_callback_func_", num_created_functions));
sstr.Printf("def %s (frame, wp, internal_dict):",
auto_generated_function_name.c_str());
if (!GenerateFunction(sstr.GetData(), user_input).Success())
return false;
// Store the name of the auto-generated function to be called.
output.assign(auto_generated_function_name);
return true;
}
bool ScriptInterpreterPythonImpl::GetScriptedSummary(
const char *python_function_name, lldb::ValueObjectSP valobj,
StructuredData::ObjectSP &callee_wrapper_sp,
const TypeSummaryOptions &options, std::string &retval) {
static Timer::Category func_cat(LLVM_PRETTY_FUNCTION);
Timer scoped_timer(func_cat, LLVM_PRETTY_FUNCTION);
if (!valobj.get()) {
retval.assign("<no object>");
return false;
}
void *old_callee = nullptr;
StructuredData::Generic *generic = nullptr;
if (callee_wrapper_sp) {
generic = callee_wrapper_sp->GetAsGeneric();
if (generic)
old_callee = generic->GetValue();
}
void *new_callee = old_callee;
bool ret_val;
if (python_function_name && *python_function_name) {
{
Locker py_lock(this, Locker::AcquireLock | Locker::InitSession |
Locker::NoSTDIN);
{
TypeSummaryOptionsSP options_sp(new TypeSummaryOptions(options));
static Timer::Category func_cat("LLDBSwigPythonCallTypeScript");
Timer scoped_timer(func_cat, "LLDBSwigPythonCallTypeScript");
ret_val = LLDBSwigPythonCallTypeScript(
python_function_name, GetSessionDictionary().get(), valobj,
&new_callee, options_sp, retval);
}
}
} else {
retval.assign("<no function name>");
return false;
}
if (new_callee && old_callee != new_callee)
callee_wrapper_sp = std::make_shared<StructuredPythonObject>(new_callee);
return ret_val;
}
void ScriptInterpreterPythonImpl::Clear() {
// Release any global variables that might have strong references to
// LLDB objects when clearing the python script interpreter.
Locker locker(this, Locker::AcquireLock, Locker::FreeAcquiredLock);
// This may be called as part of Py_Finalize. In that case the modules are
// destroyed in random order and we can't guarantee that we can access these.
if (Py_IsInitialized())
PyRun_SimpleString("lldb.debugger = None; lldb.target = None; lldb.process "
"= None; lldb.thread = None; lldb.frame = None");
}
bool ScriptInterpreterPythonImpl::BreakpointCallbackFunction(
void *baton, StoppointCallbackContext *context, user_id_t break_id,
user_id_t break_loc_id) {
CommandDataPython *bp_option_data = (CommandDataPython *)baton;
const char *python_function_name = bp_option_data->script_source.c_str();
if (!context)
return true;
ExecutionContext exe_ctx(context->exe_ctx_ref);
Target *target = exe_ctx.GetTargetPtr();
if (!target)
return true;
Debugger &debugger = target->GetDebugger();
ScriptInterpreter *script_interpreter = debugger.GetScriptInterpreter();
ScriptInterpreterPythonImpl *python_interpreter =
(ScriptInterpreterPythonImpl *)script_interpreter;
if (!script_interpreter)
return true;
if (python_function_name && python_function_name[0]) {
const StackFrameSP stop_frame_sp(exe_ctx.GetFrameSP());
BreakpointSP breakpoint_sp = target->GetBreakpointByID(break_id);
if (breakpoint_sp) {
const BreakpointLocationSP bp_loc_sp(
breakpoint_sp->FindLocationByID(break_loc_id));
if (stop_frame_sp && bp_loc_sp) {
bool ret_val = true;
{
Locker py_lock(python_interpreter, Locker::AcquireLock |
Locker::InitSession |
Locker::NoSTDIN);
ret_val = LLDBSwigPythonBreakpointCallbackFunction(
python_function_name,
python_interpreter->m_dictionary_name.c_str(), stop_frame_sp,
bp_loc_sp);
}
return ret_val;
}
}
}
// We currently always return true so we stop in case anything goes wrong
// when trying to call the script function
return true;
}
bool ScriptInterpreterPythonImpl::WatchpointCallbackFunction(
void *baton, StoppointCallbackContext *context, user_id_t watch_id) {
WatchpointOptions::CommandData *wp_option_data =
(WatchpointOptions::CommandData *)baton;
const char *python_function_name = wp_option_data->script_source.c_str();
if (!context)
return true;
ExecutionContext exe_ctx(context->exe_ctx_ref);
Target *target = exe_ctx.GetTargetPtr();
if (!target)
return true;
Debugger &debugger = target->GetDebugger();
ScriptInterpreter *script_interpreter = debugger.GetScriptInterpreter();
ScriptInterpreterPythonImpl *python_interpreter =
(ScriptInterpreterPythonImpl *)script_interpreter;
if (!script_interpreter)
return true;
if (python_function_name && python_function_name[0]) {
const StackFrameSP stop_frame_sp(exe_ctx.GetFrameSP());
WatchpointSP wp_sp = target->GetWatchpointList().FindByID(watch_id);
if (wp_sp) {
if (stop_frame_sp && wp_sp) {
bool ret_val = true;
{
Locker py_lock(python_interpreter, Locker::AcquireLock |
Locker::InitSession |
Locker::NoSTDIN);
ret_val = LLDBSwigPythonWatchpointCallbackFunction(
python_function_name,
python_interpreter->m_dictionary_name.c_str(), stop_frame_sp,
wp_sp);
}
return ret_val;
}
}
}
// We currently always return true so we stop in case anything goes wrong
// when trying to call the script function
return true;
}
size_t ScriptInterpreterPythonImpl::CalculateNumChildren(
const StructuredData::ObjectSP &implementor_sp, uint32_t max) {
if (!implementor_sp)
return 0;
StructuredData::Generic *generic = implementor_sp->GetAsGeneric();
if (!generic)
return 0;
void *implementor = generic->GetValue();
if (!implementor)
return 0;
size_t ret_val = 0;
{
Locker py_lock(this,
Locker::AcquireLock | Locker::InitSession | Locker::NoSTDIN);
ret_val = LLDBSwigPython_CalculateNumChildren(implementor, max);
}
return ret_val;
}
lldb::ValueObjectSP ScriptInterpreterPythonImpl::GetChildAtIndex(
const StructuredData::ObjectSP &implementor_sp, uint32_t idx) {
if (!implementor_sp)
return lldb::ValueObjectSP();
StructuredData::Generic *generic = implementor_sp->GetAsGeneric();
if (!generic)
return lldb::ValueObjectSP();
void *implementor = generic->GetValue();
if (!implementor)
return lldb::ValueObjectSP();
lldb::ValueObjectSP ret_val;
{
Locker py_lock(this,
Locker::AcquireLock | Locker::InitSession | Locker::NoSTDIN);
void *child_ptr = LLDBSwigPython_GetChildAtIndex(implementor, idx);
if (child_ptr != nullptr && child_ptr != Py_None) {
lldb::SBValue *sb_value_ptr =
(lldb::SBValue *)LLDBSWIGPython_CastPyObjectToSBValue(child_ptr);
if (sb_value_ptr == nullptr)
Py_XDECREF(child_ptr);
else
ret_val = LLDBSWIGPython_GetValueObjectSPFromSBValue(sb_value_ptr);
} else {
Py_XDECREF(child_ptr);
}
}
return ret_val;
}
int ScriptInterpreterPythonImpl::GetIndexOfChildWithName(
const StructuredData::ObjectSP &implementor_sp, const char *child_name) {
if (!implementor_sp)
return UINT32_MAX;
StructuredData::Generic *generic = implementor_sp->GetAsGeneric();
if (!generic)
return UINT32_MAX;
void *implementor = generic->GetValue();
if (!implementor)
return UINT32_MAX;
int ret_val = UINT32_MAX;
{
Locker py_lock(this,
Locker::AcquireLock | Locker::InitSession | Locker::NoSTDIN);
ret_val = LLDBSwigPython_GetIndexOfChildWithName(implementor, child_name);
}
return ret_val;
}
bool ScriptInterpreterPythonImpl::UpdateSynthProviderInstance(
const StructuredData::ObjectSP &implementor_sp) {
bool ret_val = false;
if (!implementor_sp)
return ret_val;
StructuredData::Generic *generic = implementor_sp->GetAsGeneric();
if (!generic)
return ret_val;
void *implementor = generic->GetValue();
if (!implementor)
return ret_val;
{
Locker py_lock(this,
Locker::AcquireLock | Locker::InitSession | Locker::NoSTDIN);
ret_val = LLDBSwigPython_UpdateSynthProviderInstance(implementor);
}
return ret_val;
}
bool ScriptInterpreterPythonImpl::MightHaveChildrenSynthProviderInstance(
const StructuredData::ObjectSP &implementor_sp) {
bool ret_val = false;
if (!implementor_sp)
return ret_val;
StructuredData::Generic *generic = implementor_sp->GetAsGeneric();
if (!generic)
return ret_val;
void *implementor = generic->GetValue();
if (!implementor)
return ret_val;
{
Locker py_lock(this,
Locker::AcquireLock | Locker::InitSession | Locker::NoSTDIN);
ret_val =
LLDBSwigPython_MightHaveChildrenSynthProviderInstance(implementor);
}
return ret_val;
}
lldb::ValueObjectSP ScriptInterpreterPythonImpl::GetSyntheticValue(
const StructuredData::ObjectSP &implementor_sp) {
lldb::ValueObjectSP ret_val(nullptr);
if (!implementor_sp)
return ret_val;
StructuredData::Generic *generic = implementor_sp->GetAsGeneric();
if (!generic)
return ret_val;
void *implementor = generic->GetValue();
if (!implementor)
return ret_val;
{
Locker py_lock(this,
Locker::AcquireLock | Locker::InitSession | Locker::NoSTDIN);
void *child_ptr = LLDBSwigPython_GetValueSynthProviderInstance(implementor);
if (child_ptr != nullptr && child_ptr != Py_None) {
lldb::SBValue *sb_value_ptr =
(lldb::SBValue *)LLDBSWIGPython_CastPyObjectToSBValue(child_ptr);
if (sb_value_ptr == nullptr)
Py_XDECREF(child_ptr);
else
ret_val = LLDBSWIGPython_GetValueObjectSPFromSBValue(sb_value_ptr);
} else {
Py_XDECREF(child_ptr);
}
}
return ret_val;
}
ConstString ScriptInterpreterPythonImpl::GetSyntheticTypeName(
const StructuredData::ObjectSP &implementor_sp) {
Locker py_lock(this,
Locker::AcquireLock | Locker::InitSession | Locker::NoSTDIN);
static char callee_name[] = "get_type_name";
ConstString ret_val;
bool got_string = false;
std::string buffer;
if (!implementor_sp)
return ret_val;
StructuredData::Generic *generic = implementor_sp->GetAsGeneric();
if (!generic)
return ret_val;
PythonObject implementor(PyRefType::Borrowed,
(PyObject *)generic->GetValue());
if (!implementor.IsAllocated())
return ret_val;
PythonObject pmeth(PyRefType::Owned,
PyObject_GetAttrString(implementor.get(), callee_name));
if (PyErr_Occurred())
PyErr_Clear();
if (!pmeth.IsAllocated())
return ret_val;
if (PyCallable_Check(pmeth.get()) == 0) {
if (PyErr_Occurred())
PyErr_Clear();
return ret_val;
}
if (PyErr_Occurred())
PyErr_Clear();
// right now we know this function exists and is callable.
PythonObject py_return(
PyRefType::Owned,
PyObject_CallMethod(implementor.get(), callee_name, nullptr));
// if it fails, print the error but otherwise go on
if (PyErr_Occurred()) {
PyErr_Print();
PyErr_Clear();
}
if (py_return.IsAllocated() && PythonString::Check(py_return.get())) {
PythonString py_string(PyRefType::Borrowed, py_return.get());
llvm::StringRef return_data(py_string.GetString());
if (!return_data.empty()) {
buffer.assign(return_data.data(), return_data.size());
got_string = true;
}
}
if (got_string)
ret_val.SetCStringWithLength(buffer.c_str(), buffer.size());
return ret_val;
}
bool ScriptInterpreterPythonImpl::RunScriptFormatKeyword(
const char *impl_function, Process *process, std::string &output,
Status &error) {
bool ret_val;
if (!process) {
error.SetErrorString("no process");
return false;
}
if (!impl_function || !impl_function[0]) {
error.SetErrorString("no function to execute");
return false;
}
{
ProcessSP process_sp(process->shared_from_this());
Locker py_lock(this,
Locker::AcquireLock | Locker::InitSession | Locker::NoSTDIN);
ret_val = LLDBSWIGPythonRunScriptKeywordProcess(
impl_function, m_dictionary_name.c_str(), process_sp, output);
if (!ret_val)
error.SetErrorString("python script evaluation failed");
}
return ret_val;
}
bool ScriptInterpreterPythonImpl::RunScriptFormatKeyword(
const char *impl_function, Thread *thread, std::string &output,
Status &error) {
bool ret_val;
if (!thread) {
error.SetErrorString("no thread");
return false;
}
if (!impl_function || !impl_function[0]) {
error.SetErrorString("no function to execute");
return false;
}
{
ThreadSP thread_sp(thread->shared_from_this());
Locker py_lock(this,
Locker::AcquireLock | Locker::InitSession | Locker::NoSTDIN);
ret_val = LLDBSWIGPythonRunScriptKeywordThread(
impl_function, m_dictionary_name.c_str(), thread_sp, output);
if (!ret_val)
error.SetErrorString("python script evaluation failed");
}
return ret_val;
}
bool ScriptInterpreterPythonImpl::RunScriptFormatKeyword(
const char *impl_function, Target *target, std::string &output,
Status &error) {
bool ret_val;
if (!target) {
error.SetErrorString("no thread");
return false;
}
if (!impl_function || !impl_function[0]) {
error.SetErrorString("no function to execute");
return false;
}
{
TargetSP target_sp(target->shared_from_this());
Locker py_lock(this,
Locker::AcquireLock | Locker::InitSession | Locker::NoSTDIN);
ret_val = LLDBSWIGPythonRunScriptKeywordTarget(
impl_function, m_dictionary_name.c_str(), target_sp, output);
if (!ret_val)
error.SetErrorString("python script evaluation failed");
}
return ret_val;
}
bool ScriptInterpreterPythonImpl::RunScriptFormatKeyword(
const char *impl_function, StackFrame *frame, std::string &output,
Status &error) {
bool ret_val;
if (!frame) {
error.SetErrorString("no frame");
return false;
}
if (!impl_function || !impl_function[0]) {
error.SetErrorString("no function to execute");
return false;
}
{
StackFrameSP frame_sp(frame->shared_from_this());
Locker py_lock(this,
Locker::AcquireLock | Locker::InitSession | Locker::NoSTDIN);
ret_val = LLDBSWIGPythonRunScriptKeywordFrame(
impl_function, m_dictionary_name.c_str(), frame_sp, output);
if (!ret_val)
error.SetErrorString("python script evaluation failed");
}
return ret_val;
}
bool ScriptInterpreterPythonImpl::RunScriptFormatKeyword(
const char *impl_function, ValueObject *value, std::string &output,
Status &error) {
bool ret_val;
if (!value) {
error.SetErrorString("no value");
return false;
}
if (!impl_function || !impl_function[0]) {
error.SetErrorString("no function to execute");
return false;
}
{
ValueObjectSP value_sp(value->GetSP());
Locker py_lock(this,
Locker::AcquireLock | Locker::InitSession | Locker::NoSTDIN);
ret_val = LLDBSWIGPythonRunScriptKeywordValue(
impl_function, m_dictionary_name.c_str(), value_sp, output);
if (!ret_val)
error.SetErrorString("python script evaluation failed");
}
return ret_val;
}
uint64_t replace_all(std::string &str, const std::string &oldStr,
const std::string &newStr) {
size_t pos = 0;
uint64_t matches = 0;
while ((pos = str.find(oldStr, pos)) != std::string::npos) {
matches++;
str.replace(pos, oldStr.length(), newStr);
pos += newStr.length();
}
return matches;
}
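// Editor's note (illustrative sketch, not part of the original source):
// replace_all() is used below in LoadScriptingModule() to escape a directory
// path before it is spliced into single-quoted Python source. For example:
//
//   std::string dir = "C:\\Users\\me's scripts"; // raw path
//   replace_all(dir, "\\", "\\\\");              // escape backslashes first
//   replace_all(dir, "'", "\\'");                // then escape single quotes
//   // dir is now safe to embed inside: sys.path.insert(1,'<dir>')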
bool ScriptInterpreterPythonImpl::LoadScriptingModule(
const char *pathname, bool can_reload, bool init_session,
lldb_private::Status &error, StructuredData::ObjectSP *module_sp) {
if (!pathname || !pathname[0]) {
error.SetErrorString("invalid pathname");
return false;
}
lldb::DebuggerSP debugger_sp = m_debugger.shared_from_this();
{
FileSpec target_file(pathname);
FileSystem::Instance().Resolve(target_file);
std::string basename(target_file.GetFilename().GetCString());
StreamString command_stream;
// Before executing Python code, lock the GIL.
Locker py_lock(this,
Locker::AcquireLock |
(init_session ? Locker::InitSession : 0) |
Locker::NoSTDIN,
Locker::FreeAcquiredLock |
(init_session ? Locker::TearDownSession : 0));
namespace fs = llvm::sys::fs;
fs::file_status st;
std::error_code ec = status(target_file.GetPath(), st);
if (ec || st.type() == fs::file_type::status_error ||
st.type() == fs::file_type::type_unknown ||
st.type() == fs::file_type::file_not_found) {
// Not a valid file of any sort; check whether it could still be a bare
// module name. A dot can't distinguish a path from a module name, but
// / and \ can, so if either separator is found, reject it.
if (strchr(pathname, '\\') || strchr(pathname, '/')) {
error.SetErrorString("invalid pathname");
return false;
}
basename = pathname; // not a filename, probably a package of some sort,
// let it go through
} else if (is_directory(st) || is_regular_file(st)) {
if (target_file.GetDirectory().IsEmpty()) {
error.SetErrorString("invalid directory name");
return false;
}
std::string directory = target_file.GetDirectory().GetCString();
replace_all(directory, "\\", "\\\\");
replace_all(directory, "'", "\\'");
// now make sure that Python has "directory" in the search path
StreamString command_stream;
command_stream.Printf("if not (sys.path.__contains__('%s')):\n "
"sys.path.insert(1,'%s');\n\n",
directory.c_str(), directory.c_str());
bool syspath_retval =
ExecuteMultipleLines(command_stream.GetData(),
ScriptInterpreter::ExecuteScriptOptions()
.SetEnableIO(false)
.SetSetLLDBGlobals(false))
.Success();
if (!syspath_retval) {
error.SetErrorString("Python sys.path handling failed");
return false;
}
// strip .py or .pyc extension
ConstString extension = target_file.GetFileNameExtension();
if (extension) {
if (llvm::StringRef(extension.GetCString()) == ".py")
basename.resize(basename.length() - 3);
else if (llvm::StringRef(extension.GetCString()) == ".pyc")
basename.resize(basename.length() - 4);
}
} else {
error.SetErrorString("no known way to import this module specification");
return false;
}
// check if the module is already imported
command_stream.Clear();
command_stream.Printf("sys.modules.__contains__('%s')", basename.c_str());
bool does_contain = false;
// this call will succeed if the module was ever imported in any Debugger
// in the lifetime of the process in which this LLDB framework is living
bool was_imported_globally =
(ExecuteOneLineWithReturn(
command_stream.GetData(),
ScriptInterpreterPythonImpl::eScriptReturnTypeBool, &does_contain,
ScriptInterpreter::ExecuteScriptOptions()
.SetEnableIO(false)
.SetSetLLDBGlobals(false)) &&
does_contain);
// this check will fail if the module was not imported in this Debugger
// before
bool was_imported_locally = GetSessionDictionary()
.GetItemForKey(PythonString(basename))
.IsAllocated();
bool was_imported = (was_imported_globally || was_imported_locally);
if (was_imported && !can_reload) {
error.SetErrorString("module already imported");
return false;
}
// now actually do the import
command_stream.Clear();
if (was_imported) {
if (!was_imported_locally)
command_stream.Printf("import %s ; reload_module(%s)", basename.c_str(),
basename.c_str());
else
command_stream.Printf("reload_module(%s)", basename.c_str());
} else
command_stream.Printf("import %s", basename.c_str());
error = ExecuteMultipleLines(command_stream.GetData(),
ScriptInterpreter::ExecuteScriptOptions()
.SetEnableIO(false)
.SetSetLLDBGlobals(false));
if (error.Fail())
return false;
// If we are here, everything worked; now call
// __lldb_init_module(debugger, dict).
if (!LLDBSwigPythonCallModuleInit(basename.c_str(),
m_dictionary_name.c_str(), debugger_sp)) {
error.SetErrorString("calling __lldb_init_module failed");
return false;
}
if (module_sp) {
// everything went just great, now set the module object
command_stream.Clear();
command_stream.Printf("%s", basename.c_str());
void *module_pyobj = nullptr;
if (ExecuteOneLineWithReturn(
command_stream.GetData(),
ScriptInterpreter::eScriptReturnTypeOpaqueObject,
&module_pyobj) &&
module_pyobj)
*module_sp = std::make_shared<StructuredPythonObject>(module_pyobj);
}
return true;
}
}
bool ScriptInterpreterPythonImpl::IsReservedWord(const char *word) {
if (!word || !word[0])
return false;
llvm::StringRef word_sr(word);
// filter out a few characters that would just confuse us and that are
// clearly not keyword material anyway
if (word_sr.find('"') != llvm::StringRef::npos ||
word_sr.find('\'') != llvm::StringRef::npos)
return false;
StreamString command_stream;
command_stream.Printf("keyword.iskeyword('%s')", word);
bool result;
ExecuteScriptOptions options;
options.SetEnableIO(false);
options.SetMaskoutErrors(true);
options.SetSetLLDBGlobals(false);
if (ExecuteOneLineWithReturn(command_stream.GetData(),
ScriptInterpreter::eScriptReturnTypeBool,
&result, options))
return result;
return false;
}
ScriptInterpreterPythonImpl::SynchronicityHandler::SynchronicityHandler(
lldb::DebuggerSP debugger_sp, ScriptedCommandSynchronicity synchro)
: m_debugger_sp(debugger_sp), m_synch_wanted(synchro),
m_old_asynch(debugger_sp->GetAsyncExecution()) {
if (m_synch_wanted == eScriptedCommandSynchronicitySynchronous)
m_debugger_sp->SetAsyncExecution(false);
else if (m_synch_wanted == eScriptedCommandSynchronicityAsynchronous)
m_debugger_sp->SetAsyncExecution(true);
}
ScriptInterpreterPythonImpl::SynchronicityHandler::~SynchronicityHandler() {
if (m_synch_wanted != eScriptedCommandSynchronicityCurrentValue)
m_debugger_sp->SetAsyncExecution(m_old_asynch);
}
bool ScriptInterpreterPythonImpl::RunScriptBasedCommand(
const char *impl_function, llvm::StringRef args,
ScriptedCommandSynchronicity synchronicity,
lldb_private::CommandReturnObject &cmd_retobj, Status &error,
const lldb_private::ExecutionContext &exe_ctx) {
if (!impl_function) {
error.SetErrorString("no function to execute");
return false;
}
lldb::DebuggerSP debugger_sp = m_debugger.shared_from_this();
lldb::ExecutionContextRefSP exe_ctx_ref_sp(new ExecutionContextRef(exe_ctx));
if (!debugger_sp.get()) {
error.SetErrorString("invalid Debugger pointer");
return false;
}
bool ret_val = false;
std::string err_msg;
{
Locker py_lock(this,
Locker::AcquireLock | Locker::InitSession |
(cmd_retobj.GetInteractive() ? 0 : Locker::NoSTDIN),
Locker::FreeLock | Locker::TearDownSession);
SynchronicityHandler synch_handler(debugger_sp, synchronicity);
std::string args_str = args.str();
ret_val = LLDBSwigPythonCallCommand(
impl_function, m_dictionary_name.c_str(), debugger_sp, args_str.c_str(),
cmd_retobj, exe_ctx_ref_sp);
}
if (!ret_val)
error.SetErrorString("unable to execute script function");
else
error.Clear();
return ret_val;
}
bool ScriptInterpreterPythonImpl::RunScriptBasedCommand(
StructuredData::GenericSP impl_obj_sp, llvm::StringRef args,
ScriptedCommandSynchronicity synchronicity,
lldb_private::CommandReturnObject &cmd_retobj, Status &error,
const lldb_private::ExecutionContext &exe_ctx) {
if (!impl_obj_sp || !impl_obj_sp->IsValid()) {
error.SetErrorString("no function to execute");
return false;
}
lldb::DebuggerSP debugger_sp = m_debugger.shared_from_this();
lldb::ExecutionContextRefSP exe_ctx_ref_sp(new ExecutionContextRef(exe_ctx));
if (!debugger_sp.get()) {
error.SetErrorString("invalid Debugger pointer");
return false;
}
bool ret_val = false;
std::string err_msg;
{
Locker py_lock(this,
Locker::AcquireLock | Locker::InitSession |
(cmd_retobj.GetInteractive() ? 0 : Locker::NoSTDIN),
Locker::FreeLock | Locker::TearDownSession);
SynchronicityHandler synch_handler(debugger_sp, synchronicity);
std::string args_str = args.str();
ret_val = LLDBSwigPythonCallCommandObject(impl_obj_sp->GetValue(),
debugger_sp, args_str.c_str(),
cmd_retobj, exe_ctx_ref_sp);
}
if (!ret_val)
error.SetErrorString("unable to execute script function");
else
error.Clear();
return ret_val;
}
// In Python, a special attribute __doc__ contains the docstring for an
// object (function, method, class, ...) if any is defined. Otherwise, the
// attribute's value is None.
bool ScriptInterpreterPythonImpl::GetDocumentationForItem(const char *item,
std::string &dest) {
dest.clear();
if (!item || !*item)
return false;
std::string command(item);
command += ".__doc__";
char *result_ptr = nullptr; // Python is going to point this to valid data if
// ExecuteOneLineWithReturn returns successfully
if (ExecuteOneLineWithReturn(
command.c_str(), ScriptInterpreter::eScriptReturnTypeCharStrOrNone,
&result_ptr,
ScriptInterpreter::ExecuteScriptOptions().SetEnableIO(false))) {
if (result_ptr)
dest.assign(result_ptr);
return true;
} else {
StreamString str_stream;
str_stream.Printf(
"Function %s was not found. Containing module might be missing.", item);
dest = str_stream.GetString();
return false;
}
}
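// Editor's sketch (illustrative only, not part of the original source): the
// same __doc__ lookup expressed directly against the CPython C API, for
// comparison with the string round-trip above. Assumes Python 3 and that the
// caller already holds the GIL; GetDocStringSketch is a hypothetical name.
static std::string GetDocStringSketch(PyObject *obj) {
  std::string result;
  PyObject *doc = PyObject_GetAttrString(obj, "__doc__"); // new reference
  if (doc && doc != Py_None) {
    if (const char *s = PyUnicode_AsUTF8(doc)) // buffer owned by 'doc'
      result = s;                              // copy before releasing 'doc'
  }
  Py_XDECREF(doc);
  if (PyErr_Occurred())
    PyErr_Clear();
  return result;
}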
bool ScriptInterpreterPythonImpl::GetShortHelpForCommandObject(
StructuredData::GenericSP cmd_obj_sp, std::string &dest) {
bool got_string = false;
dest.clear();
Locker py_lock(this, Locker::AcquireLock | Locker::NoSTDIN, Locker::FreeLock);
static char callee_name[] = "get_short_help";
if (!cmd_obj_sp)
return false;
PythonObject implementor(PyRefType::Borrowed,
(PyObject *)cmd_obj_sp->GetValue());
if (!implementor.IsAllocated())
return false;
PythonObject pmeth(PyRefType::Owned,
PyObject_GetAttrString(implementor.get(), callee_name));
if (PyErr_Occurred())
PyErr_Clear();
if (!pmeth.IsAllocated())
return false;
if (PyCallable_Check(pmeth.get()) == 0) {
if (PyErr_Occurred())
PyErr_Clear();
return false;
}
if (PyErr_Occurred())
PyErr_Clear();
// right now we know this function exists and is callable.
PythonObject py_return(
PyRefType::Owned,
PyObject_CallMethod(implementor.get(), callee_name, nullptr));
// if it fails, print the error but otherwise go on
if (PyErr_Occurred()) {
PyErr_Print();
PyErr_Clear();
}
if (py_return.IsAllocated() && PythonString::Check(py_return.get())) {
PythonString py_string(PyRefType::Borrowed, py_return.get());
llvm::StringRef return_data(py_string.GetString());
dest.assign(return_data.data(), return_data.size());
got_string = true;
}
return got_string;
}
uint32_t ScriptInterpreterPythonImpl::GetFlagsForCommandObject(
StructuredData::GenericSP cmd_obj_sp) {
uint32_t result = 0;
Locker py_lock(this, Locker::AcquireLock | Locker::NoSTDIN, Locker::FreeLock);
static char callee_name[] = "get_flags";
if (!cmd_obj_sp)
return result;
PythonObject implementor(PyRefType::Borrowed,
(PyObject *)cmd_obj_sp->GetValue());
if (!implementor.IsAllocated())
return result;
PythonObject pmeth(PyRefType::Owned,
PyObject_GetAttrString(implementor.get(), callee_name));
if (PyErr_Occurred())
PyErr_Clear();
if (!pmeth.IsAllocated())
return result;
if (PyCallable_Check(pmeth.get()) == 0) {
if (PyErr_Occurred())
PyErr_Clear();
return result;
}
if (PyErr_Occurred())
PyErr_Clear();
// right now we know this function exists and is callable.
PythonObject py_return(
PyRefType::Owned,
PyObject_CallMethod(implementor.get(), callee_name, nullptr));
// if it fails, print the error but otherwise go on
if (PyErr_Occurred()) {
PyErr_Print();
PyErr_Clear();
}
if (py_return.IsAllocated() && PythonInteger::Check(py_return.get())) {
PythonInteger int_value(PyRefType::Borrowed, py_return.get());
result = int_value.GetInteger();
}
return result;
}
bool ScriptInterpreterPythonImpl::GetLongHelpForCommandObject(
StructuredData::GenericSP cmd_obj_sp, std::string &dest) {
bool got_string = false;
dest.clear();
Locker py_lock(this, Locker::AcquireLock | Locker::NoSTDIN, Locker::FreeLock);
static char callee_name[] = "get_long_help";
if (!cmd_obj_sp)
return false;
PythonObject implementor(PyRefType::Borrowed,
(PyObject *)cmd_obj_sp->GetValue());
if (!implementor.IsAllocated())
return false;
PythonObject pmeth(PyRefType::Owned,
PyObject_GetAttrString(implementor.get(), callee_name));
if (PyErr_Occurred())
PyErr_Clear();
if (!pmeth.IsAllocated())
return false;
if (PyCallable_Check(pmeth.get()) == 0) {
if (PyErr_Occurred())
PyErr_Clear();
return false;
}
if (PyErr_Occurred())
PyErr_Clear();
// right now we know this function exists and is callable.
PythonObject py_return(
PyRefType::Owned,
PyObject_CallMethod(implementor.get(), callee_name, nullptr));
// if it fails, print the error but otherwise go on
if (PyErr_Occurred()) {
PyErr_Print();
PyErr_Clear();
}
if (py_return.IsAllocated() && PythonString::Check(py_return.get())) {
PythonString str(PyRefType::Borrowed, py_return.get());
llvm::StringRef str_data(str.GetString());
dest.assign(str_data.data(), str_data.size());
got_string = true;
}
return got_string;
}
std::unique_ptr<ScriptInterpreterLocker>
ScriptInterpreterPythonImpl::AcquireInterpreterLock() {
std::unique_ptr<ScriptInterpreterLocker> py_lock(new Locker(
this, Locker::AcquireLock | Locker::InitSession | Locker::NoSTDIN,
Locker::FreeLock | Locker::TearDownSession));
return py_lock;
}
void ScriptInterpreterPythonImpl::InitializePrivate() {
if (g_initialized)
return;
g_initialized = true;
static Timer::Category func_cat(LLVM_PRETTY_FUNCTION);
Timer scoped_timer(func_cat, LLVM_PRETTY_FUNCTION);
// RAII-based initialization which correctly handles multiple initialization,
// version-specific differences between Python 2 and Python 3, and saving and
// restoring various other pieces of state that can get mucked with during
// initialization.
InitializePythonRAII initialize_guard;
LLDBSwigPyInit();
// Update the path python uses to search for modules to include the current
// directory.
PyRun_SimpleString("import sys");
AddToSysPath(AddLocation::End, ".");
// Don't denormalize paths when calling file_spec.GetPath(). On platforms
// that use a backslash as the path separator, this will result in executing
// python code containing paths with unescaped backslashes. But Python also
// accepts forward slashes, so to make life easier we just use that.
if (FileSpec file_spec = GetPythonDir())
AddToSysPath(AddLocation::Beginning, file_spec.GetPath(false));
if (FileSpec file_spec = HostInfo::GetShlibDir())
AddToSysPath(AddLocation::Beginning, file_spec.GetPath(false));
PyRun_SimpleString("sys.dont_write_bytecode = 1; import "
"lldb.embedded_interpreter; from "
"lldb.embedded_interpreter import run_python_interpreter; "
"from lldb.embedded_interpreter import run_one_line");
}
void ScriptInterpreterPythonImpl::AddToSysPath(AddLocation location,
std::string path) {
std::string statement;
if (location == AddLocation::Beginning) {
statement.assign("sys.path.insert(0,\"");
statement.append(path);
statement.append("\")");
} else {
statement.assign("sys.path.append(\"");
statement.append(path);
statement.append("\")");
}
PyRun_SimpleString(statement.c_str());
}
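// Editor's note (illustrative only, not part of the original source): for a
// path of "/opt/lldb", AddToSysPath() above emits one of these statements:
//   AddLocation::Beginning -> sys.path.insert(0,"/opt/lldb")
//   AddLocation::End       -> sys.path.append("/opt/lldb")
// The path is not escaped here, which is why callers pass forward-slash
// paths (see the GetPath(false) calls in InitializePrivate()).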
// We are intentionally NOT calling Py_Finalize here (this would be the logical
// place to call it). Calling Py_Finalize here causes test suite runs to seg
// fault: The test suite runs in Python. It registers SBDebugger::Terminate to
// be called 'at_exit'. When the test suite Python harness finishes up, it
// calls Py_Finalize, which calls all the 'at_exit' registered functions.
// SBDebugger::Terminate calls Debugger::Terminate, which calls lldb::Terminate,
// which calls ScriptInterpreter::Terminate, which calls
// ScriptInterpreterPythonImpl::Terminate. So if we call Py_Finalize here, we
// end up with Py_Finalize being called from within Py_Finalize, which results
// in a seg fault. Since this function only gets called when lldb is shutting
// down and going away anyway, the fact that we don't actually call Py_Finalize
// should not cause any problems (everything should shut down/go away anyway
// when the process exits).
//
// void ScriptInterpreterPythonImpl::Terminate() { Py_Finalize (); }
#endif // LLDB_DISABLE_PYTHON
Index: stable/12/contrib/llvm-project/lldb/source/Symbol/Symtab.cpp
===================================================================
--- stable/12/contrib/llvm-project/lldb/source/Symbol/Symtab.cpp (revision 356464)
+++ stable/12/contrib/llvm-project/lldb/source/Symbol/Symtab.cpp (revision 356465)
@@ -1,1129 +1,1123 @@
//===-- Symtab.cpp ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include <map>
#include <set>
#include "Plugins/Language/ObjC/ObjCLanguage.h"
#include "lldb/Core/Module.h"
#include "lldb/Core/RichManglingContext.h"
#include "lldb/Core/STLUtils.h"
#include "lldb/Core/Section.h"
#include "lldb/Symbol/ObjectFile.h"
#include "lldb/Symbol/Symbol.h"
#include "lldb/Symbol/SymbolContext.h"
#include "lldb/Symbol/Symtab.h"
#include "lldb/Utility/RegularExpression.h"
#include "lldb/Utility/Stream.h"
#include "lldb/Utility/Timer.h"
#include "llvm/ADT/StringRef.h"
using namespace lldb;
using namespace lldb_private;
Symtab::Symtab(ObjectFile *objfile)
: m_objfile(objfile), m_symbols(), m_file_addr_to_index(),
m_name_to_index(), m_mutex(), m_file_addr_to_index_computed(false),
m_name_indexes_computed(false) {}
Symtab::~Symtab() {}
void Symtab::Reserve(size_t count) {
// Clients should grab the mutex from this symbol table and lock it manually
// when calling this function to avoid performance issues.
m_symbols.reserve(count);
}
Symbol *Symtab::Resize(size_t count) {
// Clients should grab the mutex from this symbol table and lock it manually
// when calling this function to avoid performance issues.
m_symbols.resize(count);
return m_symbols.empty() ? nullptr : &m_symbols[0];
}
uint32_t Symtab::AddSymbol(const Symbol &symbol) {
// Clients should grab the mutex from this symbol table and lock it manually
// when calling this function to avoid performance issues.
uint32_t symbol_idx = m_symbols.size();
m_name_to_index.Clear();
m_file_addr_to_index.Clear();
m_symbols.push_back(symbol);
m_file_addr_to_index_computed = false;
m_name_indexes_computed = false;
return symbol_idx;
}
size_t Symtab::GetNumSymbols() const {
std::lock_guard<std::recursive_mutex> guard(m_mutex);
return m_symbols.size();
}
void Symtab::SectionFileAddressesChanged() {
m_name_to_index.Clear();
m_file_addr_to_index_computed = false;
}
void Symtab::Dump(Stream *s, Target *target, SortOrder sort_order) {
std::lock_guard<std::recursive_mutex> guard(m_mutex);
// s->Printf("%.*p: ", (int)sizeof(void*) * 2, this);
s->Indent();
const FileSpec &file_spec = m_objfile->GetFileSpec();
const char *object_name = nullptr;
if (m_objfile->GetModule())
object_name = m_objfile->GetModule()->GetObjectName().GetCString();
if (file_spec)
s->Printf("Symtab, file = %s%s%s%s, num_symbols = %" PRIu64,
file_spec.GetPath().c_str(), object_name ? "(" : "",
object_name ? object_name : "", object_name ? ")" : "",
(uint64_t)m_symbols.size());
else
s->Printf("Symtab, num_symbols = %" PRIu64 "", (uint64_t)m_symbols.size());
if (!m_symbols.empty()) {
switch (sort_order) {
case eSortOrderNone: {
s->PutCString(":\n");
DumpSymbolHeader(s);
const_iterator begin = m_symbols.begin();
const_iterator end = m_symbols.end();
for (const_iterator pos = m_symbols.begin(); pos != end; ++pos) {
s->Indent();
pos->Dump(s, target, std::distance(begin, pos));
}
} break;
case eSortOrderByName: {
// Although we maintain a lookup by exact name map, the table isn't
// sorted by name. So we must make the ordered symbol list up ourselves.
s->PutCString(" (sorted by name):\n");
DumpSymbolHeader(s);
typedef std::multimap<const char *, const Symbol *,
CStringCompareFunctionObject>
CStringToSymbol;
CStringToSymbol name_map;
for (const_iterator pos = m_symbols.begin(), end = m_symbols.end();
pos != end; ++pos) {
const char *name = pos->GetName().AsCString();
if (name && name[0])
name_map.insert(std::make_pair(name, &(*pos)));
}
for (CStringToSymbol::const_iterator pos = name_map.begin(),
end = name_map.end();
pos != end; ++pos) {
s->Indent();
pos->second->Dump(s, target, pos->second - &m_symbols[0]);
}
} break;
case eSortOrderByAddress:
s->PutCString(" (sorted by address):\n");
DumpSymbolHeader(s);
if (!m_file_addr_to_index_computed)
InitAddressIndexes();
const size_t num_entries = m_file_addr_to_index.GetSize();
for (size_t i = 0; i < num_entries; ++i) {
s->Indent();
const uint32_t symbol_idx = m_file_addr_to_index.GetEntryRef(i).data;
m_symbols[symbol_idx].Dump(s, target, symbol_idx);
}
break;
}
} else {
s->PutCString("\n");
}
}
void Symtab::Dump(Stream *s, Target *target,
std::vector<uint32_t> &indexes) const {
std::lock_guard<std::recursive_mutex> guard(m_mutex);
const size_t num_symbols = GetNumSymbols();
// s->Printf("%.*p: ", (int)sizeof(void*) * 2, this);
s->Indent();
s->Printf("Symtab %" PRIu64 " symbol indexes (%" PRIu64 " symbols total):\n",
(uint64_t)indexes.size(), (uint64_t)m_symbols.size());
s->IndentMore();
if (!indexes.empty()) {
std::vector<uint32_t>::const_iterator pos;
std::vector<uint32_t>::const_iterator end = indexes.end();
DumpSymbolHeader(s);
for (pos = indexes.begin(); pos != end; ++pos) {
size_t idx = *pos;
if (idx < num_symbols) {
s->Indent();
m_symbols[idx].Dump(s, target, idx);
}
}
}
s->IndentLess();
}
void Symtab::DumpSymbolHeader(Stream *s) {
s->Indent(" Debug symbol\n");
s->Indent(" |Synthetic symbol\n");
s->Indent(" ||Externally Visible\n");
s->Indent(" |||\n");
s->Indent("Index UserID DSX Type File Address/Value Load "
"Address Size Flags Name\n");
s->Indent("------- ------ --- --------------- ------------------ "
"------------------ ------------------ ---------- "
"----------------------------------\n");
}
static int CompareSymbolID(const void *key, const void *p) {
const user_id_t match_uid = *(const user_id_t *)key;
const user_id_t symbol_uid = ((const Symbol *)p)->GetID();
if (match_uid < symbol_uid)
return -1;
if (match_uid > symbol_uid)
return 1;
return 0;
}
Symbol *Symtab::FindSymbolByID(lldb::user_id_t symbol_uid) const {
std::lock_guard<std::recursive_mutex> guard(m_mutex);
Symbol *symbol =
(Symbol *)::bsearch(&symbol_uid, &m_symbols[0], m_symbols.size(),
sizeof(m_symbols[0]), CompareSymbolID);
return symbol;
}
Symbol *Symtab::SymbolAtIndex(size_t idx) {
// Clients should grab the mutex from this symbol table and lock it manually
// when calling this function to avoid performance issues.
if (idx < m_symbols.size())
return &m_symbols[idx];
return nullptr;
}
const Symbol *Symtab::SymbolAtIndex(size_t idx) const {
// Clients should grab the mutex from this symbol table and lock it manually
// when calling this function to avoid performance issues.
if (idx < m_symbols.size())
return &m_symbols[idx];
return nullptr;
}
static bool lldb_skip_name(llvm::StringRef mangled,
Mangled::ManglingScheme scheme) {
switch (scheme) {
case Mangled::eManglingSchemeItanium: {
if (mangled.size() < 3 || !mangled.startswith("_Z"))
return true;
// Avoid the following types of symbols in the index.
switch (mangled[2]) {
case 'G': // guard variables
case 'T': // virtual tables, VTT structures, typeinfo structures + names
case 'Z': // named local entities (if we eventually handle
// eSymbolTypeData, we will want this back)
return true;
default:
break;
}
// Include this name in the index.
return false;
}
// No filters for this scheme yet. Include all names in indexing.
case Mangled::eManglingSchemeMSVC:
return false;
// Don't try and demangle things we can't categorize.
case Mangled::eManglingSchemeNone:
return true;
}
llvm_unreachable("unknown scheme!");
}
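// Editor's sketch (illustrative only, not part of the original source):
// sample verdicts from lldb_skip_name() above for the Itanium scheme;
// assumes <cassert> is available, and the function name is hypothetical.
static void lldb_skip_name_examples() {
  // Plain functions and constructors are indexed.
  assert(!lldb_skip_name("_ZN3FooC1Ev", Mangled::eManglingSchemeItanium));
  // Vtables ('T') and guard variables ('G') are filtered out.
  assert(lldb_skip_name("_ZTV3Foo", Mangled::eManglingSchemeItanium));
  assert(lldb_skip_name("_ZGVZ4mainE1x", Mangled::eManglingSchemeItanium));
  // Names that don't start with "_Z" can't be Itanium manglings.
  assert(lldb_skip_name("printf", Mangled::eManglingSchemeItanium));
}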
void Symtab::InitNameIndexes() {
// Protected function, no need to lock mutex...
if (!m_name_indexes_computed) {
m_name_indexes_computed = true;
static Timer::Category func_cat(LLVM_PRETTY_FUNCTION);
Timer scoped_timer(func_cat, "%s", LLVM_PRETTY_FUNCTION);
// Create the name index vector to be able to quickly search by name
const size_t num_symbols = m_symbols.size();
m_name_to_index.Reserve(num_symbols);
// The "const char *" in "class_contexts" and backlog::value_type::second
// must come from a ConstString::GetCString()
std::set<const char *> class_contexts;
std::vector<std::pair<NameToIndexMap::Entry, const char *>> backlog;
backlog.reserve(num_symbols / 2);
// Instantiation of the demangler is expensive, so better use a single one
// for all entries during batch processing.
RichManglingContext rmc;
for (uint32_t value = 0; value < num_symbols; ++value) {
Symbol *symbol = &m_symbols[value];
// Don't let trampolines get into the lookup-by-name map. If we ever need
// the trampoline symbols to be searchable by name, we can remove this and
// then possibly add a new bool to any of the Symtab functions that look
// up symbols by name to indicate if they want trampolines.
if (symbol->IsTrampoline())
continue;
// If the symbol's name string matched a Mangled::ManglingScheme, it is
// stored in the mangled field.
Mangled &mangled = symbol->GetMangled();
if (ConstString name = mangled.GetMangledName()) {
m_name_to_index.Append(name, value);
if (symbol->ContainsLinkerAnnotations()) {
// If the symbol has linker annotations, also add the version without
// the annotations.
ConstString stripped = ConstString(
m_objfile->StripLinkerSymbolAnnotations(name.GetStringRef()));
m_name_to_index.Append(stripped, value);
}
const SymbolType type = symbol->GetType();
if (type == eSymbolTypeCode || type == eSymbolTypeResolver) {
if (mangled.DemangleWithRichManglingInfo(rmc, lldb_skip_name))
RegisterMangledNameEntry(value, class_contexts, backlog, rmc);
}
}
// Symbol name strings that didn't match a Mangled::ManglingScheme, are
// stored in the demangled field.
if (ConstString name = mangled.GetDemangledName(symbol->GetLanguage())) {
m_name_to_index.Append(name, value);
if (symbol->ContainsLinkerAnnotations()) {
// If the symbol has linker annotations, also add the version without
// the annotations.
name = ConstString(
m_objfile->StripLinkerSymbolAnnotations(name.GetStringRef()));
m_name_to_index.Append(name, value);
}
// If the demangled name turns out to be an ObjC name, and is a category
// name, add the version without categories to the index too.
ObjCLanguage::MethodName objc_method(name.GetStringRef(), true);
if (objc_method.IsValid(true)) {
m_selector_to_index.Append(objc_method.GetSelector(), value);
if (ConstString objc_method_no_category =
objc_method.GetFullNameWithoutCategory(true))
m_name_to_index.Append(objc_method_no_category, value);
}
}
}
for (const auto &record : backlog) {
RegisterBacklogEntry(record.first, record.second, class_contexts);
}
m_name_to_index.Sort();
m_name_to_index.SizeToFit();
m_selector_to_index.Sort();
m_selector_to_index.SizeToFit();
m_basename_to_index.Sort();
m_basename_to_index.SizeToFit();
m_method_to_index.Sort();
m_method_to_index.SizeToFit();
}
}
void Symtab::RegisterMangledNameEntry(
uint32_t value, std::set<const char *> &class_contexts,
std::vector<std::pair<NameToIndexMap::Entry, const char *>> &backlog,
RichManglingContext &rmc) {
// Only register functions that have a base name.
rmc.ParseFunctionBaseName();
llvm::StringRef base_name = rmc.GetBufferRef();
if (base_name.empty())
return;
// The base name will be our entry's name.
NameToIndexMap::Entry entry(ConstString(base_name), value);
rmc.ParseFunctionDeclContextName();
llvm::StringRef decl_context = rmc.GetBufferRef();
// Register functions with no context.
if (decl_context.empty()) {
// This has to be a basename
m_basename_to_index.Append(entry);
// If there is no context (no namespaces or class scopes that come before
// the function name) then this also could be a fullname.
m_name_to_index.Append(entry);
return;
}
// Make sure we have a pool-string pointer and see if we already know the
// context name.
const char *decl_context_ccstr = ConstString(decl_context).GetCString();
auto it = class_contexts.find(decl_context_ccstr);
// Register constructors and destructors. They are methods and create
// declaration contexts.
if (rmc.IsCtorOrDtor()) {
m_method_to_index.Append(entry);
if (it == class_contexts.end())
class_contexts.insert(it, decl_context_ccstr);
return;
}
// Register regular methods with a known declaration context.
if (it != class_contexts.end()) {
m_method_to_index.Append(entry);
return;
}
// Regular methods in unknown declaration contexts are put to the backlog. We
// will revisit them once we processed all remaining symbols.
backlog.push_back(std::make_pair(entry, decl_context_ccstr));
}
void Symtab::RegisterBacklogEntry(
const NameToIndexMap::Entry &entry, const char *decl_context,
const std::set<const char *> &class_contexts) {
auto it = class_contexts.find(decl_context);
if (it != class_contexts.end()) {
m_method_to_index.Append(entry);
} else {
// If we got here, we have something that had a context (was inside a
// namespace or class) yet whose context we don't recognize, so register
// it in both the method and basename indexes.
m_method_to_index.Append(entry);
m_basename_to_index.Append(entry);
}
}
void Symtab::PreloadSymbols() {
std::lock_guard<std::recursive_mutex> guard(m_mutex);
InitNameIndexes();
}
void Symtab::AppendSymbolNamesToMap(const IndexCollection &indexes,
bool add_demangled, bool add_mangled,
NameToIndexMap &name_to_index_map) const {
if (add_demangled || add_mangled) {
static Timer::Category func_cat(LLVM_PRETTY_FUNCTION);
Timer scoped_timer(func_cat, "%s", LLVM_PRETTY_FUNCTION);
std::lock_guard<std::recursive_mutex> guard(m_mutex);
// Create the name index vector to be able to quickly search by name
const size_t num_indexes = indexes.size();
for (size_t i = 0; i < num_indexes; ++i) {
uint32_t value = indexes[i];
assert(value < m_symbols.size());
const Symbol *symbol = &m_symbols[value];
const Mangled &mangled = symbol->GetMangled();
if (add_demangled) {
if (ConstString name = mangled.GetDemangledName(symbol->GetLanguage()))
name_to_index_map.Append(name, value);
}
if (add_mangled) {
if (ConstString name = mangled.GetMangledName())
name_to_index_map.Append(name, value);
}
}
}
}
uint32_t Symtab::AppendSymbolIndexesWithType(SymbolType symbol_type,
std::vector<uint32_t> &indexes,
uint32_t start_idx,
uint32_t end_index) const {
std::lock_guard<std::recursive_mutex> guard(m_mutex);
uint32_t prev_size = indexes.size();
const uint32_t count = std::min<uint32_t>(m_symbols.size(), end_index);
for (uint32_t i = start_idx; i < count; ++i) {
if (symbol_type == eSymbolTypeAny || m_symbols[i].GetType() == symbol_type)
indexes.push_back(i);
}
return indexes.size() - prev_size;
}
uint32_t Symtab::AppendSymbolIndexesWithTypeAndFlagsValue(
SymbolType symbol_type, uint32_t flags_value,
std::vector<uint32_t> &indexes, uint32_t start_idx,
uint32_t end_index) const {
std::lock_guard<std::recursive_mutex> guard(m_mutex);
uint32_t prev_size = indexes.size();
const uint32_t count = std::min<uint32_t>(m_symbols.size(), end_index);
for (uint32_t i = start_idx; i < count; ++i) {
if ((symbol_type == eSymbolTypeAny ||
m_symbols[i].GetType() == symbol_type) &&
m_symbols[i].GetFlags() == flags_value)
indexes.push_back(i);
}
return indexes.size() - prev_size;
}
uint32_t Symtab::AppendSymbolIndexesWithType(SymbolType symbol_type,
Debug symbol_debug_type,
Visibility symbol_visibility,
std::vector<uint32_t> &indexes,
uint32_t start_idx,
uint32_t end_index) const {
std::lock_guard<std::recursive_mutex> guard(m_mutex);
uint32_t prev_size = indexes.size();
const uint32_t count = std::min<uint32_t>(m_symbols.size(), end_index);
for (uint32_t i = start_idx; i < count; ++i) {
if (symbol_type == eSymbolTypeAny ||
m_symbols[i].GetType() == symbol_type) {
if (CheckSymbolAtIndex(i, symbol_debug_type, symbol_visibility))
indexes.push_back(i);
}
}
return indexes.size() - prev_size;
}
uint32_t Symtab::GetIndexForSymbol(const Symbol *symbol) const {
if (!m_symbols.empty()) {
const Symbol *first_symbol = &m_symbols[0];
if (symbol >= first_symbol && symbol < first_symbol + m_symbols.size())
return symbol - first_symbol;
}
return UINT32_MAX;
}
struct SymbolSortInfo {
const bool sort_by_load_addr;
const Symbol *symbols;
};
namespace {
struct SymbolIndexComparator {
const std::vector<Symbol> &symbols;
std::vector<lldb::addr_t> &addr_cache;
// Getting from the symbol to the Address to the File Address involves some
// work. Since there are potentially many symbols here, and we're using this
// for sorting, we're going to be computing the address many times; cache
// that in addr_cache. The array passed in has to be the same size as the
// symbols array passed into the member variable symbols, and should be
// initialized with LLDB_INVALID_ADDRESS.
// NOTE: You have to make addr_cache externally and pass it in because
// std::stable_sort makes copies of the comparator it is initially passed,
// and you would end up spending huge amounts of time copying this array...
SymbolIndexComparator(const std::vector<Symbol> &s,
std::vector<lldb::addr_t> &a)
: symbols(s), addr_cache(a) {
assert(symbols.size() == addr_cache.size());
}
bool operator()(uint32_t index_a, uint32_t index_b) {
addr_t value_a = addr_cache[index_a];
if (value_a == LLDB_INVALID_ADDRESS) {
value_a = symbols[index_a].GetAddressRef().GetFileAddress();
addr_cache[index_a] = value_a;
}
addr_t value_b = addr_cache[index_b];
if (value_b == LLDB_INVALID_ADDRESS) {
value_b = symbols[index_b].GetAddressRef().GetFileAddress();
addr_cache[index_b] = value_b;
}
if (value_a == value_b) {
// If the values are equal, fall back to the original symbol user IDs as
// a stable tie-breaker.
return symbols[index_a].GetID() < symbols[index_b].GetID();
} else if (value_a < value_b)
return true;
return false;
}
};
}
void Symtab::SortSymbolIndexesByValue(std::vector<uint32_t> &indexes,
bool remove_duplicates) const {
std::lock_guard<std::recursive_mutex> guard(m_mutex);
static Timer::Category func_cat(LLVM_PRETTY_FUNCTION);
Timer scoped_timer(func_cat, LLVM_PRETTY_FUNCTION);
// No need to sort if we have zero or one items...
if (indexes.size() <= 1)
return;
// Sort the indexes in place using std::stable_sort.
// NOTE: The use of std::stable_sort instead of llvm::sort here is strictly
// for performance, not correctness. The indexes vector tends to be "close"
// to sorted, which the stable sort handles better.
std::vector<lldb::addr_t> addr_cache(m_symbols.size(), LLDB_INVALID_ADDRESS);
SymbolIndexComparator comparator(m_symbols, addr_cache);
std::stable_sort(indexes.begin(), indexes.end(), comparator);
// Remove any duplicates if requested
if (remove_duplicates) {
auto last = std::unique(indexes.begin(), indexes.end());
indexes.erase(last, indexes.end());
}
}
uint32_t Symtab::AppendSymbolIndexesWithName(ConstString symbol_name,
std::vector<uint32_t> &indexes) {
std::lock_guard<std::recursive_mutex> guard(m_mutex);
static Timer::Category func_cat(LLVM_PRETTY_FUNCTION);
Timer scoped_timer(func_cat, "%s", LLVM_PRETTY_FUNCTION);
if (symbol_name) {
if (!m_name_indexes_computed)
InitNameIndexes();
return m_name_to_index.GetValues(symbol_name, indexes);
}
return 0;
}
uint32_t Symtab::AppendSymbolIndexesWithName(ConstString symbol_name,
Debug symbol_debug_type,
Visibility symbol_visibility,
std::vector<uint32_t> &indexes) {
std::lock_guard<std::recursive_mutex> guard(m_mutex);
static Timer::Category func_cat(LLVM_PRETTY_FUNCTION);
Timer scoped_timer(func_cat, "%s", LLVM_PRETTY_FUNCTION);
if (symbol_name) {
const size_t old_size = indexes.size();
if (!m_name_indexes_computed)
InitNameIndexes();
std::vector<uint32_t> all_name_indexes;
const size_t name_match_count =
m_name_to_index.GetValues(symbol_name, all_name_indexes);
for (size_t i = 0; i < name_match_count; ++i) {
if (CheckSymbolAtIndex(all_name_indexes[i], symbol_debug_type,
symbol_visibility))
indexes.push_back(all_name_indexes[i]);
}
return indexes.size() - old_size;
}
return 0;
}
uint32_t
Symtab::AppendSymbolIndexesWithNameAndType(ConstString symbol_name,
SymbolType symbol_type,
std::vector<uint32_t> &indexes) {
std::lock_guard<std::recursive_mutex> guard(m_mutex);
if (AppendSymbolIndexesWithName(symbol_name, indexes) > 0) {
std::vector<uint32_t>::iterator pos = indexes.begin();
while (pos != indexes.end()) {
if (symbol_type == eSymbolTypeAny ||
m_symbols[*pos].GetType() == symbol_type)
++pos;
else
pos = indexes.erase(pos);
}
}
return indexes.size();
}
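// The filter loop above erases non-matching entries in place by reseating
// the iterator; an equivalent standard formulation is the erase-remove
// idiom. A minimal hedged sketch (illustrative names only):
#include <algorithm>
#include <cstdint>
#include <vector>

// Pred is any callable taking an index and returning true for entries to
// keep; everything else is compacted away in a single pass.
template <typename Pred>
void KeepMatchingIndexes(std::vector<uint32_t> &indexes, Pred matches) {
  indexes.erase(std::remove_if(indexes.begin(), indexes.end(),
                               [&](uint32_t i) { return !matches(i); }),
                indexes.end());
}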
uint32_t Symtab::AppendSymbolIndexesWithNameAndType(
ConstString symbol_name, SymbolType symbol_type,
Debug symbol_debug_type, Visibility symbol_visibility,
std::vector<uint32_t> &indexes) {
std::lock_guard<std::recursive_mutex> guard(m_mutex);
if (AppendSymbolIndexesWithName(symbol_name, symbol_debug_type,
symbol_visibility, indexes) > 0) {
std::vector<uint32_t>::iterator pos = indexes.begin();
while (pos != indexes.end()) {
if (symbol_type == eSymbolTypeAny ||
m_symbols[*pos].GetType() == symbol_type)
++pos;
else
pos = indexes.erase(pos);
}
}
return indexes.size();
}
uint32_t Symtab::AppendSymbolIndexesMatchingRegExAndType(
const RegularExpression &regexp, SymbolType symbol_type,
std::vector<uint32_t> &indexes) {
std::lock_guard<std::recursive_mutex> guard(m_mutex);
uint32_t prev_size = indexes.size();
uint32_t sym_end = m_symbols.size();
for (uint32_t i = 0; i < sym_end; i++) {
if (symbol_type == eSymbolTypeAny ||
m_symbols[i].GetType() == symbol_type) {
const char *name = m_symbols[i].GetName().AsCString();
if (name) {
if (regexp.Execute(name))
indexes.push_back(i);
}
}
}
return indexes.size() - prev_size;
}
uint32_t Symtab::AppendSymbolIndexesMatchingRegExAndType(
const RegularExpression &regexp, SymbolType symbol_type,
Debug symbol_debug_type, Visibility symbol_visibility,
std::vector<uint32_t> &indexes) {
std::lock_guard<std::recursive_mutex> guard(m_mutex);
uint32_t prev_size = indexes.size();
uint32_t sym_end = m_symbols.size();
for (uint32_t i = 0; i < sym_end; i++) {
if (symbol_type == eSymbolTypeAny ||
m_symbols[i].GetType() == symbol_type) {
if (!CheckSymbolAtIndex(i, symbol_debug_type, symbol_visibility))
continue;
const char *name = m_symbols[i].GetName().AsCString();
if (name) {
if (regexp.Execute(name))
indexes.push_back(i);
}
}
}
return indexes.size() - prev_size;
}
Symbol *Symtab::FindSymbolWithType(SymbolType symbol_type,
Debug symbol_debug_type,
Visibility symbol_visibility,
uint32_t &start_idx) {
std::lock_guard<std::recursive_mutex> guard(m_mutex);
const size_t count = m_symbols.size();
for (size_t idx = start_idx; idx < count; ++idx) {
if (symbol_type == eSymbolTypeAny ||
m_symbols[idx].GetType() == symbol_type) {
if (CheckSymbolAtIndex(idx, symbol_debug_type, symbol_visibility)) {
start_idx = idx;
return &m_symbols[idx];
}
}
}
return nullptr;
}
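// Hedged caller-side sketch: start_idx is an in/out parameter, so repeated
// calls resume the linear scan where the previous match was found. The
// caller must advance past each hit or the loop never terminates:
//
//   uint32_t idx = 0;
//   while (Symbol *s = symtab->FindSymbolWithType(
//              eSymbolTypeCode, Symtab::eDebugAny, Symtab::eVisibilityAny,
//              idx)) {
//     ...      // use s
//     ++idx;   // step past the match before searching again
//   }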
size_t
Symtab::FindAllSymbolsWithNameAndType(ConstString name,
SymbolType symbol_type,
std::vector<uint32_t> &symbol_indexes) {
std::lock_guard<std::recursive_mutex> guard(m_mutex);
static Timer::Category func_cat(LLVM_PRETTY_FUNCTION);
Timer scoped_timer(func_cat, "%s", LLVM_PRETTY_FUNCTION);
// Initialize all of the lookup-by-name indexes before searching below.
if (!m_name_indexes_computed)
InitNameIndexes();
if (name) {
// The string table did have a string that matched, but we need to check
// the symbols and match the symbol_type if any was given.
AppendSymbolIndexesWithNameAndType(name, symbol_type, symbol_indexes);
}
return symbol_indexes.size();
}
size_t Symtab::FindAllSymbolsWithNameAndType(
ConstString name, SymbolType symbol_type, Debug symbol_debug_type,
Visibility symbol_visibility, std::vector<uint32_t> &symbol_indexes) {
std::lock_guard<std::recursive_mutex> guard(m_mutex);
static Timer::Category func_cat(LLVM_PRETTY_FUNCTION);
Timer scoped_timer(func_cat, "%s", LLVM_PRETTY_FUNCTION);
// Initialize all of the lookup-by-name indexes before searching below.
if (!m_name_indexes_computed)
InitNameIndexes();
if (name) {
// The string table did have a string that matched, but we need to check
// the symbols and match the symbol_type if any was given.
AppendSymbolIndexesWithNameAndType(name, symbol_type, symbol_debug_type,
symbol_visibility, symbol_indexes);
}
return symbol_indexes.size();
}
size_t Symtab::FindAllSymbolsMatchingRexExAndType(
const RegularExpression &regex, SymbolType symbol_type,
Debug symbol_debug_type, Visibility symbol_visibility,
std::vector<uint32_t> &symbol_indexes) {
std::lock_guard<std::recursive_mutex> guard(m_mutex);
AppendSymbolIndexesMatchingRegExAndType(regex, symbol_type, symbol_debug_type,
symbol_visibility, symbol_indexes);
return symbol_indexes.size();
}
Symbol *Symtab::FindFirstSymbolWithNameAndType(ConstString name,
SymbolType symbol_type,
Debug symbol_debug_type,
Visibility symbol_visibility) {
std::lock_guard<std::recursive_mutex> guard(m_mutex);
static Timer::Category func_cat(LLVM_PRETTY_FUNCTION);
Timer scoped_timer(func_cat, "%s", LLVM_PRETTY_FUNCTION);
if (!m_name_indexes_computed)
InitNameIndexes();
if (name) {
std::vector<uint32_t> matching_indexes;
// The string table did have a string that matched, but we need to check
// the symbols and match the symbol_type if any was given.
if (AppendSymbolIndexesWithNameAndType(name, symbol_type, symbol_debug_type,
symbol_visibility,
matching_indexes)) {
std::vector<uint32_t>::const_iterator pos, end = matching_indexes.end();
for (pos = matching_indexes.begin(); pos != end; ++pos) {
Symbol *symbol = SymbolAtIndex(*pos);
if (symbol->Compare(name, symbol_type))
return symbol;
}
}
}
return nullptr;
}
typedef struct {
const Symtab *symtab;
const addr_t file_addr;
Symbol *match_symbol;
const uint32_t *match_index_ptr;
addr_t match_offset;
} SymbolSearchInfo;
// Add each section's file start address & size to the RangeVector,
// recursively adding any child sections.
static void AddSectionsToRangeMap(SectionList *sectlist,
RangeVector<addr_t, addr_t> &section_ranges) {
const int num_sections = sectlist->GetNumSections(0);
for (int i = 0; i < num_sections; i++) {
SectionSP sect_sp = sectlist->GetSectionAtIndex(i);
if (sect_sp) {
SectionList &child_sectlist = sect_sp->GetChildren();
// If this section has children, add the children to the RangeVector.
// Else add this section to the RangeVector.
if (child_sectlist.GetNumSections(0) > 0) {
AddSectionsToRangeMap(&child_sectlist, section_ranges);
} else {
size_t size = sect_sp->GetByteSize();
if (size > 0) {
addr_t base_addr = sect_sp->GetFileAddress();
RangeVector<addr_t, addr_t>::Entry entry;
entry.SetRangeBase(base_addr);
entry.SetByteSize(size);
section_ranges.Append(entry);
}
}
}
}
}
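// A minimal standalone sketch of the leaf-only recursion above: only
// sections without children contribute ranges, so a parent's extent is
// never double-counted alongside its children. The Sect type is a
// hypothetical stand-in for lldb_private::Section.
#include <cstdint>
#include <utility>
#include <vector>

namespace section_range_sketch {
struct Sect {
  uint64_t file_addr = 0;
  uint64_t byte_size = 0;
  std::vector<Sect> children;
};

inline void CollectLeafRanges(const Sect &s,
                              std::vector<std::pair<uint64_t, uint64_t>> &out) {
  if (!s.children.empty()) {
    for (const Sect &child : s.children)
      CollectLeafRanges(child, out); // recurse; parents add nothing
  } else if (s.byte_size > 0) {
    out.emplace_back(s.file_addr, s.byte_size); // leaf: record (base, size)
  }
}
} // namespace section_range_sketch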
void Symtab::InitAddressIndexes() {
// Protected function, no need to lock mutex...
if (!m_file_addr_to_index_computed && !m_symbols.empty()) {
m_file_addr_to_index_computed = true;
FileRangeToIndexMap::Entry entry;
const_iterator begin = m_symbols.begin();
const_iterator end = m_symbols.end();
for (const_iterator pos = m_symbols.begin(); pos != end; ++pos) {
if (pos->ValueIsAddress()) {
entry.SetRangeBase(pos->GetAddressRef().GetFileAddress());
entry.SetByteSize(pos->GetByteSize());
entry.data = std::distance(begin, pos);
m_file_addr_to_index.Append(entry);
}
}
const size_t num_entries = m_file_addr_to_index.GetSize();
if (num_entries > 0) {
m_file_addr_to_index.Sort();
// Create a RangeVector with the start & size of all the sections for
// this objfile. We'll need to check it for any FileRangeToIndexMap
// entries with an uninitialized size, and there could be a large number
// of those, so build this invariant information once here rather than
// reconstituting it for every entry.
SectionList *sectlist = m_objfile->GetSectionList();
RangeVector<addr_t, addr_t> section_ranges;
if (sectlist) {
AddSectionsToRangeMap(sectlist, section_ranges);
section_ranges.Sort();
}
// Iterate through the FileRangeToIndexMap and fill in the size for any
// entries that didn't already have a size from the Symbol (e.g. if we
// have a plain linker symbol with an address only, instead of debug info
// where we get an address and a size and a type, etc.)
for (size_t i = 0; i < num_entries; i++) {
FileRangeToIndexMap::Entry *entry =
m_file_addr_to_index.GetMutableEntryAtIndex(i);
- if (entry->GetByteSize() > 0)
- continue;
- addr_t curr_base_addr = entry->GetRangeBase();
- // Symbols with non-zero size will show after zero-sized symbols on the
- // same address. So do not set size of a non-last zero-sized symbol.
- if (i == num_entries - 1 ||
- m_file_addr_to_index.GetMutableEntryAtIndex(i + 1)
- ->GetRangeBase() != curr_base_addr) {
+ if (entry->GetByteSize() == 0) {
+ addr_t curr_base_addr = entry->GetRangeBase();
const RangeVector<addr_t, addr_t>::Entry *containing_section =
section_ranges.FindEntryThatContains(curr_base_addr);
// Use the end of the section as the default max size of the symbol
addr_t sym_size = 0;
if (containing_section) {
sym_size =
containing_section->GetByteSize() -
(entry->GetRangeBase() - containing_section->GetRangeBase());
}
for (size_t j = i; j < num_entries; j++) {
FileRangeToIndexMap::Entry *next_entry =
m_file_addr_to_index.GetMutableEntryAtIndex(j);
addr_t next_base_addr = next_entry->GetRangeBase();
if (next_base_addr > curr_base_addr) {
addr_t size_to_next_symbol = next_base_addr - curr_base_addr;
// Take the difference between this symbol and the next one as
// its size, if it is less than the size of the section.
if (sym_size == 0 || size_to_next_symbol < sym_size) {
sym_size = size_to_next_symbol;
}
break;
}
}
if (sym_size > 0) {
entry->SetByteSize(sym_size);
Symbol &symbol = m_symbols[entry->data];
symbol.SetByteSize(sym_size);
symbol.SetSizeIsSynthesized(true);
}
}
}
// Sort again in case the range size changes the ordering
m_file_addr_to_index.Sort();
}
}
}
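// A hedged standalone sketch of the size-synthesis step above: each
// zero-sized entry receives min(distance to the next higher address,
// space remaining in its containing section). It assumes entries are
// sorted by base address and each lies inside the given section; plain
// structs stand in for the LLDB types.
#include <algorithm>
#include <cstdint>
#include <vector>

namespace size_synthesis_sketch {
struct Entry {
  uint64_t base = 0;
  uint64_t size = 0;
};

inline void SynthesizeSizes(std::vector<Entry> &entries, uint64_t section_base,
                            uint64_t section_size) {
  for (size_t i = 0; i < entries.size(); ++i) {
    if (entries[i].size != 0)
      continue; // real size known; nothing to synthesize
    // Default cap: from this entry to the end of its section.
    uint64_t sym_size = section_base + section_size - entries[i].base;
    // Tighten to the distance to the next entry at a higher address.
    for (size_t j = i + 1; j < entries.size(); ++j) {
      if (entries[j].base > entries[i].base) {
        sym_size = std::min(sym_size, entries[j].base - entries[i].base);
        break;
      }
    }
    entries[i].size = sym_size;
  }
}
} // namespace size_synthesis_sketch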
void Symtab::CalculateSymbolSizes() {
std::lock_guard<std::recursive_mutex> guard(m_mutex);
// Size computation happens inside InitAddressIndexes.
InitAddressIndexes();
}
Symbol *Symtab::FindSymbolAtFileAddress(addr_t file_addr) {
std::lock_guard<std::recursive_mutex> guard(m_mutex);
if (!m_file_addr_to_index_computed)
InitAddressIndexes();
const FileRangeToIndexMap::Entry *entry =
m_file_addr_to_index.FindEntryStartsAt(file_addr);
if (entry) {
Symbol *symbol = SymbolAtIndex(entry->data);
if (symbol->GetFileAddress() == file_addr)
return symbol;
}
return nullptr;
}
Symbol *Symtab::FindSymbolContainingFileAddress(addr_t file_addr) {
std::lock_guard<std::recursive_mutex> guard(m_mutex);
if (!m_file_addr_to_index_computed)
InitAddressIndexes();
const FileRangeToIndexMap::Entry *entry =
m_file_addr_to_index.FindEntryThatContains(file_addr);
if (entry) {
Symbol *symbol = SymbolAtIndex(entry->data);
if (symbol->ContainsFileAddress(file_addr))
return symbol;
}
return nullptr;
}
void Symtab::ForEachSymbolContainingFileAddress(
addr_t file_addr, std::function<bool(Symbol *)> const &callback) {
std::lock_guard<std::recursive_mutex> guard(m_mutex);
if (!m_file_addr_to_index_computed)
InitAddressIndexes();
std::vector<uint32_t> all_addr_indexes;
// Get all symbols with file_addr
const size_t addr_match_count =
m_file_addr_to_index.FindEntryIndexesThatContain(file_addr,
all_addr_indexes);
for (size_t i = 0; i < addr_match_count; ++i) {
Symbol *symbol = SymbolAtIndex(all_addr_indexes[i]);
if (symbol->ContainsFileAddress(file_addr)) {
if (!callback(symbol))
break;
}
}
}
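// Hedged caller-side sketch: the callback returns false to stop the
// iteration early, e.g. once a suitable symbol has been found:
//
//   symtab->ForEachSymbolContainingFileAddress(addr, [](Symbol *sym) {
//     if (const char *name = sym->GetName().AsCString())
//       printf("%s\n", name);
//     return true; // keep visiting other symbols containing addr
//   });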
void Symtab::SymbolIndicesToSymbolContextList(
std::vector<uint32_t> &symbol_indexes, SymbolContextList &sc_list) {
// No need to protect this call using m_mutex; all other method calls are
// already thread safe.
const bool merge_symbol_into_function = true;
size_t num_indices = symbol_indexes.size();
if (num_indices > 0) {
SymbolContext sc;
sc.module_sp = m_objfile->GetModule();
for (size_t i = 0; i < num_indices; i++) {
sc.symbol = SymbolAtIndex(symbol_indexes[i]);
if (sc.symbol)
sc_list.AppendIfUnique(sc, merge_symbol_into_function);
}
}
}
size_t Symtab::FindFunctionSymbols(ConstString name,
uint32_t name_type_mask,
SymbolContextList &sc_list) {
size_t count = 0;
std::vector<uint32_t> symbol_indexes;
// eFunctionNameTypeAuto should be pre-resolved by a call to
// Module::LookupInfo::LookupInfo()
assert((name_type_mask & eFunctionNameTypeAuto) == 0);
if (name_type_mask & (eFunctionNameTypeBase | eFunctionNameTypeFull)) {
std::vector<uint32_t> temp_symbol_indexes;
FindAllSymbolsWithNameAndType(name, eSymbolTypeAny, temp_symbol_indexes);
unsigned temp_symbol_indexes_size = temp_symbol_indexes.size();
if (temp_symbol_indexes_size > 0) {
std::lock_guard<std::recursive_mutex> guard(m_mutex);
for (unsigned i = 0; i < temp_symbol_indexes_size; i++) {
SymbolContext sym_ctx;
sym_ctx.symbol = SymbolAtIndex(temp_symbol_indexes[i]);
if (sym_ctx.symbol) {
switch (sym_ctx.symbol->GetType()) {
case eSymbolTypeCode:
case eSymbolTypeResolver:
case eSymbolTypeReExported:
symbol_indexes.push_back(temp_symbol_indexes[i]);
break;
default:
break;
}
}
}
}
}
if (name_type_mask & eFunctionNameTypeBase) {
// From mangled names we can't tell what is a basename and what is a method
// name, so we just treat them the same
if (!m_name_indexes_computed)
InitNameIndexes();
if (!m_basename_to_index.IsEmpty()) {
const UniqueCStringMap<uint32_t>::Entry *match;
for (match = m_basename_to_index.FindFirstValueForName(name);
match != nullptr;
match = m_basename_to_index.FindNextValueForName(match)) {
symbol_indexes.push_back(match->value);
}
}
}
if (name_type_mask & eFunctionNameTypeMethod) {
if (!m_name_indexes_computed)
InitNameIndexes();
if (!m_method_to_index.IsEmpty()) {
const UniqueCStringMap<uint32_t>::Entry *match;
for (match = m_method_to_index.FindFirstValueForName(name);
match != nullptr;
match = m_method_to_index.FindNextValueForName(match)) {
symbol_indexes.push_back(match->value);
}
}
}
if (name_type_mask & eFunctionNameTypeSelector) {
if (!m_name_indexes_computed)
InitNameIndexes();
if (!m_selector_to_index.IsEmpty()) {
const UniqueCStringMap<uint32_t>::Entry *match;
for (match = m_selector_to_index.FindFirstValueForName(name);
match != nullptr;
match = m_selector_to_index.FindNextValueForName(match)) {
symbol_indexes.push_back(match->value);
}
}
}
if (!symbol_indexes.empty()) {
llvm::sort(symbol_indexes.begin(), symbol_indexes.end());
symbol_indexes.erase(
std::unique(symbol_indexes.begin(), symbol_indexes.end()),
symbol_indexes.end());
count = symbol_indexes.size();
SymbolIndicesToSymbolContextList(symbol_indexes, sc_list);
}
return count;
}
const Symbol *Symtab::GetParent(Symbol *child_symbol) const {
uint32_t child_idx = GetIndexForSymbol(child_symbol);
if (child_idx != UINT32_MAX && child_idx > 0) {
for (uint32_t idx = child_idx - 1; idx != UINT32_MAX; --idx) {
const Symbol *symbol = SymbolAtIndex(idx);
const uint32_t sibling_idx = symbol->GetSiblingIndex();
if (sibling_idx != UINT32_MAX && sibling_idx > child_idx)
return symbol;
}
}
return nullptr;
}
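// Hedged note on the scan above: it walks backwards from the child and
// returns the first earlier symbol whose sibling index points past the
// child, i.e. the nearest enclosing symbol. Caller-side sketch:
//
//   if (const Symbol *parent = symtab->GetParent(child))
//     ...; // child is nested within parent's range of siblings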
Index: stable/12/contrib/llvm-project/lldb
===================================================================
--- stable/12/contrib/llvm-project/lldb (revision 356464)
+++ stable/12/contrib/llvm-project/lldb (revision 356465)
Property changes on: stable/12/contrib/llvm-project/lldb
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,2 ##
Merged /vendor/llvm-project/release-9.x/lldb:r355951-355987
Merged /head/contrib/llvm-project/lldb:r356004
Index: stable/12/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineFunction.h
===================================================================
--- stable/12/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineFunction.h (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineFunction.h (revision 356465)
@@ -1,1053 +1,1057 @@
//===- llvm/CodeGen/MachineFunction.h ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Collect native machine code for a function. This class contains a list of
// MachineBasicBlock instances that make up the current compiled function.
//
// This class also contains pointers to various classes which hold
// target-specific information about the generated code.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHINEFUNCTION_H
#define LLVM_CODEGEN_MACHINEFUNCTION_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/iterator.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/ArrayRecycler.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Recycler.h"
#include <cassert>
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
namespace llvm {
class BasicBlock;
class BlockAddress;
class DataLayout;
class DebugLoc;
class DIExpression;
class DILocalVariable;
class DILocation;
class Function;
class GlobalValue;
class LLVMTargetMachine;
class MachineConstantPool;
class MachineFrameInfo;
class MachineFunction;
class MachineJumpTableInfo;
class MachineModuleInfo;
class MachineRegisterInfo;
class MCContext;
class MCInstrDesc;
class MCSymbol;
class Pass;
class PseudoSourceValueManager;
class raw_ostream;
class SlotIndexes;
class TargetRegisterClass;
class TargetSubtargetInfo;
struct WasmEHFuncInfo;
struct WinEHFuncInfo;
template <> struct ilist_alloc_traits<MachineBasicBlock> {
void deleteNode(MachineBasicBlock *MBB);
};
template <> struct ilist_callback_traits<MachineBasicBlock> {
void addNodeToList(MachineBasicBlock* N);
void removeNodeFromList(MachineBasicBlock* N);
template <class Iterator>
void transferNodesFromList(ilist_callback_traits &OldList, Iterator, Iterator) {
assert(this == &OldList && "never transfer MBBs between functions");
}
};
/// MachineFunctionInfo - This class can be derived from and used by targets to
/// hold private target-specific information for each MachineFunction. Objects
/// of this type are accessed/created with MF::getInfo and destroyed when the
/// MachineFunction is destroyed.
struct MachineFunctionInfo {
virtual ~MachineFunctionInfo();
/// Factory function: default behavior is to call new using the
/// supplied allocator.
///
/// This function can be overridden in a derived class.
template<typename Ty>
static Ty *create(BumpPtrAllocator &Allocator, MachineFunction &MF) {
return new (Allocator.Allocate<Ty>()) Ty(MF);
}
};
/// Properties which a MachineFunction may have at a given point in time.
/// Each of these has checking code in the MachineVerifier, and passes can
/// require that a property be set.
class MachineFunctionProperties {
// Possible TODO: Allow targets to extend this (perhaps by allowing the
// constructor to specify the size of the bit vector)
// Possible TODO: Allow requiring the negative (e.g. VRegsAllocated could be
// stated as the negative of "has vregs")
public:
// The properties are stated in "positive" form; i.e. a pass could require
// that the property hold, but not that it does not hold.
// Property descriptions:
// IsSSA: True when the machine function is in SSA form and virtual registers
// have a single def.
// NoPHIs: The machine function does not contain any PHI instruction.
// TracksLiveness: True when tracking register liveness accurately.
// While this property is set, register liveness information in basic block
// live-in lists and machine instruction operands (e.g. kill flags, implicit
// defs) is accurate. This means it can be used to change the code in ways
// that affect the values in registers, for example by the register
// scavenger.
// When this property is clear, liveness is no longer reliable.
// NoVRegs: The machine function does not use any virtual registers.
// Legalized: In GlobalISel: the MachineLegalizer ran and all pre-isel generic
// instructions have been legalized; i.e., all instructions are now one of:
// - generic and always legal (e.g., COPY)
// - target-specific
// - legal pre-isel generic instructions.
// RegBankSelected: In GlobalISel: the RegBankSelect pass ran and all generic
// virtual registers have been assigned to a register bank.
// Selected: In GlobalISel: the InstructionSelect pass ran and all pre-isel
// generic instructions have been eliminated; i.e., all instructions are now
// target-specific or non-pre-isel generic instructions (e.g., COPY).
// Since only pre-isel generic instructions can have generic virtual register
// operands, this also means that all generic virtual registers have been
// constrained to virtual registers (assigned to register classes) and that
// all sizes attached to them have been eliminated.
enum class Property : unsigned {
IsSSA,
NoPHIs,
TracksLiveness,
NoVRegs,
FailedISel,
Legalized,
RegBankSelected,
Selected,
LastProperty = Selected,
};
bool hasProperty(Property P) const {
return Properties[static_cast<unsigned>(P)];
}
MachineFunctionProperties &set(Property P) {
Properties.set(static_cast<unsigned>(P));
return *this;
}
MachineFunctionProperties &reset(Property P) {
Properties.reset(static_cast<unsigned>(P));
return *this;
}
/// Reset all the properties.
MachineFunctionProperties &reset() {
Properties.reset();
return *this;
}
MachineFunctionProperties &set(const MachineFunctionProperties &MFP) {
Properties |= MFP.Properties;
return *this;
}
MachineFunctionProperties &reset(const MachineFunctionProperties &MFP) {
Properties.reset(MFP.Properties);
return *this;
}
// Returns true if all properties set in V (i.e. required by a pass) are set
// in this.
bool verifyRequiredProperties(const MachineFunctionProperties &V) const {
return !V.Properties.test(Properties);
}
/// Print the MachineFunctionProperties in human-readable form.
void print(raw_ostream &OS) const;
private:
BitVector Properties =
BitVector(static_cast<unsigned>(Property::LastProperty)+1);
};
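/// A hedged usage sketch (not part of this header): how a pass-like client
/// might state required properties and verify them against a function's
/// current set. The function name is illustrative only.
inline bool exampleRequiresNoVRegs(const MachineFunctionProperties &Current) {
  MachineFunctionProperties Required;
  Required.set(MachineFunctionProperties::Property::NoVRegs);
  // True iff every property set in Required is also set in Current.
  return Current.verifyRequiredProperties(Required);
}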
struct SEHHandler {
/// Filter or finally function. Null indicates a catch-all.
const Function *FilterOrFinally;
/// Address of block to recover at. Null for a finally handler.
const BlockAddress *RecoverBA;
};
/// This structure is used to retain landing pad info for the current function.
struct LandingPadInfo {
MachineBasicBlock *LandingPadBlock; // Landing pad block.
SmallVector<MCSymbol *, 1> BeginLabels; // Labels prior to invoke.
SmallVector<MCSymbol *, 1> EndLabels; // Labels after invoke.
SmallVector<SEHHandler, 1> SEHHandlers; // SEH handlers active at this lpad.
MCSymbol *LandingPadLabel = nullptr; // Label at beginning of landing pad.
std::vector<int> TypeIds; // List of type ids (filters negative).
explicit LandingPadInfo(MachineBasicBlock *MBB)
: LandingPadBlock(MBB) {}
};
class MachineFunction {
const Function &F;
const LLVMTargetMachine &Target;
const TargetSubtargetInfo *STI;
MCContext &Ctx;
MachineModuleInfo &MMI;
// RegInfo - Information about each register in use in the function.
MachineRegisterInfo *RegInfo;
// Used to keep track of target-specific per-machine function information for
// the target implementation.
MachineFunctionInfo *MFInfo;
// Keep track of objects allocated on the stack.
MachineFrameInfo *FrameInfo;
// Keep track of constants which are spilled to memory
MachineConstantPool *ConstantPool;
// Keep track of jump tables for switch instructions
MachineJumpTableInfo *JumpTableInfo;
// Keeps track of Wasm exception handling related data. This will be null for
// functions that aren't using a wasm EH personality.
WasmEHFuncInfo *WasmEHInfo = nullptr;
// Keeps track of Windows exception handling related data. This will be null
// for functions that aren't using a funclet-based EH personality.
WinEHFuncInfo *WinEHInfo = nullptr;
// Function-level unique numbering for MachineBasicBlocks. When a
// MachineBasicBlock is inserted into a MachineFunction it is automatically
// numbered, and this vector keeps track of the mapping from IDs to MBBs.
std::vector<MachineBasicBlock*> MBBNumbering;
// Pool-allocate MachineFunction-lifetime and IR objects.
BumpPtrAllocator Allocator;
// Allocation management for instructions in function.
Recycler<MachineInstr> InstructionRecycler;
// Allocation management for operand arrays on instructions.
ArrayRecycler<MachineOperand> OperandRecycler;
// Allocation management for basic blocks in function.
Recycler<MachineBasicBlock> BasicBlockRecycler;
// List of machine basic blocks in function
using BasicBlockListType = ilist<MachineBasicBlock>;
BasicBlockListType BasicBlocks;
/// FunctionNumber - This provides a unique ID for each function emitted in
/// this translation unit.
///
unsigned FunctionNumber;
/// Alignment - The alignment of the function.
unsigned Alignment;
/// ExposesReturnsTwice - True if the function calls setjmp or related
/// functions with attribute "returns twice", but doesn't have
/// the attribute itself.
/// This is used to limit optimizations which cannot reason
/// about the control flow of such functions.
bool ExposesReturnsTwice = false;
/// True if the function includes any inline assembly.
bool HasInlineAsm = false;
/// True if any WinCFI instructions have been emitted in this function.
bool HasWinCFI = false;
/// Current high-level properties of the IR of the function (e.g. is in SSA
/// form or whether registers have been allocated)
MachineFunctionProperties Properties;
// Allocation management for pseudo source values.
std::unique_ptr<PseudoSourceValueManager> PSVManager;
/// List of moves done by a function's prolog. Used to construct frame maps
/// by debug and exception handling consumers.
std::vector<MCCFIInstruction> FrameInstructions;
/// \name Exception Handling
/// \{
/// List of LandingPadInfo describing the landing pad information.
std::vector<LandingPadInfo> LandingPads;
/// Map a landing pad's EH symbol to the call site indexes.
DenseMap<MCSymbol*, SmallVector<unsigned, 4>> LPadToCallSiteMap;
/// Map a landing pad to its index.
DenseMap<const MachineBasicBlock *, unsigned> WasmLPadToIndexMap;
/// Map of invoke call site index values to associated begin EH_LABEL.
DenseMap<MCSymbol*, unsigned> CallSiteMap;
/// CodeView label annotations.
std::vector<std::pair<MCSymbol *, MDNode *>> CodeViewAnnotations;
/// CodeView heapallocsites.
std::vector<std::tuple<MCSymbol*, MCSymbol*, DIType*>>
CodeViewHeapAllocSites;
bool CallsEHReturn = false;
bool CallsUnwindInit = false;
bool HasEHScopes = false;
bool HasEHFunclets = false;
/// List of C++ TypeInfo used.
std::vector<const GlobalValue *> TypeInfos;
/// List of typeids encoding filters used.
std::vector<unsigned> FilterIds;
/// List of the indices in FilterIds corresponding to filter terminators.
std::vector<unsigned> FilterEnds;
EHPersonality PersonalityTypeCache = EHPersonality::Unknown;
/// \}
/// Clear all the members of this MachineFunction, except the ones used
/// to initialize the MachineFunction again.
/// More specifically, this deallocates all the dynamically allocated
/// objects, and gets rid of all the XXXInfo data structures, but keeps
/// the references to Fn, Target, MMI, and FunctionNumber unchanged.
void clear();
/// Allocate and initialize the different members.
/// In particular, the XXXInfo data structure.
/// \pre Fn, Target, MMI, and FunctionNumber are properly set.
void init();
public:
struct VariableDbgInfo {
const DILocalVariable *Var;
const DIExpression *Expr;
// The Slot can be negative for fixed stack objects.
int Slot;
const DILocation *Loc;
VariableDbgInfo(const DILocalVariable *Var, const DIExpression *Expr,
int Slot, const DILocation *Loc)
: Var(Var), Expr(Expr), Slot(Slot), Loc(Loc) {}
};
class Delegate {
virtual void anchor();
public:
virtual ~Delegate() = default;
/// Callback after an insertion. This should not modify the MI directly.
virtual void MF_HandleInsertion(MachineInstr &MI) = 0;
/// Callback before a removal. This should not modify the MI directly.
virtual void MF_HandleRemoval(MachineInstr &MI) = 0;
};
/// Structure used to represent a pair of an argument number (after call
/// lowering) and the register used to transfer that argument.
/// For now we only support cases where an argument is transferred through
/// one register.
struct ArgRegPair {
unsigned Reg;
uint16_t ArgNo;
ArgRegPair(unsigned R, unsigned Arg) : Reg(R), ArgNo(Arg) {
assert(Arg < (1 << 16) && "Arg out of range");
}
};
/// Vector of call argument and its forwarding register.
using CallSiteInfo = SmallVector<ArgRegPair, 1>;
using CallSiteInfoImpl = SmallVectorImpl<ArgRegPair>;
private:
Delegate *TheDelegate = nullptr;
using CallSiteInfoMap = DenseMap<const MachineInstr *, CallSiteInfo>;
/// Map a call instruction to call site arguments forwarding info.
CallSiteInfoMap CallSitesInfo;
// Callbacks for insertion and removal.
void handleInsertion(MachineInstr &MI);
void handleRemoval(MachineInstr &MI);
friend struct ilist_traits<MachineInstr>;
public:
using VariableDbgInfoMapTy = SmallVector<VariableDbgInfo, 4>;
VariableDbgInfoMapTy VariableDbgInfos;
MachineFunction(const Function &F, const LLVMTargetMachine &Target,
const TargetSubtargetInfo &STI, unsigned FunctionNum,
MachineModuleInfo &MMI);
MachineFunction(const MachineFunction &) = delete;
MachineFunction &operator=(const MachineFunction &) = delete;
~MachineFunction();
/// Reset the instance as if it was just created.
void reset() {
clear();
init();
}
/// Reset the currently registered delegate - otherwise assert.
void resetDelegate(Delegate *delegate) {
assert(TheDelegate == delegate &&
"Only the current delegate can perform reset!");
TheDelegate = nullptr;
}
/// Set the delegate. resetDelegate must be called before attempting
/// to set.
void setDelegate(Delegate *delegate) {
assert(delegate && !TheDelegate &&
"Attempted to set delegate to null, or to change it without "
"first resetting it!");
TheDelegate = delegate;
}
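// A hedged sketch of a Delegate implementation; the class below is
// hypothetical, only the interface comes from the Delegate definition
// above:
//
//   struct CountingDelegate final : MachineFunction::Delegate {
//     unsigned Inserted = 0, Removed = 0;
//     void MF_HandleInsertion(MachineInstr &) override { ++Inserted; }
//     void MF_HandleRemoval(MachineInstr &) override { ++Removed; }
//   };
//
//   CountingDelegate D;
//   MF.setDelegate(&D);   // asserts if another delegate is registered
//   ...                   // mutate MF; the callbacks fire per instruction
//   MF.resetDelegate(&D); // only the current delegate may reset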
MachineModuleInfo &getMMI() const { return MMI; }
MCContext &getContext() const { return Ctx; }
PseudoSourceValueManager &getPSVManager() const { return *PSVManager; }
/// Return the DataLayout attached to the Module associated to this MF.
const DataLayout &getDataLayout() const;
/// Return the LLVM function that this machine code represents
const Function &getFunction() const { return F; }
/// getName - Return the name of the corresponding LLVM function.
StringRef getName() const;
/// getFunctionNumber - Return a unique ID for the current function.
unsigned getFunctionNumber() const { return FunctionNumber; }
/// getTarget - Return the target machine this machine code is compiled with
const LLVMTargetMachine &getTarget() const { return Target; }
/// getSubtarget - Return the subtarget for which this machine code is being
/// compiled.
const TargetSubtargetInfo &getSubtarget() const { return *STI; }
/// getSubtarget - This method returns a pointer to the specified type of
/// TargetSubtargetInfo. In debug builds, it verifies that the object being
/// returned is of the correct type.
template<typename STC> const STC &getSubtarget() const {
return *static_cast<const STC *>(STI);
}
/// getRegInfo - Return information about the registers currently in use.
MachineRegisterInfo &getRegInfo() { return *RegInfo; }
const MachineRegisterInfo &getRegInfo() const { return *RegInfo; }
/// getFrameInfo - Return the frame info object for the current function.
/// This object contains information about objects allocated on the stack
/// frame of the current function in an abstract way.
MachineFrameInfo &getFrameInfo() { return *FrameInfo; }
const MachineFrameInfo &getFrameInfo() const { return *FrameInfo; }
/// getJumpTableInfo - Return the jump table info object for the current
/// function. This object contains information about jump tables in the
/// current function. If the current function has no jump tables, this will
/// return null.
const MachineJumpTableInfo *getJumpTableInfo() const { return JumpTableInfo; }
MachineJumpTableInfo *getJumpTableInfo() { return JumpTableInfo; }
/// getOrCreateJumpTableInfo - Get the JumpTableInfo for this function; if it
/// does not already exist, allocate one.
MachineJumpTableInfo *getOrCreateJumpTableInfo(unsigned JTEntryKind);
/// getConstantPool - Return the constant pool object for the current
/// function.
MachineConstantPool *getConstantPool() { return ConstantPool; }
const MachineConstantPool *getConstantPool() const { return ConstantPool; }
/// getWasmEHFuncInfo - Return information about how the current function uses
/// Wasm exception handling. Returns null for functions that don't use wasm
/// exception handling.
const WasmEHFuncInfo *getWasmEHFuncInfo() const { return WasmEHInfo; }
WasmEHFuncInfo *getWasmEHFuncInfo() { return WasmEHInfo; }
/// getWinEHFuncInfo - Return information about how the current function uses
/// Windows exception handling. Returns null for functions that don't use
/// funclets for exception handling.
const WinEHFuncInfo *getWinEHFuncInfo() const { return WinEHInfo; }
WinEHFuncInfo *getWinEHFuncInfo() { return WinEHInfo; }
/// getAlignment - Return the alignment (log2, not bytes) of the function.
unsigned getAlignment() const { return Alignment; }
/// setAlignment - Set the alignment (log2, not bytes) of the function.
void setAlignment(unsigned A) { Alignment = A; }
/// ensureAlignment - Make sure the function is at least 1 << A bytes aligned.
void ensureAlignment(unsigned A) {
if (Alignment < A) Alignment = A;
}
/// exposesReturnsTwice - Returns true if the function calls setjmp or
/// any other similar functions with attribute "returns twice" without
/// having the attribute itself.
bool exposesReturnsTwice() const {
return ExposesReturnsTwice;
}
/// setExposesReturnsTwice - Set a flag that indicates if there's a call to
/// a "returns twice" function.
void setExposesReturnsTwice(bool B) {
ExposesReturnsTwice = B;
}
/// Returns true if the function contains any inline assembly.
bool hasInlineAsm() const {
return HasInlineAsm;
}
/// Set a flag that indicates that the function contains inline assembly.
void setHasInlineAsm(bool B) {
HasInlineAsm = B;
}
bool hasWinCFI() const {
return HasWinCFI;
}
void setHasWinCFI(bool v) { HasWinCFI = v; }
/// Get the function properties
const MachineFunctionProperties &getProperties() const { return Properties; }
MachineFunctionProperties &getProperties() { return Properties; }
/// getInfo - Keep track of various per-function pieces of information for
/// backends that would like to do so.
///
template<typename Ty>
Ty *getInfo() {
if (!MFInfo)
MFInfo = Ty::template create<Ty>(Allocator, *this);
return static_cast<Ty*>(MFInfo);
}
template<typename Ty>
const Ty *getInfo() const {
return const_cast<MachineFunction*>(this)->getInfo<Ty>();
}
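/// A hedged sketch of the getInfo protocol above; the subclass name and
/// field are hypothetical, but the constructor shape matches what the
/// default MachineFunctionInfo::create factory expects:
///
///   struct MyTargetFuncInfo final : MachineFunctionInfo {
///     unsigned VarArgsFrameIndex = 0;
///     explicit MyTargetFuncInfo(MachineFunction &) {}
///   };
///   auto *FI = MF.getInfo<MyTargetFuncInfo>(); // created lazily from the
///   FI->VarArgsFrameIndex = 1;                 // function's allocator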
/// getBlockNumbered - MachineBasicBlocks are automatically numbered when they
/// are inserted into the machine function. The block number for a machine
/// basic block can be found by using the MBB::getNumber method; this method
/// provides the inverse mapping.
MachineBasicBlock *getBlockNumbered(unsigned N) const {
assert(N < MBBNumbering.size() && "Illegal block number");
assert(MBBNumbering[N] && "Block was removed from the machine function!");
return MBBNumbering[N];
}
/// Should we be emitting segmented stack stuff for the function?
bool shouldSplitStack() const;
/// getNumBlockIDs - Return the number of MBB ID's allocated.
unsigned getNumBlockIDs() const { return (unsigned)MBBNumbering.size(); }
/// RenumberBlocks - This discards all of the MachineBasicBlock numbers and
/// recomputes them. This guarantees that the MBB numbers are sequential,
/// dense, and match the ordering of the blocks within the function. If a
/// specific MachineBasicBlock is specified, only that block and those after
/// it are renumbered.
void RenumberBlocks(MachineBasicBlock *MBBFrom = nullptr);
/// print - Print out the MachineFunction in a format suitable for debugging
/// to the specified stream.
void print(raw_ostream &OS, const SlotIndexes* = nullptr) const;
/// viewCFG - This function is meant for use from the debugger. You can just
/// say 'call F->viewCFG()' and a ghostview window should pop up from the
/// program, displaying the CFG of the current function with the code for each
/// basic block inside. This depends on there being a 'dot' and 'gv' program
/// in your path.
void viewCFG() const;
/// viewCFGOnly - This function is meant for use from the debugger. It works
/// just like viewCFG, but it does not include the contents of basic blocks
/// into the nodes, just the label. If you are only interested in the CFG
/// this can make the graph smaller.
///
void viewCFGOnly() const;
/// dump - Print the current MachineFunction to cerr, useful for debugger use.
void dump() const;
/// Run the current MachineFunction through the machine code verifier, useful
/// for debugger use.
/// \returns true if no problems were found.
bool verify(Pass *p = nullptr, const char *Banner = nullptr,
bool AbortOnError = true) const;
// Provide accessors for the MachineBasicBlock list...
using iterator = BasicBlockListType::iterator;
using const_iterator = BasicBlockListType::const_iterator;
using const_reverse_iterator = BasicBlockListType::const_reverse_iterator;
using reverse_iterator = BasicBlockListType::reverse_iterator;
/// Support for MachineBasicBlock::getNextNode().
static BasicBlockListType MachineFunction::*
getSublistAccess(MachineBasicBlock *) {
return &MachineFunction::BasicBlocks;
}
/// addLiveIn - Add the specified physical register as a live-in value and
/// create a corresponding virtual register for it.
unsigned addLiveIn(unsigned PReg, const TargetRegisterClass *RC);
//===--------------------------------------------------------------------===//
// BasicBlock accessor functions.
//
iterator begin() { return BasicBlocks.begin(); }
const_iterator begin() const { return BasicBlocks.begin(); }
iterator end () { return BasicBlocks.end(); }
const_iterator end () const { return BasicBlocks.end(); }
reverse_iterator rbegin() { return BasicBlocks.rbegin(); }
const_reverse_iterator rbegin() const { return BasicBlocks.rbegin(); }
reverse_iterator rend () { return BasicBlocks.rend(); }
const_reverse_iterator rend () const { return BasicBlocks.rend(); }
unsigned size() const { return (unsigned)BasicBlocks.size();}
bool empty() const { return BasicBlocks.empty(); }
const MachineBasicBlock &front() const { return BasicBlocks.front(); }
MachineBasicBlock &front() { return BasicBlocks.front(); }
const MachineBasicBlock & back() const { return BasicBlocks.back(); }
MachineBasicBlock & back() { return BasicBlocks.back(); }
void push_back (MachineBasicBlock *MBB) { BasicBlocks.push_back (MBB); }
void push_front(MachineBasicBlock *MBB) { BasicBlocks.push_front(MBB); }
void insert(iterator MBBI, MachineBasicBlock *MBB) {
BasicBlocks.insert(MBBI, MBB);
}
void splice(iterator InsertPt, iterator MBBI) {
BasicBlocks.splice(InsertPt, BasicBlocks, MBBI);
}
void splice(iterator InsertPt, MachineBasicBlock *MBB) {
BasicBlocks.splice(InsertPt, BasicBlocks, MBB);
}
void splice(iterator InsertPt, iterator MBBI, iterator MBBE) {
BasicBlocks.splice(InsertPt, BasicBlocks, MBBI, MBBE);
}
void remove(iterator MBBI) { BasicBlocks.remove(MBBI); }
void remove(MachineBasicBlock *MBBI) { BasicBlocks.remove(MBBI); }
void erase(iterator MBBI) { BasicBlocks.erase(MBBI); }
void erase(MachineBasicBlock *MBBI) { BasicBlocks.erase(MBBI); }
template <typename Comp>
void sort(Comp comp) {
BasicBlocks.sort(comp);
}
/// Return the number of \p MachineInstrs in this \p MachineFunction.
unsigned getInstructionCount() const {
unsigned InstrCount = 0;
for (const MachineBasicBlock &MBB : BasicBlocks)
InstrCount += MBB.size();
return InstrCount;
}
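/// Caller-side usage sketch (hedged): a MachineFunction is an intrusive
/// list of blocks, each an intrusive list of instructions, so a manual
/// count is the same nested range-for used above:
///
///   unsigned N = 0;
///   for (const MachineBasicBlock &MBB : MF)
///     N += MBB.size();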
//===--------------------------------------------------------------------===//
// Internal functions used to automatically number MachineBasicBlocks
/// Adds the MBB to the internal numbering. Returns the unique number
/// assigned to the MBB.
unsigned addToMBBNumbering(MachineBasicBlock *MBB) {
MBBNumbering.push_back(MBB);
return (unsigned)MBBNumbering.size()-1;
}
/// removeFromMBBNumbering - Remove the specific machine basic block from our
/// tracker, this is only really to be used by the MachineBasicBlock
/// implementation.
void removeFromMBBNumbering(unsigned N) {
assert(N < MBBNumbering.size() && "Illegal basic block #");
MBBNumbering[N] = nullptr;
}
/// CreateMachineInstr - Allocate a new MachineInstr. Use this instead
/// of `new MachineInstr'.
MachineInstr *CreateMachineInstr(const MCInstrDesc &MCID, const DebugLoc &DL,
bool NoImp = false);
/// Create a new MachineInstr which is a copy of \p Orig, identical in all
/// ways except the instruction has no parent, prev, or next. Bundling flags
/// are reset.
///
/// Note: Clones a single instruction, not whole instruction bundles.
/// Does not perform target specific adjustments; consider using
/// TargetInstrInfo::duplicate() instead.
MachineInstr *CloneMachineInstr(const MachineInstr *Orig);
/// Clones the instruction or the whole instruction bundle \p Orig and
/// inserts it into \p MBB before \p InsertBefore.
///
/// Note: Does not perform target specific adjustments; consider using
/// TargetInstrInfo::duplicate() instead.
MachineInstr &CloneMachineInstrBundle(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig);
/// DeleteMachineInstr - Delete the given MachineInstr.
void DeleteMachineInstr(MachineInstr *MI);
/// CreateMachineBasicBlock - Allocate a new MachineBasicBlock. Use this
/// instead of `new MachineBasicBlock'.
MachineBasicBlock *CreateMachineBasicBlock(const BasicBlock *bb = nullptr);
/// DeleteMachineBasicBlock - Delete the given MachineBasicBlock.
void DeleteMachineBasicBlock(MachineBasicBlock *MBB);
/// getMachineMemOperand - Allocate a new MachineMemOperand.
/// MachineMemOperands are owned by the MachineFunction and need not be
/// explicitly deallocated.
MachineMemOperand *getMachineMemOperand(
MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s,
unsigned base_alignment, const AAMDNodes &AAInfo = AAMDNodes(),
const MDNode *Ranges = nullptr,
SyncScope::ID SSID = SyncScope::System,
AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
/// getMachineMemOperand - Allocate a new MachineMemOperand by copying
/// an existing one, adjusting by an offset and using the given size.
/// MachineMemOperands are owned by the MachineFunction and need not be
/// explicitly deallocated.
MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
int64_t Offset, uint64_t Size);
/// Allocate a new MachineMemOperand by copying an existing one,
/// replacing only AliasAnalysis information. MachineMemOperands are owned
/// by the MachineFunction and need not be explicitly deallocated.
MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
const AAMDNodes &AAInfo);
/// Allocate a new MachineMemOperand by copying an existing one,
/// replacing the flags. MachineMemOperands are owned
/// by the MachineFunction and need not be explicitly deallocated.
MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
MachineMemOperand::Flags Flags);
using OperandCapacity = ArrayRecycler<MachineOperand>::Capacity;
/// Allocate an array of MachineOperands. This is only intended for use by
/// internal MachineInstr functions.
MachineOperand *allocateOperandArray(OperandCapacity Cap) {
return OperandRecycler.allocate(Cap, Allocator);
}
/// Deallocate an array of MachineOperands and recycle the memory. This is
/// only intended for use by internal MachineInstr functions.
/// Cap must be the same capacity that was used to allocate the array.
void deallocateOperandArray(OperandCapacity Cap, MachineOperand *Array) {
OperandRecycler.deallocate(Cap, Array);
}
/// Allocate and initialize a register mask with @p NumRegister bits.
uint32_t *allocateRegMask();
/// Allocate and construct an extra info structure for a `MachineInstr`.
///
/// This is allocated on the function's allocator and so lives the life of
/// the function.
MachineInstr::ExtraInfo *
createMIExtraInfo(ArrayRef<MachineMemOperand *> MMOs,
MCSymbol *PreInstrSymbol = nullptr,
MCSymbol *PostInstrSymbol = nullptr);
+ MachineInstr::ExtraInfo *createMIExtraInfoWithMarker(
+ ArrayRef<MachineMemOperand *> MMOs, MCSymbol *PreInstrSymbol,
+ MCSymbol *PostInstrSymbol, MDNode *HeapAllocMarker);
+
/// Allocate a string and populate it with the given external symbol name.
const char *createExternalSymbolName(StringRef Name);
//===--------------------------------------------------------------------===//
// Label Manipulation.
/// getJTISymbol - Return the MCSymbol for the specified non-empty jump table.
/// If isLinkerPrivate is specified, an 'l' label is returned, otherwise a
/// normal 'L' label is returned.
MCSymbol *getJTISymbol(unsigned JTI, MCContext &Ctx,
bool isLinkerPrivate = false) const;
/// getPICBaseSymbol - Return a function-local symbol to represent the PIC
/// base.
MCSymbol *getPICBaseSymbol() const;
/// Returns a reference to a list of cfi instructions in the function's
/// prologue. Used to construct frame maps for debug and exception handling
/// consumers.
const std::vector<MCCFIInstruction> &getFrameInstructions() const {
return FrameInstructions;
}
LLVM_NODISCARD unsigned addFrameInst(const MCCFIInstruction &Inst);
/// \name Exception Handling
/// \{
bool callsEHReturn() const { return CallsEHReturn; }
void setCallsEHReturn(bool b) { CallsEHReturn = b; }
bool callsUnwindInit() const { return CallsUnwindInit; }
void setCallsUnwindInit(bool b) { CallsUnwindInit = b; }
bool hasEHScopes() const { return HasEHScopes; }
void setHasEHScopes(bool V) { HasEHScopes = V; }
bool hasEHFunclets() const { return HasEHFunclets; }
void setHasEHFunclets(bool V) { HasEHFunclets = V; }
/// Find or create a LandingPadInfo for the specified MachineBasicBlock.
LandingPadInfo &getOrCreateLandingPadInfo(MachineBasicBlock *LandingPad);
/// Remap landing pad labels and remove any deleted landing pads.
void tidyLandingPads(DenseMap<MCSymbol *, uintptr_t> *LPMap = nullptr,
bool TidyIfNoBeginLabels = true);
/// Return a reference to the landing pad info for the current function.
const std::vector<LandingPadInfo> &getLandingPads() const {
return LandingPads;
}
/// Provide the begin and end labels of an invoke style call and associate it
/// with a try landing pad block.
void addInvoke(MachineBasicBlock *LandingPad,
MCSymbol *BeginLabel, MCSymbol *EndLabel);
/// Add a new landing pad, and extract the exception handling information from
/// the landingpad instruction. Returns the label ID for the landing pad
/// entry.
MCSymbol *addLandingPad(MachineBasicBlock *LandingPad);
/// Provide the catch typeinfo for a landing pad.
void addCatchTypeInfo(MachineBasicBlock *LandingPad,
ArrayRef<const GlobalValue *> TyInfo);
/// Provide the filter typeinfo for a landing pad.
void addFilterTypeInfo(MachineBasicBlock *LandingPad,
ArrayRef<const GlobalValue *> TyInfo);
/// Add a cleanup action for a landing pad.
void addCleanup(MachineBasicBlock *LandingPad);
void addSEHCatchHandler(MachineBasicBlock *LandingPad, const Function *Filter,
const BlockAddress *RecoverBA);
void addSEHCleanupHandler(MachineBasicBlock *LandingPad,
const Function *Cleanup);
/// Return the type id for the specified typeinfo. This is function wide.
unsigned getTypeIDFor(const GlobalValue *TI);
/// Return the id of the filter encoded by TyIds. This is function wide.
int getFilterIDFor(std::vector<unsigned> &TyIds);
/// Map the landing pad's EH symbol to the call site indexes.
void setCallSiteLandingPad(MCSymbol *Sym, ArrayRef<unsigned> Sites);
/// Map the landing pad to its index. Used for Wasm exception handling.
void setWasmLandingPadIndex(const MachineBasicBlock *LPad, unsigned Index) {
WasmLPadToIndexMap[LPad] = Index;
}
/// Returns true if the landing pad has an associated index in wasm EH.
bool hasWasmLandingPadIndex(const MachineBasicBlock *LPad) const {
return WasmLPadToIndexMap.count(LPad);
}
/// Get the index in wasm EH for a given landing pad.
unsigned getWasmLandingPadIndex(const MachineBasicBlock *LPad) const {
assert(hasWasmLandingPadIndex(LPad));
return WasmLPadToIndexMap.lookup(LPad);
}
/// Get the call site indexes for a landing pad EH symbol.
SmallVectorImpl<unsigned> &getCallSiteLandingPad(MCSymbol *Sym) {
assert(hasCallSiteLandingPad(Sym) &&
"missing call site number for landing pad!");
return LPadToCallSiteMap[Sym];
}
/// Return true if the landing pad EH symbol has an associated call site.
bool hasCallSiteLandingPad(MCSymbol *Sym) {
return !LPadToCallSiteMap[Sym].empty();
}
/// Map the begin label for a call site.
void setCallSiteBeginLabel(MCSymbol *BeginLabel, unsigned Site) {
CallSiteMap[BeginLabel] = Site;
}
/// Get the call site number for a begin label.
unsigned getCallSiteBeginLabel(MCSymbol *BeginLabel) const {
assert(hasCallSiteBeginLabel(BeginLabel) &&
"Missing call site number for EH_LABEL!");
return CallSiteMap.lookup(BeginLabel);
}
/// Return true if the begin label has a call site number associated with it.
bool hasCallSiteBeginLabel(MCSymbol *BeginLabel) const {
return CallSiteMap.count(BeginLabel);
}
/// Record annotations associated with a particular label.
void addCodeViewAnnotation(MCSymbol *Label, MDNode *MD) {
CodeViewAnnotations.push_back({Label, MD});
}
ArrayRef<std::pair<MCSymbol *, MDNode *>> getCodeViewAnnotations() const {
return CodeViewAnnotations;
}
/// Record heapallocsites
void addCodeViewHeapAllocSite(MachineInstr *I, MDNode *MD);
ArrayRef<std::tuple<MCSymbol*, MCSymbol*, DIType*>>
getCodeViewHeapAllocSites() const {
return CodeViewHeapAllocSites;
}
/// Return a reference to the C++ typeinfo for the current function.
const std::vector<const GlobalValue *> &getTypeInfos() const {
return TypeInfos;
}
/// Return a reference to the typeids encoding filters used in the current
/// function.
const std::vector<unsigned> &getFilterIds() const {
return FilterIds;
}
/// \}
/// Collect information used to emit debugging information of a variable.
void setVariableDbgInfo(const DILocalVariable *Var, const DIExpression *Expr,
int Slot, const DILocation *Loc) {
VariableDbgInfos.emplace_back(Var, Expr, Slot, Loc);
}
VariableDbgInfoMapTy &getVariableDbgInfo() { return VariableDbgInfos; }
const VariableDbgInfoMapTy &getVariableDbgInfo() const {
return VariableDbgInfos;
}
void addCallArgsForwardingRegs(const MachineInstr *CallI,
CallSiteInfoImpl &&CallInfo) {
assert(CallI->isCall());
CallSitesInfo[CallI] = std::move(CallInfo);
}
const CallSiteInfoMap &getCallSitesInfo() const {
return CallSitesInfo;
}
/// Update call sites info by deleting entry for \p Old call instruction.
/// If \p New is present then transfer \p Old call info to it. This function
/// should be called before removing call instruction or before replacing
/// call instruction with new one.
void updateCallSiteInfo(const MachineInstr *Old,
const MachineInstr *New = nullptr);
};
//===--------------------------------------------------------------------===//
// GraphTraits specializations for function basic block graphs (CFGs)
//===--------------------------------------------------------------------===//
// Provide specializations of GraphTraits to be able to treat a
// machine function as a graph of machine basic blocks... these are
// the same as the machine basic block iterators, except that the root
// node is implicitly the first node of the function.
//
template <> struct GraphTraits<MachineFunction*> :
public GraphTraits<MachineBasicBlock*> {
static NodeRef getEntryNode(MachineFunction *F) { return &F->front(); }
// nodes_iterator/begin/end - Allow iteration over all nodes in the graph
using nodes_iterator = pointer_iterator<MachineFunction::iterator>;
static nodes_iterator nodes_begin(MachineFunction *F) {
return nodes_iterator(F->begin());
}
static nodes_iterator nodes_end(MachineFunction *F) {
return nodes_iterator(F->end());
}
static unsigned size (MachineFunction *F) { return F->size(); }
};
template <> struct GraphTraits<const MachineFunction*> :
public GraphTraits<const MachineBasicBlock*> {
static NodeRef getEntryNode(const MachineFunction *F) { return &F->front(); }
// nodes_iterator/begin/end - Allow iteration over all nodes in the graph
using nodes_iterator = pointer_iterator<MachineFunction::const_iterator>;
static nodes_iterator nodes_begin(const MachineFunction *F) {
return nodes_iterator(F->begin());
}
static nodes_iterator nodes_end (const MachineFunction *F) {
return nodes_iterator(F->end());
}
static unsigned size (const MachineFunction *F) {
return F->size();
}
};
// Provide specializations of GraphTraits to be able to treat a function as a
// graph of basic blocks... and to walk it in inverse order. Inverse order for
// a function is considered to be when traversing the predecessor edges of a BB
// instead of the successor edges.
//
template <> struct GraphTraits<Inverse<MachineFunction*>> :
public GraphTraits<Inverse<MachineBasicBlock*>> {
static NodeRef getEntryNode(Inverse<MachineFunction *> G) {
return &G.Graph->front();
}
};
template <> struct GraphTraits<Inverse<const MachineFunction*>> :
public GraphTraits<Inverse<const MachineBasicBlock*>> {
static NodeRef getEntryNode(Inverse<const MachineFunction *> G) {
return &G.Graph->front();
}
};
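// Hedged usage sketch: with these specializations in place, LLVM's generic
// graph utilities apply directly to machine CFGs, e.g. the depth_first
// range from llvm/ADT/DepthFirstIterator.h:
//
//   for (MachineBasicBlock *MBB : depth_first(&MF))
//     ...; // visits blocks depth-first from the entry block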
} // end namespace llvm
#endif // LLVM_CODEGEN_MACHINEFUNCTION_H
Index: stable/12/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineInstr.h
===================================================================
--- stable/12/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineInstr.h (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineInstr.h (revision 356465)
@@ -1,1671 +1,1709 @@
//===- llvm/CodeGen/MachineInstr.h - MachineInstr class ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the MachineInstr class, which is the
// basic representation for all target dependent machine instructions used by
// the back end.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHINEINSTR_H
#define LLVM_CODEGEN_MACHINEINSTR_H
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/PointerSumType.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/ArrayRecycler.h"
#include "llvm/Support/TrailingObjects.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>
namespace llvm {
template <typename T> class ArrayRef;
class DIExpression;
class DILocalVariable;
class MachineBasicBlock;
class MachineFunction;
class MachineMemOperand;
class MachineRegisterInfo;
class ModuleSlotTracker;
class raw_ostream;
template <typename T> class SmallVectorImpl;
class SmallBitVector;
class StringRef;
class TargetInstrInfo;
class TargetRegisterClass;
class TargetRegisterInfo;
//===----------------------------------------------------------------------===//
/// Representation of each machine instruction.
///
/// This class isn't a POD type, but it must have a trivial destructor. When a
/// MachineFunction is deleted, all the contained MachineInstrs are deallocated
/// without having their destructor called.
///
class MachineInstr
: public ilist_node_with_parent<MachineInstr, MachineBasicBlock,
ilist_sentinel_tracking<true>> {
public:
using mmo_iterator = ArrayRef<MachineMemOperand *>::iterator;
/// Flags to specify different kinds of comments to output in
/// assembly code. These flags carry semantic information not
/// otherwise easily derivable from the IR text.
///
enum CommentFlag {
ReloadReuse = 0x1, // higher bits are reserved for target dep comments.
NoSchedComment = 0x2,
TAsmComments = 0x4 // Target Asm comments should start from this value.
};
enum MIFlag {
NoFlags = 0,
FrameSetup = 1 << 0, // Instruction is used as a part of
// function frame setup code.
FrameDestroy = 1 << 1, // Instruction is used as a part of
// function frame destruction code.
BundledPred = 1 << 2, // Instruction has bundled predecessors.
BundledSucc = 1 << 3, // Instruction has bundled successors.
FmNoNans = 1 << 4, // Instruction does not support Fast
// math nan values.
FmNoInfs = 1 << 5, // Instruction does not support Fast
// math infinity values.
FmNsz = 1 << 6, // Instruction is not required to retain
// signed zero values.
FmArcp = 1 << 7, // Instruction supports Fast math
// reciprocal approximations.
FmContract = 1 << 8, // Instruction supports Fast math
// contraction operations like fma.
FmAfn = 1 << 9, // Instruction may map to Fast math
// intrinsic approximation.
FmReassoc = 1 << 10, // Instruction supports Fast math
// reassociation of operand order.
NoUWrap = 1 << 11, // Instruction supports binary operator
// no unsigned wrap.
NoSWrap = 1 << 12, // Instruction supports binary operator
// no signed wrap.
IsExact = 1 << 13, // Instruction supports division that is
// known to be exact.
FPExcept = 1 << 14, // Instruction may raise floating-point
// exceptions.
};
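// A minimal usage sketch (MI is assumed to be an in-scope MachineInstr that
// the target's frame lowering is emitting):
//
//   MI.setFlag(MachineInstr::FrameSetup);
//   if (MI.getFlag(MachineInstr::FrameSetup))
//     ...; // instruction belongs to the function's prologue
//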
private:
const MCInstrDesc *MCID; // Instruction descriptor.
MachineBasicBlock *Parent = nullptr; // Pointer to the owning basic block.
// Operands are allocated by an ArrayRecycler.
MachineOperand *Operands = nullptr; // Pointer to the first operand.
unsigned NumOperands = 0; // Number of operands on instruction.
using OperandCapacity = ArrayRecycler<MachineOperand>::Capacity;
OperandCapacity CapOperands; // Capacity of the Operands array.
uint16_t Flags = 0; // Various bits of additional
// information about machine
// instruction.
uint8_t AsmPrinterFlags = 0; // Various bits of information used by
// the AsmPrinter to emit helpful
// comments. This is *not* semantic
// information. Do not use this for
// anything other than to convey comment
// information to AsmPrinter.
/// Internal implementation detail class that provides out-of-line storage for
/// extra info used by the machine instruction when this info cannot be stored
/// in-line within the instruction itself.
///
/// This has to be defined eagerly due to the implementation constraints of
/// `PointerSumType` where it is used.
class ExtraInfo final
- : TrailingObjects<ExtraInfo, MachineMemOperand *, MCSymbol *> {
+ : TrailingObjects<ExtraInfo, MachineMemOperand *, MCSymbol *, MDNode *> {
public:
static ExtraInfo *create(BumpPtrAllocator &Allocator,
ArrayRef<MachineMemOperand *> MMOs,
MCSymbol *PreInstrSymbol = nullptr,
- MCSymbol *PostInstrSymbol = nullptr) {
+ MCSymbol *PostInstrSymbol = nullptr,
+ MDNode *HeapAllocMarker = nullptr) {
bool HasPreInstrSymbol = PreInstrSymbol != nullptr;
bool HasPostInstrSymbol = PostInstrSymbol != nullptr;
+ bool HasHeapAllocMarker = HeapAllocMarker != nullptr;
auto *Result = new (Allocator.Allocate(
- totalSizeToAlloc<MachineMemOperand *, MCSymbol *>(
- MMOs.size(), HasPreInstrSymbol + HasPostInstrSymbol),
+ totalSizeToAlloc<MachineMemOperand *, MCSymbol *, MDNode *>(
+ MMOs.size(), HasPreInstrSymbol + HasPostInstrSymbol,
+ HasHeapAllocMarker),
alignof(ExtraInfo)))
- ExtraInfo(MMOs.size(), HasPreInstrSymbol, HasPostInstrSymbol);
+ ExtraInfo(MMOs.size(), HasPreInstrSymbol, HasPostInstrSymbol,
+ HasHeapAllocMarker);
// Copy the actual data into the trailing objects.
std::copy(MMOs.begin(), MMOs.end(),
Result->getTrailingObjects<MachineMemOperand *>());
if (HasPreInstrSymbol)
Result->getTrailingObjects<MCSymbol *>()[0] = PreInstrSymbol;
if (HasPostInstrSymbol)
Result->getTrailingObjects<MCSymbol *>()[HasPreInstrSymbol] =
PostInstrSymbol;
+ if (HasHeapAllocMarker)
+ Result->getTrailingObjects<MDNode *>()[0] = HeapAllocMarker;
return Result;
}
ArrayRef<MachineMemOperand *> getMMOs() const {
return makeArrayRef(getTrailingObjects<MachineMemOperand *>(), NumMMOs);
}
MCSymbol *getPreInstrSymbol() const {
return HasPreInstrSymbol ? getTrailingObjects<MCSymbol *>()[0] : nullptr;
}
MCSymbol *getPostInstrSymbol() const {
return HasPostInstrSymbol
? getTrailingObjects<MCSymbol *>()[HasPreInstrSymbol]
: nullptr;
}
+ MDNode *getHeapAllocMarker() const {
+ return HasHeapAllocMarker ? getTrailingObjects<MDNode *>()[0] : nullptr;
+ }
+
private:
friend TrailingObjects;
// Description of the extra info, used to interpret the actual optional
// data appended.
//
// Note that this is not terribly space optimized. This leaves a great deal
// of flexibility to fit more in here later.
const int NumMMOs;
const bool HasPreInstrSymbol;
const bool HasPostInstrSymbol;
+ const bool HasHeapAllocMarker;
// Implement the `TrailingObjects` internal API.
size_t numTrailingObjects(OverloadToken<MachineMemOperand *>) const {
return NumMMOs;
}
size_t numTrailingObjects(OverloadToken<MCSymbol *>) const {
return HasPreInstrSymbol + HasPostInstrSymbol;
}
+ size_t numTrailingObjects(OverloadToken<MDNode *>) const {
+ return HasHeapAllocMarker;
+ }
// Just a boring constructor to allow us to initialize the sizes. Always use
// the `create` routine above.
- ExtraInfo(int NumMMOs, bool HasPreInstrSymbol, bool HasPostInstrSymbol)
+ ExtraInfo(int NumMMOs, bool HasPreInstrSymbol, bool HasPostInstrSymbol,
+ bool HasHeapAllocMarker)
: NumMMOs(NumMMOs), HasPreInstrSymbol(HasPreInstrSymbol),
- HasPostInstrSymbol(HasPostInstrSymbol) {}
+ HasPostInstrSymbol(HasPostInstrSymbol),
+ HasHeapAllocMarker(HasHeapAllocMarker) {}
};
/// Enumeration of the kinds of inline extra info available. It is important
/// that the `MachineMemOperand` inline kind has a tag value of zero to make
/// it accessible as an `ArrayRef`.
enum ExtraInfoInlineKinds {
EIIK_MMO = 0,
EIIK_PreInstrSymbol,
EIIK_PostInstrSymbol,
EIIK_OutOfLine
};
// We store extra information about the instruction here. The common case is
// expected to be nothing or a single pointer (typically a MMO or a symbol).
// We work to optimize this common case by storing it inline here rather than
// requiring a separate allocation, but we fall back to an allocation when
// multiple pointers are needed.
PointerSumType<ExtraInfoInlineKinds,
PointerSumTypeMember<EIIK_MMO, MachineMemOperand *>,
PointerSumTypeMember<EIIK_PreInstrSymbol, MCSymbol *>,
PointerSumTypeMember<EIIK_PostInstrSymbol, MCSymbol *>,
PointerSumTypeMember<EIIK_OutOfLine, ExtraInfo *>>
Info;
DebugLoc debugLoc; // Source line information.
// Intrusive list support
friend struct ilist_traits<MachineInstr>;
friend struct ilist_callback_traits<MachineBasicBlock>;
void setParent(MachineBasicBlock *P) { Parent = P; }
/// This constructor creates a copy of the given
/// MachineInstr in the given MachineFunction.
MachineInstr(MachineFunction &, const MachineInstr &);
/// This constructor creates a MachineInstr and adds the implicit operands.
/// It reserves space for number of operands specified by
/// MCInstrDesc. An explicit DebugLoc is supplied.
MachineInstr(MachineFunction &, const MCInstrDesc &tid, DebugLoc dl,
bool NoImp = false);
// MachineInstrs are pool-allocated and owned by MachineFunction.
friend class MachineFunction;
public:
MachineInstr(const MachineInstr &) = delete;
MachineInstr &operator=(const MachineInstr &) = delete;
// Use MachineFunction::DeleteMachineInstr() instead.
~MachineInstr() = delete;
const MachineBasicBlock* getParent() const { return Parent; }
MachineBasicBlock* getParent() { return Parent; }
/// Return the function that contains the basic block that this instruction
/// belongs to.
///
/// Note: this is undefined behaviour if the instruction does not have a
/// parent.
const MachineFunction *getMF() const;
MachineFunction *getMF() {
return const_cast<MachineFunction *>(
static_cast<const MachineInstr *>(this)->getMF());
}
/// Return the asm printer flags bitvector.
uint8_t getAsmPrinterFlags() const { return AsmPrinterFlags; }
/// Clear the AsmPrinter bitvector.
void clearAsmPrinterFlags() { AsmPrinterFlags = 0; }
/// Return whether an AsmPrinter flag is set.
bool getAsmPrinterFlag(CommentFlag Flag) const {
return AsmPrinterFlags & Flag;
}
/// Set a flag for the AsmPrinter.
void setAsmPrinterFlag(uint8_t Flag) {
AsmPrinterFlags |= Flag;
}
/// Clear specific AsmPrinter flags.
void clearAsmPrinterFlag(CommentFlag Flag) {
AsmPrinterFlags &= ~Flag;
}
/// Return the MI flags bitvector.
uint16_t getFlags() const {
return Flags;
}
/// Return whether an MI flag is set.
bool getFlag(MIFlag Flag) const {
return Flags & Flag;
}
/// Set a MI flag.
void setFlag(MIFlag Flag) {
Flags |= (uint16_t)Flag;
}
void setFlags(unsigned flags) {
// Filter out the automatically maintained flags.
unsigned Mask = BundledPred | BundledSucc;
Flags = (Flags & Mask) | (flags & ~Mask);
}
/// clearFlag - Clear a MI flag.
void clearFlag(MIFlag Flag) {
Flags &= ~((uint16_t)Flag);
}
/// Return true if MI is in a bundle (but not the first MI in a bundle).
///
/// A bundle looks like this before it's finalized:
/// ----------------
/// | MI |
/// ----------------
/// |
/// ----------------
/// | MI * |
/// ----------------
/// |
/// ----------------
/// | MI * |
/// ----------------
/// In this case, the first MI starts a bundle but is not inside a bundle; the
/// next 2 MIs are considered "inside" the bundle.
///
/// After a bundle is finalized, it looks like this:
/// ----------------
/// | Bundle |
/// ----------------
/// |
/// ----------------
/// | MI * |
/// ----------------
/// |
/// ----------------
/// | MI * |
/// ----------------
/// |
/// ----------------
/// | MI * |
/// ----------------
/// The first instruction has the special opcode "BUNDLE". It's not "inside"
/// a bundle, but the next three MIs are.
bool isInsideBundle() const {
return getFlag(BundledPred);
}
/// Return true if this instruction is part of a bundle. This is true
/// if either itself or its following instruction is marked "InsideBundle".
bool isBundled() const {
return isBundledWithPred() || isBundledWithSucc();
}
/// Return true if this instruction is part of a bundle, and it is not the
/// first instruction in the bundle.
bool isBundledWithPred() const { return getFlag(BundledPred); }
/// Return true if this instruction is part of a bundle, and it is not the
/// last instruction in the bundle.
bool isBundledWithSucc() const { return getFlag(BundledSucc); }
/// Bundle this instruction with its predecessor. This can be an unbundled
/// instruction, or it can be the first instruction in a bundle.
void bundleWithPred();
/// Bundle this instruction with its successor. This can be an unbundled
/// instruction, or it can be the last instruction in a bundle.
void bundleWithSucc();
/// Break bundle above this instruction.
void unbundleFromPred();
/// Break bundle below this instruction.
void unbundleFromSucc();
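// A minimal sketch of the bundling API (MI is assumed to have an unbundled
// successor in the same basic block):
//
//   MI.bundleWithSucc();                   // MI becomes the bundle head
//   assert(MI.isBundledWithSucc() && !MI.isInsideBundle());
//   MI.unbundleFromSucc();                 // break the bundle again
//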
/// Returns the debug location id of this MachineInstr.
const DebugLoc &getDebugLoc() const { return debugLoc; }
/// Return the debug variable referenced by
/// this DBG_VALUE instruction.
const DILocalVariable *getDebugVariable() const;
/// Return the complex address expression referenced by
/// this DBG_VALUE instruction.
const DIExpression *getDebugExpression() const;
/// Return the debug label referenced by
/// this DBG_LABEL instruction.
const DILabel *getDebugLabel() const;
/// Emit an error referring to the source location of this instruction.
/// This should only be used for inline assembly that is somehow
/// impossible to compile. Other errors should have been handled much
/// earlier.
///
/// If this method returns, the caller should try to recover from the error.
void emitError(StringRef Msg) const;
/// Returns the target instruction descriptor of this MachineInstr.
const MCInstrDesc &getDesc() const { return *MCID; }
/// Returns the opcode of this MachineInstr.
unsigned getOpcode() const { return MCID->Opcode; }
/// Returns the total number of operands.
unsigned getNumOperands() const { return NumOperands; }
const MachineOperand& getOperand(unsigned i) const {
assert(i < getNumOperands() && "getOperand() out of range!");
return Operands[i];
}
MachineOperand& getOperand(unsigned i) {
assert(i < getNumOperands() && "getOperand() out of range!");
return Operands[i];
}
/// Returns the total number of definitions.
unsigned getNumDefs() const {
return getNumExplicitDefs() + MCID->getNumImplicitDefs();
}
/// Return true if operand \p OpIdx is a subregister index.
bool isOperandSubregIdx(unsigned OpIdx) const {
assert(getOperand(OpIdx).getType() == MachineOperand::MO_Immediate &&
"Expected MO_Immediate operand type.");
if (isExtractSubreg() && OpIdx == 2)
return true;
if (isInsertSubreg() && OpIdx == 3)
return true;
if (isRegSequence() && OpIdx > 1 && (OpIdx % 2) == 0)
return true;
if (isSubregToReg() && OpIdx == 3)
return true;
return false;
}
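// For example, given the REG_SEQUENCE
//   %dst = REG_SEQUENCE %src0, %subidx0, %src1, %subidx1
// operands 2 and 4 are the subregister indices, so isOperandSubregIdx(2)
// and isOperandSubregIdx(4) return true.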
/// Returns the number of non-implicit operands.
unsigned getNumExplicitOperands() const;
/// Returns the number of non-implicit definitions.
unsigned getNumExplicitDefs() const;
/// iterator/begin/end - Iterate over all operands of a machine instruction.
using mop_iterator = MachineOperand *;
using const_mop_iterator = const MachineOperand *;
mop_iterator operands_begin() { return Operands; }
mop_iterator operands_end() { return Operands + NumOperands; }
const_mop_iterator operands_begin() const { return Operands; }
const_mop_iterator operands_end() const { return Operands + NumOperands; }
iterator_range<mop_iterator> operands() {
return make_range(operands_begin(), operands_end());
}
iterator_range<const_mop_iterator> operands() const {
return make_range(operands_begin(), operands_end());
}
iterator_range<mop_iterator> explicit_operands() {
return make_range(operands_begin(),
operands_begin() + getNumExplicitOperands());
}
iterator_range<const_mop_iterator> explicit_operands() const {
return make_range(operands_begin(),
operands_begin() + getNumExplicitOperands());
}
iterator_range<mop_iterator> implicit_operands() {
return make_range(explicit_operands().end(), operands_end());
}
iterator_range<const_mop_iterator> implicit_operands() const {
return make_range(explicit_operands().end(), operands_end());
}
/// Returns a range over all explicit operands that are register definitions.
/// Implicit definition are not included!
iterator_range<mop_iterator> defs() {
return make_range(operands_begin(),
operands_begin() + getNumExplicitDefs());
}
/// \copydoc defs()
iterator_range<const_mop_iterator> defs() const {
return make_range(operands_begin(),
operands_begin() + getNumExplicitDefs());
}
/// Returns a range that includes all operands that are register uses.
/// This may include unrelated operands which are not register uses.
iterator_range<mop_iterator> uses() {
return make_range(operands_begin() + getNumExplicitDefs(), operands_end());
}
/// \copydoc uses()
iterator_range<const_mop_iterator> uses() const {
return make_range(operands_begin() + getNumExplicitDefs(), operands_end());
}
iterator_range<mop_iterator> explicit_uses() {
return make_range(operands_begin() + getNumExplicitDefs(),
operands_begin() + getNumExplicitOperands());
}
iterator_range<const_mop_iterator> explicit_uses() const {
return make_range(operands_begin() + getNumExplicitDefs(),
operands_begin() + getNumExplicitOperands());
}
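// A minimal iteration sketch (MI is an in-scope MachineInstr); the isReg()
// check matters because uses() may contain non-register operands:
//
//   for (const MachineOperand &MO : MI.uses())
//     if (MO.isReg() && MO.readsReg())
//       ...; // MO is a register operand that actually reads its register
//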
/// Returns the number of the operand iterator \p I points to.
unsigned getOperandNo(const_mop_iterator I) const {
return I - operands_begin();
}
/// Access to memory operands of the instruction. If there are none, that does
/// not imply anything about whether the instruction accesses memory. Instead,
/// the caller must behave conservatively.
ArrayRef<MachineMemOperand *> memoperands() const {
if (!Info)
return {};
if (Info.is<EIIK_MMO>())
return makeArrayRef(Info.getAddrOfZeroTagPointer(), 1);
if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
return EI->getMMOs();
return {};
}
/// Access to memory operands of the instruction.
///
/// If `memoperands_begin() == memoperands_end()`, that does not imply
/// anything about whether the instruction accesses memory. Instead, the caller
/// must behave conservatively.
mmo_iterator memoperands_begin() const { return memoperands().begin(); }
/// Access to memory operands of the instruction.
///
/// If `memoperands_begin() == memoperands_end()`, that does not imply
/// anything about whether the instruction accesses memory. Instead, the caller
/// must behave conservatively.
mmo_iterator memoperands_end() const { return memoperands().end(); }
/// Return true if we don't have any memory operands which describe the
/// memory access done by this instruction. If this is true, calling code
/// must be conservative.
bool memoperands_empty() const { return memoperands().empty(); }
/// Return true if this instruction has exactly one MachineMemOperand.
bool hasOneMemOperand() const { return memoperands().size() == 1; }
/// Return the number of memory operands.
unsigned getNumMemOperands() const { return memoperands().size(); }
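// A conservative-caller sketch, as the comments above require (MI is an
// in-scope MachineInstr):
//
//   if (MI.memoperands_empty())
//     return true; // access is unknown; assume the worst
//   for (const MachineMemOperand *MMO : MI.memoperands())
//     if (MMO->isVolatile())
//       return true;
//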
/// Helper to extract a pre-instruction symbol if one has been added.
MCSymbol *getPreInstrSymbol() const {
if (!Info)
return nullptr;
if (MCSymbol *S = Info.get<EIIK_PreInstrSymbol>())
return S;
if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
return EI->getPreInstrSymbol();
return nullptr;
}
/// Helper to extract a post-instruction symbol if one has been added.
MCSymbol *getPostInstrSymbol() const {
if (!Info)
return nullptr;
if (MCSymbol *S = Info.get<EIIK_PostInstrSymbol>())
return S;
if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
return EI->getPostInstrSymbol();
return nullptr;
}
+ /// Helper to extract a heap alloc marker if one has been added.
+ MDNode *getHeapAllocMarker() const {
+ if (!Info)
+ return nullptr;
+ if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
+ return EI->getHeapAllocMarker();
+
+ return nullptr;
+ }
+
/// API for querying MachineInstr properties. They are the same as MCInstrDesc
/// queries but they are bundle aware.
enum QueryType {
IgnoreBundle, // Ignore bundles
AnyInBundle, // Return true if any instruction in bundle has property
AllInBundle // Return true if all instructions in bundle have property
};
/// Return true if the instruction (or in the case of a bundle,
/// the instructions inside the bundle) has the specified property.
/// The first argument is the property being queried.
/// The second argument indicates whether the query should look inside
/// instruction bundles.
bool hasProperty(unsigned MCFlag, QueryType Type = AnyInBundle) const {
assert(MCFlag < 64 &&
"MCFlag out of range for bit mask in getFlags/hasPropertyInBundle.");
// Inline the fast path for unbundled or bundle-internal instructions.
if (Type == IgnoreBundle || !isBundled() || isBundledWithPred())
return getDesc().getFlags() & (1ULL << MCFlag);
// If this is the first instruction in a bundle, take the slow path.
return hasPropertyInBundle(1ULL << MCFlag, Type);
}
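// The convenience predicates below all reduce to calls of this form, e.g.:
//
//   bool IsCall = MI.hasProperty(MCID::Call, MachineInstr::AnyInBundle);
//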
/// Return true if this instruction can have a variable number of operands.
/// In this case, the variable operands will be after the normal
/// operands but before the implicit definitions and uses (if any are
/// present).
bool isVariadic(QueryType Type = IgnoreBundle) const {
return hasProperty(MCID::Variadic, Type);
}
/// Set if this instruction has an optional definition, e.g.
/// ARM instructions which can set condition code if 's' bit is set.
bool hasOptionalDef(QueryType Type = IgnoreBundle) const {
return hasProperty(MCID::HasOptionalDef, Type);
}
/// Return true if this is a pseudo instruction that doesn't
/// correspond to a real machine instruction.
bool isPseudo(QueryType Type = IgnoreBundle) const {
return hasProperty(MCID::Pseudo, Type);
}
bool isReturn(QueryType Type = AnyInBundle) const {
return hasProperty(MCID::Return, Type);
}
/// Return true if this is an instruction that marks the end of an EH scope,
/// i.e., a catchpad or a cleanuppad instruction.
bool isEHScopeReturn(QueryType Type = AnyInBundle) const {
return hasProperty(MCID::EHScopeReturn, Type);
}
bool isCall(QueryType Type = AnyInBundle) const {
return hasProperty(MCID::Call, Type);
}
/// Returns true if the specified instruction stops control flow
/// from executing the instruction immediately following it. Examples include
/// unconditional branches and return instructions.
bool isBarrier(QueryType Type = AnyInBundle) const {
return hasProperty(MCID::Barrier, Type);
}
/// Returns true if this instruction is part of the terminator for a basic
/// block. Typically these are return and branch instructions.
///
/// Various passes use this to insert code into the bottom of a basic block,
/// but before control flow occurs.
bool isTerminator(QueryType Type = AnyInBundle) const {
return hasProperty(MCID::Terminator, Type);
}
/// Returns true if this is a conditional, unconditional, or indirect branch.
/// Predicates below can be used to discriminate between
/// these cases, and the TargetInstrInfo::AnalyzeBranch method can be used to
/// get more information.
bool isBranch(QueryType Type = AnyInBundle) const {
return hasProperty(MCID::Branch, Type);
}
/// Return true if this is an indirect branch, such as a
/// branch through a register.
bool isIndirectBranch(QueryType Type = AnyInBundle) const {
return hasProperty(MCID::IndirectBranch, Type);
}
/// Return true if this is a branch which may fall
/// through to the next instruction or may transfer control flow to some other
/// block. The TargetInstrInfo::AnalyzeBranch method can be used to get more
/// information about this branch.
bool isConditionalBranch(QueryType Type = AnyInBundle) const {
return isBranch(Type) & !isBarrier(Type) & !isIndirectBranch(Type);
}
/// Return true if this is a branch which always
/// transfers control flow to some other block. The
/// TargetInstrInfo::AnalyzeBranch method can be used to get more information
/// about this branch.
bool isUnconditionalBranch(QueryType Type = AnyInBundle) const {
return isBranch(Type) & isBarrier(Type) & !isIndirectBranch(Type);
}
/// Return true if this instruction has a predicate operand that
/// controls execution. It may be set to 'always', or may be set to other
/// values. There are various methods in TargetInstrInfo that can be used to
/// control and modify the predicate in this instruction.
bool isPredicable(QueryType Type = AllInBundle) const {
// If it's a bundle, then all bundled instructions must be predicable for this
// to return true.
return hasProperty(MCID::Predicable, Type);
}
/// Return true if this instruction is a comparison.
bool isCompare(QueryType Type = IgnoreBundle) const {
return hasProperty(MCID::Compare, Type);
}
/// Return true if this instruction is a move immediate
/// (including conditional moves) instruction.
bool isMoveImmediate(QueryType Type = IgnoreBundle) const {
return hasProperty(MCID::MoveImm, Type);
}
/// Return true if this instruction is a register move.
/// (including moving values from subreg to reg)
bool isMoveReg(QueryType Type = IgnoreBundle) const {
return hasProperty(MCID::MoveReg, Type);
}
/// Return true if this instruction is a bitcast instruction.
bool isBitcast(QueryType Type = IgnoreBundle) const {
return hasProperty(MCID::Bitcast, Type);
}
/// Return true if this instruction is a select instruction.
bool isSelect(QueryType Type = IgnoreBundle) const {
return hasProperty(MCID::Select, Type);
}
/// Return true if this instruction cannot be safely duplicated.
/// For example, if the instruction has a unique labels attached
/// to it, duplicating it would cause multiple definition errors.
bool isNotDuplicable(QueryType Type = AnyInBundle) const {
return hasProperty(MCID::NotDuplicable, Type);
}
/// Return true if this instruction is convergent.
/// Convergent instructions can not be made control-dependent on any
/// additional values.
bool isConvergent(QueryType Type = AnyInBundle) const {
if (isInlineAsm()) {
unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
if (ExtraInfo & InlineAsm::Extra_IsConvergent)
return true;
}
return hasProperty(MCID::Convergent, Type);
}
/// Returns true if the specified instruction has a delay slot
/// which must be filled by the code generator.
bool hasDelaySlot(QueryType Type = AnyInBundle) const {
return hasProperty(MCID::DelaySlot, Type);
}
/// Return true for instructions that can be folded as
/// memory operands in other instructions. The most common use for this
/// is instructions that are simple loads from memory that don't modify
/// the loaded value in any way, but it can also be used for instructions
/// that can be expressed as constant-pool loads, such as V_SETALLONES
/// on x86, to allow them to be folded when it is beneficial.
/// This should only be set on instructions that return a value in their
/// only virtual register definition.
bool canFoldAsLoad(QueryType Type = IgnoreBundle) const {
return hasProperty(MCID::FoldableAsLoad, Type);
}
/// Return true if this instruction behaves
/// the same way as the generic REG_SEQUENCE instructions.
/// E.g., on ARM,
/// dX VMOVDRR rY, rZ
/// is equivalent to
/// dX = REG_SEQUENCE rY, ssub_0, rZ, ssub_1.
///
/// Note that for the optimizers to be able to take advantage of
/// this property, TargetInstrInfo::getRegSequenceLikeInputs has to be
/// overridden accordingly.
bool isRegSequenceLike(QueryType Type = IgnoreBundle) const {
return hasProperty(MCID::RegSequence, Type);
}
/// Return true if this instruction behaves
/// the same way as the generic EXTRACT_SUBREG instructions.
/// E.g., on ARM,
/// rX, rY VMOVRRD dZ
/// is equivalent to two EXTRACT_SUBREG:
/// rX = EXTRACT_SUBREG dZ, ssub_0
/// rY = EXTRACT_SUBREG dZ, ssub_1
///
/// Note that for the optimizers to be able to take advantage of
/// this property, TargetInstrInfo::getExtractSubregLikeInputs has to be
/// overridden accordingly.
bool isExtractSubregLike(QueryType Type = IgnoreBundle) const {
return hasProperty(MCID::ExtractSubreg, Type);
}
/// Return true if this instruction behaves
/// the same way as the generic INSERT_SUBREG instructions.
/// E.g., on ARM,
/// dX = VSETLNi32 dY, rZ, Imm
/// is equivalent to an INSERT_SUBREG:
/// dX = INSERT_SUBREG dY, rZ, translateImmToSubIdx(Imm)
///
/// Note that for the optimizers to be able to take advantage of
/// this property, TargetInstrInfo::getInsertSubregLikeInputs has to be
/// overridden accordingly.
bool isInsertSubregLike(QueryType Type = IgnoreBundle) const {
return hasProperty(MCID::InsertSubreg, Type);
}
//===--------------------------------------------------------------------===//
// Side Effect Analysis
//===--------------------------------------------------------------------===//
/// Return true if this instruction could possibly read memory.
/// Instructions with this flag set are not necessarily simple load
/// instructions; they may load a value and modify it, for example.
bool mayLoad(QueryType Type = AnyInBundle) const {
if (isInlineAsm()) {
unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
if (ExtraInfo & InlineAsm::Extra_MayLoad)
return true;
}
return hasProperty(MCID::MayLoad, Type);
}
/// Return true if this instruction could possibly modify memory.
/// Instructions with this flag set are not necessarily simple store
/// instructions; they may store a modified value based on their operands, or
/// may not actually modify anything, for example.
bool mayStore(QueryType Type = AnyInBundle) const {
if (isInlineAsm()) {
unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
if (ExtraInfo & InlineAsm::Extra_MayStore)
return true;
}
return hasProperty(MCID::MayStore, Type);
}
/// Return true if this instruction could possibly read or modify memory.
bool mayLoadOrStore(QueryType Type = AnyInBundle) const {
return mayLoad(Type) || mayStore(Type);
}
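// A minimal sketch of a conservative memory check (MI is an in-scope
// MachineInstr; hasOrderedMemoryRef() is declared further below):
//
//   if (MI.mayLoadOrStore() || MI.hasOrderedMemoryRef())
//     ...; // keep this instruction ordered with other memory operations
//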
/// Return true if this instruction could possibly raise a floating-point
/// exception. This is the case if the instruction is a floating-point
/// instruction that can in principle raise an exception, as indicated
/// by the MCID::MayRaiseFPException property, *and* at the same time,
/// the instruction is used in a context where we expect floating-point
/// exceptions might be enabled, as indicated by the FPExcept MI flag.
bool mayRaiseFPException() const {
return hasProperty(MCID::MayRaiseFPException) &&
getFlag(MachineInstr::MIFlag::FPExcept);
}
//===--------------------------------------------------------------------===//
// Flags that indicate whether an instruction can be modified by a method.
//===--------------------------------------------------------------------===//
/// Return true if this may be a 2- or 3-address
/// instruction (of the form "X = op Y, Z, ..."), which produces the same
/// result if Y and Z are exchanged. If this flag is set, then the
/// TargetInstrInfo::commuteInstruction method may be used to hack on the
/// instruction.
///
/// Note that this flag may be set on instructions that are only commutable
/// sometimes. In these cases, the call to commuteInstruction will fail.
/// Also note that some instructions require non-trivial modification to
/// commute them.
bool isCommutable(QueryType Type = IgnoreBundle) const {
return hasProperty(MCID::Commutable, Type);
}
/// Return true if this is a 2-address instruction
/// which can be changed into a 3-address instruction if needed. Doing this
/// transformation can be profitable in the register allocator, because it
/// means that the instruction can use a 2-address form if possible, but
/// degrade into a less efficient form if the source and dest register cannot
/// be assigned to the same register. For example, this allows the x86
/// backend to turn a "shl reg, 3" instruction into an LEA instruction, which
/// is the same speed as the shift but has bigger code size.
///
/// If this returns true, then the target must implement the
/// TargetInstrInfo::convertToThreeAddress method for this instruction, which
/// is allowed to fail if the transformation isn't valid for this specific
/// instruction (e.g. shl reg, 4 on x86).
///
bool isConvertibleTo3Addr(QueryType Type = IgnoreBundle) const {
return hasProperty(MCID::ConvertibleTo3Addr, Type);
}
/// Return true if this instruction requires
/// custom insertion support when the DAG scheduler is inserting it into a
/// machine basic block. If this is true for the instruction, it basically
/// means that it is a pseudo instruction used at SelectionDAG time that is
/// expanded out into magic code by the target when MachineInstrs are formed.
///
/// If this is true, the TargetLoweringInfo::InsertAtEndOfBasicBlock method
/// is used to insert this into the MachineBasicBlock.
bool usesCustomInsertionHook(QueryType Type = IgnoreBundle) const {
return hasProperty(MCID::UsesCustomInserter, Type);
}
/// Return true if this instruction requires *adjustment*
/// after instruction selection by calling a target hook. For example, this
/// can be used to fill in ARM 's' optional operand depending on whether
/// the conditional flag register is used.
bool hasPostISelHook(QueryType Type = IgnoreBundle) const {
return hasProperty(MCID::HasPostISelHook, Type);
}
/// Returns true if this instruction is a candidate for remat.
/// This flag is deprecated, please don't use it anymore. If this
/// flag is set, the isReallyTriviallyReMaterializable() method is called to
/// verify that the instruction is really rematerializable.
bool isRematerializable(QueryType Type = AllInBundle) const {
// It's only possible to re-mat a bundle if all bundled instructions are
// re-materializable.
return hasProperty(MCID::Rematerializable, Type);
}
/// Returns true if this instruction has the same cost (or less) than a move
/// instruction. This is useful during certain types of optimizations
/// (e.g., remat during two-address conversion or machine licm)
/// where we would like to remat or hoist the instruction, but not if it costs
/// more than moving the instruction into the appropriate register. Note, we
/// are not marking copies from and to the same register class with this flag.
bool isAsCheapAsAMove(QueryType Type = AllInBundle) const {
// Only returns true for a bundle if all bundled instructions are cheap.
return hasProperty(MCID::CheapAsAMove, Type);
}
/// Returns true if this instruction's source operands
/// have special register allocation requirements that are not captured by the
/// operand register classes. e.g. ARM::STRD's two source registers must be an
/// even / odd pair, ARM::STM registers have to be in ascending order.
/// Post-register allocation passes should not attempt to change allocations
/// for sources of instructions with this flag.
bool hasExtraSrcRegAllocReq(QueryType Type = AnyInBundle) const {
return hasProperty(MCID::ExtraSrcRegAllocReq, Type);
}
/// Returns true if this instruction's def operands
/// have special register allocation requirements that are not captured by the
/// operand register classes. e.g. ARM::LDRD's two def registers must be an
/// even / odd pair, ARM::LDM registers have to be in ascending order.
/// Post-register allocation passes should not attempt to change allocations
/// for definitions of instructions with this flag.
bool hasExtraDefRegAllocReq(QueryType Type = AnyInBundle) const {
return hasProperty(MCID::ExtraDefRegAllocReq, Type);
}
enum MICheckType {
CheckDefs, // Check all operands for equality
CheckKillDead, // Check all operands including kill / dead markers
IgnoreDefs, // Ignore all definitions
IgnoreVRegDefs // Ignore virtual register definitions
};
/// Return true if this instruction is identical to \p Other.
/// Two instructions are identical if they have the same opcode and all their
/// operands are identical (with respect to MachineOperand::isIdenticalTo()).
/// Note that this means liveness related flags (dead, undef, kill) do not
/// affect the notion of identical.
bool isIdenticalTo(const MachineInstr &Other,
MICheckType Check = CheckDefs) const;
/// Unlink 'this' from the containing basic block, and return it without
/// deleting it.
///
/// This function can not be used on bundled instructions, use
/// removeFromBundle() to remove individual instructions from a bundle.
MachineInstr *removeFromParent();
/// Unlink this instruction from its basic block and return it without
/// deleting it.
///
/// If the instruction is part of a bundle, the other instructions in the
/// bundle remain bundled.
MachineInstr *removeFromBundle();
/// Unlink 'this' from the containing basic block and delete it.
///
/// If this instruction is the header of a bundle, the whole bundle is erased.
/// This function can not be used for instructions inside a bundle, use
/// eraseFromBundle() to erase individual bundled instructions.
void eraseFromParent();
/// Unlink 'this' from the containing basic block and delete it.
///
/// For all definitions mark their uses in DBG_VALUE nodes
/// as undefined. Otherwise like eraseFromParent().
void eraseFromParentAndMarkDBGValuesForRemoval();
/// Unlink 'this' from its basic block and delete it.
///
/// If the instruction is part of a bundle, the other instructions in the
/// bundle remain bundled.
void eraseFromBundle();
bool isEHLabel() const { return getOpcode() == TargetOpcode::EH_LABEL; }
bool isGCLabel() const { return getOpcode() == TargetOpcode::GC_LABEL; }
bool isAnnotationLabel() const {
return getOpcode() == TargetOpcode::ANNOTATION_LABEL;
}
/// Returns true if the MachineInstr represents a label.
bool isLabel() const {
return isEHLabel() || isGCLabel() || isAnnotationLabel();
}
bool isCFIInstruction() const {
return getOpcode() == TargetOpcode::CFI_INSTRUCTION;
}
// True if the instruction represents a position in the function.
bool isPosition() const { return isLabel() || isCFIInstruction(); }
bool isDebugValue() const { return getOpcode() == TargetOpcode::DBG_VALUE; }
bool isDebugLabel() const { return getOpcode() == TargetOpcode::DBG_LABEL; }
bool isDebugInstr() const { return isDebugValue() || isDebugLabel(); }
/// A DBG_VALUE is indirect iff the first operand is a register and
/// the second operand is an immediate.
bool isIndirectDebugValue() const {
return isDebugValue()
&& getOperand(0).isReg()
&& getOperand(1).isImm();
}
/// A DBG_VALUE is an entry value iff its debug expression contains the
/// DW_OP_entry_value DWARF operation.
bool isDebugEntryValue() const {
return isDebugValue() && getDebugExpression()->isEntryValue();
}
/// Return true if the instruction is a debug value which describes a part of
/// a variable as unavailable.
bool isUndefDebugValue() const {
return isDebugValue() && getOperand(0).isReg() && !getOperand(0).getReg();
}
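// A minimal sketch distinguishing the DBG_VALUE forms described above:
//
//   if (MI.isDebugValue()) {
//     if (MI.isIndirectDebugValue())
//       ...; // operand 0 holds the variable's address
//     else if (MI.isUndefDebugValue())
//       ...; // the described part of the variable is unavailable
//   }
//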
bool isPHI() const {
return getOpcode() == TargetOpcode::PHI ||
getOpcode() == TargetOpcode::G_PHI;
}
bool isKill() const { return getOpcode() == TargetOpcode::KILL; }
bool isImplicitDef() const { return getOpcode()==TargetOpcode::IMPLICIT_DEF; }
bool isInlineAsm() const {
return getOpcode() == TargetOpcode::INLINEASM ||
getOpcode() == TargetOpcode::INLINEASM_BR;
}
/// FIXME: Seems like a layering violation that the AsmDialect, which is X86
/// specific, can be attached to a generic MachineInstr.
bool isMSInlineAsm() const {
return isInlineAsm() && getInlineAsmDialect() == InlineAsm::AD_Intel;
}
bool isStackAligningInlineAsm() const;
InlineAsm::AsmDialect getInlineAsmDialect() const;
bool isInsertSubreg() const {
return getOpcode() == TargetOpcode::INSERT_SUBREG;
}
bool isSubregToReg() const {
return getOpcode() == TargetOpcode::SUBREG_TO_REG;
}
bool isRegSequence() const {
return getOpcode() == TargetOpcode::REG_SEQUENCE;
}
bool isBundle() const {
return getOpcode() == TargetOpcode::BUNDLE;
}
bool isCopy() const {
return getOpcode() == TargetOpcode::COPY;
}
bool isFullCopy() const {
return isCopy() && !getOperand(0).getSubReg() && !getOperand(1).getSubReg();
}
bool isExtractSubreg() const {
return getOpcode() == TargetOpcode::EXTRACT_SUBREG;
}
/// Return true if the instruction behaves like a copy.
/// This does not include native copy instructions.
bool isCopyLike() const {
return isCopy() || isSubregToReg();
}
/// Return true if the instruction is an identity copy.
bool isIdentityCopy() const {
return isCopy() && getOperand(0).getReg() == getOperand(1).getReg() &&
getOperand(0).getSubReg() == getOperand(1).getSubReg();
}
/// Return true if this instruction doesn't produce any output in the form of
/// executable instructions.
bool isMetaInstruction() const {
switch (getOpcode()) {
default:
return false;
case TargetOpcode::IMPLICIT_DEF:
case TargetOpcode::KILL:
case TargetOpcode::CFI_INSTRUCTION:
case TargetOpcode::EH_LABEL:
case TargetOpcode::GC_LABEL:
case TargetOpcode::DBG_VALUE:
case TargetOpcode::DBG_LABEL:
case TargetOpcode::LIFETIME_START:
case TargetOpcode::LIFETIME_END:
return true;
}
}
/// Return true if this is a transient instruction that is either very likely
/// to be eliminated during register allocation (such as copy-like
/// instructions), or if this instruction doesn't have an execution-time cost.
bool isTransient() const {
switch (getOpcode()) {
default:
return isMetaInstruction();
// Copy-like instructions are usually eliminated during register allocation.
case TargetOpcode::PHI:
case TargetOpcode::G_PHI:
case TargetOpcode::COPY:
case TargetOpcode::INSERT_SUBREG:
case TargetOpcode::SUBREG_TO_REG:
case TargetOpcode::REG_SEQUENCE:
return true;
}
}
/// Return the number of instructions inside the MI bundle, excluding the
/// bundle header.
///
/// This is the number of instructions that MachineBasicBlock::iterator
/// skips, 0 for unbundled instructions.
unsigned getBundleSize() const;
/// Return true if the MachineInstr reads the specified register.
/// If TargetRegisterInfo is passed, then it also checks if there
/// is a read of a super-register.
/// This does not count partial redefines of virtual registers as reads:
/// %reg1024:6 = OP.
bool readsRegister(unsigned Reg,
const TargetRegisterInfo *TRI = nullptr) const {
return findRegisterUseOperandIdx(Reg, false, TRI) != -1;
}
/// Return true if the MachineInstr reads the specified virtual register.
/// Take into account that a partial define is a
/// read-modify-write operation.
bool readsVirtualRegister(unsigned Reg) const {
return readsWritesVirtualRegister(Reg).first;
}
/// Return a pair of bools (reads, writes) indicating if this instruction
/// reads or writes Reg. This also considers partial defines.
/// If Ops is not null, all operand indices for Reg are added.
std::pair<bool,bool> readsWritesVirtualRegister(unsigned Reg,
SmallVectorImpl<unsigned> *Ops = nullptr) const;
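// For example:
//
//   bool Reads, Writes;
//   std::tie(Reads, Writes) = MI.readsWritesVirtualRegister(Reg);
//   if (Writes && !Reads)
//     ...; // Reg is defined here without being read (a full define)
//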
/// Return true if the MachineInstr kills the specified register.
/// If TargetRegisterInfo is passed, then it also checks if there is
/// a kill of a super-register.
bool killsRegister(unsigned Reg,
const TargetRegisterInfo *TRI = nullptr) const {
return findRegisterUseOperandIdx(Reg, true, TRI) != -1;
}
/// Return true if the MachineInstr fully defines the specified register.
/// If TargetRegisterInfo is passed, then it also checks
/// if there is a def of a super-register.
/// NOTE: It's ignoring subreg indices on virtual registers.
bool definesRegister(unsigned Reg,
const TargetRegisterInfo *TRI = nullptr) const {
return findRegisterDefOperandIdx(Reg, false, false, TRI) != -1;
}
/// Return true if the MachineInstr modifies (fully defines or partially
/// defines) the specified register.
/// NOTE: It's ignoring subreg indices on virtual registers.
bool modifiesRegister(unsigned Reg, const TargetRegisterInfo *TRI) const {
return findRegisterDefOperandIdx(Reg, false, true, TRI) != -1;
}
/// Returns true if the register is dead in this machine instruction.
/// If TargetRegisterInfo is passed, then it also checks
/// if there is a dead def of a super-register.
bool registerDefIsDead(unsigned Reg,
const TargetRegisterInfo *TRI = nullptr) const {
return findRegisterDefOperandIdx(Reg, true, false, TRI) != -1;
}
/// Returns true if the MachineInstr has an implicit-use operand of exactly
/// the given register (not considering sub/super-registers).
bool hasRegisterImplicitUseOperand(unsigned Reg) const;
/// Returns the operand index that is a use of the specific register or -1
/// if it is not found. It further tightens the search criteria to a use
/// that kills the register if isKill is true.
int findRegisterUseOperandIdx(unsigned Reg, bool isKill = false,
const TargetRegisterInfo *TRI = nullptr) const;
/// Wrapper for findRegisterUseOperandIdx, it returns
/// a pointer to the MachineOperand rather than an index.
MachineOperand *findRegisterUseOperand(unsigned Reg, bool isKill = false,
const TargetRegisterInfo *TRI = nullptr) {
int Idx = findRegisterUseOperandIdx(Reg, isKill, TRI);
return (Idx == -1) ? nullptr : &getOperand(Idx);
}
const MachineOperand *findRegisterUseOperand(
unsigned Reg, bool isKill = false,
const TargetRegisterInfo *TRI = nullptr) const {
return const_cast<MachineInstr *>(this)->
findRegisterUseOperand(Reg, isKill, TRI);
}
/// Returns the operand index that is a def of the specified register or
/// -1 if it is not found. If isDead is true, defs that are not dead are
/// skipped. If Overlap is true, then it also looks for defs that merely
/// overlap the specified register. If TargetRegisterInfo is non-null,
/// then it also checks if there is a def of a super-register.
/// This may also return a register mask operand when Overlap is true.
int findRegisterDefOperandIdx(unsigned Reg,
bool isDead = false, bool Overlap = false,
const TargetRegisterInfo *TRI = nullptr) const;
/// Wrapper for findRegisterDefOperandIdx, it returns
/// a pointer to the MachineOperand rather than an index.
MachineOperand *
findRegisterDefOperand(unsigned Reg, bool isDead = false,
bool Overlap = false,
const TargetRegisterInfo *TRI = nullptr) {
int Idx = findRegisterDefOperandIdx(Reg, isDead, Overlap, TRI);
return (Idx == -1) ? nullptr : &getOperand(Idx);
}
const MachineOperand *
findRegisterDefOperand(unsigned Reg, bool isDead = false,
bool Overlap = false,
const TargetRegisterInfo *TRI = nullptr) const {
return const_cast<MachineInstr *>(this)->findRegisterDefOperand(
Reg, isDead, Overlap, TRI);
}
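// A minimal sketch (Reg is assumed to be an in-scope register known to be
// dead after this instruction):
//
//   if (MachineOperand *MO = MI.findRegisterDefOperand(Reg))
//     MO->setIsDead();
//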
/// Find the index of the first operand in the
/// operand list that is used to represent the predicate. It returns -1 if
/// none is found.
int findFirstPredOperandIdx() const;
/// Find the index of the flag word operand that
/// corresponds to operand OpIdx on an inline asm instruction. Returns -1 if
/// getOperand(OpIdx) does not belong to an inline asm operand group.
///
/// If GroupNo is not NULL, it will receive the number of the operand group
/// containing OpIdx.
///
/// The flag operand is an immediate that can be decoded with methods like
/// InlineAsm::hasRegClassConstraint().
int findInlineAsmFlagIdx(unsigned OpIdx, unsigned *GroupNo = nullptr) const;
/// Compute the static register class constraint for operand OpIdx.
/// For normal instructions, this is derived from the MCInstrDesc.
/// For inline assembly it is derived from the flag words.
///
/// Returns NULL if the static register class constraint cannot be
/// determined.
const TargetRegisterClass*
getRegClassConstraint(unsigned OpIdx,
const TargetInstrInfo *TII,
const TargetRegisterInfo *TRI) const;
/// Applies the constraints (def/use) implied by this MI on \p Reg to
/// the given \p CurRC.
/// If \p ExploreBundle is set and MI is part of a bundle, all the
/// instructions inside the bundle will be taken into account. In other words,
/// this method accumulates all the constraints of the operand of this MI and
/// the related bundle if MI is a bundle or inside a bundle.
///
/// Returns the register class that satisfies both \p CurRC and the
/// constraints set by MI. Returns NULL if such a register class does not
/// exist.
///
/// \pre CurRC must not be NULL.
const TargetRegisterClass *getRegClassConstraintEffectForVReg(
unsigned Reg, const TargetRegisterClass *CurRC,
const TargetInstrInfo *TII, const TargetRegisterInfo *TRI,
bool ExploreBundle = false) const;
/// Applies the constraints (def/use) implied by the \p OpIdx operand
/// to the given \p CurRC.
///
/// Returns the register class that satisfies both \p CurRC and the
/// constraints set by \p OpIdx MI. Returns NULL if such a register class
/// does not exist.
///
/// \pre CurRC must not be NULL.
/// \pre The operand at \p OpIdx must be a register.
const TargetRegisterClass *
getRegClassConstraintEffect(unsigned OpIdx, const TargetRegisterClass *CurRC,
const TargetInstrInfo *TII,
const TargetRegisterInfo *TRI) const;
/// Add a tie between the register operands at DefIdx and UseIdx.
/// The tie will cause the register allocator to ensure that the two
/// operands are assigned the same physical register.
///
/// Tied operands are managed automatically for explicit operands in the
/// MCInstrDesc. This method is for exceptional cases like inline asm.
void tieOperands(unsigned DefIdx, unsigned UseIdx);
/// Given the index of a tied register operand, find the
/// operand it is tied to. Defs are tied to uses and vice versa. Returns the
/// index of the tied operand which must exist.
unsigned findTiedOperandIdx(unsigned OpIdx) const;
/// Given the index of a register def operand,
/// check if the register def is tied to a source operand, due to either
/// two-address elimination or inline assembly constraints. Returns the
/// first tied use operand index by reference if UseOpIdx is not null.
bool isRegTiedToUseOperand(unsigned DefOpIdx,
unsigned *UseOpIdx = nullptr) const {
const MachineOperand &MO = getOperand(DefOpIdx);
if (!MO.isReg() || !MO.isDef() || !MO.isTied())
return false;
if (UseOpIdx)
*UseOpIdx = findTiedOperandIdx(DefOpIdx);
return true;
}
/// Return true if the use operand of the specified index is tied to a def
/// operand. It also returns the def operand index by reference if DefOpIdx
/// is not null.
bool isRegTiedToDefOperand(unsigned UseOpIdx,
unsigned *DefOpIdx = nullptr) const {
const MachineOperand &MO = getOperand(UseOpIdx);
if (!MO.isReg() || !MO.isUse() || !MO.isTied())
return false;
if (DefOpIdx)
*DefOpIdx = findTiedOperandIdx(UseOpIdx);
return true;
}
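// For example:
//
//   unsigned DefIdx;
//   if (MI.isRegTiedToDefOperand(UseIdx, &DefIdx))
//     ...; // getOperand(DefIdx) and getOperand(UseIdx) must be allocated
//          // the same physical register
//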
/// Clears kill flags on all operands.
void clearKillInfo();
/// Replace all occurrences of FromReg with ToReg:SubIdx,
/// properly composing subreg indices where necessary.
void substituteRegister(unsigned FromReg, unsigned ToReg, unsigned SubIdx,
const TargetRegisterInfo &RegInfo);
/// We have determined MI kills a register. Look for the
/// operand that uses it and mark it as IsKill. If AddIfNotFound is true,
/// add an implicit operand if it's not found. Returns true if the operand
/// exists / is added.
bool addRegisterKilled(unsigned IncomingReg,
const TargetRegisterInfo *RegInfo,
bool AddIfNotFound = false);
/// Clear all kill flags affecting Reg. If RegInfo is provided, this includes
/// all aliasing registers.
void clearRegisterKills(unsigned Reg, const TargetRegisterInfo *RegInfo);
/// We have determined MI defined a register without a use.
/// Look for the operand that defines it and mark it as IsDead. If
/// AddIfNotFound is true, add an implicit operand if it's not found. Returns
/// true if the operand exists / is added.
bool addRegisterDead(unsigned Reg, const TargetRegisterInfo *RegInfo,
bool AddIfNotFound = false);
/// Clear all dead flags on operands defining register @p Reg.
void clearRegisterDeads(unsigned Reg);
/// Mark all subregister defs of register @p Reg with the undef flag.
/// This function is used when we determined to have a subregister def in an
/// otherwise undefined super register.
void setRegisterDefReadUndef(unsigned Reg, bool IsUndef = true);
/// We have determined MI defines a register. Make sure there is an operand
/// defining Reg.
void addRegisterDefined(unsigned Reg,
const TargetRegisterInfo *RegInfo = nullptr);
/// Mark every physreg used by this instruction as
/// dead except those in the UsedRegs list.
///
/// On instructions with register mask operands, also add implicit-def
/// operands for all registers in UsedRegs.
void setPhysRegsDeadExcept(ArrayRef<unsigned> UsedRegs,
const TargetRegisterInfo &TRI);
/// Return true if it is safe to move this instruction. If
/// SawStore is set to true, it means that there is a store (or call) between
/// the instruction's location and its intended destination.
bool isSafeToMove(AliasAnalysis *AA, bool &SawStore) const;
/// Returns true if this instruction's memory access aliases the memory
/// access of Other.
///
/// Assumes any physical registers used to compute addresses
/// have the same value for both instructions. Returns false if neither
/// instruction writes to memory.
///
/// @param AA Optional alias analysis, used to compare memory operands.
/// @param Other MachineInstr to check aliasing against.
/// @param UseTBAA Whether to pass TBAA information to alias analysis.
bool mayAlias(AliasAnalysis *AA, const MachineInstr &Other, bool UseTBAA) const;
/// Return true if this instruction may have an ordered
/// or volatile memory reference, or if the information describing the memory
/// reference is not available. Return false if it is known to have no
/// ordered or volatile memory references.
bool hasOrderedMemoryRef() const;
/// Return true if this load instruction never traps and points to a memory
/// location whose value doesn't change during the execution of this function.
///
/// Examples include loading a value from the constant pool or from the
/// argument area of a function (if it does not change). If the instruction
/// does multiple loads, this returns true only if all of the loads are
/// dereferenceable and invariant.
bool isDereferenceableInvariantLoad(AliasAnalysis *AA) const;
/// If the specified instruction is a PHI that always merges together the
/// same virtual register, return the register, otherwise return 0.
unsigned isConstantValuePHI() const;
/// Return true if this instruction has side effects that are not modeled
/// by mayLoad / mayStore, etc.
/// For all instructions, the property is encoded in MCInstrDesc::Flags
/// (see MCInstrDesc::hasUnmodeledSideEffects()). The only exception is the
/// INLINEASM instruction, in which case the side effect property is encoded
/// in one of its operands (see InlineAsm::Extra_HasSideEffect).
///
bool hasUnmodeledSideEffects() const;
/// Returns true if it is illegal to fold a load across this instruction.
bool isLoadFoldBarrier() const;
/// Return true if all the defs of this instruction are dead.
bool allDefsAreDead() const;
/// Return a valid size if the instruction is a spill instruction.
Optional<unsigned> getSpillSize(const TargetInstrInfo *TII) const;
/// Return a valid size if the instruction is a folded spill instruction.
Optional<unsigned> getFoldedSpillSize(const TargetInstrInfo *TII) const;
/// Return a valid size if the instruction is a restore instruction.
Optional<unsigned> getRestoreSize(const TargetInstrInfo *TII) const;
/// Return a valid size if the instruction is a folded restore instruction.
Optional<unsigned>
getFoldedRestoreSize(const TargetInstrInfo *TII) const;
/// Copy implicit register operands from specified
/// instruction to this instruction.
void copyImplicitOps(MachineFunction &MF, const MachineInstr &MI);
/// Debugging support
/// @{
/// Determine the generic type to be printed (if needed) on uses and defs.
LLT getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes,
const MachineRegisterInfo &MRI) const;
/// Return true when an instruction has a tied register that can't be determined
/// by the instruction's descriptor. This is useful for MIR printing, to
/// determine whether we need to print the ties or not.
bool hasComplexRegisterTies() const;
/// Print this MI to \p OS.
/// Don't print information that can be inferred from other instructions if
/// \p IsStandalone is false. It is usually true when only a fragment of the
/// function is printed.
/// Only print the defs and the opcode if \p SkipOpers is true.
/// Otherwise, also print the operands; the debug loc is skipped if
/// \p SkipDebugLoc is true, and otherwise printed with a terminating newline.
/// \p TII is used to print the opcode name. If it's not present, but the
/// MI is in a function, the opcode will be printed using the function's TII.
void print(raw_ostream &OS, bool IsStandalone = true, bool SkipOpers = false,
bool SkipDebugLoc = false, bool AddNewLine = true,
const TargetInstrInfo *TII = nullptr) const;
void print(raw_ostream &OS, ModuleSlotTracker &MST, bool IsStandalone = true,
bool SkipOpers = false, bool SkipDebugLoc = false,
bool AddNewLine = true,
const TargetInstrInfo *TII = nullptr) const;
void dump() const;
/// @}
//===--------------------------------------------------------------------===//
// Accessors used to build up machine instructions.
/// Add the specified operand to the instruction. If it is an implicit
/// operand, it is added to the end of the operand list. If it is an
/// explicit operand it is added at the end of the explicit operand list
/// (before the first implicit operand).
///
/// MF must be the machine function that was used to allocate this
/// instruction.
///
/// MachineInstrBuilder provides a more convenient interface for creating
/// instructions and adding operands.
void addOperand(MachineFunction &MF, const MachineOperand &Op);
/// Add an operand without providing an MF reference. This only works for
/// instructions that are inserted in a basic block.
///
/// MachineInstrBuilder and the two-argument addOperand(MF, MO) should be
/// preferred.
void addOperand(const MachineOperand &Op);
/// Replace the instruction descriptor (thus opcode) of
/// the current instruction with a new one.
void setDesc(const MCInstrDesc &tid) { MCID = &tid; }
/// Replace the current debug location with a new one.
/// Avoid using this; passing the location to the constructor is preferable.
void setDebugLoc(DebugLoc dl) {
debugLoc = std::move(dl);
assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
}
/// Erase an operand from an instruction, leaving it with one
/// fewer operand than it started with.
void RemoveOperand(unsigned OpNo);
/// Clear this MachineInstr's memory reference descriptor list. This resets
/// the memrefs to their most conservative state. This should be used only
/// as a last resort since it greatly pessimizes our knowledge of the memory
/// access performed by the instruction.
void dropMemRefs(MachineFunction &MF);
/// Assign this MachineInstr's memory reference descriptor list.
///
/// Unlike other methods, this *will* allocate them into a new array
/// associated with the provided `MachineFunction`.
void setMemRefs(MachineFunction &MF, ArrayRef<MachineMemOperand *> MemRefs);
/// Add a MachineMemOperand to the machine instruction.
/// This function should be used only occasionally. The setMemRefs function
/// is the primary method for setting up a MachineInstr's MemRefs list.
void addMemOperand(MachineFunction &MF, MachineMemOperand *MO);
/// Clone another MachineInstr's memory reference descriptor list and replace
/// ours with it.
///
/// Note that `*this` may be the incoming MI!
///
/// Prefer this API whenever possible as it can avoid allocations in common
/// cases.
void cloneMemRefs(MachineFunction &MF, const MachineInstr &MI);
/// Clone the merge of multiple MachineInstrs' memory reference descriptors
/// list and replace ours with it.
///
/// Note that `*this` may be one of the incoming MIs!
///
/// Prefer this API whenever possible as it can avoid allocations in common
/// cases.
void cloneMergedMemRefs(MachineFunction &MF,
ArrayRef<const MachineInstr *> MIs);
/// Set a symbol that will be emitted just prior to the instruction itself.
///
/// Setting this to a null pointer will remove any such symbol.
///
/// FIXME: This is not fully implemented yet.
void setPreInstrSymbol(MachineFunction &MF, MCSymbol *Symbol);
/// Set a symbol that will be emitted just after the instruction itself.
///
/// Setting this to a null pointer will remove any such symbol.
///
/// FIXME: This is not fully implemented yet.
void setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol);
/// Clone another MachineInstr's pre- and post-instruction symbols and
/// replace ours with them.
void cloneInstrSymbols(MachineFunction &MF, const MachineInstr &MI);
+ /// Set a marker on instructions that denotes where we should create and emit
+ /// heap alloc site labels. This waits until after instruction selection and
+ /// optimizations to create the label, so it should still work if the
+ /// instruction is removed or duplicated.
+ void setHeapAllocMarker(MachineFunction &MF, MDNode *MD);
+
/// Return the MIFlags which represent both MachineInstrs. This
/// should be used when merging two MachineInstrs into one. This routine does
/// not modify the MIFlags of this MachineInstr.
uint16_t mergeFlagsWith(const MachineInstr& Other) const;
static uint16_t copyFlagsFromInstruction(const Instruction &I);
/// Copy all flags from the given IR instruction to this MachineInstr's MIFlags.
void copyIRFlags(const Instruction &I);
/// Break any tie involving OpIdx.
void untieRegOperand(unsigned OpIdx) {
MachineOperand &MO = getOperand(OpIdx);
if (MO.isReg() && MO.isTied()) {
getOperand(findTiedOperandIdx(OpIdx)).TiedTo = 0;
MO.TiedTo = 0;
}
}
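// A minimal usage sketch (hypothetical pass code, not part of this class):
// two-address instructions tie a use operand to a def, and a rewriting pass
// may need to drop that tie before reassigning the registers independently:
//
//   if (MI.getOperand(1).isTied())
//     MI.untieRegOperand(1); // clears TiedTo on both tied operands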
/// Add all implicit def and use operands to this instruction.
void addImplicitDefUseOperands(MachineFunction &MF);
/// Scan instructions following MI and collect any matching DBG_VALUEs.
void collectDebugValues(SmallVectorImpl<MachineInstr *> &DbgValues);
/// Find all DBG_VALUEs immediately following this instruction that point
/// to a register def in this instruction and point them to \p Reg instead.
void changeDebugValuesDefReg(unsigned Reg);
private:
/// If this instruction is embedded into a MachineFunction, return the
/// MachineRegisterInfo object for the current function, otherwise
/// return null.
MachineRegisterInfo *getRegInfo();
/// Unlink all of the register operands in this instruction from their
/// respective use lists. This requires that the operands already be on their
/// use lists.
void RemoveRegOperandsFromUseLists(MachineRegisterInfo&);
/// Add all of the register operands in this instruction to their
/// respective use lists. This requires that the operands not be on their
/// use lists yet.
void AddRegOperandsToUseLists(MachineRegisterInfo&);
/// Slow path for hasProperty when we're dealing with a bundle.
bool hasPropertyInBundle(uint64_t Mask, QueryType Type) const;
/// Implements the logic of getRegClassConstraintEffectForVReg for
/// this MI and the given operand index \p OpIdx.
/// If the related operand does not constrain Reg, this returns CurRC.
const TargetRegisterClass *getRegClassConstraintEffectForVRegImpl(
unsigned OpIdx, unsigned Reg, const TargetRegisterClass *CurRC,
const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const;
+
+ /// Stores extra instruction information inline or allocates as ExtraInfo
+ /// based on the number of pointers.
+ void setExtraInfo(MachineFunction &MF, ArrayRef<MachineMemOperand *> MMOs,
+ MCSymbol *PreInstrSymbol, MCSymbol *PostInstrSymbol,
+ MDNode *HeapAllocMarker);
};
/// Special DenseMapInfo traits to compare MachineInstr* by *value* of the
/// instruction rather than by pointer value.
/// The hashing and equality testing functions ignore definitions so this is
/// useful for CSE, etc.
struct MachineInstrExpressionTrait : DenseMapInfo<MachineInstr*> {
static inline MachineInstr *getEmptyKey() {
return nullptr;
}
static inline MachineInstr *getTombstoneKey() {
return reinterpret_cast<MachineInstr*>(-1);
}
static unsigned getHashValue(const MachineInstr* const &MI);
static bool isEqual(const MachineInstr* const &LHS,
const MachineInstr* const &RHS) {
if (RHS == getEmptyKey() || RHS == getTombstoneKey() ||
LHS == getEmptyKey() || LHS == getTombstoneKey())
return LHS == RHS;
return LHS->isIdenticalTo(*RHS, MachineInstr::IgnoreVRegDefs);
}
};
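// A minimal usage sketch (illustrative, assuming a CSE-style client): the
// trait slots into DenseMap's key-info parameter so instructions hash and
// compare by structure rather than by pointer identity:
//
//   DenseMap<MachineInstr *, unsigned, MachineInstrExpressionTrait> CSEMap;
//   CSEMap[MI] = VN; // MI and any isIdenticalTo() twin map to the same entry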
//===----------------------------------------------------------------------===//
// Debugging Support
inline raw_ostream& operator<<(raw_ostream &OS, const MachineInstr &MI) {
MI.print(OS);
return OS;
}
} // end namespace llvm
#endif // LLVM_CODEGEN_MACHINEINSTR_H
Index: stable/12/contrib/llvm-project/llvm/include/llvm/CodeGen/StackProtector.h
===================================================================
--- stable/12/contrib/llvm-project/llvm/include/llvm/CodeGen/StackProtector.h (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/include/llvm/CodeGen/StackProtector.h (revision 356465)
@@ -1,117 +1,118 @@
//===- StackProtector.h - Stack Protector Insertion -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass inserts stack protectors into functions which need them. A variable
// with a random value in it is stored onto the stack before the local variables
// are allocated. Upon exiting the block, the stored value is checked. If it's
// changed, then there was some sort of violation and the program aborts.
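//
// A sketch of the net effect in C-like pseudocode (illustrative only; the
// pass emits the equivalent IR):
//
//   void f() {
//     uintptr_t guard = __stack_chk_guard;  // prologue: stash the guard
//     char buf[64];                         // protected locals
//     ...
//     if (guard != __stack_chk_guard)       // epilogue: re-check the copy
//       __stack_chk_fail();                 // corrupted => abort
//   }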
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_STACKPROTECTOR_H
#define LLVM_CODEGEN_STACKPROTECTOR_H
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Pass.h"
namespace llvm {
class BasicBlock;
class DominatorTree;
class Function;
class Instruction;
class Module;
class TargetLoweringBase;
class TargetMachine;
class Type;
class StackProtector : public FunctionPass {
private:
/// A mapping of AllocaInsts to their required SSP layout.
using SSPLayoutMap = DenseMap<const AllocaInst *,
MachineFrameInfo::SSPLayoutKind>;
const TargetMachine *TM = nullptr;
/// TLI - Keep a pointer of a TargetLowering to consult for determining
/// target type sizes.
const TargetLoweringBase *TLI = nullptr;
Triple Trip;
Function *F;
Module *M;
DominatorTree *DT;
/// Layout - Mapping of allocations to the required SSPLayoutKind.
/// StackProtector analysis will update this map when determining if an
/// AllocaInst triggers a stack protector.
SSPLayoutMap Layout;
/// The minimum size of buffers that will receive stack smashing
/// protection when -fstack-protector is used.
unsigned SSPBufferSize = 0;
// A prologue is generated.
bool HasPrologue = false;
// IR checking code is generated.
bool HasIRCheck = false;
/// InsertStackProtectors - Insert code into the prologue and epilogue of
/// the function.
///
/// - The prologue code loads and stores the stack guard onto the stack.
/// - The epilogue checks the value stored in the prologue against the
/// original value. It calls __stack_chk_fail if they differ.
bool InsertStackProtectors();
/// CreateFailBB - Create a basic block to jump to when the stack protector
/// check fails.
BasicBlock *CreateFailBB();
/// ContainsProtectableArray - Check whether the type either is an array or
/// contains an array of sufficient size so that we need stack protectors
/// for it.
/// \param [out] IsLarge is set to true if a protectable array is found and
/// it is "large" ( >= ssp-buffer-size). In the case of a structure with
/// multiple arrays, this gets set if any of them is large.
bool ContainsProtectableArray(Type *Ty, bool &IsLarge, bool Strong = false,
bool InStruct = false) const;
/// Check whether a stack allocation has its address taken.
- bool HasAddressTaken(const Instruction *AI);
+ bool HasAddressTaken(const Instruction *AI,
+ SmallPtrSetImpl<const PHINode *> &VisitedPHIs);
/// RequiresStackProtector - Check whether or not this function needs a
/// stack protector based upon the stack protector level.
bool RequiresStackProtector();
public:
static char ID; // Pass identification, replacement for typeid.
StackProtector() : FunctionPass(ID), SSPBufferSize(8) {
initializeStackProtectorPass(*PassRegistry::getPassRegistry());
}
void getAnalysisUsage(AnalysisUsage &AU) const override;
// Return true if StackProtector is supposed to be handled by SelectionDAG.
bool shouldEmitSDCheck(const BasicBlock &BB) const;
bool runOnFunction(Function &Fn) override;
void copyToMachineFrameInfo(MachineFrameInfo &MFI) const;
};
} // end namespace llvm
#endif // LLVM_CODEGEN_STACKPROTECTOR_H
Index: stable/12/contrib/llvm-project/llvm/include/llvm/Demangle/MicrosoftDemangleNodes.h
===================================================================
--- stable/12/contrib/llvm-project/llvm/include/llvm/Demangle/MicrosoftDemangleNodes.h (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/include/llvm/Demangle/MicrosoftDemangleNodes.h (revision 356465)
@@ -1,626 +1,628 @@
//===- MicrosoftDemangleNodes.h ---------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the AST nodes used in the MSVC demangler.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_SUPPORT_MICROSOFTDEMANGLENODES_H
#define LLVM_SUPPORT_MICROSOFTDEMANGLENODES_H
#include "llvm/Demangle/DemangleConfig.h"
#include "llvm/Demangle/StringView.h"
#include <array>
+#include <cstdint>
+#include <string>
namespace llvm {
namespace itanium_demangle {
class OutputStream;
}
}
using llvm::itanium_demangle::OutputStream;
using llvm::itanium_demangle::StringView;
namespace llvm {
namespace ms_demangle {
// Storage classes
enum Qualifiers : uint8_t {
Q_None = 0,
Q_Const = 1 << 0,
Q_Volatile = 1 << 1,
Q_Far = 1 << 2,
Q_Huge = 1 << 3,
Q_Unaligned = 1 << 4,
Q_Restrict = 1 << 5,
Q_Pointer64 = 1 << 6
};
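// Qualifiers is a bitmask, so values combine with bitwise-or: a mangled
// "const volatile" entity, for example, carries (Q_Const | Q_Volatile).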
enum class StorageClass : uint8_t {
None,
PrivateStatic,
ProtectedStatic,
PublicStatic,
Global,
FunctionLocalStatic,
};
enum class PointerAffinity { None, Pointer, Reference, RValueReference };
enum class FunctionRefQualifier { None, Reference, RValueReference };
// Calling conventions
enum class CallingConv : uint8_t {
None,
Cdecl,
Pascal,
Thiscall,
Stdcall,
Fastcall,
Clrcall,
Eabi,
Vectorcall,
Regcall,
};
enum class ReferenceKind : uint8_t { None, LValueRef, RValueRef };
enum OutputFlags {
OF_Default = 0,
OF_NoCallingConvention = 1,
OF_NoTagSpecifier = 2,
};
// Types
enum class PrimitiveKind {
Void,
Bool,
Char,
Schar,
Uchar,
Char8,
Char16,
Char32,
Short,
Ushort,
Int,
Uint,
Long,
Ulong,
Int64,
Uint64,
Wchar,
Float,
Double,
Ldouble,
Nullptr,
};
enum class CharKind {
Char,
Char16,
Char32,
Wchar,
};
enum class IntrinsicFunctionKind : uint8_t {
None,
New, // ?2 # operator new
Delete, // ?3 # operator delete
Assign, // ?4 # operator=
RightShift, // ?5 # operator>>
LeftShift, // ?6 # operator<<
LogicalNot, // ?7 # operator!
Equals, // ?8 # operator==
NotEquals, // ?9 # operator!=
ArraySubscript, // ?A # operator[]
Pointer, // ?C # operator->
Dereference, // ?D # operator*
Increment, // ?E # operator++
Decrement, // ?F # operator--
Minus, // ?G # operator-
Plus, // ?H # operator+
BitwiseAnd, // ?I # operator&
MemberPointer, // ?J # operator->*
Divide, // ?K # operator/
Modulus, // ?L # operator%
LessThan, // ?M operator<
LessThanEqual, // ?N operator<=
GreaterThan, // ?O operator>
GreaterThanEqual, // ?P operator>=
Comma, // ?Q operator,
Parens, // ?R operator()
BitwiseNot, // ?S operator~
BitwiseXor, // ?T operator^
BitwiseOr, // ?U operator|
LogicalAnd, // ?V operator&&
LogicalOr, // ?W operator||
TimesEqual, // ?X operator*=
PlusEqual, // ?Y operator+=
MinusEqual, // ?Z operator-=
DivEqual, // ?_0 operator/=
ModEqual, // ?_1 operator%=
RshEqual, // ?_2 operator>>=
LshEqual, // ?_3 operator<<=
BitwiseAndEqual, // ?_4 operator&=
BitwiseOrEqual, // ?_5 operator|=
BitwiseXorEqual, // ?_6 operator^=
VbaseDtor, // ?_D # vbase destructor
VecDelDtor, // ?_E # vector deleting destructor
DefaultCtorClosure, // ?_F # default constructor closure
ScalarDelDtor, // ?_G # scalar deleting destructor
VecCtorIter, // ?_H # vector constructor iterator
VecDtorIter, // ?_I # vector destructor iterator
VecVbaseCtorIter, // ?_J # vector vbase constructor iterator
VdispMap, // ?_K # virtual displacement map
EHVecCtorIter, // ?_L # eh vector constructor iterator
EHVecDtorIter, // ?_M # eh vector destructor iterator
EHVecVbaseCtorIter, // ?_N # eh vector vbase constructor iterator
CopyCtorClosure, // ?_O # copy constructor closure
LocalVftableCtorClosure, // ?_T # local vftable constructor closure
ArrayNew, // ?_U operator new[]
ArrayDelete, // ?_V operator delete[]
ManVectorCtorIter, // ?__A managed vector ctor iterator
ManVectorDtorIter, // ?__B managed vector dtor iterator
EHVectorCopyCtorIter, // ?__C EH vector copy ctor iterator
EHVectorVbaseCopyCtorIter, // ?__D EH vector vbase copy ctor iterator
VectorCopyCtorIter, // ?__G vector copy constructor iterator
VectorVbaseCopyCtorIter, // ?__H vector vbase copy constructor iterator
ManVectorVbaseCopyCtorIter, // ?__I managed vector vbase copy constructor
CoAwait, // ?__L operator co_await
Spaceship, // ?__M operator<=>
MaxIntrinsic
};
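// The "?x" code in each row above is the token that appears in the mangled
// name: for example (illustrative), a symbol whose operator code is "?8"
// demangles to an operator==, and one carrying "?_U" to an operator new[].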
enum class SpecialIntrinsicKind {
None,
Vftable,
Vbtable,
Typeof,
VcallThunk,
LocalStaticGuard,
StringLiteralSymbol,
UdtReturning,
Unknown,
DynamicInitializer,
DynamicAtexitDestructor,
RttiTypeDescriptor,
RttiBaseClassDescriptor,
RttiBaseClassArray,
RttiClassHierarchyDescriptor,
RttiCompleteObjLocator,
LocalVftable,
LocalStaticThreadGuard,
};
// Function classes
enum FuncClass : uint16_t {
FC_None = 0,
FC_Public = 1 << 0,
FC_Protected = 1 << 1,
FC_Private = 1 << 2,
FC_Global = 1 << 3,
FC_Static = 1 << 4,
FC_Virtual = 1 << 5,
FC_Far = 1 << 6,
FC_ExternC = 1 << 7,
FC_NoParameterList = 1 << 8,
FC_VirtualThisAdjust = 1 << 9,
FC_VirtualThisAdjustEx = 1 << 10,
FC_StaticThisAdjust = 1 << 11,
};
enum class TagKind { Class, Struct, Union, Enum };
enum class NodeKind {
Unknown,
Md5Symbol,
PrimitiveType,
FunctionSignature,
Identifier,
NamedIdentifier,
VcallThunkIdentifier,
LocalStaticGuardIdentifier,
IntrinsicFunctionIdentifier,
ConversionOperatorIdentifier,
DynamicStructorIdentifier,
StructorIdentifier,
LiteralOperatorIdentifier,
ThunkSignature,
PointerType,
TagType,
ArrayType,
Custom,
IntrinsicType,
NodeArray,
QualifiedName,
TemplateParameterReference,
EncodedStringLiteral,
IntegerLiteral,
RttiBaseClassDescriptor,
LocalStaticGuardVariable,
FunctionSymbol,
VariableSymbol,
SpecialTableSymbol
};
struct Node {
explicit Node(NodeKind K) : Kind(K) {}
virtual ~Node() = default;
NodeKind kind() const { return Kind; }
virtual void output(OutputStream &OS, OutputFlags Flags) const = 0;
std::string toString(OutputFlags Flags = OF_Default) const;
private:
NodeKind Kind;
};
struct TypeNode;
struct PrimitiveTypeNode;
struct FunctionSignatureNode;
struct IdentifierNode;
struct NamedIdentifierNode;
struct VcallThunkIdentifierNode;
struct IntrinsicFunctionIdentifierNode;
struct LiteralOperatorIdentifierNode;
struct ConversionOperatorIdentifierNode;
struct StructorIdentifierNode;
struct ThunkSignatureNode;
struct PointerTypeNode;
struct ArrayTypeNode;
struct CustomNode;
struct TagTypeNode;
struct IntrinsicTypeNode;
struct NodeArrayNode;
struct QualifiedNameNode;
struct TemplateParameterReferenceNode;
struct EncodedStringLiteralNode;
struct IntegerLiteralNode;
struct RttiBaseClassDescriptorNode;
struct LocalStaticGuardVariableNode;
struct SymbolNode;
struct FunctionSymbolNode;
struct VariableSymbolNode;
struct SpecialTableSymbolNode;
struct TypeNode : public Node {
explicit TypeNode(NodeKind K) : Node(K) {}
virtual void outputPre(OutputStream &OS, OutputFlags Flags) const = 0;
virtual void outputPost(OutputStream &OS, OutputFlags Flags) const = 0;
void output(OutputStream &OS, OutputFlags Flags) const override {
outputPre(OS, Flags);
outputPost(OS, Flags);
}
void outputQuals(bool SpaceBefore, bool SpaceAfter) const;
Qualifiers Quals = Q_None;
};
struct PrimitiveTypeNode : public TypeNode {
explicit PrimitiveTypeNode(PrimitiveKind K)
: TypeNode(NodeKind::PrimitiveType), PrimKind(K) {}
void outputPre(OutputStream &OS, OutputFlags Flags) const;
void outputPost(OutputStream &OS, OutputFlags Flags) const {}
PrimitiveKind PrimKind;
};
struct FunctionSignatureNode : public TypeNode {
explicit FunctionSignatureNode(NodeKind K) : TypeNode(K) {}
FunctionSignatureNode() : TypeNode(NodeKind::FunctionSignature) {}
void outputPre(OutputStream &OS, OutputFlags Flags) const override;
void outputPost(OutputStream &OS, OutputFlags Flags) const override;
// Valid if this FunctionTypeNode is the Pointee of a PointerType or
// MemberPointerType.
PointerAffinity Affinity = PointerAffinity::None;
// The function's calling convention.
CallingConv CallConvention = CallingConv::None;
// Function flags (global, public, etc.)
FuncClass FunctionClass = FC_Global;
FunctionRefQualifier RefQualifier = FunctionRefQualifier::None;
// The return type of the function.
TypeNode *ReturnType = nullptr;
// True if this is a C-style ... varargs function.
bool IsVariadic = false;
// Function parameters
NodeArrayNode *Params = nullptr;
// True if the function type is noexcept.
bool IsNoexcept = false;
};
struct IdentifierNode : public Node {
explicit IdentifierNode(NodeKind K) : Node(K) {}
NodeArrayNode *TemplateParams = nullptr;
protected:
void outputTemplateParameters(OutputStream &OS, OutputFlags Flags) const;
};
struct VcallThunkIdentifierNode : public IdentifierNode {
VcallThunkIdentifierNode() : IdentifierNode(NodeKind::VcallThunkIdentifier) {}
void output(OutputStream &OS, OutputFlags Flags) const override;
uint64_t OffsetInVTable = 0;
};
struct DynamicStructorIdentifierNode : public IdentifierNode {
DynamicStructorIdentifierNode()
: IdentifierNode(NodeKind::DynamicStructorIdentifier) {}
void output(OutputStream &OS, OutputFlags Flags) const override;
VariableSymbolNode *Variable = nullptr;
QualifiedNameNode *Name = nullptr;
bool IsDestructor = false;
};
struct NamedIdentifierNode : public IdentifierNode {
NamedIdentifierNode() : IdentifierNode(NodeKind::NamedIdentifier) {}
void output(OutputStream &OS, OutputFlags Flags) const override;
StringView Name;
};
struct IntrinsicFunctionIdentifierNode : public IdentifierNode {
explicit IntrinsicFunctionIdentifierNode(IntrinsicFunctionKind Operator)
: IdentifierNode(NodeKind::IntrinsicFunctionIdentifier),
Operator(Operator) {}
void output(OutputStream &OS, OutputFlags Flags) const override;
IntrinsicFunctionKind Operator;
};
struct LiteralOperatorIdentifierNode : public IdentifierNode {
LiteralOperatorIdentifierNode()
: IdentifierNode(NodeKind::LiteralOperatorIdentifier) {}
void output(OutputStream &OS, OutputFlags Flags) const override;
StringView Name;
};
struct LocalStaticGuardIdentifierNode : public IdentifierNode {
LocalStaticGuardIdentifierNode()
: IdentifierNode(NodeKind::LocalStaticGuardIdentifier) {}
void output(OutputStream &OS, OutputFlags Flags) const override;
bool IsThread = false;
uint32_t ScopeIndex = 0;
};
struct ConversionOperatorIdentifierNode : public IdentifierNode {
ConversionOperatorIdentifierNode()
: IdentifierNode(NodeKind::ConversionOperatorIdentifier) {}
void output(OutputStream &OS, OutputFlags Flags) const override;
// The type that this operator converts to.
TypeNode *TargetType = nullptr;
};
struct StructorIdentifierNode : public IdentifierNode {
StructorIdentifierNode() : IdentifierNode(NodeKind::StructorIdentifier) {}
explicit StructorIdentifierNode(bool IsDestructor)
: IdentifierNode(NodeKind::StructorIdentifier),
IsDestructor(IsDestructor) {}
void output(OutputStream &OS, OutputFlags Flags) const override;
// The name of the class that this is a structor of.
IdentifierNode *Class = nullptr;
bool IsDestructor = false;
};
struct ThunkSignatureNode : public FunctionSignatureNode {
ThunkSignatureNode() : FunctionSignatureNode(NodeKind::ThunkSignature) {}
void outputPre(OutputStream &OS, OutputFlags Flags) const override;
void outputPost(OutputStream &OS, OutputFlags Flags) const override;
struct ThisAdjustor {
uint32_t StaticOffset = 0;
int32_t VBPtrOffset = 0;
int32_t VBOffsetOffset = 0;
int32_t VtordispOffset = 0;
};
ThisAdjustor ThisAdjust;
};
struct PointerTypeNode : public TypeNode {
PointerTypeNode() : TypeNode(NodeKind::PointerType) {}
void outputPre(OutputStream &OS, OutputFlags Flags) const override;
void outputPost(OutputStream &OS, OutputFlags Flags) const override;
// Is this a pointer, reference, or rvalue-reference?
PointerAffinity Affinity = PointerAffinity::None;
// If this is a member pointer, this is the class that the member is in.
QualifiedNameNode *ClassParent = nullptr;
// Represents a type X in "a pointer to X", "a reference to X", or
// "rvalue-reference to X"
TypeNode *Pointee = nullptr;
};
struct TagTypeNode : public TypeNode {
explicit TagTypeNode(TagKind Tag) : TypeNode(NodeKind::TagType), Tag(Tag) {}
void outputPre(OutputStream &OS, OutputFlags Flags) const;
void outputPost(OutputStream &OS, OutputFlags Flags) const;
QualifiedNameNode *QualifiedName = nullptr;
TagKind Tag;
};
struct ArrayTypeNode : public TypeNode {
ArrayTypeNode() : TypeNode(NodeKind::ArrayType) {}
void outputPre(OutputStream &OS, OutputFlags Flags) const;
void outputPost(OutputStream &OS, OutputFlags Flags) const;
void outputDimensionsImpl(OutputStream &OS, OutputFlags Flags) const;
void outputOneDimension(OutputStream &OS, OutputFlags Flags, Node *N) const;
// A list of array dimensions. e.g. [3,4,5] in `int Foo[3][4][5]`
NodeArrayNode *Dimensions = nullptr;
// The type of array element.
TypeNode *ElementType = nullptr;
};
struct IntrinsicNode : public TypeNode {
IntrinsicNode() : TypeNode(NodeKind::IntrinsicType) {}
void output(OutputStream &OS, OutputFlags Flags) const override {}
};
struct CustomTypeNode : public TypeNode {
CustomTypeNode() : TypeNode(NodeKind::Custom) {}
void outputPre(OutputStream &OS, OutputFlags Flags) const override;
void outputPost(OutputStream &OS, OutputFlags Flags) const override;
IdentifierNode *Identifier;
};
struct NodeArrayNode : public Node {
NodeArrayNode() : Node(NodeKind::NodeArray) {}
void output(OutputStream &OS, OutputFlags Flags) const override;
void output(OutputStream &OS, OutputFlags Flags, StringView Separator) const;
Node **Nodes = nullptr;
size_t Count = 0;
};
struct QualifiedNameNode : public Node {
QualifiedNameNode() : Node(NodeKind::QualifiedName) {}
void output(OutputStream &OS, OutputFlags Flags) const override;
NodeArrayNode *Components = nullptr;
IdentifierNode *getUnqualifiedIdentifier() {
Node *LastComponent = Components->Nodes[Components->Count - 1];
return static_cast<IdentifierNode *>(LastComponent);
}
};
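// Illustrative layout (assumed input): for a demangled name a::b::c,
// Components holds the nodes for "a", "b", "c" in order, and
// getUnqualifiedIdentifier() returns the node for "c".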
struct TemplateParameterReferenceNode : public Node {
TemplateParameterReferenceNode()
: Node(NodeKind::TemplateParameterReference) {}
void output(OutputStream &OS, OutputFlags Flags) const override;
SymbolNode *Symbol = nullptr;
int ThunkOffsetCount = 0;
std::array<int64_t, 3> ThunkOffsets;
PointerAffinity Affinity = PointerAffinity::None;
bool IsMemberPointer = false;
};
struct IntegerLiteralNode : public Node {
IntegerLiteralNode() : Node(NodeKind::IntegerLiteral) {}
IntegerLiteralNode(uint64_t Value, bool IsNegative)
: Node(NodeKind::IntegerLiteral), Value(Value), IsNegative(IsNegative) {}
void output(OutputStream &OS, OutputFlags Flags) const override;
uint64_t Value = 0;
bool IsNegative = false;
};
struct RttiBaseClassDescriptorNode : public IdentifierNode {
RttiBaseClassDescriptorNode()
: IdentifierNode(NodeKind::RttiBaseClassDescriptor) {}
void output(OutputStream &OS, OutputFlags Flags) const override;
uint32_t NVOffset = 0;
int32_t VBPtrOffset = 0;
uint32_t VBTableOffset = 0;
uint32_t Flags = 0;
};
struct SymbolNode : public Node {
explicit SymbolNode(NodeKind K) : Node(K) {}
void output(OutputStream &OS, OutputFlags Flags) const override;
QualifiedNameNode *Name = nullptr;
};
struct SpecialTableSymbolNode : public SymbolNode {
explicit SpecialTableSymbolNode()
: SymbolNode(NodeKind::SpecialTableSymbol) {}
void output(OutputStream &OS, OutputFlags Flags) const override;
QualifiedNameNode *TargetName = nullptr;
Qualifiers Quals;
};
struct LocalStaticGuardVariableNode : public SymbolNode {
LocalStaticGuardVariableNode()
: SymbolNode(NodeKind::LocalStaticGuardVariable) {}
void output(OutputStream &OS, OutputFlags Flags) const override;
bool IsVisible = false;
};
struct EncodedStringLiteralNode : public SymbolNode {
EncodedStringLiteralNode() : SymbolNode(NodeKind::EncodedStringLiteral) {}
void output(OutputStream &OS, OutputFlags Flags) const override;
StringView DecodedString;
bool IsTruncated = false;
CharKind Char = CharKind::Char;
};
struct VariableSymbolNode : public SymbolNode {
VariableSymbolNode() : SymbolNode(NodeKind::VariableSymbol) {}
void output(OutputStream &OS, OutputFlags Flags) const override;
StorageClass SC = StorageClass::None;
TypeNode *Type = nullptr;
};
struct FunctionSymbolNode : public SymbolNode {
FunctionSymbolNode() : SymbolNode(NodeKind::FunctionSymbol) {}
void output(OutputStream &OS, OutputFlags Flags) const override;
FunctionSignatureNode *Signature = nullptr;
};
} // namespace ms_demangle
} // namespace llvm
#endif
Index: stable/12/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/GVN.h
===================================================================
--- stable/12/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/GVN.h (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/GVN.h (revision 356465)
@@ -1,312 +1,314 @@
//===- GVN.h - Eliminate redundant values and loads -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides the interface for LLVM's Global Value Numbering pass
/// which eliminates fully redundant instructions. It also does somewhat Ad-Hoc
/// PRE and dead load elimination.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_TRANSFORMS_SCALAR_GVN_H
#define LLVM_TRANSFORMS_SCALAR_GVN_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/InstructionPrecedenceTracking.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Compiler.h"
#include <cstdint>
#include <utility>
#include <vector>
namespace llvm {
class AssumptionCache;
class BasicBlock;
class BranchInst;
class CallInst;
class Constant;
class ExtractValueInst;
class Function;
class FunctionPass;
class IntrinsicInst;
class LoadInst;
class LoopInfo;
class OptimizationRemarkEmitter;
class PHINode;
class TargetLibraryInfo;
class Value;
/// A private "module" namespace for types and utilities used by GVN. These
/// are implementation details and should not be used by clients.
namespace gvn LLVM_LIBRARY_VISIBILITY {
struct AvailableValue;
struct AvailableValueInBlock;
class GVNLegacyPass;
} // end namespace gvn
/// The core GVN pass object.
///
/// FIXME: We should have a good summary of the GVN algorithm implemented by
/// this particular pass here.
class GVN : public PassInfoMixin<GVN> {
public:
struct Expression;
/// Run the pass over the function.
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
/// This removes the specified instruction from
/// our various maps and marks it for deletion.
void markInstructionForDeletion(Instruction *I) {
VN.erase(I);
InstrsToErase.push_back(I);
}
DominatorTree &getDominatorTree() const { return *DT; }
AliasAnalysis *getAliasAnalysis() const { return VN.getAliasAnalysis(); }
MemoryDependenceResults &getMemDep() const { return *MD; }
/// This class holds the mapping between values and value numbers. It is used
/// as an efficient mechanism to determine the expression-wise equivalence of
/// two values.
class ValueTable {
DenseMap<Value *, uint32_t> valueNumbering;
DenseMap<Expression, uint32_t> expressionNumbering;
// Expressions is the vector of Expression objects. ExprIdx is the mapping
// from a value number to the index of its Expression in Expressions. We use
// it instead of a DenseMap because filling such a mapping is faster than
// filling a DenseMap, and the compile time is a little better.
uint32_t nextExprNumber;
std::vector<Expression> Expressions;
std::vector<uint32_t> ExprIdx;
// Value number to PHINode mapping. Used for phi-translate in scalar PRE.
DenseMap<uint32_t, PHINode *> NumberingPhi;
// Cache for phi-translate in scalar PRE.
using PhiTranslateMap =
DenseMap<std::pair<uint32_t, const BasicBlock *>, uint32_t>;
PhiTranslateMap PhiTranslateTable;
AliasAnalysis *AA;
MemoryDependenceResults *MD;
DominatorTree *DT;
uint32_t nextValueNumber = 1;
Expression createExpr(Instruction *I);
Expression createCmpExpr(unsigned Opcode, CmpInst::Predicate Predicate,
Value *LHS, Value *RHS);
Expression createExtractvalueExpr(ExtractValueInst *EI);
uint32_t lookupOrAddCall(CallInst *C);
uint32_t phiTranslateImpl(const BasicBlock *BB, const BasicBlock *PhiBlock,
uint32_t Num, GVN &Gvn);
+ bool areCallValsEqual(uint32_t Num, uint32_t NewNum, const BasicBlock *Pred,
+ const BasicBlock *PhiBlock, GVN &Gvn);
std::pair<uint32_t, bool> assignExpNewValueNum(Expression &exp);
bool areAllValsInBB(uint32_t num, const BasicBlock *BB, GVN &Gvn);
public:
ValueTable();
ValueTable(const ValueTable &Arg);
ValueTable(ValueTable &&Arg);
~ValueTable();
uint32_t lookupOrAdd(Value *V);
uint32_t lookup(Value *V, bool Verify = true) const;
uint32_t lookupOrAddCmp(unsigned Opcode, CmpInst::Predicate Pred,
Value *LHS, Value *RHS);
uint32_t phiTranslate(const BasicBlock *BB, const BasicBlock *PhiBlock,
uint32_t Num, GVN &Gvn);
void eraseTranslateCacheEntry(uint32_t Num, const BasicBlock &CurrBlock);
bool exists(Value *V) const;
void add(Value *V, uint32_t num);
void clear();
void erase(Value *v);
void setAliasAnalysis(AliasAnalysis *A) { AA = A; }
AliasAnalysis *getAliasAnalysis() const { return AA; }
void setMemDep(MemoryDependenceResults *M) { MD = M; }
void setDomTree(DominatorTree *D) { DT = D; }
uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
void verifyRemoved(const Value *) const;
};
private:
friend class gvn::GVNLegacyPass;
friend struct DenseMapInfo<Expression>;
MemoryDependenceResults *MD;
DominatorTree *DT;
const TargetLibraryInfo *TLI;
AssumptionCache *AC;
SetVector<BasicBlock *> DeadBlocks;
OptimizationRemarkEmitter *ORE;
ImplicitControlFlowTracking *ICF;
ValueTable VN;
/// A mapping from value numbers to lists of Value*'s that
/// have that value number. Use findLeader to query it.
struct LeaderTableEntry {
Value *Val;
const BasicBlock *BB;
LeaderTableEntry *Next;
};
DenseMap<uint32_t, LeaderTableEntry> LeaderTable;
BumpPtrAllocator TableAllocator;
// Block-local map of equivalent values to their leader; it does not
// propagate to any successors. Entries added mid-block are applied
// to the remaining instructions in the block.
SmallMapVector<Value *, Constant *, 4> ReplaceWithConstMap;
SmallVector<Instruction *, 8> InstrsToErase;
// Map each block to its reverse postorder traversal number. It is used to
// find back edges easily.
DenseMap<AssertingVH<BasicBlock>, uint32_t> BlockRPONumber;
// This is set 'true' initially and also when new blocks have been added to
// the function being analyzed. This boolean is used to control the updating
// of BlockRPONumber prior to accessing the contents of BlockRPONumber.
bool InvalidBlockRPONumbers = true;
using LoadDepVect = SmallVector<NonLocalDepResult, 64>;
using AvailValInBlkVect = SmallVector<gvn::AvailableValueInBlock, 64>;
using UnavailBlkVect = SmallVector<BasicBlock *, 64>;
bool runImpl(Function &F, AssumptionCache &RunAC, DominatorTree &RunDT,
const TargetLibraryInfo &RunTLI, AAResults &RunAA,
MemoryDependenceResults *RunMD, LoopInfo *LI,
OptimizationRemarkEmitter *ORE);
/// Push a new Value to the LeaderTable onto the list for its value number.
void addToLeaderTable(uint32_t N, Value *V, const BasicBlock *BB) {
LeaderTableEntry &Curr = LeaderTable[N];
if (!Curr.Val) {
Curr.Val = V;
Curr.BB = BB;
return;
}
LeaderTableEntry *Node = TableAllocator.Allocate<LeaderTableEntry>();
Node->Val = V;
Node->BB = BB;
Node->Next = Curr.Next;
Curr.Next = Node;
}
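// Illustrative bucket layout (exposition only): the head entry keeps the
// first value ever added for N, and each later value is spliced in right
// after the head, so adding V1, V2, V3 for the same N yields
//   LeaderTable[N]: {V1} -> {V3} -> {V2}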
/// Scan the list of values corresponding to a given
/// value number, and remove the given instruction if encountered.
void removeFromLeaderTable(uint32_t N, Instruction *I, BasicBlock *BB) {
LeaderTableEntry *Prev = nullptr;
LeaderTableEntry *Curr = &LeaderTable[N];
while (Curr && (Curr->Val != I || Curr->BB != BB)) {
Prev = Curr;
Curr = Curr->Next;
}
if (!Curr)
return;
if (Prev) {
Prev->Next = Curr->Next;
} else {
if (!Curr->Next) {
Curr->Val = nullptr;
Curr->BB = nullptr;
} else {
LeaderTableEntry *Next = Curr->Next;
Curr->Val = Next->Val;
Curr->BB = Next->BB;
Curr->Next = Next->Next;
}
}
}
// List of critical edges to be split between iterations.
SmallVector<std::pair<Instruction *, unsigned>, 4> toSplit;
// Helper functions of redundant load elimination
bool processLoad(LoadInst *L);
bool processNonLocalLoad(LoadInst *L);
bool processAssumeIntrinsic(IntrinsicInst *II);
/// Given a local dependency (Def or Clobber), determine if a value is
/// available for the load. Returns true if a value is known to be
/// available and populates Res. Returns false otherwise.
bool AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo,
Value *Address, gvn::AvailableValue &Res);
/// Given a list of non-local dependencies, determine if a value is
/// available for the load in each specified block. If it is, add it to
/// ValuesPerBlock. If not, add it to UnavailableBlocks.
void AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
AvailValInBlkVect &ValuesPerBlock,
UnavailBlkVect &UnavailableBlocks);
bool PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
UnavailBlkVect &UnavailableBlocks);
// Other helper routines
bool processInstruction(Instruction *I);
bool processBlock(BasicBlock *BB);
void dump(DenseMap<uint32_t, Value *> &d) const;
bool iterateOnFunction(Function &F);
bool performPRE(Function &F);
bool performScalarPRE(Instruction *I);
bool performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
BasicBlock *Curr, unsigned int ValNo);
Value *findLeader(const BasicBlock *BB, uint32_t num);
void cleanupGlobalSets();
void fillImplicitControlFlowInfo(BasicBlock *BB);
void verifyRemoved(const Instruction *I) const;
bool splitCriticalEdges();
BasicBlock *splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ);
bool replaceOperandsWithConsts(Instruction *I) const;
bool propagateEquality(Value *LHS, Value *RHS, const BasicBlockEdge &Root,
bool DominatesByEdge);
bool processFoldableCondBr(BranchInst *BI);
void addDeadBlock(BasicBlock *BB);
void assignValNumForDeadCode();
void assignBlockRPONumber(Function &F);
};
/// Create a legacy GVN pass. This also allows parameterizing whether or not
/// loads are eliminated by the pass.
FunctionPass *createGVNPass(bool NoLoads = false);
/// A simple and fast domtree-based GVN pass to hoist common expressions
/// from sibling branches.
struct GVNHoistPass : PassInfoMixin<GVNHoistPass> {
/// Run the pass over the function.
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
/// Uses an "inverted" value numbering to decide the similarity of
/// expressions and sinks similar expressions into successors.
struct GVNSinkPass : PassInfoMixin<GVNSinkPass> {
/// Run the pass over the function.
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
} // end namespace llvm
#endif // LLVM_TRANSFORMS_SCALAR_GVN_H
Index: stable/12/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
===================================================================
--- stable/12/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp (revision 356465)
@@ -1,3144 +1,3158 @@
//===- llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains support for writing Microsoft CodeView debug info.
//
//===----------------------------------------------------------------------===//
#include "CodeViewDebug.h"
#include "DwarfExpression.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/COFF.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/LexicalScopes.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/DebugInfo/CodeView/CVTypeVisitor.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/CodeViewRecordIO.h"
#include "llvm/DebugInfo/CodeView/ContinuationRecordBuilder.h"
#include "llvm/DebugInfo/CodeView/DebugInlineeLinesSubsection.h"
#include "llvm/DebugInfo/CodeView/EnumTables.h"
#include "llvm/DebugInfo/CodeView/Line.h"
#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
#include "llvm/DebugInfo/CodeView/TypeDumpVisitor.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/DebugInfo/CodeView/TypeTableCollection.h"
#include "llvm/DebugInfo/CodeView/TypeVisitorCallbackPipeline.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSectionCOFF.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/BinaryByteStream.h"
#include "llvm/Support/BinaryStreamReader.h"
#include "llvm/Support/BinaryStreamWriter.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cctype>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <limits>
#include <string>
#include <utility>
#include <vector>
using namespace llvm;
using namespace llvm::codeview;
namespace {
class CVMCAdapter : public CodeViewRecordStreamer {
public:
CVMCAdapter(MCStreamer &OS) : OS(&OS) {}
void EmitBytes(StringRef Data) { OS->EmitBytes(Data); }
void EmitIntValue(uint64_t Value, unsigned Size) {
OS->EmitIntValueInHex(Value, Size);
}
void EmitBinaryData(StringRef Data) { OS->EmitBinaryData(Data); }
void AddComment(const Twine &T) { OS->AddComment(T); }
private:
MCStreamer *OS = nullptr;
};
} // namespace
static CPUType mapArchToCVCPUType(Triple::ArchType Type) {
switch (Type) {
case Triple::ArchType::x86:
return CPUType::Pentium3;
case Triple::ArchType::x86_64:
return CPUType::X64;
case Triple::ArchType::thumb:
return CPUType::Thumb;
case Triple::ArchType::aarch64:
return CPUType::ARM64;
default:
report_fatal_error("target architecture doesn't map to a CodeView CPUType");
}
}
CodeViewDebug::CodeViewDebug(AsmPrinter *AP)
: DebugHandlerBase(AP), OS(*Asm->OutStreamer), TypeTable(Allocator) {
// If the module doesn't have named metadata anchors or the COFF debug
// section is not available, skip any debug info related stuff.
if (!MMI->getModule()->getNamedMetadata("llvm.dbg.cu") ||
!AP->getObjFileLowering().getCOFFDebugSymbolsSection()) {
Asm = nullptr;
MMI->setDebugInfoAvailability(false);
return;
}
// Tell MMI that we have debug info.
MMI->setDebugInfoAvailability(true);
TheCPU =
mapArchToCVCPUType(Triple(MMI->getModule()->getTargetTriple()).getArch());
collectGlobalVariableInfo();
// Check if we should emit type record hashes.
ConstantInt *GH = mdconst::extract_or_null<ConstantInt>(
MMI->getModule()->getModuleFlag("CodeViewGHash"));
EmitDebugGlobalHashes = GH && !GH->isZero();
}
StringRef CodeViewDebug::getFullFilepath(const DIFile *File) {
std::string &Filepath = FileToFilepathMap[File];
if (!Filepath.empty())
return Filepath;
StringRef Dir = File->getDirectory(), Filename = File->getFilename();
// If this is a Unix-style path, just use it as is. Don't try to canonicalize
// it textually because one of the path components could be a symlink.
if (Dir.startswith("/") || Filename.startswith("/")) {
if (llvm::sys::path::is_absolute(Filename, llvm::sys::path::Style::posix))
return Filename;
Filepath = Dir;
if (Dir.back() != '/')
Filepath += '/';
Filepath += Filename;
return Filepath;
}
// Clang emits directory and relative filename info into the IR, but CodeView
// operates on full paths. We could change Clang to emit full paths too, but
// that would increase the IR size and is probably not needed for other users.
// For now, just concatenate and canonicalize the path here.
if (Filename.find(':') == 1)
Filepath = Filename;
else
Filepath = (Dir + "\\" + Filename).str();
// Canonicalize the path. We have to do it textually because we may no longer
// have access to the file in the filesystem.
// First, replace all slashes with backslashes.
std::replace(Filepath.begin(), Filepath.end(), '/', '\\');
// Remove all "\.\" with "\".
size_t Cursor = 0;
while ((Cursor = Filepath.find("\\.\\", Cursor)) != std::string::npos)
Filepath.erase(Cursor, 2);
// Replace all "\XXX\..\" with "\". Don't try too hard though as the original
// path should be well-formatted, e.g. start with a drive letter, etc.
Cursor = 0;
while ((Cursor = Filepath.find("\\..\\", Cursor)) != std::string::npos) {
// Something's wrong if the path starts with "\..\", abort.
if (Cursor == 0)
break;
size_t PrevSlash = Filepath.rfind('\\', Cursor - 1);
if (PrevSlash == std::string::npos)
// Something's wrong, abort.
break;
Filepath.erase(PrevSlash, Cursor + 3 - PrevSlash);
// The next ".." might be following the one we've just erased.
Cursor = PrevSlash;
}
// Remove all duplicate backslashes.
Cursor = 0;
while ((Cursor = Filepath.find("\\\\", Cursor)) != std::string::npos)
Filepath.erase(Cursor, 1);
return Filepath;
}
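// Worked example (hypothetical inputs): Dir "c:\src\." and Filename
// "foo\..\bar.cpp" concatenate to "c:\src\.\foo\..\bar.cpp", which the
// passes above canonicalize to "c:\src\bar.cpp".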
unsigned CodeViewDebug::maybeRecordFile(const DIFile *F) {
StringRef FullPath = getFullFilepath(F);
unsigned NextId = FileIdMap.size() + 1;
auto Insertion = FileIdMap.insert(std::make_pair(FullPath, NextId));
if (Insertion.second) {
// We have to compute the full filepath and emit a .cv_file directive.
ArrayRef<uint8_t> ChecksumAsBytes;
FileChecksumKind CSKind = FileChecksumKind::None;
if (F->getChecksum()) {
std::string Checksum = fromHex(F->getChecksum()->Value);
void *CKMem = OS.getContext().allocate(Checksum.size(), 1);
memcpy(CKMem, Checksum.data(), Checksum.size());
ChecksumAsBytes = ArrayRef<uint8_t>(
reinterpret_cast<const uint8_t *>(CKMem), Checksum.size());
switch (F->getChecksum()->Kind) {
case DIFile::CSK_MD5: CSKind = FileChecksumKind::MD5; break;
case DIFile::CSK_SHA1: CSKind = FileChecksumKind::SHA1; break;
}
}
bool Success = OS.EmitCVFileDirective(NextId, FullPath, ChecksumAsBytes,
static_cast<unsigned>(CSKind));
(void)Success;
assert(Success && ".cv_file directive failed");
}
return Insertion.first->second;
}
CodeViewDebug::InlineSite &
CodeViewDebug::getInlineSite(const DILocation *InlinedAt,
const DISubprogram *Inlinee) {
auto SiteInsertion = CurFn->InlineSites.insert({InlinedAt, InlineSite()});
InlineSite *Site = &SiteInsertion.first->second;
if (SiteInsertion.second) {
unsigned ParentFuncId = CurFn->FuncId;
if (const DILocation *OuterIA = InlinedAt->getInlinedAt())
ParentFuncId =
getInlineSite(OuterIA, InlinedAt->getScope()->getSubprogram())
.SiteFuncId;
Site->SiteFuncId = NextFuncId++;
OS.EmitCVInlineSiteIdDirective(
Site->SiteFuncId, ParentFuncId, maybeRecordFile(InlinedAt->getFile()),
InlinedAt->getLine(), InlinedAt->getColumn(), SMLoc());
Site->Inlinee = Inlinee;
InlinedSubprograms.insert(Inlinee);
getFuncIdForSubprogram(Inlinee);
}
return *Site;
}
static StringRef getPrettyScopeName(const DIScope *Scope) {
StringRef ScopeName = Scope->getName();
if (!ScopeName.empty())
return ScopeName;
switch (Scope->getTag()) {
case dwarf::DW_TAG_enumeration_type:
case dwarf::DW_TAG_class_type:
case dwarf::DW_TAG_structure_type:
case dwarf::DW_TAG_union_type:
return "<unnamed-tag>";
case dwarf::DW_TAG_namespace:
return "`anonymous namespace'";
}
return StringRef();
}
static const DISubprogram *getQualifiedNameComponents(
const DIScope *Scope, SmallVectorImpl<StringRef> &QualifiedNameComponents) {
const DISubprogram *ClosestSubprogram = nullptr;
while (Scope != nullptr) {
if (ClosestSubprogram == nullptr)
ClosestSubprogram = dyn_cast<DISubprogram>(Scope);
StringRef ScopeName = getPrettyScopeName(Scope);
if (!ScopeName.empty())
QualifiedNameComponents.push_back(ScopeName);
Scope = Scope->getScope();
}
return ClosestSubprogram;
}
static std::string getQualifiedName(ArrayRef<StringRef> QualifiedNameComponents,
StringRef TypeName) {
std::string FullyQualifiedName;
for (StringRef QualifiedNameComponent :
llvm::reverse(QualifiedNameComponents)) {
FullyQualifiedName.append(QualifiedNameComponent);
FullyQualifiedName.append("::");
}
FullyQualifiedName.append(TypeName);
return FullyQualifiedName;
}
static std::string getFullyQualifiedName(const DIScope *Scope, StringRef Name) {
SmallVector<StringRef, 5> QualifiedNameComponents;
getQualifiedNameComponents(Scope, QualifiedNameComponents);
return getQualifiedName(QualifiedNameComponents, Name);
}
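// For example (illustrative), a type "Widget" whose scope chain is class N2
// inside namespace N1 collects components innermost-first ("N2", then "N1"),
// which getQualifiedName reverses to produce "N1::N2::Widget".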
struct CodeViewDebug::TypeLoweringScope {
TypeLoweringScope(CodeViewDebug &CVD) : CVD(CVD) { ++CVD.TypeEmissionLevel; }
~TypeLoweringScope() {
// Don't decrement TypeEmissionLevel until after emitting deferred types, so
// inner TypeLoweringScopes don't attempt to emit deferred types.
if (CVD.TypeEmissionLevel == 1)
CVD.emitDeferredCompleteTypes();
--CVD.TypeEmissionLevel;
}
CodeViewDebug &CVD;
};
static std::string getFullyQualifiedName(const DIScope *Ty) {
const DIScope *Scope = Ty->getScope();
return getFullyQualifiedName(Scope, getPrettyScopeName(Ty));
}
TypeIndex CodeViewDebug::getScopeIndex(const DIScope *Scope) {
// No scope means global scope and that uses the zero index.
if (!Scope || isa<DIFile>(Scope))
return TypeIndex();
assert(!isa<DIType>(Scope) && "shouldn't make a namespace scope for a type");
// Check if we've already translated this scope.
auto I = TypeIndices.find({Scope, nullptr});
if (I != TypeIndices.end())
return I->second;
// Build the fully qualified name of the scope.
std::string ScopeName = getFullyQualifiedName(Scope);
StringIdRecord SID(TypeIndex(), ScopeName);
auto TI = TypeTable.writeLeafType(SID);
return recordTypeIndexForDINode(Scope, TI);
}
TypeIndex CodeViewDebug::getFuncIdForSubprogram(const DISubprogram *SP) {
assert(SP);
// Check if we've already translated this subprogram.
auto I = TypeIndices.find({SP, nullptr});
if (I != TypeIndices.end())
return I->second;
// The display name includes function template arguments. Drop them to match
// MSVC.
StringRef DisplayName = SP->getName().split('<').first;
const DIScope *Scope = SP->getScope();
TypeIndex TI;
if (const auto *Class = dyn_cast_or_null<DICompositeType>(Scope)) {
// If the scope is a DICompositeType, then this must be a method. Member
// function types take some special handling, and require access to the
// subprogram.
TypeIndex ClassType = getTypeIndex(Class);
MemberFuncIdRecord MFuncId(ClassType, getMemberFunctionType(SP, Class),
DisplayName);
TI = TypeTable.writeLeafType(MFuncId);
} else {
// Otherwise, this must be a free function.
TypeIndex ParentScope = getScopeIndex(Scope);
FuncIdRecord FuncId(ParentScope, getTypeIndex(SP->getType()), DisplayName);
TI = TypeTable.writeLeafType(FuncId);
}
return recordTypeIndexForDINode(SP, TI);
}
static bool isNonTrivial(const DICompositeType *DCTy) {
return ((DCTy->getFlags() & DINode::FlagNonTrivial) == DINode::FlagNonTrivial);
}
static FunctionOptions
getFunctionOptions(const DISubroutineType *Ty,
const DICompositeType *ClassTy = nullptr,
StringRef SPName = StringRef("")) {
FunctionOptions FO = FunctionOptions::None;
const DIType *ReturnTy = nullptr;
if (auto TypeArray = Ty->getTypeArray()) {
if (TypeArray.size())
ReturnTy = TypeArray[0];
}
if (auto *ReturnDCTy = dyn_cast_or_null<DICompositeType>(ReturnTy)) {
if (isNonTrivial(ReturnDCTy))
FO |= FunctionOptions::CxxReturnUdt;
}
// DISubroutineType is unnamed. Use the DISubprogram's name, i.e. SPName, in the comparison.
if (ClassTy && isNonTrivial(ClassTy) && SPName == ClassTy->getName()) {
FO |= FunctionOptions::Constructor;
// TODO: set the FunctionOptions::ConstructorWithVirtualBases flag.
}
return FO;
}
TypeIndex CodeViewDebug::getMemberFunctionType(const DISubprogram *SP,
const DICompositeType *Class) {
// Always use the method declaration as the key for the function type. The
// method declaration contains the this adjustment.
if (SP->getDeclaration())
SP = SP->getDeclaration();
assert(!SP->getDeclaration() && "should use declaration as key");
// Key the MemberFunctionRecord into the map as {SP, Class}. It won't collide
// with the MemberFuncIdRecord, which is keyed in as {SP, nullptr}.
auto I = TypeIndices.find({SP, Class});
if (I != TypeIndices.end())
return I->second;
// Make sure complete type info for the class is emitted *after* the member
// function type, as the complete class type is likely to reference this
// member function type.
TypeLoweringScope S(*this);
const bool IsStaticMethod = (SP->getFlags() & DINode::FlagStaticMember) != 0;
FunctionOptions FO = getFunctionOptions(SP->getType(), Class, SP->getName());
TypeIndex TI = lowerTypeMemberFunction(
SP->getType(), Class, SP->getThisAdjustment(), IsStaticMethod, FO);
return recordTypeIndexForDINode(SP, TI, Class);
}
TypeIndex CodeViewDebug::recordTypeIndexForDINode(const DINode *Node,
TypeIndex TI,
const DIType *ClassTy) {
auto InsertResult = TypeIndices.insert({{Node, ClassTy}, TI});
(void)InsertResult;
assert(InsertResult.second && "DINode was already assigned a type index");
return TI;
}
unsigned CodeViewDebug::getPointerSizeInBytes() {
return MMI->getModule()->getDataLayout().getPointerSizeInBits() / 8;
}
void CodeViewDebug::recordLocalVariable(LocalVariable &&Var,
const LexicalScope *LS) {
if (const DILocation *InlinedAt = LS->getInlinedAt()) {
// This variable was inlined. Associate it with the InlineSite.
const DISubprogram *Inlinee = Var.DIVar->getScope()->getSubprogram();
InlineSite &Site = getInlineSite(InlinedAt, Inlinee);
Site.InlinedLocals.emplace_back(Var);
} else {
// This variable goes into the corresponding lexical scope.
ScopeVariables[LS].emplace_back(Var);
}
}
static void addLocIfNotPresent(SmallVectorImpl<const DILocation *> &Locs,
const DILocation *Loc) {
auto B = Locs.begin(), E = Locs.end();
if (std::find(B, E, Loc) == E)
Locs.push_back(Loc);
}
void CodeViewDebug::maybeRecordLocation(const DebugLoc &DL,
const MachineFunction *MF) {
// Skip this instruction if it has the same location as the previous one.
if (!DL || DL == PrevInstLoc)
return;
const DIScope *Scope = DL.get()->getScope();
if (!Scope)
return;
// Skip this line if it is longer than the maximum we can record.
LineInfo LI(DL.getLine(), DL.getLine(), /*IsStatement=*/true);
if (LI.getStartLine() != DL.getLine() || LI.isAlwaysStepInto() ||
LI.isNeverStepInto())
return;
ColumnInfo CI(DL.getCol(), /*EndColumn=*/0);
if (CI.getStartColumn() != DL.getCol())
return;
if (!CurFn->HaveLineInfo)
CurFn->HaveLineInfo = true;
unsigned FileId = 0;
if (PrevInstLoc.get() && PrevInstLoc->getFile() == DL->getFile())
FileId = CurFn->LastFileId;
else
FileId = CurFn->LastFileId = maybeRecordFile(DL->getFile());
PrevInstLoc = DL;
unsigned FuncId = CurFn->FuncId;
if (const DILocation *SiteLoc = DL->getInlinedAt()) {
const DILocation *Loc = DL.get();
// If this location was actually inlined from somewhere else, give it the ID
// of the inline call site.
FuncId =
getInlineSite(SiteLoc, Loc->getScope()->getSubprogram()).SiteFuncId;
// Ensure we have links in the tree of inline call sites.
bool FirstLoc = true;
while ((SiteLoc = Loc->getInlinedAt())) {
InlineSite &Site =
getInlineSite(SiteLoc, Loc->getScope()->getSubprogram());
if (!FirstLoc)
addLocIfNotPresent(Site.ChildSites, Loc);
FirstLoc = false;
Loc = SiteLoc;
}
addLocIfNotPresent(CurFn->ChildSites, Loc);
}
OS.EmitCVLocDirective(FuncId, FileId, DL.getLine(), DL.getCol(),
/*PrologueEnd=*/false, /*IsStmt=*/false,
DL->getFilename(), SMLoc());
}
void CodeViewDebug::emitCodeViewMagicVersion() {
OS.EmitValueToAlignment(4);
OS.AddComment("Debug section magic");
OS.EmitIntValue(COFF::DEBUG_SECTION_MAGIC, 4);
}
void CodeViewDebug::endModule() {
if (!Asm || !MMI->hasDebugInfo())
return;
assert(Asm != nullptr);
// The COFF .debug$S section consists of several subsections, each starting
// with a 4-byte control code (e.g. 0xF1, 0xF2, etc) and then a 4-byte length
// of the payload followed by the payload itself. The subsections are 4-byte
// aligned.
// Use the generic .debug$S section, and make a subsection for all the inlined
// subprograms.
switchToDebugSectionForSymbol(nullptr);
MCSymbol *CompilerInfo = beginCVSubsection(DebugSubsectionKind::Symbols);
emitCompilerInformation();
endCVSubsection(CompilerInfo);
emitInlineeLinesSubsection();
// Emit per-function debug information.
for (auto &P : FnDebugInfo)
if (!P.first->isDeclarationForLinker())
emitDebugInfoForFunction(P.first, *P.second);
// Emit global variable debug information.
setCurrentSubprogram(nullptr);
emitDebugInfoForGlobals();
// Emit retained types.
emitDebugInfoForRetainedTypes();
// Switch back to the generic .debug$S section after potentially processing
// comdat symbol sections.
switchToDebugSectionForSymbol(nullptr);
// Emit UDT records for any types used by global variables.
if (!GlobalUDTs.empty()) {
MCSymbol *SymbolsEnd = beginCVSubsection(DebugSubsectionKind::Symbols);
emitDebugInfoForUDTs(GlobalUDTs);
endCVSubsection(SymbolsEnd);
}
// This subsection maps each file index to its offset in the string table.
OS.AddComment("File index to string table offset subsection");
OS.EmitCVFileChecksumsDirective();
// This subsection holds the string table.
OS.AddComment("String table");
OS.EmitCVStringTableDirective();
// Emit S_BUILDINFO, which points to LF_BUILDINFO. Put this in its own symbol
// subsection in the generic .debug$S section at the end. There is no
// particular reason for this ordering other than to match MSVC.
emitBuildInfo();
// Emit type information and hashes last, so that any types we translate while
// emitting function info are included.
emitTypeInformation();
if (EmitDebugGlobalHashes)
emitTypeGlobalHashes();
clear();
}
static void
emitNullTerminatedSymbolName(MCStreamer &OS, StringRef S,
unsigned MaxFixedRecordLength = 0xF00) {
// The maximum CV record length is 0xFF00. Most of the strings we emit appear
// after a fixed length portion of the record. The fixed length portion should
// always be less than 0xF00 (3840) bytes, so truncate the string so that the
// overall record size is less than the maximum allowed.
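// With the defaults, that limits the emitted string to
// 0xFF00 - 0xF00 - 1 = 61439 bytes, leaving room for the NUL terminator.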
SmallString<32> NullTerminatedString(
S.take_front(MaxRecordLength - MaxFixedRecordLength - 1));
NullTerminatedString.push_back('\0');
OS.EmitBytes(NullTerminatedString);
}
static StringRef getTypeLeafName(TypeLeafKind TypeKind) {
for (const EnumEntry<TypeLeafKind> &EE : getTypeLeafNames())
if (EE.Value == TypeKind)
return EE.Name;
return "";
}
void CodeViewDebug::emitTypeInformation() {
if (TypeTable.empty())
return;
// Start the .debug$T or .debug$P section with 0x4.
OS.SwitchSection(Asm->getObjFileLowering().getCOFFDebugTypesSection());
emitCodeViewMagicVersion();
SmallString<8> CommentPrefix;
if (OS.isVerboseAsm()) {
CommentPrefix += '\t';
CommentPrefix += Asm->MAI->getCommentString();
CommentPrefix += ' ';
}
TypeTableCollection Table(TypeTable.records());
SmallString<512> CommentBlock;
raw_svector_ostream CommentOS(CommentBlock);
std::unique_ptr<ScopedPrinter> SP;
std::unique_ptr<TypeDumpVisitor> TDV;
TypeVisitorCallbackPipeline Pipeline;
if (OS.isVerboseAsm()) {
// Construct a block comment describing the type record, for readability.
SP = llvm::make_unique<ScopedPrinter>(CommentOS);
SP->setPrefix(CommentPrefix);
TDV = llvm::make_unique<TypeDumpVisitor>(Table, SP.get(), false);
Pipeline.addCallbackToPipeline(*TDV);
}
// Emit the type record using the CodeView MCStreamer adapter.
CVMCAdapter CVMCOS(OS);
TypeRecordMapping typeMapping(CVMCOS);
Pipeline.addCallbackToPipeline(typeMapping);
Optional<TypeIndex> B = Table.getFirst();
while (B) {
// This will fail if the record data is invalid.
CVType Record = Table.getType(*B);
CommentBlock.clear();
auto RecordLen = Record.length();
auto RecordKind = Record.kind();
if (OS.isVerboseAsm())
CVMCOS.AddComment("Record length");
CVMCOS.EmitIntValue(RecordLen - 2, 2);
if (OS.isVerboseAsm())
CVMCOS.AddComment("Record kind: " + getTypeLeafName(RecordKind));
CVMCOS.EmitIntValue(RecordKind, sizeof(RecordKind));
Error E = codeview::visitTypeRecord(Record, *B, Pipeline);
if (E) {
logAllUnhandledErrors(std::move(E), errs(), "error: ");
llvm_unreachable("produced malformed type record");
}
if (OS.isVerboseAsm()) {
// emitRawComment will insert its own tab and comment string before
// the first line, so strip off our first one. It also prints its own
// newline.
OS.emitRawComment(
CommentOS.str().drop_front(CommentPrefix.size() - 1).rtrim());
}
B = Table.getNext(*B);
}
}
void CodeViewDebug::emitTypeGlobalHashes() {
if (TypeTable.empty())
return;
// Start the .debug$H section with the version and hash algorithm, currently
// hardcoded to version 0, SHA1.
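// Each entry below is the leading 8 bytes of the SHA1 hash of a type record
// (SHA1_8), emitted in ascending type index order starting at TypeIndex
// 0x1000, the first non-simple index.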
OS.SwitchSection(Asm->getObjFileLowering().getCOFFGlobalTypeHashesSection());
OS.EmitValueToAlignment(4);
OS.AddComment("Magic");
OS.EmitIntValue(COFF::DEBUG_HASHES_SECTION_MAGIC, 4);
OS.AddComment("Section Version");
OS.EmitIntValue(0, 2);
OS.AddComment("Hash Algorithm");
OS.EmitIntValue(uint16_t(GlobalTypeHashAlg::SHA1_8), 2);
TypeIndex TI(TypeIndex::FirstNonSimpleIndex);
for (const auto &GHR : TypeTable.hashes()) {
if (OS.isVerboseAsm()) {
// Emit an EOL-comment describing which TypeIndex this hash corresponds
// to, as well as the stringified SHA1 hash.
SmallString<32> Comment;
raw_svector_ostream CommentOS(Comment);
CommentOS << formatv("{0:X+} [{1}]", TI.getIndex(), GHR);
OS.AddComment(Comment);
++TI;
}
assert(GHR.Hash.size() == 8);
StringRef S(reinterpret_cast<const char *>(GHR.Hash.data()),
GHR.Hash.size());
OS.EmitBinaryData(S);
}
}
static SourceLanguage MapDWLangToCVLang(unsigned DWLang) {
switch (DWLang) {
case dwarf::DW_LANG_C:
case dwarf::DW_LANG_C89:
case dwarf::DW_LANG_C99:
case dwarf::DW_LANG_C11:
case dwarf::DW_LANG_ObjC:
return SourceLanguage::C;
case dwarf::DW_LANG_C_plus_plus:
case dwarf::DW_LANG_C_plus_plus_03:
case dwarf::DW_LANG_C_plus_plus_11:
case dwarf::DW_LANG_C_plus_plus_14:
return SourceLanguage::Cpp;
case dwarf::DW_LANG_Fortran77:
case dwarf::DW_LANG_Fortran90:
case dwarf::DW_LANG_Fortran03:
case dwarf::DW_LANG_Fortran08:
return SourceLanguage::Fortran;
case dwarf::DW_LANG_Pascal83:
return SourceLanguage::Pascal;
case dwarf::DW_LANG_Cobol74:
case dwarf::DW_LANG_Cobol85:
return SourceLanguage::Cobol;
case dwarf::DW_LANG_Java:
return SourceLanguage::Java;
case dwarf::DW_LANG_D:
return SourceLanguage::D;
case dwarf::DW_LANG_Swift:
return SourceLanguage::Swift;
default:
// There's no CodeView representation for this language, and CV doesn't
// have an "unknown" option for the language field, so we'll use MASM,
// as it's very low level.
return SourceLanguage::Masm;
}
}
namespace {
struct Version {
int Part[4];
};
} // end anonymous namespace
// Takes a StringRef like "clang 4.0.0.0 (other nonsense 123)" and parses out
// the version number.
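// For example, parseVersion("clang version 9.0.1") yields {9, 0, 1, 0}:
// characters before the first digit are skipped, '.' advances to the next
// part, and any other character after the number begins ends the parse.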
static Version parseVersion(StringRef Name) {
Version V = {{0}};
int N = 0;
for (const char C : Name) {
if (isdigit(C)) {
V.Part[N] *= 10;
V.Part[N] += C - '0';
} else if (C == '.') {
++N;
if (N >= 4)
return V;
} else if (N > 0)
return V;
}
return V;
}
void CodeViewDebug::emitCompilerInformation() {
MCSymbol *CompilerEnd = beginSymbolRecord(SymbolKind::S_COMPILE3);
uint32_t Flags = 0;
NamedMDNode *CUs = MMI->getModule()->getNamedMetadata("llvm.dbg.cu");
const MDNode *Node = *CUs->operands().begin();
const auto *CU = cast<DICompileUnit>(Node);
// The low byte of the flags indicates the source language.
Flags = MapDWLangToCVLang(CU->getSourceLanguage());
// TODO: Figure out which other flags need to be set.
OS.AddComment("Flags and language");
OS.EmitIntValue(Flags, 4);
OS.AddComment("CPUType");
OS.EmitIntValue(static_cast<uint64_t>(TheCPU), 2);
StringRef CompilerVersion = CU->getProducer();
Version FrontVer = parseVersion(CompilerVersion);
OS.AddComment("Frontend version");
for (int N = 0; N < 4; ++N)
OS.EmitIntValue(FrontVer.Part[N], 2);
// Some Microsoft tools, like Binscope, expect a backend version number of at
// least 8.something, so we'll coerce the LLVM version into a form that
// guarantees it'll be big enough without really lying about the version.
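// For example, LLVM 9.0.1 becomes 1000 * 9 + 10 * 0 + 1 = 9001, emitted as
// backend version 9001.0.0.0, which is comfortably above 8.x.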
int Major = 1000 * LLVM_VERSION_MAJOR +
10 * LLVM_VERSION_MINOR +
LLVM_VERSION_PATCH;
// Clamp it for builds that use unusually large version numbers.
Major = std::min<int>(Major, std::numeric_limits<uint16_t>::max());
Version BackVer = {{ Major, 0, 0, 0 }};
OS.AddComment("Backend version");
for (int N = 0; N < 4; ++N)
OS.EmitIntValue(BackVer.Part[N], 2);
OS.AddComment("Null-terminated compiler version string");
emitNullTerminatedSymbolName(OS, CompilerVersion);
endSymbolRecord(CompilerEnd);
}
static TypeIndex getStringIdTypeIdx(GlobalTypeTableBuilder &TypeTable,
StringRef S) {
StringIdRecord SIR(TypeIndex(0x0), S);
return TypeTable.writeLeafType(SIR);
}
void CodeViewDebug::emitBuildInfo() {
// First, make LF_BUILDINFO. It's a sequence of strings with various bits of
// build info. The known prefix is:
// - Absolute path of current directory
// - Compiler path
// - Main source file path, relative to CWD or absolute
// - Type server PDB file
// - Canonical compiler command line
// If frontend and backend compilation are separated (think llc or LTO), it's
// not clear if the compiler path should refer to the executable for the
// frontend or the backend. Leave it blank for now.
TypeIndex BuildInfoArgs[BuildInfoRecord::MaxArgs] = {};
NamedMDNode *CUs = MMI->getModule()->getNamedMetadata("llvm.dbg.cu");
const MDNode *Node = *CUs->operands().begin(); // FIXME: Multiple CUs.
const auto *CU = cast<DICompileUnit>(Node);
const DIFile *MainSourceFile = CU->getFile();
BuildInfoArgs[BuildInfoRecord::CurrentDirectory] =
getStringIdTypeIdx(TypeTable, MainSourceFile->getDirectory());
BuildInfoArgs[BuildInfoRecord::SourceFile] =
getStringIdTypeIdx(TypeTable, MainSourceFile->getFilename());
// FIXME: Path to compiler and command line. PDB is intentionally blank unless
// we implement /Zi type servers.
BuildInfoRecord BIR(BuildInfoArgs);
TypeIndex BuildInfoIndex = TypeTable.writeLeafType(BIR);
// Make a new .debug$S subsection for the S_BUILDINFO record, which points
// from the module symbols into the type stream.
MCSymbol *BISubsecEnd = beginCVSubsection(DebugSubsectionKind::Symbols);
MCSymbol *BIEnd = beginSymbolRecord(SymbolKind::S_BUILDINFO);
OS.AddComment("LF_BUILDINFO index");
OS.EmitIntValue(BuildInfoIndex.getIndex(), 4);
endSymbolRecord(BIEnd);
endCVSubsection(BISubsecEnd);
}
void CodeViewDebug::emitInlineeLinesSubsection() {
if (InlinedSubprograms.empty())
return;
OS.AddComment("Inlinee lines subsection");
MCSymbol *InlineEnd = beginCVSubsection(DebugSubsectionKind::InlineeLines);
// We emit the checksum info for files. This is used by debuggers to
// determine if a pdb matches the source before loading it. Visual Studio,
// for instance, will display a warning that the breakpoints are not valid if
// the pdb does not match the source.
OS.AddComment("Inlinee lines signature");
OS.EmitIntValue(unsigned(InlineeLinesSignature::Normal), 4);
for (const DISubprogram *SP : InlinedSubprograms) {
assert(TypeIndices.count({SP, nullptr}));
TypeIndex InlineeIdx = TypeIndices[{SP, nullptr}];
OS.AddBlankLine();
unsigned FileId = maybeRecordFile(SP->getFile());
OS.AddComment("Inlined function " + SP->getName() + " starts at " +
SP->getFilename() + Twine(':') + Twine(SP->getLine()));
OS.AddBlankLine();
OS.AddComment("Type index of inlined function");
OS.EmitIntValue(InlineeIdx.getIndex(), 4);
OS.AddComment("Offset into filechecksum table");
OS.EmitCVFileChecksumOffsetDirective(FileId);
OS.AddComment("Starting line number");
OS.EmitIntValue(SP->getLine(), 4);
}
endCVSubsection(InlineEnd);
}
void CodeViewDebug::emitInlinedCallSite(const FunctionInfo &FI,
const DILocation *InlinedAt,
const InlineSite &Site) {
assert(TypeIndices.count({Site.Inlinee, nullptr}));
TypeIndex InlineeIdx = TypeIndices[{Site.Inlinee, nullptr}];
// SymbolRecord
MCSymbol *InlineEnd = beginSymbolRecord(SymbolKind::S_INLINESITE);
OS.AddComment("PtrParent");
OS.EmitIntValue(0, 4);
OS.AddComment("PtrEnd");
OS.EmitIntValue(0, 4);
OS.AddComment("Inlinee type index");
OS.EmitIntValue(InlineeIdx.getIndex(), 4);
unsigned FileId = maybeRecordFile(Site.Inlinee->getFile());
unsigned StartLineNum = Site.Inlinee->getLine();
OS.EmitCVInlineLinetableDirective(Site.SiteFuncId, FileId, StartLineNum,
FI.Begin, FI.End);
endSymbolRecord(InlineEnd);
emitLocalVariableList(FI, Site.InlinedLocals);
// Recurse on child inlined call sites before closing the scope.
for (const DILocation *ChildSite : Site.ChildSites) {
auto I = FI.InlineSites.find(ChildSite);
assert(I != FI.InlineSites.end() &&
"child site not in function inline site map");
emitInlinedCallSite(FI, ChildSite, I->second);
}
// Close the scope.
emitEndSymbolRecord(SymbolKind::S_INLINESITE_END);
}
void CodeViewDebug::switchToDebugSectionForSymbol(const MCSymbol *GVSym) {
// If we have a symbol, it may be in a section that is COMDAT. If so, find the
// comdat key. A section may be comdat because of -ffunction-sections or
// because it is comdat in the IR.
MCSectionCOFF *GVSec =
GVSym ? dyn_cast<MCSectionCOFF>(&GVSym->getSection()) : nullptr;
const MCSymbol *KeySym = GVSec ? GVSec->getCOMDATSymbol() : nullptr;
MCSectionCOFF *DebugSec = cast<MCSectionCOFF>(
Asm->getObjFileLowering().getCOFFDebugSymbolsSection());
DebugSec = OS.getContext().getAssociativeCOFFSection(DebugSec, KeySym);
OS.SwitchSection(DebugSec);
// Emit the magic version number if this is the first time we've switched to
// this section.
if (ComdatDebugSections.insert(DebugSec).second)
emitCodeViewMagicVersion();
}
// Emit an S_THUNK32/S_END symbol pair for a thunk routine.
// The only supported thunk ordinal is currently the standard type.
void CodeViewDebug::emitDebugInfoForThunk(const Function *GV,
FunctionInfo &FI,
const MCSymbol *Fn) {
std::string FuncName = GlobalValue::dropLLVMManglingEscape(GV->getName());
const ThunkOrdinal ordinal = ThunkOrdinal::Standard; // Only supported kind.
OS.AddComment("Symbol subsection for " + Twine(FuncName));
MCSymbol *SymbolsEnd = beginCVSubsection(DebugSubsectionKind::Symbols);
// Emit S_THUNK32
MCSymbol *ThunkRecordEnd = beginSymbolRecord(SymbolKind::S_THUNK32);
OS.AddComment("PtrParent");
OS.EmitIntValue(0, 4);
OS.AddComment("PtrEnd");
OS.EmitIntValue(0, 4);
OS.AddComment("PtrNext");
OS.EmitIntValue(0, 4);
OS.AddComment("Thunk section relative address");
OS.EmitCOFFSecRel32(Fn, /*Offset=*/0);
OS.AddComment("Thunk section index");
OS.EmitCOFFSectionIndex(Fn);
OS.AddComment("Code size");
OS.emitAbsoluteSymbolDiff(FI.End, Fn, 2);
OS.AddComment("Ordinal");
OS.EmitIntValue(unsigned(ordinal), 1);
OS.AddComment("Function name");
emitNullTerminatedSymbolName(OS, FuncName);
// Additional fields specific to the thunk ordinal would go here.
endSymbolRecord(ThunkRecordEnd);
// Local variables/inlined routines are purposely omitted here. The point of
// marking this as a thunk is so Visual Studio will NOT stop in this routine.
// Emit S_PROC_ID_END
emitEndSymbolRecord(SymbolKind::S_PROC_ID_END);
endCVSubsection(SymbolsEnd);
}
void CodeViewDebug::emitDebugInfoForFunction(const Function *GV,
FunctionInfo &FI) {
// For each function there is a separate subsection which holds the PC to
// file:line table.
const MCSymbol *Fn = Asm->getSymbol(GV);
assert(Fn);
// Switch to a comdat section, if appropriate.
switchToDebugSectionForSymbol(Fn);
std::string FuncName;
auto *SP = GV->getSubprogram();
assert(SP);
setCurrentSubprogram(SP);
if (SP->isThunk()) {
emitDebugInfoForThunk(GV, FI, Fn);
return;
}
// If we have a display name, build the fully qualified name by walking the
// chain of scopes.
if (!SP->getName().empty())
FuncName = getFullyQualifiedName(SP->getScope(), SP->getName());
// If our DISubprogram name is empty, use the mangled name.
if (FuncName.empty())
FuncName = GlobalValue::dropLLVMManglingEscape(GV->getName());
// Emit FPO data, but only on 32-bit x86. No other platforms use it.
if (Triple(MMI->getModule()->getTargetTriple()).getArch() == Triple::x86)
OS.EmitCVFPOData(Fn);
// Emit a symbol subsection, required by VS2012+ to find function boundaries.
OS.AddComment("Symbol subsection for " + Twine(FuncName));
MCSymbol *SymbolsEnd = beginCVSubsection(DebugSubsectionKind::Symbols);
{
SymbolKind ProcKind = GV->hasLocalLinkage() ? SymbolKind::S_LPROC32_ID
: SymbolKind::S_GPROC32_ID;
MCSymbol *ProcRecordEnd = beginSymbolRecord(ProcKind);
// These fields are filled in by tools like CVPACK which run after the fact.
OS.AddComment("PtrParent");
OS.EmitIntValue(0, 4);
OS.AddComment("PtrEnd");
OS.EmitIntValue(0, 4);
OS.AddComment("PtrNext");
OS.EmitIntValue(0, 4);
// This is the important bit that tells the debugger where the function
// code is located and what its size is:
OS.AddComment("Code size");
OS.emitAbsoluteSymbolDiff(FI.End, Fn, 4);
OS.AddComment("Offset after prologue");
OS.EmitIntValue(0, 4);
OS.AddComment("Offset before epilogue");
OS.EmitIntValue(0, 4);
OS.AddComment("Function type index");
OS.EmitIntValue(getFuncIdForSubprogram(GV->getSubprogram()).getIndex(), 4);
OS.AddComment("Function section relative address");
OS.EmitCOFFSecRel32(Fn, /*Offset=*/0);
OS.AddComment("Function section index");
OS.EmitCOFFSectionIndex(Fn);
OS.AddComment("Flags");
OS.EmitIntValue(0, 1);
// Emit the function display name as a null-terminated string.
OS.AddComment("Function name");
// Truncate the name so we won't overflow the record length field.
emitNullTerminatedSymbolName(OS, FuncName);
endSymbolRecord(ProcRecordEnd);
MCSymbol *FrameProcEnd = beginSymbolRecord(SymbolKind::S_FRAMEPROC);
// Subtract out the CSR size since MSVC excludes that and we include it.
OS.AddComment("FrameSize");
OS.EmitIntValue(FI.FrameSize - FI.CSRSize, 4);
OS.AddComment("Padding");
OS.EmitIntValue(0, 4);
OS.AddComment("Offset of padding");
OS.EmitIntValue(0, 4);
OS.AddComment("Bytes of callee saved registers");
OS.EmitIntValue(FI.CSRSize, 4);
OS.AddComment("Exception handler offset");
OS.EmitIntValue(0, 4);
OS.AddComment("Exception handler section");
OS.EmitIntValue(0, 2);
OS.AddComment("Flags (defines frame register)");
OS.EmitIntValue(uint32_t(FI.FrameProcOpts), 4);
endSymbolRecord(FrameProcEnd);
emitLocalVariableList(FI, FI.Locals);
emitGlobalVariableList(FI.Globals);
emitLexicalBlockList(FI.ChildBlocks, FI);
// Emit inlined call site information. Only emit functions inlined directly
// into the parent function. We'll emit the other sites recursively as part
// of their parent inline site.
for (const DILocation *InlinedAt : FI.ChildSites) {
auto I = FI.InlineSites.find(InlinedAt);
assert(I != FI.InlineSites.end() &&
"child site not in function inline site map");
emitInlinedCallSite(FI, InlinedAt, I->second);
}
for (auto Annot : FI.Annotations) {
MCSymbol *Label = Annot.first;
MDTuple *Strs = cast<MDTuple>(Annot.second);
MCSymbol *AnnotEnd = beginSymbolRecord(SymbolKind::S_ANNOTATION);
OS.EmitCOFFSecRel32(Label, /*Offset=*/0);
// FIXME: Make sure we don't overflow the max record size.
OS.EmitCOFFSectionIndex(Label);
OS.EmitIntValue(Strs->getNumOperands(), 2);
for (Metadata *MD : Strs->operands()) {
// MDStrings are null terminated, so we can do EmitBytes and get the
// nice .asciz directive.
StringRef Str = cast<MDString>(MD)->getString();
assert(Str.data()[Str.size()] == '\0' && "non-nullterminated MDString");
OS.EmitBytes(StringRef(Str.data(), Str.size() + 1));
}
endSymbolRecord(AnnotEnd);
}
for (auto HeapAllocSite : FI.HeapAllocSites) {
- MCSymbol *BeginLabel = std::get<0>(HeapAllocSite);
- MCSymbol *EndLabel = std::get<1>(HeapAllocSite);
-
- // The labels might not be defined if the instruction was replaced
- // somewhere in the codegen pipeline.
- if (!BeginLabel->isDefined() || !EndLabel->isDefined())
- continue;
-
- DIType *DITy = std::get<2>(HeapAllocSite);
+ const MCSymbol *BeginLabel = std::get<0>(HeapAllocSite);
+ const MCSymbol *EndLabel = std::get<1>(HeapAllocSite);
+ const DIType *DITy = std::get<2>(HeapAllocSite);
MCSymbol *HeapAllocEnd = beginSymbolRecord(SymbolKind::S_HEAPALLOCSITE);
OS.AddComment("Call site offset");
OS.EmitCOFFSecRel32(BeginLabel, /*Offset=*/0);
OS.AddComment("Call site section index");
OS.EmitCOFFSectionIndex(BeginLabel);
OS.AddComment("Call instruction length");
OS.emitAbsoluteSymbolDiff(EndLabel, BeginLabel, 2);
OS.AddComment("Type index");
OS.EmitIntValue(getCompleteTypeIndex(DITy).getIndex(), 4);
endSymbolRecord(HeapAllocEnd);
}
if (SP != nullptr)
emitDebugInfoForUDTs(LocalUDTs);
// We're done with this function.
emitEndSymbolRecord(SymbolKind::S_PROC_ID_END);
}
endCVSubsection(SymbolsEnd);
// We have an assembler directive that takes care of the whole line table.
OS.EmitCVLinetableDirective(FI.FuncId, Fn, FI.End);
}
CodeViewDebug::LocalVarDefRange
CodeViewDebug::createDefRangeMem(uint16_t CVRegister, int Offset) {
LocalVarDefRange DR;
DR.InMemory = -1;
DR.DataOffset = Offset;
assert(DR.DataOffset == Offset && "truncation");
DR.IsSubfield = 0;
DR.StructOffset = 0;
DR.CVRegister = CVRegister;
return DR;
}
void CodeViewDebug::collectVariableInfoFromMFTable(
DenseSet<InlinedEntity> &Processed) {
const MachineFunction &MF = *Asm->MF;
const TargetSubtargetInfo &TSI = MF.getSubtarget();
const TargetFrameLowering *TFI = TSI.getFrameLowering();
const TargetRegisterInfo *TRI = TSI.getRegisterInfo();
for (const MachineFunction::VariableDbgInfo &VI : MF.getVariableDbgInfo()) {
if (!VI.Var)
continue;
assert(VI.Var->isValidLocationForIntrinsic(VI.Loc) &&
"Expected inlined-at fields to agree");
Processed.insert(InlinedEntity(VI.Var, VI.Loc->getInlinedAt()));
LexicalScope *Scope = LScopes.findLexicalScope(VI.Loc);
// If the variable's scope is not found, skip it.
if (!Scope)
continue;
// If the variable has an attached offset expression, extract it.
// FIXME: Try to handle DW_OP_deref as well.
int64_t ExprOffset = 0;
bool Deref = false;
if (VI.Expr) {
// If there is a single DW_OP_deref element, use an offset of 0 and keep going.
if (VI.Expr->getNumElements() == 1 &&
VI.Expr->getElement(0) == llvm::dwarf::DW_OP_deref)
Deref = true;
else if (!VI.Expr->extractIfOffset(ExprOffset))
continue;
}
// Get the frame register used and the offset.
unsigned FrameReg = 0;
int FrameOffset = TFI->getFrameIndexReference(*Asm->MF, VI.Slot, FrameReg);
uint16_t CVReg = TRI->getCodeViewRegNum(FrameReg);
// Calculate the label ranges.
LocalVarDefRange DefRange =
createDefRangeMem(CVReg, FrameOffset + ExprOffset);
for (const InsnRange &Range : Scope->getRanges()) {
const MCSymbol *Begin = getLabelBeforeInsn(Range.first);
const MCSymbol *End = getLabelAfterInsn(Range.second);
End = End ? End : Asm->getFunctionEnd();
DefRange.Ranges.emplace_back(Begin, End);
}
LocalVariable Var;
Var.DIVar = VI.Var;
Var.DefRanges.emplace_back(std::move(DefRange));
if (Deref)
Var.UseReferenceType = true;
recordLocalVariable(std::move(Var), Scope);
}
}
static bool canUseReferenceType(const DbgVariableLocation &Loc) {
return !Loc.LoadChain.empty() && Loc.LoadChain.back() == 0;
}
static bool needsReferenceType(const DbgVariableLocation &Loc) {
return Loc.LoadChain.size() == 2 && Loc.LoadChain.back() == 0;
}
void CodeViewDebug::calculateRanges(
LocalVariable &Var, const DbgValueHistoryMap::Entries &Entries) {
const TargetRegisterInfo *TRI = Asm->MF->getSubtarget().getRegisterInfo();
// Calculate the definition ranges.
for (auto I = Entries.begin(), E = Entries.end(); I != E; ++I) {
const auto &Entry = *I;
if (!Entry.isDbgValue())
continue;
const MachineInstr *DVInst = Entry.getInstr();
assert(DVInst->isDebugValue() && "Invalid History entry");
// FIXME: Find a way to represent constant variables, since they are
// relatively common.
Optional<DbgVariableLocation> Location =
DbgVariableLocation::extractFromMachineInstruction(*DVInst);
if (!Location)
continue;
// CodeView can only express variables in register and variables in memory
// at a constant offset from a register. However, for variables passed
// indirectly by pointer, it is common for that pointer to be spilled to a
// stack location. For the special case of one offset load followed by a
// zero offset load (a pointer spilled to the stack), we change the type of
// the local variable from a value type to a reference type. This tricks the
// debugger into doing the load for us.
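// Concretely, a variable passed by pointer whose pointer was spilled shows up
// with LoadChain = {FrameOffset, 0}; dropping the trailing zero-offset load
// and retyping the variable as a reference leaves a single load that the
// debugger can still express.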
if (Var.UseReferenceType) {
// We're using a reference type. Drop the last zero offset load.
if (canUseReferenceType(*Location))
Location->LoadChain.pop_back();
else
continue;
} else if (needsReferenceType(*Location)) {
// This location can't be expressed without switching to a reference type.
// Start over using that.
Var.UseReferenceType = true;
Var.DefRanges.clear();
calculateRanges(Var, Entries);
return;
}
// We can only handle a register or an offset load of a register.
if (Location->Register == 0 || Location->LoadChain.size() > 1)
continue;
{
LocalVarDefRange DR;
DR.CVRegister = TRI->getCodeViewRegNum(Location->Register);
DR.InMemory = !Location->LoadChain.empty();
DR.DataOffset =
!Location->LoadChain.empty() ? Location->LoadChain.back() : 0;
if (Location->FragmentInfo) {
DR.IsSubfield = true;
DR.StructOffset = Location->FragmentInfo->OffsetInBits / 8;
} else {
DR.IsSubfield = false;
DR.StructOffset = 0;
}
if (Var.DefRanges.empty() ||
Var.DefRanges.back().isDifferentLocation(DR)) {
Var.DefRanges.emplace_back(std::move(DR));
}
}
// Compute the label range.
const MCSymbol *Begin = getLabelBeforeInsn(Entry.getInstr());
const MCSymbol *End;
if (Entry.getEndIndex() != DbgValueHistoryMap::NoEntry) {
auto &EndingEntry = Entries[Entry.getEndIndex()];
End = EndingEntry.isDbgValue()
? getLabelBeforeInsn(EndingEntry.getInstr())
: getLabelAfterInsn(EndingEntry.getInstr());
} else
End = Asm->getFunctionEnd();
// If the last range end is our begin, just extend the last range.
// Otherwise make a new range.
SmallVectorImpl<std::pair<const MCSymbol *, const MCSymbol *>> &R =
Var.DefRanges.back().Ranges;
if (!R.empty() && R.back().second == Begin)
R.back().second = End;
else
R.emplace_back(Begin, End);
// FIXME: Do more range combining.
}
}
void CodeViewDebug::collectVariableInfo(const DISubprogram *SP) {
DenseSet<InlinedEntity> Processed;
// Grab the variable info that was squirreled away in the MMI side-table.
collectVariableInfoFromMFTable(Processed);
for (const auto &I : DbgValues) {
InlinedEntity IV = I.first;
if (Processed.count(IV))
continue;
const DILocalVariable *DIVar = cast<DILocalVariable>(IV.first);
const DILocation *InlinedAt = IV.second;
// Instruction ranges, specifying where IV is accessible.
const auto &Entries = I.second;
LexicalScope *Scope = nullptr;
if (InlinedAt)
Scope = LScopes.findInlinedScope(DIVar->getScope(), InlinedAt);
else
Scope = LScopes.findLexicalScope(DIVar->getScope());
// If the variable's scope is not found, skip it.
if (!Scope)
continue;
LocalVariable Var;
Var.DIVar = DIVar;
calculateRanges(Var, Entries);
recordLocalVariable(std::move(Var), Scope);
}
}
void CodeViewDebug::beginFunctionImpl(const MachineFunction *MF) {
const TargetSubtargetInfo &TSI = MF->getSubtarget();
const TargetRegisterInfo *TRI = TSI.getRegisterInfo();
const MachineFrameInfo &MFI = MF->getFrameInfo();
const Function &GV = MF->getFunction();
auto Insertion = FnDebugInfo.insert({&GV, llvm::make_unique<FunctionInfo>()});
assert(Insertion.second && "function already has info");
CurFn = Insertion.first->second.get();
CurFn->FuncId = NextFuncId++;
CurFn->Begin = Asm->getFunctionBegin();
// The S_FRAMEPROC record reports the stack size, and how many bytes of
// callee-saved registers were used. For targets that don't use a PUSH
// instruction (e.g. AArch64), this will be zero.
CurFn->CSRSize = MFI.getCVBytesOfCalleeSavedRegisters();
CurFn->FrameSize = MFI.getStackSize();
CurFn->OffsetAdjustment = MFI.getOffsetAdjustment();
CurFn->HasStackRealignment = TRI->needsStackRealignment(*MF);
// For this function's S_FRAMEPROC record, figure out which CodeView register
// will be the frame pointer.
CurFn->EncodedParamFramePtrReg = EncodedFramePtrReg::None; // None.
CurFn->EncodedLocalFramePtrReg = EncodedFramePtrReg::None; // None.
if (CurFn->FrameSize > 0) {
if (!TSI.getFrameLowering()->hasFP(*MF)) {
CurFn->EncodedLocalFramePtrReg = EncodedFramePtrReg::StackPtr;
CurFn->EncodedParamFramePtrReg = EncodedFramePtrReg::StackPtr;
} else {
// If there is an FP, parameters are always relative to it.
CurFn->EncodedParamFramePtrReg = EncodedFramePtrReg::FramePtr;
if (CurFn->HasStackRealignment) {
// If the stack needs realignment, locals are relative to SP or VFRAME.
CurFn->EncodedLocalFramePtrReg = EncodedFramePtrReg::StackPtr;
} else {
// Otherwise, locals are relative to EBP, and we probably have VLAs or
// other stack adjustments.
CurFn->EncodedLocalFramePtrReg = EncodedFramePtrReg::FramePtr;
}
}
}
// Compute other frame procedure options.
FrameProcedureOptions FPO = FrameProcedureOptions::None;
if (MFI.hasVarSizedObjects())
FPO |= FrameProcedureOptions::HasAlloca;
if (MF->exposesReturnsTwice())
FPO |= FrameProcedureOptions::HasSetJmp;
// FIXME: Set HasLongJmp if we ever track that info.
if (MF->hasInlineAsm())
FPO |= FrameProcedureOptions::HasInlineAssembly;
if (GV.hasPersonalityFn()) {
if (isAsynchronousEHPersonality(
classifyEHPersonality(GV.getPersonalityFn())))
FPO |= FrameProcedureOptions::HasStructuredExceptionHandling;
else
FPO |= FrameProcedureOptions::HasExceptionHandling;
}
if (GV.hasFnAttribute(Attribute::InlineHint))
FPO |= FrameProcedureOptions::MarkedInline;
if (GV.hasFnAttribute(Attribute::Naked))
FPO |= FrameProcedureOptions::Naked;
if (MFI.hasStackProtectorIndex())
FPO |= FrameProcedureOptions::SecurityChecks;
FPO |= FrameProcedureOptions(uint32_t(CurFn->EncodedLocalFramePtrReg) << 14U);
FPO |= FrameProcedureOptions(uint32_t(CurFn->EncodedParamFramePtrReg) << 16U);
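// Note: the shifts above place the two-bit local frame pointer register
// encoding in bits 14-15 of the flags and the parameter frame pointer
// register encoding in bits 16-17.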
if (Asm->TM.getOptLevel() != CodeGenOpt::None &&
!GV.hasOptSize() && !GV.hasOptNone())
FPO |= FrameProcedureOptions::OptimizedForSpeed;
// FIXME: Set GuardCfg when it is implemented.
CurFn->FrameProcOpts = FPO;
OS.EmitCVFuncIdDirective(CurFn->FuncId);
// Find the end of the function prolog. First known non-DBG_VALUE and
// non-frame setup location marks the beginning of the function body.
// FIXME: is there a simpler way to do this? Can we just search
// for the first instruction of the function, not the last of the prolog?
DebugLoc PrologEndLoc;
bool EmptyPrologue = true;
for (const auto &MBB : *MF) {
for (const auto &MI : MBB) {
if (!MI.isMetaInstruction() && !MI.getFlag(MachineInstr::FrameSetup) &&
MI.getDebugLoc()) {
PrologEndLoc = MI.getDebugLoc();
break;
} else if (!MI.isMetaInstruction()) {
EmptyPrologue = false;
}
}
}
// Record beginning of function if we have a non-empty prologue.
if (PrologEndLoc && !EmptyPrologue) {
DebugLoc FnStartDL = PrologEndLoc.getFnDebugLoc();
maybeRecordLocation(FnStartDL, MF);
}
+
+ // Find heap alloc sites and emit labels around them.
+ for (const auto &MBB : *MF) {
+ for (const auto &MI : MBB) {
+ if (MI.getHeapAllocMarker()) {
+ requestLabelBeforeInsn(&MI);
+ requestLabelAfterInsn(&MI);
+ }
+ }
+ }
}
static bool shouldEmitUdt(const DIType *T) {
if (!T)
return false;
// MSVC does not emit UDTs for typedefs that are scoped to classes.
if (T->getTag() == dwarf::DW_TAG_typedef) {
if (DIScope *Scope = T->getScope()) {
switch (Scope->getTag()) {
case dwarf::DW_TAG_structure_type:
case dwarf::DW_TAG_class_type:
case dwarf::DW_TAG_union_type:
return false;
}
}
}
while (true) {
if (!T || T->isForwardDecl())
return false;
const DIDerivedType *DT = dyn_cast<DIDerivedType>(T);
if (!DT)
return true;
T = DT->getBaseType();
}
return true;
}
void CodeViewDebug::addToUDTs(const DIType *Ty) {
// Don't record empty UDTs.
if (Ty->getName().empty())
return;
if (!shouldEmitUdt(Ty))
return;
SmallVector<StringRef, 5> QualifiedNameComponents;
const DISubprogram *ClosestSubprogram =
getQualifiedNameComponents(Ty->getScope(), QualifiedNameComponents);
std::string FullyQualifiedName =
getQualifiedName(QualifiedNameComponents, getPrettyScopeName(Ty));
if (ClosestSubprogram == nullptr) {
GlobalUDTs.emplace_back(std::move(FullyQualifiedName), Ty);
} else if (ClosestSubprogram == CurrentSubprogram) {
LocalUDTs.emplace_back(std::move(FullyQualifiedName), Ty);
}
// TODO: What if the ClosestSubprogram is neither null nor the current
// subprogram? Currently, the UDT just gets dropped on the floor.
//
// The current behavior is not desirable. To get maximal fidelity, we would
// need to perform all type translation before beginning emission of .debug$S
// and then make LocalUDTs a member of FunctionInfo.
}
TypeIndex CodeViewDebug::lowerType(const DIType *Ty, const DIType *ClassTy) {
// Generic dispatch for lowering an unknown type.
switch (Ty->getTag()) {
case dwarf::DW_TAG_array_type:
return lowerTypeArray(cast<DICompositeType>(Ty));
case dwarf::DW_TAG_typedef:
return lowerTypeAlias(cast<DIDerivedType>(Ty));
case dwarf::DW_TAG_base_type:
return lowerTypeBasic(cast<DIBasicType>(Ty));
case dwarf::DW_TAG_pointer_type:
if (cast<DIDerivedType>(Ty)->getName() == "__vtbl_ptr_type")
return lowerTypeVFTableShape(cast<DIDerivedType>(Ty));
LLVM_FALLTHROUGH;
case dwarf::DW_TAG_reference_type:
case dwarf::DW_TAG_rvalue_reference_type:
return lowerTypePointer(cast<DIDerivedType>(Ty));
case dwarf::DW_TAG_ptr_to_member_type:
return lowerTypeMemberPointer(cast<DIDerivedType>(Ty));
case dwarf::DW_TAG_restrict_type:
case dwarf::DW_TAG_const_type:
case dwarf::DW_TAG_volatile_type:
// TODO: add support for DW_TAG_atomic_type here
return lowerTypeModifier(cast<DIDerivedType>(Ty));
case dwarf::DW_TAG_subroutine_type:
if (ClassTy) {
// The member function type of a member function pointer has no
// ThisAdjustment.
return lowerTypeMemberFunction(cast<DISubroutineType>(Ty), ClassTy,
/*ThisAdjustment=*/0,
/*IsStaticMethod=*/false);
}
return lowerTypeFunction(cast<DISubroutineType>(Ty));
case dwarf::DW_TAG_enumeration_type:
return lowerTypeEnum(cast<DICompositeType>(Ty));
case dwarf::DW_TAG_class_type:
case dwarf::DW_TAG_structure_type:
return lowerTypeClass(cast<DICompositeType>(Ty));
case dwarf::DW_TAG_union_type:
return lowerTypeUnion(cast<DICompositeType>(Ty));
case dwarf::DW_TAG_unspecified_type:
if (Ty->getName() == "decltype(nullptr)")
return TypeIndex::NullptrT();
return TypeIndex::None();
default:
// Use the null type index.
return TypeIndex();
}
}
TypeIndex CodeViewDebug::lowerTypeAlias(const DIDerivedType *Ty) {
TypeIndex UnderlyingTypeIndex = getTypeIndex(Ty->getBaseType());
StringRef TypeName = Ty->getName();
addToUDTs(Ty);
if (UnderlyingTypeIndex == TypeIndex(SimpleTypeKind::Int32Long) &&
TypeName == "HRESULT")
return TypeIndex(SimpleTypeKind::HResult);
if (UnderlyingTypeIndex == TypeIndex(SimpleTypeKind::UInt16Short) &&
TypeName == "wchar_t")
return TypeIndex(SimpleTypeKind::WideCharacter);
return UnderlyingTypeIndex;
}
TypeIndex CodeViewDebug::lowerTypeArray(const DICompositeType *Ty) {
const DIType *ElementType = Ty->getBaseType();
TypeIndex ElementTypeIndex = getTypeIndex(ElementType);
// IndexType is size_t, which depends on the bitness of the target.
TypeIndex IndexType = getPointerSizeInBytes() == 8
? TypeIndex(SimpleTypeKind::UInt64Quad)
: TypeIndex(SimpleTypeKind::UInt32Long);
uint64_t ElementSize = getBaseTypeSize(ElementType) / 8;
// Add subranges to array type.
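// Subranges are walked innermost-first. For example, 'int A[2][3]' has
// subranges (2, 3); the loop first writes an LF_ARRAY of 12 bytes for
// 'int [3]', then an LF_ARRAY of 24 bytes for the full 'int [2][3]'.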
DINodeArray Elements = Ty->getElements();
for (int i = Elements.size() - 1; i >= 0; --i) {
const DINode *Element = Elements[i];
assert(Element->getTag() == dwarf::DW_TAG_subrange_type);
const DISubrange *Subrange = cast<DISubrange>(Element);
assert(Subrange->getLowerBound() == 0 &&
"codeview doesn't support subranges with lower bounds");
int64_t Count = -1;
if (auto *CI = Subrange->getCount().dyn_cast<ConstantInt*>())
Count = CI->getSExtValue();
// Forward declarations of arrays without a size and VLAs use a count of -1.
// Emit a count of zero in these cases to match what MSVC does for arrays
// without a size. MSVC doesn't support VLAs, so it's not clear what we
// should do for them even if we could distinguish them.
if (Count == -1)
Count = 0;
// Update the element size and element type index for subsequent subranges.
ElementSize *= Count;
// If this is the outermost array, use the size from the array itself; this
// is more accurate when the array was a VLA or the element type size was
// incomplete.
uint64_t ArraySize =
(i == 0 && ElementSize == 0) ? Ty->getSizeInBits() / 8 : ElementSize;
StringRef Name = (i == 0) ? Ty->getName() : "";
ArrayRecord AR(ElementTypeIndex, IndexType, ArraySize, Name);
ElementTypeIndex = TypeTable.writeLeafType(AR);
}
return ElementTypeIndex;
}
TypeIndex CodeViewDebug::lowerTypeBasic(const DIBasicType *Ty) {
TypeIndex Index;
dwarf::TypeKind Kind;
uint32_t ByteSize;
Kind = static_cast<dwarf::TypeKind>(Ty->getEncoding());
ByteSize = Ty->getSizeInBits() / 8;
SimpleTypeKind STK = SimpleTypeKind::None;
switch (Kind) {
case dwarf::DW_ATE_address:
// FIXME: Translate
break;
case dwarf::DW_ATE_boolean:
switch (ByteSize) {
case 1: STK = SimpleTypeKind::Boolean8; break;
case 2: STK = SimpleTypeKind::Boolean16; break;
case 4: STK = SimpleTypeKind::Boolean32; break;
case 8: STK = SimpleTypeKind::Boolean64; break;
case 16: STK = SimpleTypeKind::Boolean128; break;
}
break;
case dwarf::DW_ATE_complex_float:
switch (ByteSize) {
case 2: STK = SimpleTypeKind::Complex16; break;
case 4: STK = SimpleTypeKind::Complex32; break;
case 8: STK = SimpleTypeKind::Complex64; break;
case 10: STK = SimpleTypeKind::Complex80; break;
case 16: STK = SimpleTypeKind::Complex128; break;
}
break;
case dwarf::DW_ATE_float:
switch (ByteSize) {
case 2: STK = SimpleTypeKind::Float16; break;
case 4: STK = SimpleTypeKind::Float32; break;
case 6: STK = SimpleTypeKind::Float48; break;
case 8: STK = SimpleTypeKind::Float64; break;
case 10: STK = SimpleTypeKind::Float80; break;
case 16: STK = SimpleTypeKind::Float128; break;
}
break;
case dwarf::DW_ATE_signed:
switch (ByteSize) {
case 1: STK = SimpleTypeKind::SignedCharacter; break;
case 2: STK = SimpleTypeKind::Int16Short; break;
case 4: STK = SimpleTypeKind::Int32; break;
case 8: STK = SimpleTypeKind::Int64Quad; break;
case 16: STK = SimpleTypeKind::Int128Oct; break;
}
break;
case dwarf::DW_ATE_unsigned:
switch (ByteSize) {
case 1: STK = SimpleTypeKind::UnsignedCharacter; break;
case 2: STK = SimpleTypeKind::UInt16Short; break;
case 4: STK = SimpleTypeKind::UInt32; break;
case 8: STK = SimpleTypeKind::UInt64Quad; break;
case 16: STK = SimpleTypeKind::UInt128Oct; break;
}
break;
case dwarf::DW_ATE_UTF:
switch (ByteSize) {
case 2: STK = SimpleTypeKind::Character16; break;
case 4: STK = SimpleTypeKind::Character32; break;
}
break;
case dwarf::DW_ATE_signed_char:
if (ByteSize == 1)
STK = SimpleTypeKind::SignedCharacter;
break;
case dwarf::DW_ATE_unsigned_char:
if (ByteSize == 1)
STK = SimpleTypeKind::UnsignedCharacter;
break;
default:
break;
}
// Apply some fixups based on the source-level type name.
if (STK == SimpleTypeKind::Int32 && Ty->getName() == "long int")
STK = SimpleTypeKind::Int32Long;
if (STK == SimpleTypeKind::UInt32 && Ty->getName() == "long unsigned int")
STK = SimpleTypeKind::UInt32Long;
if (STK == SimpleTypeKind::UInt16Short &&
(Ty->getName() == "wchar_t" || Ty->getName() == "__wchar_t"))
STK = SimpleTypeKind::WideCharacter;
if ((STK == SimpleTypeKind::SignedCharacter ||
STK == SimpleTypeKind::UnsignedCharacter) &&
Ty->getName() == "char")
STK = SimpleTypeKind::NarrowCharacter;
return TypeIndex(STK);
}
TypeIndex CodeViewDebug::lowerTypePointer(const DIDerivedType *Ty,
PointerOptions PO) {
TypeIndex PointeeTI = getTypeIndex(Ty->getBaseType());
// Pointers to simple types without any options can use SimpleTypeMode, rather
// than having a dedicated pointer type record.
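// For example, 'int *' on a 64-bit target lowers to the simple index 0x0674:
// kind 0x74 (Int32) with mode 6 (NearPointer64) in the mode byte.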
if (PointeeTI.isSimple() && PO == PointerOptions::None &&
PointeeTI.getSimpleMode() == SimpleTypeMode::Direct &&
Ty->getTag() == dwarf::DW_TAG_pointer_type) {
SimpleTypeMode Mode = Ty->getSizeInBits() == 64
? SimpleTypeMode::NearPointer64
: SimpleTypeMode::NearPointer32;
return TypeIndex(PointeeTI.getSimpleKind(), Mode);
}
PointerKind PK =
Ty->getSizeInBits() == 64 ? PointerKind::Near64 : PointerKind::Near32;
PointerMode PM = PointerMode::Pointer;
switch (Ty->getTag()) {
default: llvm_unreachable("not a pointer tag type");
case dwarf::DW_TAG_pointer_type:
PM = PointerMode::Pointer;
break;
case dwarf::DW_TAG_reference_type:
PM = PointerMode::LValueReference;
break;
case dwarf::DW_TAG_rvalue_reference_type:
PM = PointerMode::RValueReference;
break;
}
if (Ty->isObjectPointer())
PO |= PointerOptions::Const;
PointerRecord PR(PointeeTI, PK, PM, PO, Ty->getSizeInBits() / 8);
return TypeTable.writeLeafType(PR);
}
static PointerToMemberRepresentation
translatePtrToMemberRep(unsigned SizeInBytes, bool IsPMF, unsigned Flags) {
// SizeInBytes being zero generally implies that the member pointer type was
// incomplete, which can happen if it is part of a function prototype. In this
// case, use the unknown model instead of the general model.
if (IsPMF) {
switch (Flags & DINode::FlagPtrToMemberRep) {
case 0:
return SizeInBytes == 0 ? PointerToMemberRepresentation::Unknown
: PointerToMemberRepresentation::GeneralFunction;
case DINode::FlagSingleInheritance:
return PointerToMemberRepresentation::SingleInheritanceFunction;
case DINode::FlagMultipleInheritance:
return PointerToMemberRepresentation::MultipleInheritanceFunction;
case DINode::FlagVirtualInheritance:
return PointerToMemberRepresentation::VirtualInheritanceFunction;
}
} else {
switch (Flags & DINode::FlagPtrToMemberRep) {
case 0:
return SizeInBytes == 0 ? PointerToMemberRepresentation::Unknown
: PointerToMemberRepresentation::GeneralData;
case DINode::FlagSingleInheritance:
return PointerToMemberRepresentation::SingleInheritanceData;
case DINode::FlagMultipleInheritance:
return PointerToMemberRepresentation::MultipleInheritanceData;
case DINode::FlagVirtualInheritance:
return PointerToMemberRepresentation::VirtualInheritanceData;
}
}
llvm_unreachable("invalid ptr to member representation");
}
TypeIndex CodeViewDebug::lowerTypeMemberPointer(const DIDerivedType *Ty,
PointerOptions PO) {
assert(Ty->getTag() == dwarf::DW_TAG_ptr_to_member_type);
TypeIndex ClassTI = getTypeIndex(Ty->getClassType());
TypeIndex PointeeTI = getTypeIndex(Ty->getBaseType(), Ty->getClassType());
PointerKind PK = getPointerSizeInBytes() == 8 ? PointerKind::Near64
: PointerKind::Near32;
bool IsPMF = isa<DISubroutineType>(Ty->getBaseType());
PointerMode PM = IsPMF ? PointerMode::PointerToMemberFunction
: PointerMode::PointerToDataMember;
assert(Ty->getSizeInBits() / 8 <= 0xff && "pointer size too big");
uint8_t SizeInBytes = Ty->getSizeInBits() / 8;
MemberPointerInfo MPI(
ClassTI, translatePtrToMemberRep(SizeInBytes, IsPMF, Ty->getFlags()));
PointerRecord PR(PointeeTI, PK, PM, PO, SizeInBytes, MPI);
return TypeTable.writeLeafType(PR);
}
/// Given a DWARF calling convention, get the CodeView equivalent. If we don't
/// have a translation, use the NearC convention.
static CallingConvention dwarfCCToCodeView(unsigned DwarfCC) {
switch (DwarfCC) {
case dwarf::DW_CC_normal: return CallingConvention::NearC;
case dwarf::DW_CC_BORLAND_msfastcall: return CallingConvention::NearFast;
case dwarf::DW_CC_BORLAND_thiscall: return CallingConvention::ThisCall;
case dwarf::DW_CC_BORLAND_stdcall: return CallingConvention::NearStdCall;
case dwarf::DW_CC_BORLAND_pascal: return CallingConvention::NearPascal;
case dwarf::DW_CC_LLVM_vectorcall: return CallingConvention::NearVector;
}
return CallingConvention::NearC;
}
TypeIndex CodeViewDebug::lowerTypeModifier(const DIDerivedType *Ty) {
ModifierOptions Mods = ModifierOptions::None;
PointerOptions PO = PointerOptions::None;
bool IsModifier = true;
const DIType *BaseTy = Ty;
while (IsModifier && BaseTy) {
// FIXME: Need to add DWARF tags for __unaligned and _Atomic
switch (BaseTy->getTag()) {
case dwarf::DW_TAG_const_type:
Mods |= ModifierOptions::Const;
PO |= PointerOptions::Const;
break;
case dwarf::DW_TAG_volatile_type:
Mods |= ModifierOptions::Volatile;
PO |= PointerOptions::Volatile;
break;
case dwarf::DW_TAG_restrict_type:
// Only pointer types can be marked with __restrict. There is no known flag
// for __restrict in LF_MODIFIER records.
PO |= PointerOptions::Restrict;
break;
default:
IsModifier = false;
break;
}
if (IsModifier)
BaseTy = cast<DIDerivedType>(BaseTy)->getBaseType();
}
// Check if the inner type will use an LF_POINTER record. If so, the
// qualifiers will go in the LF_POINTER record. This comes up for types like
// 'int *const' and 'int *__restrict', not the more common cases like 'const
// char *'.
if (BaseTy) {
switch (BaseTy->getTag()) {
case dwarf::DW_TAG_pointer_type:
case dwarf::DW_TAG_reference_type:
case dwarf::DW_TAG_rvalue_reference_type:
return lowerTypePointer(cast<DIDerivedType>(BaseTy), PO);
case dwarf::DW_TAG_ptr_to_member_type:
return lowerTypeMemberPointer(cast<DIDerivedType>(BaseTy), PO);
default:
break;
}
}
TypeIndex ModifiedTI = getTypeIndex(BaseTy);
// Return the base type index if there aren't any modifiers. For example, the
// metadata could contain restrict wrappers around non-pointer types.
if (Mods == ModifierOptions::None)
return ModifiedTI;
ModifierRecord MR(ModifiedTI, Mods);
return TypeTable.writeLeafType(MR);
}
TypeIndex CodeViewDebug::lowerTypeFunction(const DISubroutineType *Ty) {
SmallVector<TypeIndex, 8> ReturnAndArgTypeIndices;
for (const DIType *ArgType : Ty->getTypeArray())
ReturnAndArgTypeIndices.push_back(getTypeIndex(ArgType));
// MSVC uses type None for variadic arguments.
if (ReturnAndArgTypeIndices.size() > 1 &&
ReturnAndArgTypeIndices.back() == TypeIndex::Void()) {
ReturnAndArgTypeIndices.back() = TypeIndex::None();
}
TypeIndex ReturnTypeIndex = TypeIndex::Void();
ArrayRef<TypeIndex> ArgTypeIndices = None;
if (!ReturnAndArgTypeIndices.empty()) {
auto ReturnAndArgTypesRef = makeArrayRef(ReturnAndArgTypeIndices);
ReturnTypeIndex = ReturnAndArgTypesRef.front();
ArgTypeIndices = ReturnAndArgTypesRef.drop_front();
}
ArgListRecord ArgListRec(TypeRecordKind::ArgList, ArgTypeIndices);
TypeIndex ArgListIndex = TypeTable.writeLeafType(ArgListRec);
CallingConvention CC = dwarfCCToCodeView(Ty->getCC());
FunctionOptions FO = getFunctionOptions(Ty);
ProcedureRecord Procedure(ReturnTypeIndex, CC, FO, ArgTypeIndices.size(),
ArgListIndex);
return TypeTable.writeLeafType(Procedure);
}
TypeIndex CodeViewDebug::lowerTypeMemberFunction(const DISubroutineType *Ty,
const DIType *ClassTy,
int ThisAdjustment,
bool IsStaticMethod,
FunctionOptions FO) {
// Lower the containing class type.
TypeIndex ClassType = getTypeIndex(ClassTy);
DITypeRefArray ReturnAndArgs = Ty->getTypeArray();
unsigned Index = 0;
SmallVector<TypeIndex, 8> ArgTypeIndices;
TypeIndex ReturnTypeIndex = TypeIndex::Void();
if (ReturnAndArgs.size() > Index) {
ReturnTypeIndex = getTypeIndex(ReturnAndArgs[Index++]);
}
// If the first argument is a pointer type and this isn't a static method,
// treat it as the special 'this' parameter, which is encoded separately from
// the arguments.
TypeIndex ThisTypeIndex;
if (!IsStaticMethod && ReturnAndArgs.size() > Index) {
if (const DIDerivedType *PtrTy =
dyn_cast_or_null<DIDerivedType>(ReturnAndArgs[Index])) {
if (PtrTy->getTag() == dwarf::DW_TAG_pointer_type) {
ThisTypeIndex = getTypeIndexForThisPtr(PtrTy, Ty);
Index++;
}
}
}
while (Index < ReturnAndArgs.size())
ArgTypeIndices.push_back(getTypeIndex(ReturnAndArgs[Index++]));
// MSVC uses type None for variadic arguments.
if (!ArgTypeIndices.empty() && ArgTypeIndices.back() == TypeIndex::Void())
ArgTypeIndices.back() = TypeIndex::None();
ArgListRecord ArgListRec(TypeRecordKind::ArgList, ArgTypeIndices);
TypeIndex ArgListIndex = TypeTable.writeLeafType(ArgListRec);
CallingConvention CC = dwarfCCToCodeView(Ty->getCC());
MemberFunctionRecord MFR(ReturnTypeIndex, ClassType, ThisTypeIndex, CC, FO,
ArgTypeIndices.size(), ArgListIndex, ThisAdjustment);
return TypeTable.writeLeafType(MFR);
}
TypeIndex CodeViewDebug::lowerTypeVFTableShape(const DIDerivedType *Ty) {
unsigned VSlotCount =
Ty->getSizeInBits() / (8 * Asm->MAI->getCodePointerSize());
SmallVector<VFTableSlotKind, 4> Slots(VSlotCount, VFTableSlotKind::Near);
VFTableShapeRecord VFTSR(Slots);
return TypeTable.writeLeafType(VFTSR);
}
static MemberAccess translateAccessFlags(unsigned RecordTag, unsigned Flags) {
switch (Flags & DINode::FlagAccessibility) {
case DINode::FlagPrivate: return MemberAccess::Private;
case DINode::FlagPublic: return MemberAccess::Public;
case DINode::FlagProtected: return MemberAccess::Protected;
case 0:
// If there was no explicit access control, provide the default for the tag.
return RecordTag == dwarf::DW_TAG_class_type ? MemberAccess::Private
: MemberAccess::Public;
}
llvm_unreachable("access flags are exclusive");
}
static MethodOptions translateMethodOptionFlags(const DISubprogram *SP) {
if (SP->isArtificial())
return MethodOptions::CompilerGenerated;
// FIXME: Handle other MethodOptions.
return MethodOptions::None;
}
static MethodKind translateMethodKindFlags(const DISubprogram *SP,
bool Introduced) {
if (SP->getFlags() & DINode::FlagStaticMember)
return MethodKind::Static;
switch (SP->getVirtuality()) {
case dwarf::DW_VIRTUALITY_none:
break;
case dwarf::DW_VIRTUALITY_virtual:
return Introduced ? MethodKind::IntroducingVirtual : MethodKind::Virtual;
case dwarf::DW_VIRTUALITY_pure_virtual:
return Introduced ? MethodKind::PureIntroducingVirtual
: MethodKind::PureVirtual;
default:
llvm_unreachable("unhandled virtuality case");
}
return MethodKind::Vanilla;
}
static TypeRecordKind getRecordKind(const DICompositeType *Ty) {
switch (Ty->getTag()) {
case dwarf::DW_TAG_class_type: return TypeRecordKind::Class;
case dwarf::DW_TAG_structure_type: return TypeRecordKind::Struct;
}
llvm_unreachable("unexpected tag");
}
/// Return ClassOptions that should be present on both the forward declaration
/// and the definition of a tag type.
static ClassOptions getCommonClassOptions(const DICompositeType *Ty) {
ClassOptions CO = ClassOptions::None;
// MSVC always sets this flag, even for local types. Clang doesn't always
// appear to give every type a linkage name, which may be problematic for us.
// FIXME: Investigate the consequences of not following them here.
if (!Ty->getIdentifier().empty())
CO |= ClassOptions::HasUniqueName;
// Put the Nested flag on a type if it appears immediately inside a tag type.
// Do not walk the scope chain. Do not attempt to compute ContainsNestedClass
// here. That flag is only set on definitions, and not forward declarations.
const DIScope *ImmediateScope = Ty->getScope();
if (ImmediateScope && isa<DICompositeType>(ImmediateScope))
CO |= ClassOptions::Nested;
// Put the Scoped flag on function-local types. MSVC puts this flag on an enum
// type only when it has an immediate function scope. Clang never puts enums
// inside DILexicalBlock scopes. Enum types, as generated by clang, are
// always in function, class, or file scopes.
if (Ty->getTag() == dwarf::DW_TAG_enumeration_type) {
if (ImmediateScope && isa<DISubprogram>(ImmediateScope))
CO |= ClassOptions::Scoped;
} else {
for (const DIScope *Scope = ImmediateScope; Scope != nullptr;
Scope = Scope->getScope()) {
if (isa<DISubprogram>(Scope)) {
CO |= ClassOptions::Scoped;
break;
}
}
}
return CO;
}
void CodeViewDebug::addUDTSrcLine(const DIType *Ty, TypeIndex TI) {
switch (Ty->getTag()) {
case dwarf::DW_TAG_class_type:
case dwarf::DW_TAG_structure_type:
case dwarf::DW_TAG_union_type:
case dwarf::DW_TAG_enumeration_type:
break;
default:
return;
}
if (const auto *File = Ty->getFile()) {
StringIdRecord SIDR(TypeIndex(0x0), getFullFilepath(File));
TypeIndex SIDI = TypeTable.writeLeafType(SIDR);
UdtSourceLineRecord USLR(TI, SIDI, Ty->getLine());
TypeTable.writeLeafType(USLR);
}
}
TypeIndex CodeViewDebug::lowerTypeEnum(const DICompositeType *Ty) {
ClassOptions CO = getCommonClassOptions(Ty);
TypeIndex FTI;
unsigned EnumeratorCount = 0;
if (Ty->isForwardDecl()) {
CO |= ClassOptions::ForwardReference;
} else {
ContinuationRecordBuilder ContinuationBuilder;
ContinuationBuilder.begin(ContinuationRecordKind::FieldList);
for (const DINode *Element : Ty->getElements()) {
// We assume that the frontend provides all members in source declaration
// order, which is what MSVC does.
if (auto *Enumerator = dyn_cast_or_null<DIEnumerator>(Element)) {
EnumeratorRecord ER(MemberAccess::Public,
APSInt::getUnsigned(Enumerator->getValue()),
Enumerator->getName());
ContinuationBuilder.writeMemberType(ER);
EnumeratorCount++;
}
}
FTI = TypeTable.insertRecord(ContinuationBuilder);
}
std::string FullName = getFullyQualifiedName(Ty);
EnumRecord ER(EnumeratorCount, CO, FTI, FullName, Ty->getIdentifier(),
getTypeIndex(Ty->getBaseType()));
TypeIndex EnumTI = TypeTable.writeLeafType(ER);
addUDTSrcLine(Ty, EnumTI);
return EnumTI;
}
//===----------------------------------------------------------------------===//
// ClassInfo
//===----------------------------------------------------------------------===//
struct llvm::ClassInfo {
struct MemberInfo {
const DIDerivedType *MemberTypeNode;
uint64_t BaseOffset;
};
// [MemberInfo]
using MemberList = std::vector<MemberInfo>;
using MethodsList = TinyPtrVector<const DISubprogram *>;
// MethodName -> MethodsList
using MethodsMap = MapVector<MDString *, MethodsList>;
/// Base classes.
std::vector<const DIDerivedType *> Inheritance;
/// Direct members.
MemberList Members;
// Direct overloaded methods gathered by name.
MethodsMap Methods;
TypeIndex VShapeTI;
std::vector<const DIType *> NestedTypes;
};
void CodeViewDebug::clear() {
assert(CurFn == nullptr);
FileIdMap.clear();
FnDebugInfo.clear();
FileToFilepathMap.clear();
LocalUDTs.clear();
GlobalUDTs.clear();
TypeIndices.clear();
CompleteTypeIndices.clear();
ScopeGlobals.clear();
}
void CodeViewDebug::collectMemberInfo(ClassInfo &Info,
const DIDerivedType *DDTy) {
if (!DDTy->getName().empty()) {
Info.Members.push_back({DDTy, 0});
return;
}
// An unnamed member may represent a nested struct or union. Attempt to
// interpret the unnamed member as a DICompositeType possibly wrapped in
// qualifier types. Add all the indirect fields to the current record if that
// succeeds, and drop the member if that fails.
assert((DDTy->getOffsetInBits() % 8) == 0 && "Unnamed bitfield member!");
uint64_t Offset = DDTy->getOffsetInBits();
const DIType *Ty = DDTy->getBaseType();
bool FullyResolved = false;
while (!FullyResolved) {
switch (Ty->getTag()) {
case dwarf::DW_TAG_const_type:
case dwarf::DW_TAG_volatile_type:
// FIXME: we should apply the qualifier types to the indirect fields
// rather than dropping them.
Ty = cast<DIDerivedType>(Ty)->getBaseType();
break;
default:
FullyResolved = true;
break;
}
}
const DICompositeType *DCTy = dyn_cast<DICompositeType>(Ty);
if (!DCTy)
return;
ClassInfo NestedInfo = collectClassInfo(DCTy);
for (const ClassInfo::MemberInfo &IndirectField : NestedInfo.Members)
Info.Members.push_back(
{IndirectField.MemberTypeNode, IndirectField.BaseOffset + Offset});
}
ClassInfo CodeViewDebug::collectClassInfo(const DICompositeType *Ty) {
ClassInfo Info;
// Add elements to structure type.
DINodeArray Elements = Ty->getElements();
for (auto *Element : Elements) {
// We assume that the frontend provides all members in source declaration
// order, which is what MSVC does.
if (!Element)
continue;
if (auto *SP = dyn_cast<DISubprogram>(Element)) {
Info.Methods[SP->getRawName()].push_back(SP);
} else if (auto *DDTy = dyn_cast<DIDerivedType>(Element)) {
if (DDTy->getTag() == dwarf::DW_TAG_member) {
collectMemberInfo(Info, DDTy);
} else if (DDTy->getTag() == dwarf::DW_TAG_inheritance) {
Info.Inheritance.push_back(DDTy);
} else if (DDTy->getTag() == dwarf::DW_TAG_pointer_type &&
DDTy->getName() == "__vtbl_ptr_type") {
Info.VShapeTI = getTypeIndex(DDTy);
} else if (DDTy->getTag() == dwarf::DW_TAG_typedef) {
Info.NestedTypes.push_back(DDTy);
} else if (DDTy->getTag() == dwarf::DW_TAG_friend) {
// Ignore friend members. It appears that MSVC emitted info about
// friends in the past, but modern versions do not.
}
} else if (auto *Composite = dyn_cast<DICompositeType>(Element)) {
Info.NestedTypes.push_back(Composite);
}
// Skip other unrecognized kinds of elements.
}
return Info;
}
static bool shouldAlwaysEmitCompleteClassType(const DICompositeType *Ty) {
// This routine is used by lowerTypeClass and lowerTypeUnion to determine
// if a complete type should be emitted instead of a forward reference.
return Ty->getName().empty() && Ty->getIdentifier().empty() &&
!Ty->isForwardDecl();
}
TypeIndex CodeViewDebug::lowerTypeClass(const DICompositeType *Ty) {
// Emit the complete type for unnamed structs. C++ classes with methods
// which have a circular reference back to the class type are expected to
// be named by the front-end and should not be "unnamed". C unnamed
// structs should not have circular references.
if (shouldAlwaysEmitCompleteClassType(Ty)) {
// If this unnamed complete type is already in the process of being defined
// then the description of the type is malformed and cannot be emitted
// into CodeView correctly so report a fatal error.
auto I = CompleteTypeIndices.find(Ty);
if (I != CompleteTypeIndices.end() && I->second == TypeIndex())
report_fatal_error("cannot debug circular reference to unnamed type");
return getCompleteTypeIndex(Ty);
}
// First, construct the forward decl. Don't look into Ty to compute the
// forward decl options, since it might not be available in all TUs.
TypeRecordKind Kind = getRecordKind(Ty);
ClassOptions CO =
ClassOptions::ForwardReference | getCommonClassOptions(Ty);
std::string FullName = getFullyQualifiedName(Ty);
ClassRecord CR(Kind, 0, CO, TypeIndex(), TypeIndex(), TypeIndex(), 0,
FullName, Ty->getIdentifier());
TypeIndex FwdDeclTI = TypeTable.writeLeafType(CR);
if (!Ty->isForwardDecl())
DeferredCompleteTypes.push_back(Ty);
return FwdDeclTI;
}
TypeIndex CodeViewDebug::lowerCompleteTypeClass(const DICompositeType *Ty) {
// Construct the field list and complete type record.
TypeRecordKind Kind = getRecordKind(Ty);
ClassOptions CO = getCommonClassOptions(Ty);
TypeIndex FieldTI;
TypeIndex VShapeTI;
unsigned FieldCount;
bool ContainsNestedClass;
std::tie(FieldTI, VShapeTI, FieldCount, ContainsNestedClass) =
lowerRecordFieldList(Ty);
if (ContainsNestedClass)
CO |= ClassOptions::ContainsNestedClass;
// MSVC appears to set this flag by searching for any destructor or method
// with FunctionOptions::Constructor among the emitted members. The Clang AST
// has all the members; however, special member functions are not yet emitted
// into debug information. For now, checking a class's non-triviality seems
// enough.
// FIXME: not true for a nested unnamed struct.
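// (Illustrative, hedged: a struct with a user-provided constructor is
// non-trivial and gets HasConstructorOrDestructor; a plain aggregate with
// only data members does not.)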
if (isNonTrivial(Ty))
CO |= ClassOptions::HasConstructorOrDestructor;
std::string FullName = getFullyQualifiedName(Ty);
uint64_t SizeInBytes = Ty->getSizeInBits() / 8;
ClassRecord CR(Kind, FieldCount, CO, FieldTI, TypeIndex(), VShapeTI,
SizeInBytes, FullName, Ty->getIdentifier());
TypeIndex ClassTI = TypeTable.writeLeafType(CR);
addUDTSrcLine(Ty, ClassTI);
addToUDTs(Ty);
return ClassTI;
}
TypeIndex CodeViewDebug::lowerTypeUnion(const DICompositeType *Ty) {
// Emit the complete type for unnamed unions.
if (shouldAlwaysEmitCompleteClassType(Ty))
return getCompleteTypeIndex(Ty);
ClassOptions CO =
ClassOptions::ForwardReference | getCommonClassOptions(Ty);
std::string FullName = getFullyQualifiedName(Ty);
UnionRecord UR(0, CO, TypeIndex(), 0, FullName, Ty->getIdentifier());
TypeIndex FwdDeclTI = TypeTable.writeLeafType(UR);
if (!Ty->isForwardDecl())
DeferredCompleteTypes.push_back(Ty);
return FwdDeclTI;
}
TypeIndex CodeViewDebug::lowerCompleteTypeUnion(const DICompositeType *Ty) {
ClassOptions CO = ClassOptions::Sealed | getCommonClassOptions(Ty);
TypeIndex FieldTI;
unsigned FieldCount;
bool ContainsNestedClass;
std::tie(FieldTI, std::ignore, FieldCount, ContainsNestedClass) =
lowerRecordFieldList(Ty);
if (ContainsNestedClass)
CO |= ClassOptions::ContainsNestedClass;
uint64_t SizeInBytes = Ty->getSizeInBits() / 8;
std::string FullName = getFullyQualifiedName(Ty);
UnionRecord UR(FieldCount, CO, FieldTI, SizeInBytes, FullName,
Ty->getIdentifier());
TypeIndex UnionTI = TypeTable.writeLeafType(UR);
addUDTSrcLine(Ty, UnionTI);
addToUDTs(Ty);
return UnionTI;
}
std::tuple<TypeIndex, TypeIndex, unsigned, bool>
CodeViewDebug::lowerRecordFieldList(const DICompositeType *Ty) {
// Manually count members. MSVC appears to count everything that generates a
// field list record. Each individual overload in a method overload group
// contributes to this count, even though the overload group is a single field
// list record.
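// Illustrative note (hedged, not from the original source): a class
// declaring 'void f();' and 'void f(int);' adds two to this count, even
// though both overloads are reached through a single OverloadedMethodRecord
// entry in the field list.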
unsigned MemberCount = 0;
ClassInfo Info = collectClassInfo(Ty);
ContinuationRecordBuilder ContinuationBuilder;
ContinuationBuilder.begin(ContinuationRecordKind::FieldList);
// Create base classes.
for (const DIDerivedType *I : Info.Inheritance) {
if (I->getFlags() & DINode::FlagVirtual) {
// Virtual base.
unsigned VBPtrOffset = I->getVBPtrOffset();
// FIXME: Despite the accessor name, the offset is really in bytes.
unsigned VBTableIndex = I->getOffsetInBits() / 4;
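// (Assumed reading, not from the original source: each vbtable slot is a
// 4-byte entry, so dividing the byte offset by 4 yields the slot index.)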
auto RecordKind = (I->getFlags() & DINode::FlagIndirectVirtualBase) == DINode::FlagIndirectVirtualBase
? TypeRecordKind::IndirectVirtualBaseClass
: TypeRecordKind::VirtualBaseClass;
VirtualBaseClassRecord VBCR(
RecordKind, translateAccessFlags(Ty->getTag(), I->getFlags()),
getTypeIndex(I->getBaseType()), getVBPTypeIndex(), VBPtrOffset,
VBTableIndex);
ContinuationBuilder.writeMemberType(VBCR);
MemberCount++;
} else {
assert(I->getOffsetInBits() % 8 == 0 &&
"bases must be on byte boundaries");
BaseClassRecord BCR(translateAccessFlags(Ty->getTag(), I->getFlags()),
getTypeIndex(I->getBaseType()),
I->getOffsetInBits() / 8);
ContinuationBuilder.writeMemberType(BCR);
MemberCount++;
}
}
// Create members.
for (ClassInfo::MemberInfo &MemberInfo : Info.Members) {
const DIDerivedType *Member = MemberInfo.MemberTypeNode;
TypeIndex MemberBaseType = getTypeIndex(Member->getBaseType());
StringRef MemberName = Member->getName();
MemberAccess Access =
translateAccessFlags(Ty->getTag(), Member->getFlags());
if (Member->isStaticMember()) {
StaticDataMemberRecord SDMR(Access, MemberBaseType, MemberName);
ContinuationBuilder.writeMemberType(SDMR);
MemberCount++;
continue;
}
// Virtual function pointer member.
if ((Member->getFlags() & DINode::FlagArtificial) &&
Member->getName().startswith("_vptr$")) {
VFPtrRecord VFPR(getTypeIndex(Member->getBaseType()));
ContinuationBuilder.writeMemberType(VFPR);
MemberCount++;
continue;
}
// Data member.
uint64_t MemberOffsetInBits =
Member->getOffsetInBits() + MemberInfo.BaseOffset;
if (Member->isBitField()) {
uint64_t StartBitOffset = MemberOffsetInBits;
if (const auto *CI =
dyn_cast_or_null<ConstantInt>(Member->getStorageOffsetInBits())) {
MemberOffsetInBits = CI->getZExtValue() + MemberInfo.BaseOffset;
}
StartBitOffset -= MemberOffsetInBits;
BitFieldRecord BFR(MemberBaseType, Member->getSizeInBits(),
StartBitOffset);
MemberBaseType = TypeTable.writeLeafType(BFR);
}
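// Worked example (hedged, not from the original source): for
//   struct { unsigned A : 3; unsigned B : 5; };
// member B has offset 3 and storage offset 0, so StartBitOffset becomes 3
// and an LF_BITFIELD of width 5 wraps B's underlying type.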
uint64_t MemberOffsetInBytes = MemberOffsetInBits / 8;
DataMemberRecord DMR(Access, MemberBaseType, MemberOffsetInBytes,
MemberName);
ContinuationBuilder.writeMemberType(DMR);
MemberCount++;
}
// Create methods.
for (auto &MethodItr : Info.Methods) {
StringRef Name = MethodItr.first->getString();
std::vector<OneMethodRecord> Methods;
for (const DISubprogram *SP : MethodItr.second) {
TypeIndex MethodType = getMemberFunctionType(SP, Ty);
bool Introduced = SP->getFlags() & DINode::FlagIntroducedVirtual;
unsigned VFTableOffset = -1;
if (Introduced)
VFTableOffset = SP->getVirtualIndex() * getPointerSizeInBytes();
Methods.push_back(OneMethodRecord(
MethodType, translateAccessFlags(Ty->getTag(), SP->getFlags()),
translateMethodKindFlags(SP, Introduced),
translateMethodOptionFlags(SP), VFTableOffset, Name));
MemberCount++;
}
assert(!Methods.empty() && "Empty methods map entry");
if (Methods.size() == 1)
ContinuationBuilder.writeMemberType(Methods[0]);
else {
// FIXME: Make this use its own ContinuationBuilder so that
// MethodOverloadList can be split correctly.
MethodOverloadListRecord MOLR(Methods);
TypeIndex MethodList = TypeTable.writeLeafType(MOLR);
OverloadedMethodRecord OMR(Methods.size(), MethodList, Name);
ContinuationBuilder.writeMemberType(OMR);
}
}
// Create nested classes.
for (const DIType *Nested : Info.NestedTypes) {
NestedTypeRecord R(getTypeIndex(Nested), Nested->getName());
ContinuationBuilder.writeMemberType(R);
MemberCount++;
}
TypeIndex FieldTI = TypeTable.insertRecord(ContinuationBuilder);
return std::make_tuple(FieldTI, Info.VShapeTI, MemberCount,
!Info.NestedTypes.empty());
}
TypeIndex CodeViewDebug::getVBPTypeIndex() {
if (!VBPType.getIndex()) {
// Make a 'const int *' type.
ModifierRecord MR(TypeIndex::Int32(), ModifierOptions::Const);
TypeIndex ModifiedTI = TypeTable.writeLeafType(MR);
PointerKind PK = getPointerSizeInBytes() == 8 ? PointerKind::Near64
: PointerKind::Near32;
PointerMode PM = PointerMode::Pointer;
PointerOptions PO = PointerOptions::None;
PointerRecord PR(ModifiedTI, PK, PM, PO, getPointerSizeInBytes());
VBPType = TypeTable.writeLeafType(PR);
}
return VBPType;
}
TypeIndex CodeViewDebug::getTypeIndex(const DIType *Ty, const DIType *ClassTy) {
// The null DIType is the void type. Don't try to hash it.
if (!Ty)
return TypeIndex::Void();
// Check if we've already translated this type. Don't try to do a
// get-or-create style insertion that caches the hash lookup across the
// lowerType call. It will update the TypeIndices map.
auto I = TypeIndices.find({Ty, ClassTy});
if (I != TypeIndices.end())
return I->second;
TypeLoweringScope S(*this);
TypeIndex TI = lowerType(Ty, ClassTy);
return recordTypeIndexForDINode(Ty, TI, ClassTy);
}
codeview::TypeIndex
CodeViewDebug::getTypeIndexForThisPtr(const DIDerivedType *PtrTy,
const DISubroutineType *SubroutineTy) {
assert(PtrTy->getTag() == dwarf::DW_TAG_pointer_type &&
"this type must be a pointer type");
PointerOptions Options = PointerOptions::None;
if (SubroutineTy->getFlags() & DINode::DIFlags::FlagLValueReference)
Options = PointerOptions::LValueRefThisPointer;
else if (SubroutineTy->getFlags() & DINode::DIFlags::FlagRValueReference)
Options = PointerOptions::RValueRefThisPointer;
// Check if we've already translated this type. If there is no ref qualifier
// on the function then we look up this pointer type with no associated class
// so that the TypeIndex for the this pointer can be shared with the type
// index for other pointers to this class type. If there is a ref qualifier
// then we look up the pointer using the subroutine as the parent type.
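// Illustrative note (hedged): for 'void C::f();' the 'this' type is the
// ordinary 'C *' pointer whose index can be shared, while a ref-qualified
// 'void C::g() &&;' needs its own RValueRefThisPointer variant keyed by the
// subroutine type.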
auto I = TypeIndices.find({PtrTy, SubroutineTy});
if (I != TypeIndices.end())
return I->second;
TypeLoweringScope S(*this);
TypeIndex TI = lowerTypePointer(PtrTy, Options);
return recordTypeIndexForDINode(PtrTy, TI, SubroutineTy);
}
TypeIndex CodeViewDebug::getTypeIndexForReferenceTo(const DIType *Ty) {
PointerRecord PR(getTypeIndex(Ty),
getPointerSizeInBytes() == 8 ? PointerKind::Near64
: PointerKind::Near32,
PointerMode::LValueReference, PointerOptions::None,
Ty->getSizeInBits() / 8);
return TypeTable.writeLeafType(PR);
}
TypeIndex CodeViewDebug::getCompleteTypeIndex(const DIType *Ty) {
// The null DIType is the void type. Don't try to hash it.
if (!Ty)
return TypeIndex::Void();
// Look through typedefs when getting the complete type index. Call
// getTypeIndex on the typedef to ensure that any UDTs are accumulated and are
// emitted only once.
if (Ty->getTag() == dwarf::DW_TAG_typedef)
(void)getTypeIndex(Ty);
while (Ty->getTag() == dwarf::DW_TAG_typedef)
Ty = cast<DIDerivedType>(Ty)->getBaseType();
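// (Illustration, hedged: for 'typedef struct S T;', the getTypeIndex call
// above records the UDT for T once, and the loop then resolves T down to S
// before the complete record index is computed.)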
// If this is a non-record type, the complete type index is the same as the
// normal type index. Just call getTypeIndex.
switch (Ty->getTag()) {
case dwarf::DW_TAG_class_type:
case dwarf::DW_TAG_structure_type:
case dwarf::DW_TAG_union_type:
break;
default:
return getTypeIndex(Ty);
}
const auto *CTy = cast<DICompositeType>(Ty);
TypeLoweringScope S(*this);
// Make sure the forward declaration is emitted first. It's unclear if this
// is necessary, but MSVC does it, and we should follow suit until we can show
// otherwise.
// We only emit a forward declaration for named types.
if (!CTy->getName().empty() || !CTy->getIdentifier().empty()) {
TypeIndex FwdDeclTI = getTypeIndex(CTy);
// Just use the forward decl if we don't have complete type info. This
// might happen if the frontend is using modules and expects the complete
// definition to be emitted elsewhere.
if (CTy->isForwardDecl())
return FwdDeclTI;
}
// Check if we've already translated the complete record type.
// Insert the type with a null TypeIndex to signify that the type is currently
// being lowered.
auto InsertResult = CompleteTypeIndices.insert({CTy, TypeIndex()});
if (!InsertResult.second)
return InsertResult.first->second;
TypeIndex TI;
switch (CTy->getTag()) {
case dwarf::DW_TAG_class_type:
case dwarf::DW_TAG_structure_type:
TI = lowerCompleteTypeClass(CTy);
break;
case dwarf::DW_TAG_union_type:
TI = lowerCompleteTypeUnion(CTy);
break;
default:
llvm_unreachable("not a record");
}
// Update the type index associated with this CompositeType. This cannot
// use the 'InsertResult' iterator above because it is potentially
// invalidated by map insertions which can occur while lowering the class
// type above.
CompleteTypeIndices[CTy] = TI;
return TI;
}
/// Emit all the deferred complete record types. Try to do this in FIFO order,
/// and do this until fixpoint, as each complete record type typically
/// references many other record types.
void CodeViewDebug::emitDeferredCompleteTypes() {
SmallVector<const DICompositeType *, 4> TypesToEmit;
while (!DeferredCompleteTypes.empty()) {
std::swap(DeferredCompleteTypes, TypesToEmit);
for (const DICompositeType *RecordTy : TypesToEmit)
getCompleteTypeIndex(RecordTy);
TypesToEmit.clear();
}
}
void CodeViewDebug::emitLocalVariableList(const FunctionInfo &FI,
ArrayRef<LocalVariable> Locals) {
// Get the sorted list of parameters and emit them first.
SmallVector<const LocalVariable *, 6> Params;
for (const LocalVariable &L : Locals)
if (L.DIVar->isParameter())
Params.push_back(&L);
llvm::sort(Params, [](const LocalVariable *L, const LocalVariable *R) {
return L->DIVar->getArg() < R->DIVar->getArg();
});
for (const LocalVariable *L : Params)
emitLocalVariable(FI, *L);
// Next emit all non-parameters in the order that we found them.
for (const LocalVariable &L : Locals)
if (!L.DIVar->isParameter())
emitLocalVariable(FI, L);
}
/// Only call this on endian-specific types like ulittle16_t and little32_t, or
/// structs composed of them.
template <typename T>
static void copyBytesForDefRange(SmallString<20> &BytePrefix,
SymbolKind SymKind, const T &DefRangeHeader) {
BytePrefix.resize(2 + sizeof(T));
ulittle16_t SymKindLE = ulittle16_t(SymKind);
memcpy(&BytePrefix[0], &SymKindLE, 2);
memcpy(&BytePrefix[2], &DefRangeHeader, sizeof(T));
}
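// Usage sketch (mirrors the call sites below; not additional API):
//   SmallString<20> Prefix;
//   little32_t Off(8);
//   copyBytesForDefRange(Prefix, S_DEFRANGE_FRAMEPOINTER_REL, Off);
// leaves Prefix holding the 2-byte record kind followed by the 4-byte
// little-endian offset header.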
void CodeViewDebug::emitLocalVariable(const FunctionInfo &FI,
const LocalVariable &Var) {
// LocalSym record, see SymbolRecord.h for more info.
MCSymbol *LocalEnd = beginSymbolRecord(SymbolKind::S_LOCAL);
LocalSymFlags Flags = LocalSymFlags::None;
if (Var.DIVar->isParameter())
Flags |= LocalSymFlags::IsParameter;
if (Var.DefRanges.empty())
Flags |= LocalSymFlags::IsOptimizedOut;
OS.AddComment("TypeIndex");
TypeIndex TI = Var.UseReferenceType
? getTypeIndexForReferenceTo(Var.DIVar->getType())
: getCompleteTypeIndex(Var.DIVar->getType());
OS.EmitIntValue(TI.getIndex(), 4);
OS.AddComment("Flags");
OS.EmitIntValue(static_cast<uint16_t>(Flags), 2);
// Truncate the name so we won't overflow the record length field.
emitNullTerminatedSymbolName(OS, Var.DIVar->getName());
endSymbolRecord(LocalEnd);
// Calculate the on-disk prefix of the appropriate def range record. The
// records and on-disk formats are described in SymbolRecord.h. BytePrefix
// should be big enough to hold all forms without memory allocation.
SmallString<20> BytePrefix;
for (const LocalVarDefRange &DefRange : Var.DefRanges) {
BytePrefix.clear();
if (DefRange.InMemory) {
int Offset = DefRange.DataOffset;
unsigned Reg = DefRange.CVRegister;
// 32-bit x86 call sequences often use PUSH instructions, which disrupt
// ESP-relative offsets. Use the virtual frame pointer, VFRAME or $T0,
// instead. In frames without stack realignment, $T0 will be the CFA.
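// (Hedged example: after a 'push eax' in a call sequence, an ESP-relative
// offset of 4 no longer points at the variable; rewriting the range as
// VFRAME plus OffsetAdjustment keeps it valid across the pushes.)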
if (RegisterId(Reg) == RegisterId::ESP) {
Reg = unsigned(RegisterId::VFRAME);
Offset += FI.OffsetAdjustment;
}
// If we can use the chosen frame pointer for the frame and this isn't a
// sliced aggregate, use the smaller S_DEFRANGE_FRAMEPOINTER_REL record.
// Otherwise, use S_DEFRANGE_REGISTER_REL.
EncodedFramePtrReg EncFP = encodeFramePtrReg(RegisterId(Reg), TheCPU);
if (!DefRange.IsSubfield && EncFP != EncodedFramePtrReg::None &&
(bool(Flags & LocalSymFlags::IsParameter)
? (EncFP == FI.EncodedParamFramePtrReg)
: (EncFP == FI.EncodedLocalFramePtrReg))) {
little32_t FPOffset = little32_t(Offset);
copyBytesForDefRange(BytePrefix, S_DEFRANGE_FRAMEPOINTER_REL, FPOffset);
} else {
uint16_t RegRelFlags = 0;
if (DefRange.IsSubfield) {
RegRelFlags = DefRangeRegisterRelSym::IsSubfieldFlag |
(DefRange.StructOffset
<< DefRangeRegisterRelSym::OffsetInParentShift);
}
DefRangeRegisterRelSym::Header DRHdr;
DRHdr.Register = Reg;
DRHdr.Flags = RegRelFlags;
DRHdr.BasePointerOffset = Offset;
copyBytesForDefRange(BytePrefix, S_DEFRANGE_REGISTER_REL, DRHdr);
}
} else {
assert(DefRange.DataOffset == 0 && "unexpected offset into register");
if (DefRange.IsSubfield) {
DefRangeSubfieldRegisterSym::Header DRHdr;
DRHdr.Register = DefRange.CVRegister;
DRHdr.MayHaveNoName = 0;
DRHdr.OffsetInParent = DefRange.StructOffset;
copyBytesForDefRange(BytePrefix, S_DEFRANGE_SUBFIELD_REGISTER, DRHdr);
} else {
DefRangeRegisterSym::Header DRHdr;
DRHdr.Register = DefRange.CVRegister;
DRHdr.MayHaveNoName = 0;
copyBytesForDefRange(BytePrefix, S_DEFRANGE_REGISTER, DRHdr);
}
}
OS.EmitCVDefRangeDirective(DefRange.Ranges, BytePrefix);
}
}
void CodeViewDebug::emitLexicalBlockList(ArrayRef<LexicalBlock *> Blocks,
const FunctionInfo& FI) {
for (LexicalBlock *Block : Blocks)
emitLexicalBlock(*Block, FI);
}
/// Emit an S_BLOCK32 and S_END record pair delimiting the contents of a
/// lexical block scope.
void CodeViewDebug::emitLexicalBlock(const LexicalBlock &Block,
const FunctionInfo& FI) {
MCSymbol *RecordEnd = beginSymbolRecord(SymbolKind::S_BLOCK32);
OS.AddComment("PtrParent");
OS.EmitIntValue(0, 4); // PtrParent
OS.AddComment("PtrEnd");
OS.EmitIntValue(0, 4); // PtrEnd
OS.AddComment("Code size");
OS.emitAbsoluteSymbolDiff(Block.End, Block.Begin, 4); // Code Size
OS.AddComment("Function section relative address");
OS.EmitCOFFSecRel32(Block.Begin, /*Offset=*/0); // Func Offset
OS.AddComment("Function section index");
OS.EmitCOFFSectionIndex(FI.Begin); // Func Symbol
OS.AddComment("Lexical block name");
emitNullTerminatedSymbolName(OS, Block.Name); // Name
endSymbolRecord(RecordEnd);
// Emit variables local to this lexical block.
emitLocalVariableList(FI, Block.Locals);
emitGlobalVariableList(Block.Globals);
// Emit lexical blocks contained within this block.
emitLexicalBlockList(Block.Children, FI);
// Close the lexical block scope.
emitEndSymbolRecord(SymbolKind::S_END);
}
/// Convenience routine for collecting lexical block information for a list
/// of lexical scopes.
void CodeViewDebug::collectLexicalBlockInfo(
SmallVectorImpl<LexicalScope *> &Scopes,
SmallVectorImpl<LexicalBlock *> &Blocks,
SmallVectorImpl<LocalVariable> &Locals,
SmallVectorImpl<CVGlobalVariable> &Globals) {
for (LexicalScope *Scope : Scopes)
collectLexicalBlockInfo(*Scope, Blocks, Locals, Globals);
}
/// Populate the lexical blocks and local variable lists of the parent with
/// information about the specified lexical scope.
void CodeViewDebug::collectLexicalBlockInfo(
LexicalScope &Scope,
SmallVectorImpl<LexicalBlock *> &ParentBlocks,
SmallVectorImpl<LocalVariable> &ParentLocals,
SmallVectorImpl<CVGlobalVariable> &ParentGlobals) {
if (Scope.isAbstractScope())
return;
// Gather information about the lexical scope including local variables,
// global variables, and address ranges.
bool IgnoreScope = false;
auto LI = ScopeVariables.find(&Scope);
SmallVectorImpl<LocalVariable> *Locals =
LI != ScopeVariables.end() ? &LI->second : nullptr;
auto GI = ScopeGlobals.find(Scope.getScopeNode());
SmallVectorImpl<CVGlobalVariable> *Globals =
GI != ScopeGlobals.end() ? GI->second.get() : nullptr;
const DILexicalBlock *DILB = dyn_cast<DILexicalBlock>(Scope.getScopeNode());
const SmallVectorImpl<InsnRange> &Ranges = Scope.getRanges();
// Ignore lexical scopes which do not contain variables.
if (!Locals && !Globals)
IgnoreScope = true;
// Ignore lexical scopes which are not lexical blocks.
if (!DILB)
IgnoreScope = true;
// Ignore scopes which have too many address ranges to represent in the
// current CodeView format or do not have a valid address range.
//
// For lexical scopes with multiple address ranges you may be tempted to
// construct a single range covering every instruction where the block is
// live and everything in between. Unfortunately, Visual Studio only
// displays variables from the first matching lexical block scope. If the
// first lexical block contains exception handling code or cold code which
// is moved to the bottom of the routine creating a single range covering
// nearly the entire routine, then it will hide all other lexical blocks
// and the variables they contain.
if (Ranges.size() != 1 || !getLabelAfterInsn(Ranges.front().second))
IgnoreScope = true;
if (IgnoreScope) {
// This scope can be safely ignored and eliminating it will reduce the
// size of the debug information. Be sure to collect any variable and scope
// information from this scope or any of its children and collapse them
// into the parent scope.
if (Locals)
ParentLocals.append(Locals->begin(), Locals->end());
if (Globals)
ParentGlobals.append(Globals->begin(), Globals->end());
collectLexicalBlockInfo(Scope.getChildren(),
ParentBlocks,
ParentLocals,
ParentGlobals);
return;
}
// Create a new CodeView lexical block for this lexical scope. If we've
// seen this DILexicalBlock before then the scope tree is malformed and
// we can handle this gracefully by not processing it a second time.
auto BlockInsertion = CurFn->LexicalBlocks.insert({DILB, LexicalBlock()});
if (!BlockInsertion.second)
return;
// Create a lexical block containing the variables and collect the
// lexical block information for the children.
const InsnRange &Range = Ranges.front();
assert(Range.first && Range.second);
LexicalBlock &Block = BlockInsertion.first->second;
Block.Begin = getLabelBeforeInsn(Range.first);
Block.End = getLabelAfterInsn(Range.second);
assert(Block.Begin && "missing label for scope begin");
assert(Block.End && "missing label for scope end");
Block.Name = DILB->getName();
if (Locals)
Block.Locals = std::move(*Locals);
if (Globals)
Block.Globals = std::move(*Globals);
ParentBlocks.push_back(&Block);
collectLexicalBlockInfo(Scope.getChildren(),
Block.Children,
Block.Locals,
Block.Globals);
}
void CodeViewDebug::endFunctionImpl(const MachineFunction *MF) {
const Function &GV = MF->getFunction();
assert(FnDebugInfo.count(&GV));
assert(CurFn == FnDebugInfo[&GV].get());
collectVariableInfo(GV.getSubprogram());
// Build the lexical block structure to emit for this routine.
if (LexicalScope *CFS = LScopes.getCurrentFunctionScope())
collectLexicalBlockInfo(*CFS,
CurFn->ChildBlocks,
CurFn->Locals,
CurFn->Globals);
// Clear the scope and variable information from the map which will not be
// valid after we have finished processing this routine. This also prepares
// the map for the subsequent routine.
ScopeVariables.clear();
// Don't emit anything if we don't have any line tables.
// Thunks are compiler-generated and probably won't have source correlation.
if (!CurFn->HaveLineInfo && !GV.getSubprogram()->isThunk()) {
FnDebugInfo.erase(&GV);
CurFn = nullptr;
return;
}
+ // Find heap alloc sites and add to list.
+ for (const auto &MBB : *MF) {
+ for (const auto &MI : MBB) {
+ if (MDNode *MD = MI.getHeapAllocMarker()) {
+ CurFn->HeapAllocSites.push_back(std::make_tuple(getLabelBeforeInsn(&MI),
+ getLabelAfterInsn(&MI),
+ dyn_cast<DIType>(MD)));
+ }
+ }
+ }
+
CurFn->Annotations = MF->getCodeViewAnnotations();
- CurFn->HeapAllocSites = MF->getCodeViewHeapAllocSites();
CurFn->End = Asm->getFunctionEnd();
CurFn = nullptr;
}
void CodeViewDebug::beginInstruction(const MachineInstr *MI) {
DebugHandlerBase::beginInstruction(MI);
// Ignore DBG_VALUE and DBG_LABEL locations and function prologue.
if (!Asm || !CurFn || MI->isDebugInstr() ||
MI->getFlag(MachineInstr::FrameSetup))
return;
// If the first instruction of a new MBB has no location, find the first
// instruction with a location and use that.
DebugLoc DL = MI->getDebugLoc();
if (!DL && MI->getParent() != PrevInstBB) {
for (const auto &NextMI : *MI->getParent()) {
if (NextMI.isDebugInstr())
continue;
DL = NextMI.getDebugLoc();
if (DL)
break;
}
}
PrevInstBB = MI->getParent();
// If we still don't have a debug location, don't record a location.
if (!DL)
return;
maybeRecordLocation(DL, Asm->MF);
}
MCSymbol *CodeViewDebug::beginCVSubsection(DebugSubsectionKind Kind) {
MCSymbol *BeginLabel = MMI->getContext().createTempSymbol(),
*EndLabel = MMI->getContext().createTempSymbol();
OS.EmitIntValue(unsigned(Kind), 4);
OS.AddComment("Subsection size");
OS.emitAbsoluteSymbolDiff(EndLabel, BeginLabel, 4);
OS.EmitLabel(BeginLabel);
return EndLabel;
}
void CodeViewDebug::endCVSubsection(MCSymbol *EndLabel) {
OS.EmitLabel(EndLabel);
// Every subsection must be aligned to a 4-byte boundary.
OS.EmitValueToAlignment(4);
}
static StringRef getSymbolName(SymbolKind SymKind) {
for (const EnumEntry<SymbolKind> &EE : getSymbolTypeNames())
if (EE.Value == SymKind)
return EE.Name;
return "";
}
MCSymbol *CodeViewDebug::beginSymbolRecord(SymbolKind SymKind) {
MCSymbol *BeginLabel = MMI->getContext().createTempSymbol(),
*EndLabel = MMI->getContext().createTempSymbol();
OS.AddComment("Record length");
OS.emitAbsoluteSymbolDiff(EndLabel, BeginLabel, 2);
OS.EmitLabel(BeginLabel);
if (OS.isVerboseAsm())
OS.AddComment("Record kind: " + getSymbolName(SymKind));
OS.EmitIntValue(unsigned(SymKind), 2);
return EndLabel;
}
void CodeViewDebug::endSymbolRecord(MCSymbol *SymEnd) {
// MSVC does not pad out symbol records to four bytes, but LLVM does to avoid
// an extra copy of every symbol record in LLD. This increases object file
// size by less than 1% in the clang build, and is compatible with the Visual
// C++ linker.
OS.EmitValueToAlignment(4);
OS.EmitLabel(SymEnd);
}
void CodeViewDebug::emitEndSymbolRecord(SymbolKind EndKind) {
OS.AddComment("Record length");
OS.EmitIntValue(2, 2);
if (OS.isVerboseAsm())
OS.AddComment("Record kind: " + getSymbolName(EndKind));
OS.EmitIntValue(unsigned(EndKind), 2); // Record Kind
}
void CodeViewDebug::emitDebugInfoForUDTs(
ArrayRef<std::pair<std::string, const DIType *>> UDTs) {
for (const auto &UDT : UDTs) {
const DIType *T = UDT.second;
assert(shouldEmitUdt(T));
MCSymbol *UDTRecordEnd = beginSymbolRecord(SymbolKind::S_UDT);
OS.AddComment("Type");
OS.EmitIntValue(getCompleteTypeIndex(T).getIndex(), 4);
emitNullTerminatedSymbolName(OS, UDT.first);
endSymbolRecord(UDTRecordEnd);
}
}
void CodeViewDebug::collectGlobalVariableInfo() {
DenseMap<const DIGlobalVariableExpression *, const GlobalVariable *>
GlobalMap;
for (const GlobalVariable &GV : MMI->getModule()->globals()) {
SmallVector<DIGlobalVariableExpression *, 1> GVEs;
GV.getDebugInfo(GVEs);
for (const auto *GVE : GVEs)
GlobalMap[GVE] = &GV;
}
NamedMDNode *CUs = MMI->getModule()->getNamedMetadata("llvm.dbg.cu");
for (const MDNode *Node : CUs->operands()) {
const auto *CU = cast<DICompileUnit>(Node);
for (const auto *GVE : CU->getGlobalVariables()) {
const DIGlobalVariable *DIGV = GVE->getVariable();
const DIExpression *DIE = GVE->getExpression();
// Emit constant global variables in a global symbol section.
if (GlobalMap.count(GVE) == 0 && DIE->isConstant()) {
CVGlobalVariable CVGV = {DIGV, DIE};
GlobalVariables.emplace_back(std::move(CVGV));
}
const auto *GV = GlobalMap.lookup(GVE);
if (!GV || GV->isDeclarationForLinker())
continue;
DIScope *Scope = DIGV->getScope();
SmallVector<CVGlobalVariable, 1> *VariableList;
if (Scope && isa<DILocalScope>(Scope)) {
// Locate a global variable list for this scope, creating one if
// necessary.
auto Insertion = ScopeGlobals.insert(
{Scope, std::unique_ptr<GlobalVariableList>()});
if (Insertion.second)
Insertion.first->second = llvm::make_unique<GlobalVariableList>();
VariableList = Insertion.first->second.get();
} else if (GV->hasComdat())
// Emit this global variable into a COMDAT section.
VariableList = &ComdatVariables;
else
// Emit this global variable in a single global symbol section.
VariableList = &GlobalVariables;
CVGlobalVariable CVGV = {DIGV, GV};
VariableList->emplace_back(std::move(CVGV));
}
}
}
void CodeViewDebug::emitDebugInfoForGlobals() {
// First, emit all globals that are not in a comdat in a single symbol
// substream. MSVC doesn't like it if the substream is empty, so only open
// it if we have at least one global to emit.
switchToDebugSectionForSymbol(nullptr);
if (!GlobalVariables.empty()) {
OS.AddComment("Symbol subsection for globals");
MCSymbol *EndLabel = beginCVSubsection(DebugSubsectionKind::Symbols);
emitGlobalVariableList(GlobalVariables);
endCVSubsection(EndLabel);
}
// Second, emit each global that is in a comdat into its own .debug$S
// section along with its own symbol substream.
for (const CVGlobalVariable &CVGV : ComdatVariables) {
const GlobalVariable *GV = CVGV.GVInfo.get<const GlobalVariable *>();
MCSymbol *GVSym = Asm->getSymbol(GV);
OS.AddComment("Symbol subsection for " +
Twine(GlobalValue::dropLLVMManglingEscape(GV->getName())));
switchToDebugSectionForSymbol(GVSym);
MCSymbol *EndLabel = beginCVSubsection(DebugSubsectionKind::Symbols);
// FIXME: emitDebugInfoForGlobal() doesn't handle DIExpressions.
emitDebugInfoForGlobal(CVGV);
endCVSubsection(EndLabel);
}
}
void CodeViewDebug::emitDebugInfoForRetainedTypes() {
NamedMDNode *CUs = MMI->getModule()->getNamedMetadata("llvm.dbg.cu");
for (const MDNode *Node : CUs->operands()) {
for (auto *Ty : cast<DICompileUnit>(Node)->getRetainedTypes()) {
if (DIType *RT = dyn_cast<DIType>(Ty)) {
getTypeIndex(RT);
// FIXME: Add to global/local DTU list.
}
}
}
}
// Emit each global variable in the specified array.
void CodeViewDebug::emitGlobalVariableList(ArrayRef<CVGlobalVariable> Globals) {
for (const CVGlobalVariable &CVGV : Globals) {
// FIXME: emitDebugInfoForGlobal() doesn't handle DIExpressions.
emitDebugInfoForGlobal(CVGV);
}
}
void CodeViewDebug::emitDebugInfoForGlobal(const CVGlobalVariable &CVGV) {
const DIGlobalVariable *DIGV = CVGV.DIGV;
if (const GlobalVariable *GV =
CVGV.GVInfo.dyn_cast<const GlobalVariable *>()) {
// DataSym record, see SymbolRecord.h for more info. Thread local data
// happens to have the same format as global data.
MCSymbol *GVSym = Asm->getSymbol(GV);
SymbolKind DataSym = GV->isThreadLocal()
? (DIGV->isLocalToUnit() ? SymbolKind::S_LTHREAD32
: SymbolKind::S_GTHREAD32)
: (DIGV->isLocalToUnit() ? SymbolKind::S_LDATA32
: SymbolKind::S_GDATA32);
MCSymbol *DataEnd = beginSymbolRecord(DataSym);
OS.AddComment("Type");
OS.EmitIntValue(getCompleteTypeIndex(DIGV->getType()).getIndex(), 4);
OS.AddComment("DataOffset");
OS.EmitCOFFSecRel32(GVSym, /*Offset=*/0);
OS.AddComment("Segment");
OS.EmitCOFFSectionIndex(GVSym);
OS.AddComment("Name");
const unsigned LengthOfDataRecord = 12;
emitNullTerminatedSymbolName(OS, DIGV->getName(), LengthOfDataRecord);
endSymbolRecord(DataEnd);
} else {
// FIXME: Currently this only emits the global variables in the IR metadata.
// This should also emit enums and static data members.
const DIExpression *DIE = CVGV.GVInfo.get<const DIExpression *>();
assert(DIE->isConstant() &&
"Global constant variables must contain a constant expression.");
uint64_t Val = DIE->getElement(1);
MCSymbol *SConstantEnd = beginSymbolRecord(SymbolKind::S_CONSTANT);
OS.AddComment("Type");
OS.EmitIntValue(getTypeIndex(DIGV->getType()).getIndex(), 4);
OS.AddComment("Value");
// Encoded integers shouldn't need more than 10 bytes.
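// (Assumed encoding, not from the original source: a CodeView numeric leaf
// is a 2-byte kind followed by at most 8 bytes of payload, hence the
// 10-byte ceiling.)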
uint8_t data[10];
BinaryStreamWriter Writer(data, llvm::support::endianness::little);
CodeViewRecordIO IO(Writer);
cantFail(IO.mapEncodedInteger(Val));
StringRef SRef((char *)data, Writer.getOffset());
OS.EmitBinaryData(SRef);
OS.AddComment("Name");
const DIScope *Scope = DIGV->getScope();
// For static data members, get the scope from the declaration.
if (const auto *MemberDecl = dyn_cast_or_null<DIDerivedType>(
DIGV->getRawStaticDataMemberDeclaration()))
Scope = MemberDecl->getScope();
emitNullTerminatedSymbolName(OS,
getFullyQualifiedName(Scope, DIGV->getName()));
endSymbolRecord(SConstantEnd);
}
}
Index: stable/12/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.h
===================================================================
--- stable/12/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.h (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.h (revision 356465)
@@ -1,468 +1,469 @@
//===- llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.h --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains support for writing Microsoft CodeView debug info.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_CODEGEN_ASMPRINTER_CODEVIEWDEBUG_H
#define LLVM_LIB_CODEGEN_ASMPRINTER_CODEVIEWDEBUG_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/DbgEntityHistoryCalculator.h"
#include "llvm/CodeGen/DebugHandlerBase.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/GlobalTypeTableBuilder.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Compiler.h"
#include <cstdint>
#include <map>
#include <string>
#include <tuple>
#include <unordered_map>
#include <utility>
#include <vector>
namespace llvm {
struct ClassInfo;
class StringRef;
class AsmPrinter;
class Function;
class GlobalVariable;
class MCSectionCOFF;
class MCStreamer;
class MCSymbol;
class MachineFunction;
/// Collects and handles line tables information in a CodeView format.
class LLVM_LIBRARY_VISIBILITY CodeViewDebug : public DebugHandlerBase {
MCStreamer &OS;
BumpPtrAllocator Allocator;
codeview::GlobalTypeTableBuilder TypeTable;
/// Whether to emit type record hashes into .debug$H.
bool EmitDebugGlobalHashes = false;
/// The codeview CPU type used by the translation unit.
codeview::CPUType TheCPU;
/// Represents the most general definition range.
struct LocalVarDefRange {
/// Indicates that variable data is stored in memory relative to the
/// specified register.
int InMemory : 1;
/// Offset of variable data in memory.
int DataOffset : 31;
/// Non-zero if this is a piece of an aggregate.
uint16_t IsSubfield : 1;
/// Offset into aggregate.
uint16_t StructOffset : 15;
/// Register containing the data or the register base of the memory
/// location containing the data.
uint16_t CVRegister;
/// Compares all location fields. This includes all fields except the label
/// ranges.
bool isDifferentLocation(LocalVarDefRange &O) {
return InMemory != O.InMemory || DataOffset != O.DataOffset ||
IsSubfield != O.IsSubfield || StructOffset != O.StructOffset ||
CVRegister != O.CVRegister;
}
SmallVector<std::pair<const MCSymbol *, const MCSymbol *>, 1> Ranges;
};
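// (Layout note, hedged: InMemory and DataOffset pack into one 32-bit word,
// and IsSubfield/StructOffset into a 16-bit word, roughly mirroring the
// encodings used by the S_DEFRANGE_* symbol records.)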
static LocalVarDefRange createDefRangeMem(uint16_t CVRegister, int Offset);
/// Similar to DbgVariable in DwarfDebug, but not dwarf-specific.
struct LocalVariable {
const DILocalVariable *DIVar = nullptr;
SmallVector<LocalVarDefRange, 1> DefRanges;
bool UseReferenceType = false;
};
struct CVGlobalVariable {
const DIGlobalVariable *DIGV;
PointerUnion<const GlobalVariable *, const DIExpression *> GVInfo;
};
struct InlineSite {
SmallVector<LocalVariable, 1> InlinedLocals;
SmallVector<const DILocation *, 1> ChildSites;
const DISubprogram *Inlinee = nullptr;
/// The ID of the inline site or function used with .cv_loc. Not a type
/// index.
unsigned SiteFuncId = 0;
};
// Combines information from DILexicalBlock and LexicalScope.
struct LexicalBlock {
SmallVector<LocalVariable, 1> Locals;
SmallVector<CVGlobalVariable, 1> Globals;
SmallVector<LexicalBlock *, 1> Children;
const MCSymbol *Begin;
const MCSymbol *End;
StringRef Name;
};
// For each function, store a vector of labels to its instructions, as well as
// to the end of the function.
struct FunctionInfo {
FunctionInfo() = default;
// Uncopyable.
FunctionInfo(const FunctionInfo &FI) = delete;
/// Map from inlined call site to inlined instructions and child inlined
/// call sites. Listed in program order.
std::unordered_map<const DILocation *, InlineSite> InlineSites;
/// Ordered list of top-level inlined call sites.
SmallVector<const DILocation *, 1> ChildSites;
SmallVector<LocalVariable, 1> Locals;
SmallVector<CVGlobalVariable, 1> Globals;
std::unordered_map<const DILexicalBlockBase*, LexicalBlock> LexicalBlocks;
// Lexical blocks containing local variables.
SmallVector<LexicalBlock *, 1> ChildBlocks;
std::vector<std::pair<MCSymbol *, MDNode *>> Annotations;
- std::vector<std::tuple<MCSymbol *, MCSymbol *, DIType *>> HeapAllocSites;
+ std::vector<std::tuple<const MCSymbol *, const MCSymbol *, const DIType *>>
+ HeapAllocSites;
const MCSymbol *Begin = nullptr;
const MCSymbol *End = nullptr;
unsigned FuncId = 0;
unsigned LastFileId = 0;
/// Number of bytes allocated in the prologue for all local stack objects.
unsigned FrameSize = 0;
/// Number of bytes of parameters on the stack.
unsigned ParamSize = 0;
/// Number of bytes pushed to save CSRs.
unsigned CSRSize = 0;
/// Adjustment to apply on x86 when using the VFRAME frame pointer.
int OffsetAdjustment = 0;
/// Two-bit value indicating which register is the designated frame pointer
/// register for local variables. Included in S_FRAMEPROC.
codeview::EncodedFramePtrReg EncodedLocalFramePtrReg =
codeview::EncodedFramePtrReg::None;
/// Two-bit value indicating which register is the designated frame pointer
/// register for stack parameters. Included in S_FRAMEPROC.
codeview::EncodedFramePtrReg EncodedParamFramePtrReg =
codeview::EncodedFramePtrReg::None;
codeview::FrameProcedureOptions FrameProcOpts;
bool HasStackRealignment = false;
bool HaveLineInfo = false;
};
FunctionInfo *CurFn = nullptr;
// Map used to separate variables according to the lexical scope they belong
// in. This is populated by recordLocalVariable() before
// collectLexicalBlocks() separates the variables between the FunctionInfo
// and LexicalBlocks.
DenseMap<const LexicalScope *, SmallVector<LocalVariable, 1>> ScopeVariables;
// Map to separate global variables according to the lexical scope they
// belong in. A null local scope represents the global scope.
typedef SmallVector<CVGlobalVariable, 1> GlobalVariableList;
DenseMap<const DIScope*, std::unique_ptr<GlobalVariableList> > ScopeGlobals;
// Array of global variables which need to be emitted into a COMDAT section.
SmallVector<CVGlobalVariable, 1> ComdatVariables;
// Array of non-COMDAT global variables.
SmallVector<CVGlobalVariable, 1> GlobalVariables;
/// The set of comdat .debug$S sections that we've seen so far. Each section
/// must start with a magic version number that must only be emitted once.
/// This set tracks which sections we've already opened.
DenseSet<MCSectionCOFF *> ComdatDebugSections;
/// Switch to the appropriate .debug$S section for GVSym. If GVSym, the symbol
/// of an emitted global value, is in a comdat COFF section, this will switch
/// to a new .debug$S section in that comdat. This method ensures that the
/// section starts with the magic version number on first use. If GVSym is
/// null, uses the main .debug$S section.
void switchToDebugSectionForSymbol(const MCSymbol *GVSym);
/// The next available function index for use with our .cv_* directives. Not
/// to be confused with type indices for LF_FUNC_ID records.
unsigned NextFuncId = 0;
InlineSite &getInlineSite(const DILocation *InlinedAt,
const DISubprogram *Inlinee);
codeview::TypeIndex getFuncIdForSubprogram(const DISubprogram *SP);
void calculateRanges(LocalVariable &Var,
const DbgValueHistoryMap::Entries &Entries);
static void collectInlineSiteChildren(SmallVectorImpl<unsigned> &Children,
const FunctionInfo &FI,
const InlineSite &Site);
/// Remember some debug info about each function. Keep it in a stable order to
/// emit at the end of the TU.
MapVector<const Function *, std::unique_ptr<FunctionInfo>> FnDebugInfo;
/// Map from full file path to .cv_file id. Full paths are built from DIFiles
/// and are stored in FileToFilepathMap.
DenseMap<StringRef, unsigned> FileIdMap;
/// All inlined subprograms in the order they should be emitted.
SmallSetVector<const DISubprogram *, 4> InlinedSubprograms;
/// Map from a pair of DI metadata nodes and its DI type (or scope) that can
/// be nullptr, to CodeView type indices. Primarily indexed by
/// {DIType*, DIType*} and {DISubprogram*, DIType*}.
///
/// The second entry in the key is needed for methods, as the DISubroutineType
/// representing a static method type is shared with the non-method function
/// type.
DenseMap<std::pair<const DINode *, const DIType *>, codeview::TypeIndex>
TypeIndices;
/// Map from DICompositeType* to complete type index. Non-record types are
/// always looked up in the normal TypeIndices map.
DenseMap<const DICompositeType *, codeview::TypeIndex> CompleteTypeIndices;
/// Complete record types to emit after all active type lowerings are
/// finished.
SmallVector<const DICompositeType *, 4> DeferredCompleteTypes;
/// Number of type lowering frames active on the stack.
unsigned TypeEmissionLevel = 0;
codeview::TypeIndex VBPType;
const DISubprogram *CurrentSubprogram = nullptr;
// The UDTs we have seen while processing types; each entry is a pair of
// type name and type node.
std::vector<std::pair<std::string, const DIType *>> LocalUDTs;
std::vector<std::pair<std::string, const DIType *>> GlobalUDTs;
using FileToFilepathMapTy = std::map<const DIFile *, std::string>;
FileToFilepathMapTy FileToFilepathMap;
StringRef getFullFilepath(const DIFile *File);
unsigned maybeRecordFile(const DIFile *F);
void maybeRecordLocation(const DebugLoc &DL, const MachineFunction *MF);
void clear();
void setCurrentSubprogram(const DISubprogram *SP) {
CurrentSubprogram = SP;
LocalUDTs.clear();
}
/// Emit the magic version number at the start of a CodeView type or symbol
/// section. Appears at the front of every .debug$S or .debug$T or .debug$P
/// section.
void emitCodeViewMagicVersion();
void emitTypeInformation();
void emitTypeGlobalHashes();
void emitCompilerInformation();
void emitBuildInfo();
void emitInlineeLinesSubsection();
void emitDebugInfoForThunk(const Function *GV,
FunctionInfo &FI,
const MCSymbol *Fn);
void emitDebugInfoForFunction(const Function *GV, FunctionInfo &FI);
void emitDebugInfoForRetainedTypes();
void
emitDebugInfoForUDTs(ArrayRef<std::pair<std::string, const DIType *>> UDTs);
void emitDebugInfoForGlobals();
void emitGlobalVariableList(ArrayRef<CVGlobalVariable> Globals);
void emitDebugInfoForGlobal(const CVGlobalVariable &CVGV);
/// Opens a subsection of the given kind in a .debug$S codeview section.
/// Returns an end label for use with endCVSubsection when the subsection is
/// finished.
MCSymbol *beginCVSubsection(codeview::DebugSubsectionKind Kind);
void endCVSubsection(MCSymbol *EndLabel);
/// Opens a symbol record of the given kind. Returns an end label for use with
/// endSymbolRecord.
MCSymbol *beginSymbolRecord(codeview::SymbolKind Kind);
void endSymbolRecord(MCSymbol *SymEnd);
/// Emits an S_END, S_INLINESITE_END, or S_PROC_ID_END record. These records
/// are empty, so we emit them with a simpler assembly sequence that doesn't
/// involve labels.
void emitEndSymbolRecord(codeview::SymbolKind EndKind);
void emitInlinedCallSite(const FunctionInfo &FI, const DILocation *InlinedAt,
const InlineSite &Site);
using InlinedEntity = DbgValueHistoryMap::InlinedEntity;
void collectGlobalVariableInfo();
void collectVariableInfo(const DISubprogram *SP);
void collectVariableInfoFromMFTable(DenseSet<InlinedEntity> &Processed);
// Construct the lexical block tree for a routine, pruning empty lexical
// scopes, and populate it with local variables.
void collectLexicalBlockInfo(SmallVectorImpl<LexicalScope *> &Scopes,
SmallVectorImpl<LexicalBlock *> &Blocks,
SmallVectorImpl<LocalVariable> &Locals,
SmallVectorImpl<CVGlobalVariable> &Globals);
void collectLexicalBlockInfo(LexicalScope &Scope,
SmallVectorImpl<LexicalBlock *> &ParentBlocks,
SmallVectorImpl<LocalVariable> &ParentLocals,
SmallVectorImpl<CVGlobalVariable> &ParentGlobals);
/// Records information about a local variable in the appropriate scope. In
/// particular, locals from inlined code live inside the inlining site.
void recordLocalVariable(LocalVariable &&Var, const LexicalScope *LS);
/// Emits local variables in the appropriate order.
void emitLocalVariableList(const FunctionInfo &FI,
ArrayRef<LocalVariable> Locals);
/// Emits an S_LOCAL record and its associated defined ranges.
void emitLocalVariable(const FunctionInfo &FI, const LocalVariable &Var);
/// Emits a sequence of lexical block scopes and their children.
void emitLexicalBlockList(ArrayRef<LexicalBlock *> Blocks,
const FunctionInfo& FI);
/// Emit a lexical block scope and its children.
void emitLexicalBlock(const LexicalBlock &Block, const FunctionInfo& FI);
/// Translates the DIType to codeview if necessary and returns a type index
/// for it.
codeview::TypeIndex getTypeIndex(const DIType *Ty,
const DIType *ClassTy = nullptr);
codeview::TypeIndex
getTypeIndexForThisPtr(const DIDerivedType *PtrTy,
const DISubroutineType *SubroutineTy);
codeview::TypeIndex getTypeIndexForReferenceTo(const DIType *Ty);
codeview::TypeIndex getMemberFunctionType(const DISubprogram *SP,
const DICompositeType *Class);
codeview::TypeIndex getScopeIndex(const DIScope *Scope);
codeview::TypeIndex getVBPTypeIndex();
void addToUDTs(const DIType *Ty);
void addUDTSrcLine(const DIType *Ty, codeview::TypeIndex TI);
codeview::TypeIndex lowerType(const DIType *Ty, const DIType *ClassTy);
codeview::TypeIndex lowerTypeAlias(const DIDerivedType *Ty);
codeview::TypeIndex lowerTypeArray(const DICompositeType *Ty);
codeview::TypeIndex lowerTypeBasic(const DIBasicType *Ty);
codeview::TypeIndex lowerTypePointer(
const DIDerivedType *Ty,
codeview::PointerOptions PO = codeview::PointerOptions::None);
codeview::TypeIndex lowerTypeMemberPointer(
const DIDerivedType *Ty,
codeview::PointerOptions PO = codeview::PointerOptions::None);
codeview::TypeIndex lowerTypeModifier(const DIDerivedType *Ty);
codeview::TypeIndex lowerTypeFunction(const DISubroutineType *Ty);
codeview::TypeIndex lowerTypeVFTableShape(const DIDerivedType *Ty);
codeview::TypeIndex lowerTypeMemberFunction(
const DISubroutineType *Ty, const DIType *ClassTy, int ThisAdjustment,
bool IsStaticMethod,
codeview::FunctionOptions FO = codeview::FunctionOptions::None);
codeview::TypeIndex lowerTypeEnum(const DICompositeType *Ty);
codeview::TypeIndex lowerTypeClass(const DICompositeType *Ty);
codeview::TypeIndex lowerTypeUnion(const DICompositeType *Ty);
/// Symbol records should point to complete types, but type records should
/// always point to incomplete types to avoid cycles in the type graph. Only
/// use this entry point when generating symbol records. The complete and
/// incomplete type indices only differ for record types. All other types use
/// the same index.
codeview::TypeIndex getCompleteTypeIndex(const DIType *Ty);
codeview::TypeIndex lowerCompleteTypeClass(const DICompositeType *Ty);
codeview::TypeIndex lowerCompleteTypeUnion(const DICompositeType *Ty);
struct TypeLoweringScope;
void emitDeferredCompleteTypes();
void collectMemberInfo(ClassInfo &Info, const DIDerivedType *DDTy);
ClassInfo collectClassInfo(const DICompositeType *Ty);
/// Common record member lowering functionality for record types, which are
/// structs, classes, and unions. Returns the field list index and the member
/// count.
std::tuple<codeview::TypeIndex, codeview::TypeIndex, unsigned, bool>
lowerRecordFieldList(const DICompositeType *Ty);
/// Inserts {{Node, ClassTy}, TI} into TypeIndices and checks for duplicates.
codeview::TypeIndex recordTypeIndexForDINode(const DINode *Node,
codeview::TypeIndex TI,
const DIType *ClassTy = nullptr);
unsigned getPointerSizeInBytes();
protected:
/// Gather pre-function debug information.
void beginFunctionImpl(const MachineFunction *MF) override;
/// Gather post-function debug information.
void endFunctionImpl(const MachineFunction *) override;
public:
CodeViewDebug(AsmPrinter *AP);
void setSymbolSize(const MCSymbol *, uint64_t) override {}
/// Emit the COFF section that holds the line table information.
void endModule() override;
/// Process beginning of an instruction.
void beginInstruction(const MachineInstr *MI) override;
};
} // end namespace llvm
#endif // LLVM_LIB_CODEGEN_ASMPRINTER_CODEVIEWDEBUG_H
Index: stable/12/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
===================================================================
--- stable/12/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp (revision 356465)
@@ -1,2392 +1,2392 @@
//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the IRTranslator class.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>
#include <vector>
#define DEBUG_TYPE "irtranslator"
using namespace llvm;
static cl::opt<bool>
EnableCSEInIRTranslator("enable-cse-in-irtranslator",
cl::desc("Should enable CSE in irtranslator"),
cl::Optional, cl::init(false));
char IRTranslator::ID = 0;
INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
false, false)
static void reportTranslationError(MachineFunction &MF,
const TargetPassConfig &TPC,
OptimizationRemarkEmitter &ORE,
OptimizationRemarkMissed &R) {
MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
// Print the function name explicitly if we don't have a debug location (which
// makes the diagnostic less useful) or if we're going to emit a raw error.
if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
R << (" (in function: " + MF.getName() + ")").str();
if (TPC.isGlobalISelAbortEnabled())
report_fatal_error(R.getMsg());
else
ORE.emit(R);
}
IRTranslator::IRTranslator() : MachineFunctionPass(ID) { }
#ifndef NDEBUG
namespace {
/// Verify that every instruction created has the same DILocation as the
/// instruction being translated.
class DILocationVerifier : public GISelChangeObserver {
const Instruction *CurrInst = nullptr;
public:
DILocationVerifier() = default;
~DILocationVerifier() = default;
const Instruction *getCurrentInst() const { return CurrInst; }
void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }
void erasingInstr(MachineInstr &MI) override {}
void changingInstr(MachineInstr &MI) override {}
void changedInstr(MachineInstr &MI) override {}
void createdInstr(MachineInstr &MI) override {
assert(getCurrentInst() && "Inserted instruction without a current MI");
// Only print the check message if we're actually checking it.
#ifndef NDEBUG
LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
<< " was copied to " << MI);
#endif
// We allow insts in the entry block to have a debug loc line of 0 because
// they could have originated from constants, and we don't want a jumpy
// debug experience.
assert((CurrInst->getDebugLoc() == MI.getDebugLoc() ||
MI.getDebugLoc().getLine() == 0) &&
"Line info was not transferred to all instructions");
}
};
} // namespace
#endif // ifndef NDEBUG
void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<StackProtector>();
AU.addRequired<TargetPassConfig>();
AU.addRequired<GISelCSEAnalysisWrapperPass>();
getSelectionDAGFallbackAnalysisUsage(AU);
MachineFunctionPass::getAnalysisUsage(AU);
}
IRTranslator::ValueToVRegInfo::VRegListT &
IRTranslator::allocateVRegs(const Value &Val) {
assert(!VMap.contains(Val) && "Value already allocated in VMap");
auto *Regs = VMap.getVRegs(Val);
auto *Offsets = VMap.getOffsets(Val);
SmallVector<LLT, 4> SplitTys;
computeValueLLTs(*DL, *Val.getType(), SplitTys,
Offsets->empty() ? Offsets : nullptr);
for (unsigned i = 0; i < SplitTys.size(); ++i)
Regs->push_back(0);
return *Regs;
}
ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
auto VRegsIt = VMap.findVRegs(Val);
if (VRegsIt != VMap.vregs_end())
return *VRegsIt->second;
if (Val.getType()->isVoidTy())
return *VMap.getVRegs(Val);
// Create entry for this type.
auto *VRegs = VMap.getVRegs(Val);
auto *Offsets = VMap.getOffsets(Val);
assert(Val.getType()->isSized() &&
"Don't know how to create an empty vreg");
SmallVector<LLT, 4> SplitTys;
computeValueLLTs(*DL, *Val.getType(), SplitTys,
Offsets->empty() ? Offsets : nullptr);
if (!isa<Constant>(Val)) {
for (auto Ty : SplitTys)
VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
return *VRegs;
}
if (Val.getType()->isAggregateType()) {
// UndefValue, ConstantAggregateZero
auto &C = cast<Constant>(Val);
unsigned Idx = 0;
while (auto Elt = C.getAggregateElement(Idx++)) {
auto EltRegs = getOrCreateVRegs(*Elt);
llvm::copy(EltRegs, std::back_inserter(*VRegs));
}
} else {
assert(SplitTys.size() == 1 && "unexpectedly split LLT");
VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
bool Success = translate(cast<Constant>(Val), VRegs->front());
if (!Success) {
OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
MF->getFunction().getSubprogram(),
&MF->getFunction().getEntryBlock());
R << "unable to translate constant: " << ore::NV("Type", Val.getType());
reportTranslationError(*MF, *TPC, *ORE, R);
return *VRegs;
}
}
return *VRegs;
}
int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
if (FrameIndices.find(&AI) != FrameIndices.end())
return FrameIndices[&AI];
unsigned ElementSize = DL->getTypeAllocSize(AI.getAllocatedType());
unsigned Size =
ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();
// Always allocate at least one byte.
Size = std::max(Size, 1u);
unsigned Alignment = AI.getAlignment();
if (!Alignment)
Alignment = DL->getABITypeAlignment(AI.getAllocatedType());
int &FI = FrameIndices[&AI];
FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
return FI;
}
unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
unsigned Alignment = 0;
Type *ValTy = nullptr;
if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
Alignment = SI->getAlignment();
ValTy = SI->getValueOperand()->getType();
} else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
Alignment = LI->getAlignment();
ValTy = LI->getType();
} else if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) {
// TODO(PR27168): This instruction has no alignment attribute, but unlike
// the default alignment for load/store, the default here is to assume
// it has NATURAL alignment, not DataLayout-specified alignment.
const DataLayout &DL = AI->getModule()->getDataLayout();
Alignment = DL.getTypeStoreSize(AI->getCompareOperand()->getType());
ValTy = AI->getCompareOperand()->getType();
} else if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) {
// TODO(PR27168): This instruction has no alignment attribute, but unlike
// the default alignment for load/store, the default here is to assume
// it has NATURAL alignment, not DataLayout-specified alignment.
const DataLayout &DL = AI->getModule()->getDataLayout();
Alignment = DL.getTypeStoreSize(AI->getValOperand()->getType());
ValTy = AI->getType();
} else {
OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
R << "unable to translate memop: " << ore::NV("Opcode", &I);
reportTranslationError(*MF, *TPC, *ORE, R);
return 1;
}
return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
}
MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
MachineBasicBlock *&MBB = BBToMBB[&BB];
assert(MBB && "BasicBlock was not encountered before");
return *MBB;
}
void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
assert(NewPred && "new predecessor must be a real MachineBasicBlock");
MachinePreds[Edge].push_back(NewPred);
}
bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
MachineIRBuilder &MIRBuilder) {
// Get or create a virtual register for each value.
// Open question: for a Constant, should we instead load an immediate
// (loadimm) or inline the constant at each use? Either way, creating a
// virtual register requires knowing its size.
Register Op0 = getOrCreateVReg(*U.getOperand(0));
Register Op1 = getOrCreateVReg(*U.getOperand(1));
Register Res = getOrCreateVReg(U);
uint16_t Flags = 0;
if (isa<Instruction>(U)) {
const Instruction &I = cast<Instruction>(U);
Flags = MachineInstr::copyFlagsFromInstruction(I);
}
MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags);
return true;
}
bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
// -0.0 - X --> G_FNEG
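// Illustrative example (editorial note; vreg names are hypothetical): for
//   %d = fsub float -0.0, %x
// we emit
//   %d:_(s32) = G_FNEG %x
// instead of materializing -0.0 and building a G_FSUB.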
if (isa<Constant>(U.getOperand(0)) &&
U.getOperand(0) == ConstantFP::getZeroValueForNegation(U.getType())) {
Register Op1 = getOrCreateVReg(*U.getOperand(1));
Register Res = getOrCreateVReg(U);
uint16_t Flags = 0;
if (isa<Instruction>(U)) {
const Instruction &I = cast<Instruction>(U);
Flags = MachineInstr::copyFlagsFromInstruction(I);
}
// Negate the last operand of the FSUB
MIRBuilder.buildInstr(TargetOpcode::G_FNEG, {Res}, {Op1}, Flags);
return true;
}
return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
}
bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
Register Op0 = getOrCreateVReg(*U.getOperand(0));
Register Res = getOrCreateVReg(U);
uint16_t Flags = 0;
if (isa<Instruction>(U)) {
const Instruction &I = cast<Instruction>(U);
Flags = MachineInstr::copyFlagsFromInstruction(I);
}
MIRBuilder.buildInstr(TargetOpcode::G_FNEG, {Res}, {Op0}, Flags);
return true;
}
bool IRTranslator::translateCompare(const User &U,
MachineIRBuilder &MIRBuilder) {
const CmpInst *CI = dyn_cast<CmpInst>(&U);
Register Op0 = getOrCreateVReg(*U.getOperand(0));
Register Op1 = getOrCreateVReg(*U.getOperand(1));
Register Res = getOrCreateVReg(U);
CmpInst::Predicate Pred =
CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
cast<ConstantExpr>(U).getPredicate());
if (CmpInst::isIntPredicate(Pred))
MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
else if (Pred == CmpInst::FCMP_FALSE)
MIRBuilder.buildCopy(
Res, getOrCreateVReg(*Constant::getNullValue(CI->getType())));
else if (Pred == CmpInst::FCMP_TRUE)
MIRBuilder.buildCopy(
Res, getOrCreateVReg(*Constant::getAllOnesValue(CI->getType())));
else {
MIRBuilder.buildInstr(TargetOpcode::G_FCMP, {Res}, {Pred, Op0, Op1},
MachineInstr::copyFlagsFromInstruction(*CI));
}
return true;
}
bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
const ReturnInst &RI = cast<ReturnInst>(U);
const Value *Ret = RI.getReturnValue();
if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
Ret = nullptr;
ArrayRef<Register> VRegs;
if (Ret)
VRegs = getOrCreateVRegs(*Ret);
Register SwiftErrorVReg = 0;
if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
&RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());
}
// The target may mess with the insertion point, but that does not matter
// here: a return is always the last instruction of its block.
return CLI->lowerReturn(MIRBuilder, Ret, VRegs, SwiftErrorVReg);
}
bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
const BranchInst &BrInst = cast<BranchInst>(U);
unsigned Succ = 0;
if (!BrInst.isUnconditional()) {
// We want a G_BRCOND to the true BB followed by an unconditional branch.
Register Tst = getOrCreateVReg(*BrInst.getCondition());
const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
MachineBasicBlock &TrueBB = getMBB(TrueTgt);
MIRBuilder.buildBrCond(Tst, TrueBB);
}
const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
MachineBasicBlock &TgtBB = getMBB(BrTgt);
MachineBasicBlock &CurBB = MIRBuilder.getMBB();
// If the unconditional target is the layout successor, fallthrough.
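// Illustrative note (editorial): for "br label %next" where %next's MBB is
// laid out immediately after the current one, no G_BR is emitted; the
// implicit fallthrough suffices.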
if (!CurBB.isLayoutSuccessor(&TgtBB))
MIRBuilder.buildBr(TgtBB);
// Link successors.
for (const BasicBlock *Succ : successors(&BrInst))
CurBB.addSuccessor(&getMBB(*Succ));
return true;
}
void IRTranslator::addSuccessorWithProb(MachineBasicBlock *Src,
MachineBasicBlock *Dst,
BranchProbability Prob) {
if (!FuncInfo.BPI) {
Src->addSuccessorWithoutProb(Dst);
return;
}
if (Prob.isUnknown())
Prob = getEdgeProbability(Src, Dst);
Src->addSuccessor(Dst, Prob);
}
BranchProbability
IRTranslator::getEdgeProbability(const MachineBasicBlock *Src,
const MachineBasicBlock *Dst) const {
const BasicBlock *SrcBB = Src->getBasicBlock();
const BasicBlock *DstBB = Dst->getBasicBlock();
if (!FuncInfo.BPI) {
// If BPI is not available, set the default probability as 1 / N, where N is
// the number of successors.
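// Illustrative example (editorial note): a two-way conditional branch
// without BPI gets BranchProbability(1, 2), i.e. 50% on each edge.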
auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
return BranchProbability(1, SuccSize);
}
return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB);
}
bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) {
using namespace SwitchCG;
// Extract cases from the switch.
const SwitchInst &SI = cast<SwitchInst>(U);
BranchProbabilityInfo *BPI = FuncInfo.BPI;
CaseClusterVector Clusters;
Clusters.reserve(SI.getNumCases());
for (auto &I : SI.cases()) {
MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor());
assert(Succ && "Could not find successor mbb in mapping");
const ConstantInt *CaseVal = I.getCaseValue();
BranchProbability Prob =
BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
: BranchProbability(1, SI.getNumCases() + 1);
Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
}
MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest());
// Cluster adjacent cases with the same destination. We do this at all
// optimization levels because it's cheap to do and will make codegen faster
// if there are many clusters.
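// Illustrative example (editorial note): cases 1->BB1, 2->BB1, 3->BB1,
// 7->BB2 become two clusters, [1,3]->BB1 and [7,7]->BB2.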
sortAndRangeify(Clusters);
MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent());
// If there is only the default destination, jump there directly.
if (Clusters.empty()) {
SwitchMBB->addSuccessor(DefaultMBB);
if (DefaultMBB != SwitchMBB->getNextNode())
MIB.buildBr(*DefaultMBB);
return true;
}
SL->findJumpTables(Clusters, &SI, DefaultMBB);
LLVM_DEBUG({
dbgs() << "Case clusters: ";
for (const CaseCluster &C : Clusters) {
if (C.Kind == CC_JumpTable)
dbgs() << "JT:";
if (C.Kind == CC_BitTests)
dbgs() << "BT:";
C.Low->getValue().print(dbgs(), true);
if (C.Low != C.High) {
dbgs() << '-';
C.High->getValue().print(dbgs(), true);
}
dbgs() << ' ';
}
dbgs() << '\n';
});
assert(!Clusters.empty());
SwitchWorkList WorkList;
CaseClusterIt First = Clusters.begin();
CaseClusterIt Last = Clusters.end() - 1;
auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
// FIXME: At the moment we don't do any splitting optimizations here like
// SelectionDAG does, so this worklist only has one entry.
while (!WorkList.empty()) {
SwitchWorkListItem W = WorkList.back();
WorkList.pop_back();
if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
return false;
}
return true;
}
void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT,
MachineBasicBlock *MBB) {
// Emit the code for the jump table
assert(JT.Reg != -1U && "Should lower JT Header first!");
MachineIRBuilder MIB(*MBB->getParent());
MIB.setMBB(*MBB);
MIB.setDebugLoc(CurBuilder->getDebugLoc());
Type *PtrIRTy = Type::getInt8PtrTy(MF->getFunction().getContext());
const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
auto Table = MIB.buildJumpTable(PtrTy, JT.JTI);
MIB.buildBrJT(Table.getReg(0), JT.JTI, JT.Reg);
}
bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT,
SwitchCG::JumpTableHeader &JTH,
MachineBasicBlock *HeaderBB) {
MachineIRBuilder MIB(*HeaderBB->getParent());
MIB.setMBB(*HeaderBB);
MIB.setDebugLoc(CurBuilder->getDebugLoc());
const Value &SValue = *JTH.SValue;
// Subtract the lowest switch case value from the value being switched on.
const LLT SwitchTy = getLLTForType(*SValue.getType(), *DL);
Register SwitchOpReg = getOrCreateVReg(SValue);
auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First);
auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);
// This value may be smaller or larger than the target's pointer type, and
// may therefore require extension or truncation.
Type *PtrIRTy = SValue.getType()->getPointerTo();
const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy));
Sub = MIB.buildZExtOrTrunc(PtrScalarTy, Sub);
JT.Reg = Sub.getReg(0);
if (JTH.OmitRangeCheck) {
if (JT.MBB != HeaderBB->getNextNode())
MIB.buildBr(*JT.MBB);
return true;
}
// Emit the range check for the jump table, and branch to the default block
// for the switch statement if the value being switched on exceeds the
// largest case in the switch.
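// Illustrative example (editorial note; vregs hypothetical): for cases
// covering [10, 13]:
//   %sub = G_SUB %switchval, 10      ; emitted above
//   %cst = G_CONSTANT 3              ; Last - First
//   %cmp = G_ICMP ugt, %sub, %cst
//   G_BRCOND %cmp, %default
// A single unsigned compare rejects both values below 10 and above 13.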
auto Cst = getOrCreateVReg(
*ConstantInt::get(SValue.getType(), JTH.Last - JTH.First));
Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0);
auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::scalar(1), Sub, Cst);
auto BrCond = MIB.buildBrCond(Cmp.getReg(0), *JT.Default);
// Avoid emitting unnecessary branches to the next block.
if (JT.MBB != HeaderBB->getNextNode())
BrCond = MIB.buildBr(*JT.MBB);
return true;
}
void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
MachineBasicBlock *SwitchBB,
MachineIRBuilder &MIB) {
Register CondLHS = getOrCreateVReg(*CB.CmpLHS);
Register Cond;
DebugLoc OldDbgLoc = MIB.getDebugLoc();
MIB.setDebugLoc(CB.DbgLoc);
MIB.setMBB(*CB.ThisBB);
if (CB.PredInfo.NoCmp) {
// Branch or fall through to TrueBB.
addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
CB.ThisBB);
CB.ThisBB->normalizeSuccProbs();
if (CB.TrueBB != CB.ThisBB->getNextNode())
MIB.buildBr(*CB.TrueBB);
MIB.setDebugLoc(OldDbgLoc);
return;
}
const LLT i1Ty = LLT::scalar(1);
// Build the compare.
if (!CB.CmpMHS) {
Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
Cond = MIB.buildICmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
} else {
- assert(CB.PredInfo.Pred == CmpInst::ICMP_ULE &&
- "Can only handle ULE ranges");
+ assert(CB.PredInfo.Pred == CmpInst::ICMP_SLE &&
+ "Can only handle SLE ranges");
const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
Register CmpOpReg = getOrCreateVReg(*CB.CmpMHS);
if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
Cond =
- MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, CmpOpReg, CondRHS).getReg(0);
+ MIB.buildICmp(CmpInst::ICMP_SLE, i1Ty, CmpOpReg, CondRHS).getReg(0);
} else {
const LLT &CmpTy = MRI->getType(CmpOpReg);
auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
auto Diff = MIB.buildConstant(CmpTy, High - Low);
Cond = MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, Sub, Diff).getReg(0);
}
}
// Update successor info
addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
CB.ThisBB);
// TrueBB and FalseBB are always different unless the incoming IR is
// degenerate. This only happens when running llc on weird IR.
if (CB.TrueBB != CB.FalseBB)
addSuccessorWithProb(CB.ThisBB, CB.FalseBB, CB.FalseProb);
CB.ThisBB->normalizeSuccProbs();
// if (SwitchBB->getBasicBlock() != CB.FalseBB->getBasicBlock())
addMachineCFGPred({SwitchBB->getBasicBlock(), CB.FalseBB->getBasicBlock()},
CB.ThisBB);
// If the lhs block is the next block, invert the condition so that we can
// fall through to the lhs instead of the rhs block.
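// Illustrative note (editorial): there is no generic logical-not, so the
// inversion below is "Cond XOR 1" on the s1 condition, which swaps which
// successor the conditional branch targets.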
if (CB.TrueBB == CB.ThisBB->getNextNode()) {
std::swap(CB.TrueBB, CB.FalseBB);
auto True = MIB.buildConstant(i1Ty, 1);
Cond = MIB.buildInstr(TargetOpcode::G_XOR, {i1Ty}, {Cond, True}, None)
.getReg(0);
}
MIB.buildBrCond(Cond, *CB.TrueBB);
MIB.buildBr(*CB.FalseBB);
MIB.setDebugLoc(OldDbgLoc);
}
bool IRTranslator::lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W,
MachineBasicBlock *SwitchMBB,
MachineBasicBlock *CurMBB,
MachineBasicBlock *DefaultMBB,
MachineIRBuilder &MIB,
MachineFunction::iterator BBI,
BranchProbability UnhandledProbs,
SwitchCG::CaseClusterIt I,
MachineBasicBlock *Fallthrough,
bool FallthroughUnreachable) {
using namespace SwitchCG;
MachineFunction *CurMF = SwitchMBB->getParent();
// FIXME: Optimize away range check based on pivot comparisons.
JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
BranchProbability DefaultProb = W.DefaultProb;
// The jump block hasn't been inserted yet; insert it here.
MachineBasicBlock *JumpMBB = JT->MBB;
CurMF->insert(BBI, JumpMBB);
// Since the jump table block is separate from the switch block, we need
// to keep track of it as a machine predecessor to the default block;
// otherwise we lose the phi edges.
addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
CurMBB);
addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
JumpMBB);
auto JumpProb = I->Prob;
auto FallthroughProb = UnhandledProbs;
// If the default statement is a target of the jump table, we evenly
// distribute the default probability to successors of CurMBB. Also
// update the probability on the edge from JumpMBB to Fallthrough.
for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
SE = JumpMBB->succ_end();
SI != SE; ++SI) {
if (*SI == DefaultMBB) {
JumpProb += DefaultProb / 2;
FallthroughProb -= DefaultProb / 2;
JumpMBB->setSuccProbability(SI, DefaultProb / 2);
JumpMBB->normalizeSuccProbs();
} else {
// Also record edges from the jump table block to its successors.
addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},
JumpMBB);
}
}
// Skip the range check if the fallthrough block is unreachable.
if (FallthroughUnreachable)
JTH->OmitRangeCheck = true;
if (!JTH->OmitRangeCheck)
addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
CurMBB->normalizeSuccProbs();
// The jump table header will be inserted into our current block; it will
// do the range check and fall through to our fallthrough block.
JTH->HeaderBB = CurMBB;
JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
// If we're in the right place, emit the jump table header right now.
if (CurMBB == SwitchMBB) {
if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
return false;
JTH->Emitted = true;
}
return true;
}
bool IRTranslator::lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I,
Value *Cond,
MachineBasicBlock *Fallthrough,
bool FallthroughUnreachable,
BranchProbability UnhandledProbs,
MachineBasicBlock *CurMBB,
MachineIRBuilder &MIB,
MachineBasicBlock *SwitchMBB) {
using namespace SwitchCG;
const Value *RHS, *LHS, *MHS;
CmpInst::Predicate Pred;
if (I->Low == I->High) {
// Check Cond == I->Low.
Pred = CmpInst::ICMP_EQ;
LHS = Cond;
RHS = I->Low;
MHS = nullptr;
} else {
// Check I->Low <= Cond <= I->High.
- Pred = CmpInst::ICMP_ULE;
+ Pred = CmpInst::ICMP_SLE;
LHS = I->Low;
MHS = Cond;
RHS = I->High;
}
// If Fallthrough is unreachable, fold away the comparison.
// The false probability is the sum of all unhandled cases.
CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough,
CurMBB, MIB.getDebugLoc(), I->Prob, UnhandledProbs);
emitSwitchCase(CB, SwitchMBB, MIB);
return true;
}
bool IRTranslator::lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W,
Value *Cond,
MachineBasicBlock *SwitchMBB,
MachineBasicBlock *DefaultMBB,
MachineIRBuilder &MIB) {
using namespace SwitchCG;
MachineFunction *CurMF = FuncInfo.MF;
MachineBasicBlock *NextMBB = nullptr;
MachineFunction::iterator BBI(W.MBB);
if (++BBI != FuncInfo.MF->end())
NextMBB = &*BBI;
if (EnableOpts) {
// Here, we order cases by probability so the most likely case will be
// checked first. However, two clusters can have the same probability, in
// which case their relative ordering is non-deterministic. So we use Low
// as a tie-breaker, since clusters are guaranteed never to overlap.
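// Illustrative example (editorial note): clusters [5,5] (p=0.4), [1,1]
// (p=0.4), [9,9] (p=0.2) sort to [1,1], [5,5], [9,9]; Low decides the
// order of the two equally probable clusters.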
llvm::sort(W.FirstCluster, W.LastCluster + 1,
[](const CaseCluster &a, const CaseCluster &b) {
return a.Prob != b.Prob
? a.Prob > b.Prob
: a.Low->getValue().slt(b.Low->getValue());
});
// Rearrange the case blocks so that the last one falls through if possible
// without changing the order of probabilities.
for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) {
--I;
if (I->Prob > W.LastCluster->Prob)
break;
if (I->Kind == CC_Range && I->MBB == NextMBB) {
std::swap(*I, *W.LastCluster);
break;
}
}
}
// Compute total probability.
BranchProbability DefaultProb = W.DefaultProb;
BranchProbability UnhandledProbs = DefaultProb;
for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
UnhandledProbs += I->Prob;
MachineBasicBlock *CurMBB = W.MBB;
for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
bool FallthroughUnreachable = false;
MachineBasicBlock *Fallthrough;
if (I == W.LastCluster) {
// For the last cluster, fall through to the default destination.
Fallthrough = DefaultMBB;
FallthroughUnreachable = isa<UnreachableInst>(
DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
} else {
Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
CurMF->insert(BBI, Fallthrough);
}
UnhandledProbs -= I->Prob;
switch (I->Kind) {
case CC_BitTests: {
LLVM_DEBUG(dbgs() << "Switch to bit test optimization unimplemented");
return false; // Bit tests currently unimplemented.
}
case CC_JumpTable: {
if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
UnhandledProbs, I, Fallthrough,
FallthroughUnreachable)) {
LLVM_DEBUG(dbgs() << "Failed to lower jump table");
return false;
}
break;
}
case CC_Range: {
if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough,
FallthroughUnreachable, UnhandledProbs,
CurMBB, MIB, SwitchMBB)) {
LLVM_DEBUG(dbgs() << "Failed to lower switch range");
return false;
}
break;
}
}
CurMBB = Fallthrough;
}
return true;
}
bool IRTranslator::translateIndirectBr(const User &U,
MachineIRBuilder &MIRBuilder) {
const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);
const Register Tgt = getOrCreateVReg(*BrInst.getAddress());
MIRBuilder.buildBrIndirect(Tgt);
// Link successors.
MachineBasicBlock &CurBB = MIRBuilder.getMBB();
for (const BasicBlock *Succ : successors(&BrInst))
CurBB.addSuccessor(&getMBB(*Succ));
return true;
}
static bool isSwiftError(const Value *V) {
if (auto Arg = dyn_cast<Argument>(V))
return Arg->hasSwiftErrorAttr();
if (auto AI = dyn_cast<AllocaInst>(V))
return AI->isSwiftError();
return false;
}
bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
const LoadInst &LI = cast<LoadInst>(U);
auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
: MachineMemOperand::MONone;
Flags |= MachineMemOperand::MOLoad;
if (DL->getTypeStoreSize(LI.getType()) == 0)
return true;
ArrayRef<Register> Regs = getOrCreateVRegs(LI);
ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
Register Base = getOrCreateVReg(*LI.getPointerOperand());
Type *OffsetIRTy = DL->getIntPtrType(LI.getPointerOperandType());
LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
if (CLI->supportSwiftError() && isSwiftError(LI.getPointerOperand())) {
assert(Regs.size() == 1 && "swifterror should be single pointer");
Register VReg = SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(),
LI.getPointerOperand());
MIRBuilder.buildCopy(Regs[0], VReg);
return true;
}
for (unsigned i = 0; i < Regs.size(); ++i) {
Register Addr;
MIRBuilder.materializeGEP(Addr, Base, OffsetTy, Offsets[i] / 8);
MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
unsigned BaseAlign = getMemOpAlignment(LI);
auto MMO = MF->getMachineMemOperand(
Ptr, Flags, (MRI->getType(Regs[i]).getSizeInBits() + 7) / 8,
MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
LI.getSyncScopeID(), LI.getOrdering());
MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
}
return true;
}
bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
const StoreInst &SI = cast<StoreInst>(U);
auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile
: MachineMemOperand::MONone;
Flags |= MachineMemOperand::MOStore;
if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
return true;
ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand());
ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
Register Base = getOrCreateVReg(*SI.getPointerOperand());
Type *OffsetIRTy = DL->getIntPtrType(SI.getPointerOperandType());
LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
assert(Vals.size() == 1 && "swifterror should be single pointer");
Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(),
SI.getPointerOperand());
MIRBuilder.buildCopy(VReg, Vals[0]);
return true;
}
for (unsigned i = 0; i < Vals.size(); ++i) {
Register Addr;
MIRBuilder.materializeGEP(Addr, Base, OffsetTy, Offsets[i] / 8);
MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
unsigned BaseAlign = getMemOpAlignment(SI);
auto MMO = MF->getMachineMemOperand(
Ptr, Flags, (MRI->getType(Vals[i]).getSizeInBits() + 7) / 8,
MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
SI.getSyncScopeID(), SI.getOrdering());
MIRBuilder.buildStore(Vals[i], Addr, *MMO);
}
return true;
}
static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
const Value *Src = U.getOperand(0);
Type *Int32Ty = Type::getInt32Ty(U.getContext());
// getIndexedOffsetInType is designed for GEPs: the first index steps over
// whole objects of the type rather than indexing into the aggregate, so we
// prepend a zero to stay at the start of Src's type.
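// Illustrative example (editorial note): for "extractvalue {i32, i64} %a, 1"
// we query getIndexedOffsetInType with indices [0, 1]; with a typical
// 64-bit DataLayout that returns byte offset 8, which the multiply below
// reports as 64 bits.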
SmallVector<Value *, 1> Indices;
Indices.push_back(ConstantInt::get(Int32Ty, 0));
if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
for (auto Idx : EVI->indices())
Indices.push_back(ConstantInt::get(Int32Ty, Idx));
} else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
for (auto Idx : IVI->indices())
Indices.push_back(ConstantInt::get(Int32Ty, Idx));
} else {
for (unsigned i = 1; i < U.getNumOperands(); ++i)
Indices.push_back(U.getOperand(i));
}
return 8 * static_cast<uint64_t>(
DL.getIndexedOffsetInType(Src->getType(), Indices));
}
bool IRTranslator::translateExtractValue(const User &U,
MachineIRBuilder &MIRBuilder) {
const Value *Src = U.getOperand(0);
uint64_t Offset = getOffsetFromIndices(U, *DL);
ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin();
auto &DstRegs = allocateVRegs(U);
for (unsigned i = 0; i < DstRegs.size(); ++i)
DstRegs[i] = SrcRegs[Idx++];
return true;
}
bool IRTranslator::translateInsertValue(const User &U,
MachineIRBuilder &MIRBuilder) {
const Value *Src = U.getOperand(0);
uint64_t Offset = getOffsetFromIndices(U, *DL);
auto &DstRegs = allocateVRegs(U);
ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
auto InsertedIt = InsertedRegs.begin();
for (unsigned i = 0; i < DstRegs.size(); ++i) {
if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
DstRegs[i] = *InsertedIt++;
else
DstRegs[i] = SrcRegs[i];
}
return true;
}
bool IRTranslator::translateSelect(const User &U,
MachineIRBuilder &MIRBuilder) {
Register Tst = getOrCreateVReg(*U.getOperand(0));
ArrayRef<Register> ResRegs = getOrCreateVRegs(U);
ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2));
const SelectInst &SI = cast<SelectInst>(U);
uint16_t Flags = 0;
if (const CmpInst *Cmp = dyn_cast<CmpInst>(SI.getCondition()))
Flags = MachineInstr::copyFlagsFromInstruction(*Cmp);
for (unsigned i = 0; i < ResRegs.size(); ++i) {
MIRBuilder.buildInstr(TargetOpcode::G_SELECT, {ResRegs[i]},
{Tst, Op0Regs[i], Op1Regs[i]}, Flags);
}
return true;
}
bool IRTranslator::translateBitCast(const User &U,
MachineIRBuilder &MIRBuilder) {
// If we're bitcasting to the source type, we can reuse the source vreg.
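// Illustrative example (editorial note): "bitcast i8* %p to i32*" maps both
// sides to the same pointer LLT (p0), so %p's existing vreg can simply be
// recorded as the result, with no instruction emitted.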
if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
getLLTForType(*U.getType(), *DL)) {
Register SrcReg = getOrCreateVReg(*U.getOperand(0));
auto &Regs = *VMap.getVRegs(U);
// If we already assigned a vreg for this bitcast, we can't change that.
// Emit a copy to satisfy the users we already emitted.
if (!Regs.empty())
MIRBuilder.buildCopy(Regs[0], SrcReg);
else {
Regs.push_back(SrcReg);
VMap.getOffsets(U)->push_back(0);
}
return true;
}
return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
}
bool IRTranslator::translateCast(unsigned Opcode, const User &U,
MachineIRBuilder &MIRBuilder) {
Register Op = getOrCreateVReg(*U.getOperand(0));
Register Res = getOrCreateVReg(U);
MIRBuilder.buildInstr(Opcode, {Res}, {Op});
return true;
}
bool IRTranslator::translateGetElementPtr(const User &U,
MachineIRBuilder &MIRBuilder) {
// FIXME: support vector GEPs.
if (U.getType()->isVectorTy())
return false;
Value &Op0 = *U.getOperand(0);
Register BaseReg = getOrCreateVReg(Op0);
Type *PtrIRTy = Op0.getType();
LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
int64_t Offset = 0;
for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
GTI != E; ++GTI) {
const Value *Idx = GTI.getOperand();
if (StructType *StTy = GTI.getStructTypeOrNull()) {
unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
continue;
} else {
uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());
// If this is a scalar constant or a splat vector of constants,
// handle it quickly.
if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
Offset += ElementSize * CI->getSExtValue();
continue;
}
if (Offset != 0) {
Register NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset);
MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetMIB.getReg(0));
BaseReg = NewBaseReg;
Offset = 0;
}
Register IdxReg = getOrCreateVReg(*Idx);
if (MRI->getType(IdxReg) != OffsetTy) {
Register NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
IdxReg = NewIdxReg;
}
// N = N + Idx * ElementSize;
// Avoid doing it for ElementSize of 1.
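// Illustrative example (editorial note; vregs hypothetical): for
// "getelementptr i32, i32* %p, i64 %i" this emits
//   %sz  = G_CONSTANT i64 4
//   %off = G_MUL %sz, %i
//   %q   = G_GEP %p, %off
// whereas an i8 element (ElementSize == 1) uses %i directly.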
Register GepOffsetReg;
if (ElementSize != 1) {
GepOffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
auto ElementSizeMIB = MIRBuilder.buildConstant(
getLLTForType(*OffsetIRTy, *DL), ElementSize);
MIRBuilder.buildMul(GepOffsetReg, ElementSizeMIB.getReg(0), IdxReg);
} else
GepOffsetReg = IdxReg;
Register NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
MIRBuilder.buildGEP(NewBaseReg, BaseReg, GepOffsetReg);
BaseReg = NewBaseReg;
}
}
if (Offset != 0) {
auto OffsetMIB =
MIRBuilder.buildConstant(getLLTForType(*OffsetIRTy, *DL), Offset);
MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0));
return true;
}
MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
return true;
}
bool IRTranslator::translateMemfunc(const CallInst &CI,
MachineIRBuilder &MIRBuilder,
unsigned ID) {
// If the source is undef, then just emit a nop.
if (isa<UndefValue>(CI.getArgOperand(1))) {
switch (ID) {
case Intrinsic::memmove:
case Intrinsic::memcpy:
case Intrinsic::memset:
return true;
default:
break;
}
}
LLT SizeTy = getLLTForType(*CI.getArgOperand(2)->getType(), *DL);
Type *DstTy = CI.getArgOperand(0)->getType();
if (cast<PointerType>(DstTy)->getAddressSpace() != 0 ||
SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0))
return false;
SmallVector<CallLowering::ArgInfo, 8> Args;
for (int i = 0; i < 3; ++i) {
const auto &Arg = CI.getArgOperand(i);
Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());
}
const char *Callee;
switch (ID) {
case Intrinsic::memmove:
case Intrinsic::memcpy: {
Type *SrcTy = CI.getArgOperand(1)->getType();
if(cast<PointerType>(SrcTy)->getAddressSpace() != 0)
return false;
Callee = ID == Intrinsic::memcpy ? "memcpy" : "memmove";
break;
}
case Intrinsic::memset:
Callee = "memset";
break;
default:
return false;
}
return CLI->lowerCall(MIRBuilder, CI.getCallingConv(),
MachineOperand::CreateES(Callee),
CallLowering::ArgInfo({0}, CI.getType()), Args);
}
void IRTranslator::getStackGuard(Register DstReg,
MachineIRBuilder &MIRBuilder) {
const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD);
MIB.addDef(DstReg);
auto &TLI = *MF->getSubtarget().getTargetLowering();
Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
if (!Global)
return;
MachinePointerInfo MPInfo(Global);
auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
MachineMemOperand::MODereferenceable;
MachineMemOperand *MemRef =
MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
DL->getPointerABIAlignment(0));
MIB.setMemRefs({MemRef});
}
bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
MachineIRBuilder &MIRBuilder) {
ArrayRef<Register> ResRegs = getOrCreateVRegs(CI);
MIRBuilder.buildInstr(Op)
.addDef(ResRegs[0])
.addDef(ResRegs[1])
.addUse(getOrCreateVReg(*CI.getOperand(0)))
.addUse(getOrCreateVReg(*CI.getOperand(1)));
return true;
}
unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
switch (ID) {
default:
break;
case Intrinsic::bswap:
return TargetOpcode::G_BSWAP;
case Intrinsic::ceil:
return TargetOpcode::G_FCEIL;
case Intrinsic::cos:
return TargetOpcode::G_FCOS;
case Intrinsic::ctpop:
return TargetOpcode::G_CTPOP;
case Intrinsic::exp:
return TargetOpcode::G_FEXP;
case Intrinsic::exp2:
return TargetOpcode::G_FEXP2;
case Intrinsic::fabs:
return TargetOpcode::G_FABS;
case Intrinsic::copysign:
return TargetOpcode::G_FCOPYSIGN;
case Intrinsic::minnum:
return TargetOpcode::G_FMINNUM;
case Intrinsic::maxnum:
return TargetOpcode::G_FMAXNUM;
case Intrinsic::minimum:
return TargetOpcode::G_FMINIMUM;
case Intrinsic::maximum:
return TargetOpcode::G_FMAXIMUM;
case Intrinsic::canonicalize:
return TargetOpcode::G_FCANONICALIZE;
case Intrinsic::floor:
return TargetOpcode::G_FFLOOR;
case Intrinsic::fma:
return TargetOpcode::G_FMA;
case Intrinsic::log:
return TargetOpcode::G_FLOG;
case Intrinsic::log2:
return TargetOpcode::G_FLOG2;
case Intrinsic::log10:
return TargetOpcode::G_FLOG10;
case Intrinsic::nearbyint:
return TargetOpcode::G_FNEARBYINT;
case Intrinsic::pow:
return TargetOpcode::G_FPOW;
case Intrinsic::rint:
return TargetOpcode::G_FRINT;
case Intrinsic::round:
return TargetOpcode::G_INTRINSIC_ROUND;
case Intrinsic::sin:
return TargetOpcode::G_FSIN;
case Intrinsic::sqrt:
return TargetOpcode::G_FSQRT;
case Intrinsic::trunc:
return TargetOpcode::G_INTRINSIC_TRUNC;
}
return Intrinsic::not_intrinsic;
}
bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
Intrinsic::ID ID,
MachineIRBuilder &MIRBuilder) {
unsigned Op = getSimpleIntrinsicOpcode(ID);
// Is this a simple intrinsic?
if (Op == Intrinsic::not_intrinsic)
return false;
// Yes. Let's translate it.
SmallVector<llvm::SrcOp, 4> VRegs;
for (auto &Arg : CI.arg_operands())
VRegs.push_back(getOrCreateVReg(*Arg));
MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
MachineInstr::copyFlagsFromInstruction(CI));
return true;
}
bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
MachineIRBuilder &MIRBuilder) {
// If this is a simple intrinsic (that is, we just need to add a def of
// a vreg and uses for each arg operand), then translate it.
if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
return true;
switch (ID) {
default:
break;
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end: {
// No stack colouring at -O0; discard region information.
if (MF->getTarget().getOptLevel() == CodeGenOpt::None)
return true;
unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
: TargetOpcode::LIFETIME_END;
// Get the underlying objects for the location passed on the lifetime
// marker.
SmallVector<const Value *, 4> Allocas;
GetUnderlyingObjects(CI.getArgOperand(1), Allocas, *DL);
// Iterate over each underlying object, creating lifetime markers for each
// static alloca. Quit if we find a non-static alloca.
for (const Value *V : Allocas) {
const AllocaInst *AI = dyn_cast<AllocaInst>(V);
if (!AI)
continue;
if (!AI->isStaticAlloca())
return true;
MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI));
}
return true;
}
case Intrinsic::dbg_declare: {
const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
assert(DI.getVariable() && "Missing variable");
const Value *Address = DI.getAddress();
if (!Address || isa<UndefValue>(Address)) {
LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
return true;
}
assert(DI.getVariable()->isValidLocationForIntrinsic(
MIRBuilder.getDebugLoc()) &&
"Expected inlined-at fields to agree");
auto AI = dyn_cast<AllocaInst>(Address);
if (AI && AI->isStaticAlloca()) {
// Static allocas are tracked at the MF level, no need for DBG_VALUE
// instructions (in fact, they get ignored if they *do* exist).
MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(),
getOrCreateFrameIndex(*AI), DI.getDebugLoc());
} else {
// A dbg.declare describes the address of a source variable, so lower it
// into an indirect DBG_VALUE.
MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address),
DI.getVariable(), DI.getExpression());
}
return true;
}
case Intrinsic::dbg_label: {
const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
assert(DI.getLabel() && "Missing label");
assert(DI.getLabel()->isValidLocationForIntrinsic(
MIRBuilder.getDebugLoc()) &&
"Expected inlined-at fields to agree");
MIRBuilder.buildDbgLabel(DI.getLabel());
return true;
}
case Intrinsic::vaend:
// No target I know of cares about va_end. Certainly no in-tree target
// does. Simplest intrinsic ever!
return true;
case Intrinsic::vastart: {
auto &TLI = *MF->getSubtarget().getTargetLowering();
Value *Ptr = CI.getArgOperand(0);
unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;
// FIXME: Get alignment
MIRBuilder.buildInstr(TargetOpcode::G_VASTART)
.addUse(getOrCreateVReg(*Ptr))
.addMemOperand(MF->getMachineMemOperand(
MachinePointerInfo(Ptr), MachineMemOperand::MOStore, ListSize, 1));
return true;
}
case Intrinsic::dbg_value: {
// This form of DBG_VALUE is target-independent.
const DbgValueInst &DI = cast<DbgValueInst>(CI);
const Value *V = DI.getValue();
assert(DI.getVariable()->isValidLocationForIntrinsic(
MIRBuilder.getDebugLoc()) &&
"Expected inlined-at fields to agree");
if (!V) {
// Currently the optimizer can produce this; insert an undef to
// help debugging. Probably the optimizer should not do this.
MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression());
} else if (const auto *CI = dyn_cast<Constant>(V)) {
MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
} else {
Register Reg = getOrCreateVReg(*V);
// FIXME: This does not handle register-indirect values at offset 0. The
// direct/indirect thing shouldn't really be handled by something as
// implicit as reg+noreg vs reg+imm in the first place, but it seems
// pretty baked in right now.
MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression());
}
return true;
}
case Intrinsic::uadd_with_overflow:
return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
case Intrinsic::sadd_with_overflow:
return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
case Intrinsic::usub_with_overflow:
return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
case Intrinsic::ssub_with_overflow:
return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
case Intrinsic::umul_with_overflow:
return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
case Intrinsic::smul_with_overflow:
return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
case Intrinsic::fmuladd: {
const TargetMachine &TM = MF->getTarget();
const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
Register Dst = getOrCreateVReg(CI);
Register Op0 = getOrCreateVReg(*CI.getArgOperand(0));
Register Op1 = getOrCreateVReg(*CI.getArgOperand(1));
Register Op2 = getOrCreateVReg(*CI.getArgOperand(2));
if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
TLI.isFMAFasterThanFMulAndFAdd(TLI.getValueType(*DL, CI.getType()))) {
// TODO: Revisit this to see if we should move this part of the
// lowering to the combiner.
MIRBuilder.buildInstr(TargetOpcode::G_FMA, {Dst}, {Op0, Op1, Op2},
MachineInstr::copyFlagsFromInstruction(CI));
} else {
LLT Ty = getLLTForType(*CI.getType(), *DL);
auto FMul = MIRBuilder.buildInstr(TargetOpcode::G_FMUL, {Ty}, {Op0, Op1},
MachineInstr::copyFlagsFromInstruction(CI));
MIRBuilder.buildInstr(TargetOpcode::G_FADD, {Dst}, {FMul, Op2},
MachineInstr::copyFlagsFromInstruction(CI));
}
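// Illustrative summary (editorial note): llvm.fmuladd(a, b, c) thus becomes
// a single fused G_FMA when fusion is allowed and profitable on the target,
// and an unfused G_FMUL + G_FADD pair otherwise, with the call's FP flags
// copied onto the generated instructions in both cases.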
return true;
}
case Intrinsic::memcpy:
case Intrinsic::memmove:
case Intrinsic::memset:
return translateMemfunc(CI, MIRBuilder, ID);
case Intrinsic::eh_typeid_for: {
GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
Register Reg = getOrCreateVReg(CI);
unsigned TypeID = MF->getTypeIDFor(GV);
MIRBuilder.buildConstant(Reg, TypeID);
return true;
}
case Intrinsic::objectsize: {
// If we don't know by now, we're never going to know.
const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1));
MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0);
return true;
}
case Intrinsic::is_constant:
// If this wasn't constant-folded away by now, then it's not a
// constant.
MIRBuilder.buildConstant(getOrCreateVReg(CI), 0);
return true;
case Intrinsic::stackguard:
getStackGuard(getOrCreateVReg(CI), MIRBuilder);
return true;
case Intrinsic::stackprotector: {
LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
Register GuardVal = MRI->createGenericVirtualRegister(PtrTy);
getStackGuard(GuardVal, MIRBuilder);
AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
int FI = getOrCreateFrameIndex(*Slot);
MF->getFrameInfo().setStackProtectorIndex(FI);
MIRBuilder.buildStore(
GuardVal, getOrCreateVReg(*Slot),
*MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
MachineMemOperand::MOStore |
MachineMemOperand::MOVolatile,
PtrTy.getSizeInBits() / 8, 8));
return true;
}
case Intrinsic::stacksave: {
// Save the stack pointer to the location provided by the intrinsic.
Register Reg = getOrCreateVReg(CI);
Register StackPtr = MF->getSubtarget()
.getTargetLowering()
->getStackPointerRegisterToSaveRestore();
// If the target doesn't specify a stack pointer, then fall back.
if (!StackPtr)
return false;
MIRBuilder.buildCopy(Reg, StackPtr);
return true;
}
case Intrinsic::stackrestore: {
// Restore the stack pointer from the location provided by the intrinsic.
Register Reg = getOrCreateVReg(*CI.getArgOperand(0));
Register StackPtr = MF->getSubtarget()
.getTargetLowering()
->getStackPointerRegisterToSaveRestore();
// If the target doesn't specify a stack pointer, then fall back.
if (!StackPtr)
return false;
MIRBuilder.buildCopy(StackPtr, Reg);
return true;
}
case Intrinsic::cttz:
case Intrinsic::ctlz: {
ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
bool isTrailing = ID == Intrinsic::cttz;
unsigned Opcode = isTrailing
? Cst->isZero() ? TargetOpcode::G_CTTZ
: TargetOpcode::G_CTTZ_ZERO_UNDEF
: Cst->isZero() ? TargetOpcode::G_CTLZ
: TargetOpcode::G_CTLZ_ZERO_UNDEF;
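// Illustrative note (editorial): the second intrinsic operand is the
// is_zero_undef flag, so llvm.cttz(%x, i1 true) selects G_CTTZ_ZERO_UNDEF
// while llvm.cttz(%x, i1 false) selects plain G_CTTZ.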
MIRBuilder.buildInstr(Opcode)
.addDef(getOrCreateVReg(CI))
.addUse(getOrCreateVReg(*CI.getArgOperand(0)));
return true;
}
case Intrinsic::invariant_start: {
LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
Register Undef = MRI->createGenericVirtualRegister(PtrTy);
MIRBuilder.buildUndef(Undef);
return true;
}
case Intrinsic::invariant_end:
return true;
case Intrinsic::assume:
case Intrinsic::var_annotation:
case Intrinsic::sideeffect:
// Discard annotate attributes, assumptions, and artificial side-effects.
return true;
}
return false;
}
bool IRTranslator::translateInlineAsm(const CallInst &CI,
MachineIRBuilder &MIRBuilder) {
const InlineAsm &IA = cast<InlineAsm>(*CI.getCalledValue());
if (!IA.getConstraintString().empty())
return false;
unsigned ExtraInfo = 0;
if (IA.hasSideEffects())
ExtraInfo |= InlineAsm::Extra_HasSideEffects;
if (IA.getDialect() == InlineAsm::AD_Intel)
ExtraInfo |= InlineAsm::Extra_AsmDialect;
MIRBuilder.buildInstr(TargetOpcode::INLINEASM)
.addExternalSymbol(IA.getAsmString().c_str())
.addImm(ExtraInfo);
return true;
}
bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
const CallInst &CI = cast<CallInst>(U);
auto TII = MF->getTarget().getIntrinsicInfo();
const Function *F = CI.getCalledFunction();
// FIXME: support Windows dllimport function calls.
if (F && F->hasDLLImportStorageClass())
return false;
if (CI.isInlineAsm())
return translateInlineAsm(CI, MIRBuilder);
Intrinsic::ID ID = Intrinsic::not_intrinsic;
if (F && F->isIntrinsic()) {
ID = F->getIntrinsicID();
if (TII && ID == Intrinsic::not_intrinsic)
ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
}
if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) {
ArrayRef<Register> Res = getOrCreateVRegs(CI);
SmallVector<ArrayRef<Register>, 8> Args;
Register SwiftInVReg = 0;
Register SwiftErrorVReg = 0;
for (auto &Arg: CI.arg_operands()) {
if (CLI->supportSwiftError() && isSwiftError(Arg)) {
assert(SwiftInVReg == 0 && "Expected only one swift error argument");
LLT Ty = getLLTForType(*Arg->getType(), *DL);
SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
&CI, &MIRBuilder.getMBB(), Arg));
Args.emplace_back(makeArrayRef(SwiftInVReg));
SwiftErrorVReg =
SwiftError.getOrCreateVRegDefAt(&CI, &MIRBuilder.getMBB(), Arg);
continue;
}
Args.push_back(getOrCreateVRegs(*Arg));
}
MF->getFrameInfo().setHasCalls(true);
bool Success =
CLI->lowerCall(MIRBuilder, &CI, Res, Args, SwiftErrorVReg,
[&]() { return getOrCreateVReg(*CI.getCalledValue()); });
return Success;
}
assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");
if (translateKnownIntrinsic(CI, ID, MIRBuilder))
return true;
ArrayRef<Register> ResultRegs;
if (!CI.getType()->isVoidTy())
ResultRegs = getOrCreateVRegs(CI);
// Ignore the callsite attributes. Backend code is most likely not expecting
// an intrinsic to sometimes have side effects and sometimes not.
MachineInstrBuilder MIB =
MIRBuilder.buildIntrinsic(ID, ResultRegs, !F->doesNotAccessMemory());
if (isa<FPMathOperator>(CI))
MIB->copyIRFlags(CI);
for (auto &Arg : CI.arg_operands()) {
// Some intrinsics take metadata parameters. Reject them.
if (isa<MetadataAsValue>(Arg))
return false;
ArrayRef<Register> VRegs = getOrCreateVRegs(*Arg);
if (VRegs.size() > 1)
return false;
MIB.addUse(VRegs[0]);
}
// Add a MachineMemOperand if it is a target mem intrinsic.
const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
TargetLowering::IntrinsicInfo Info;
// TODO: Add a GlobalISel version of getTgtMemIntrinsic.
if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
unsigned Align = Info.align;
if (Align == 0)
Align = DL->getABITypeAlignment(Info.memVT.getTypeForEVT(F->getContext()));
uint64_t Size = Info.memVT.getStoreSize();
MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal),
Info.flags, Size, Align));
}
return true;
}
bool IRTranslator::translateInvoke(const User &U,
MachineIRBuilder &MIRBuilder) {
const InvokeInst &I = cast<InvokeInst>(U);
MCContext &Context = MF->getContext();
const BasicBlock *ReturnBB = I.getSuccessor(0);
const BasicBlock *EHPadBB = I.getSuccessor(1);
const Value *Callee = I.getCalledValue();
const Function *Fn = dyn_cast<Function>(Callee);
if (isa<InlineAsm>(Callee))
return false;
// FIXME: support invoking patchpoint and statepoint intrinsics.
if (Fn && Fn->isIntrinsic())
return false;
// FIXME: support whatever these are.
if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
return false;
// FIXME: support Windows exception handling.
if (!isa<LandingPadInst>(EHPadBB->front()))
return false;
// Emit the actual call, bracketed by EH_LABELs so that the MF knows about
// the region covered by the try.
MCSymbol *BeginSymbol = Context.createTempSymbol();
MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);
ArrayRef<Register> Res;
if (!I.getType()->isVoidTy())
Res = getOrCreateVRegs(I);
SmallVector<ArrayRef<Register>, 8> Args;
Register SwiftErrorVReg = 0;
Register SwiftInVReg = 0;
for (auto &Arg : I.arg_operands()) {
if (CLI->supportSwiftError() && isSwiftError(Arg)) {
assert(SwiftInVReg == 0 && "Expected only one swift error argument");
LLT Ty = getLLTForType(*Arg->getType(), *DL);
SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
&I, &MIRBuilder.getMBB(), Arg));
Args.push_back(makeArrayRef(SwiftInVReg));
SwiftErrorVReg =
SwiftError.getOrCreateVRegDefAt(&I, &MIRBuilder.getMBB(), Arg);
continue;
}
Args.push_back(getOrCreateVRegs(*Arg));
}
if (!CLI->lowerCall(MIRBuilder, &I, Res, Args, SwiftErrorVReg,
[&]() { return getOrCreateVReg(*I.getCalledValue()); }))
return false;
MCSymbol *EndSymbol = Context.createTempSymbol();
MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);
// FIXME: track probabilities.
MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
&ReturnMBB = getMBB(*ReturnBB);
MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
MIRBuilder.getMBB().addSuccessor(&ReturnMBB);
MIRBuilder.getMBB().addSuccessor(&EHPadMBB);
MIRBuilder.buildBr(ReturnMBB);
return true;
}
bool IRTranslator::translateCallBr(const User &U,
MachineIRBuilder &MIRBuilder) {
// FIXME: Implement this.
return false;
}
bool IRTranslator::translateLandingPad(const User &U,
MachineIRBuilder &MIRBuilder) {
const LandingPadInst &LP = cast<LandingPadInst>(U);
MachineBasicBlock &MBB = MIRBuilder.getMBB();
MBB.setIsEHPad();
// If there aren't registers to copy the values into (e.g., during SjLj
// exceptions), then don't bother.
auto &TLI = *MF->getSubtarget().getTargetLowering();
const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
return true;
// If the landingpad's return type is token type, we don't create
// instructions for its exception pointer and selector values. Extracting
// the exception pointer or selector from a token-typed landingpad is not
// currently supported.
if (LP.getType()->isTokenTy())
return true;
// Add a label to mark the beginning of the landing pad. Deletion of the
// landing pad can thus be detected via the MachineModuleInfo.
MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
.addSym(MF->addLandingPad(&MBB));
LLT Ty = getLLTForType(*LP.getType(), *DL);
Register Undef = MRI->createGenericVirtualRegister(Ty);
MIRBuilder.buildUndef(Undef);
SmallVector<LLT, 2> Tys;
for (Type *Ty : cast<StructType>(LP.getType())->elements())
Tys.push_back(getLLTForType(*Ty, *DL));
assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
// Mark exception register as live in.
Register ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
if (!ExceptionReg)
return false;
MBB.addLiveIn(ExceptionReg);
ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);
Register SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
if (!SelectorReg)
return false;
MBB.addLiveIn(SelectorReg);
Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
MIRBuilder.buildCopy(PtrVReg, SelectorReg);
MIRBuilder.buildCast(ResRegs[1], PtrVReg);
return true;
}
bool IRTranslator::translateAlloca(const User &U,
MachineIRBuilder &MIRBuilder) {
auto &AI = cast<AllocaInst>(U);
if (AI.isSwiftError())
return true;
if (AI.isStaticAlloca()) {
Register Res = getOrCreateVReg(AI);
int FI = getOrCreateFrameIndex(AI);
MIRBuilder.buildFrameIndex(Res, FI);
return true;
}
// FIXME: support stack probing for Windows.
if (MF->getTarget().getTargetTriple().isOSWindows())
return false;
// Now we're in the harder dynamic case.
Type *Ty = AI.getAllocatedType();
unsigned Align =
std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI.getAlignment());
Register NumElts = getOrCreateVReg(*AI.getArraySize());
Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
if (MRI->getType(NumElts) != IntPtrTy) {
Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
NumElts = ExtElts;
}
Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
Register TySize =
getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, -DL->getTypeAllocSize(Ty)));
MIRBuilder.buildMul(AllocSize, NumElts, TySize);
LLT PtrTy = getLLTForType(*AI.getType(), *DL);
auto &TLI = *MF->getSubtarget().getTargetLowering();
Register SPReg = TLI.getStackPointerRegisterToSaveRestore();
Register SPTmp = MRI->createGenericVirtualRegister(PtrTy);
MIRBuilder.buildCopy(SPTmp, SPReg);
Register AllocTmp = MRI->createGenericVirtualRegister(PtrTy);
MIRBuilder.buildGEP(AllocTmp, SPTmp, AllocSize);
// Handle alignment. We have to realign if the allocation granule was smaller
// than stack alignment, or the specific alloca requires more than stack
// alignment.
unsigned StackAlign =
MF->getSubtarget().getFrameLowering()->getStackAlignment();
Align = std::max(Align, StackAlign);
if (Align > StackAlign || DL->getTypeAllocSize(Ty) % StackAlign != 0) {
// Align the new stack pointer down to the required alignment by masking
// off the low bits of the candidate address. On a downward-growing stack
// this can only over-allocate, which is safe.
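// Illustrative example (editorial note): with Align == 16 the mask clears
// the low Log2_32(16) == 4 bits, e.g. a candidate address of 0x...38
// becomes 0x...30, a 16-byte boundary.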
Register AlignedAlloc = MRI->createGenericVirtualRegister(PtrTy);
MIRBuilder.buildPtrMask(AlignedAlloc, AllocTmp, Log2_32(Align));
AllocTmp = AlignedAlloc;
}
MIRBuilder.buildCopy(SPReg, AllocTmp);
MIRBuilder.buildCopy(getOrCreateVReg(AI), AllocTmp);
MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, &AI);
assert(MF->getFrameInfo().hasVarSizedObjects());
return true;
}
bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
// FIXME: We may need more info about the type. Because of how LLT works,
// we're completely discarding the i64/double distinction here (amongst
// others). Fortunately the ABIs I know of where that matters don't use
// va_arg anyway, but that's not guaranteed.
MIRBuilder.buildInstr(TargetOpcode::G_VAARG)
.addDef(getOrCreateVReg(U))
.addUse(getOrCreateVReg(*U.getOperand(0)))
.addImm(DL->getABITypeAlignment(U.getType()));
return true;
}
bool IRTranslator::translateInsertElement(const User &U,
MachineIRBuilder &MIRBuilder) {
// If it is a <1 x Ty> vector, use the scalar, since <1 x Ty> is not a
// legal vector type in LLT.
if (U.getType()->getVectorNumElements() == 1) {
Register Elt = getOrCreateVReg(*U.getOperand(1));
auto &Regs = *VMap.getVRegs(U);
if (Regs.empty()) {
Regs.push_back(Elt);
VMap.getOffsets(U)->push_back(0);
} else {
MIRBuilder.buildCopy(Regs[0], Elt);
}
return true;
}
Register Res = getOrCreateVReg(U);
Register Val = getOrCreateVReg(*U.getOperand(0));
Register Elt = getOrCreateVReg(*U.getOperand(1));
Register Idx = getOrCreateVReg(*U.getOperand(2));
MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
return true;
}
bool IRTranslator::translateExtractElement(const User &U,
MachineIRBuilder &MIRBuilder) {
// If it is a <1 x Ty> vector, use the scalar, since <1 x Ty> is not a
// legal vector type in LLT.
if (U.getOperand(0)->getType()->getVectorNumElements() == 1) {
Register Elt = getOrCreateVReg(*U.getOperand(0));
auto &Regs = *VMap.getVRegs(U);
if (Regs.empty()) {
Regs.push_back(Elt);
VMap.getOffsets(U)->push_back(0);
} else {
MIRBuilder.buildCopy(Regs[0], Elt);
}
return true;
}
Register Res = getOrCreateVReg(U);
Register Val = getOrCreateVReg(*U.getOperand(0));
const auto &TLI = *MF->getSubtarget().getTargetLowering();
unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits();
Register Idx;
if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
if (CI->getBitWidth() != PreferredVecIdxWidth) {
APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth);
auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
Idx = getOrCreateVReg(*NewIdxCI);
}
}
if (!Idx)
Idx = getOrCreateVReg(*U.getOperand(1));
if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
const LLT &VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx)->getOperand(0).getReg();
}
MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
return true;
}
bool IRTranslator::translateShuffleVector(const User &U,
MachineIRBuilder &MIRBuilder) {
MIRBuilder.buildInstr(TargetOpcode::G_SHUFFLE_VECTOR)
.addDef(getOrCreateVReg(U))
.addUse(getOrCreateVReg(*U.getOperand(0)))
.addUse(getOrCreateVReg(*U.getOperand(1)))
.addUse(getOrCreateVReg(*U.getOperand(2)));
return true;
}
bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
const PHINode &PI = cast<PHINode>(U);
SmallVector<MachineInstr *, 4> Insts;
for (auto Reg : getOrCreateVRegs(PI)) {
auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
Insts.push_back(MIB.getInstr());
}
PendingPHIs.emplace_back(&PI, std::move(Insts));
return true;
}
bool IRTranslator::translateAtomicCmpXchg(const User &U,
MachineIRBuilder &MIRBuilder) {
const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);
if (I.isWeak())
return false;
auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
: MachineMemOperand::MONone;
Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
Type *ResType = I.getType();
Type *ValType = ResType->Type::getStructElementType(0);
auto Res = getOrCreateVRegs(I);
Register OldValRes = Res[0];
Register SuccessRes = Res[1];
Register Addr = getOrCreateVReg(*I.getPointerOperand());
Register Cmp = getOrCreateVReg(*I.getCompareOperand());
Register NewVal = getOrCreateVReg(*I.getNewValOperand());
MIRBuilder.buildAtomicCmpXchgWithSuccess(
OldValRes, SuccessRes, Addr, Cmp, NewVal,
*MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
Flags, DL->getTypeStoreSize(ValType),
getMemOpAlignment(I), AAMDNodes(), nullptr,
I.getSyncScopeID(), I.getSuccessOrdering(),
I.getFailureOrdering()));
return true;
}
bool IRTranslator::translateAtomicRMW(const User &U,
MachineIRBuilder &MIRBuilder) {
const AtomicRMWInst &I = cast<AtomicRMWInst>(U);
auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
: MachineMemOperand::MONone;
Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
Type *ResType = I.getType();
Register Res = getOrCreateVReg(I);
Register Addr = getOrCreateVReg(*I.getPointerOperand());
Register Val = getOrCreateVReg(*I.getValOperand());
unsigned Opcode = 0;
switch (I.getOperation()) {
default:
llvm_unreachable("Unknown atomicrmw op");
return false;
case AtomicRMWInst::Xchg:
Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
break;
case AtomicRMWInst::Add:
Opcode = TargetOpcode::G_ATOMICRMW_ADD;
break;
case AtomicRMWInst::Sub:
Opcode = TargetOpcode::G_ATOMICRMW_SUB;
break;
case AtomicRMWInst::And:
Opcode = TargetOpcode::G_ATOMICRMW_AND;
break;
case AtomicRMWInst::Nand:
Opcode = TargetOpcode::G_ATOMICRMW_NAND;
break;
case AtomicRMWInst::Or:
Opcode = TargetOpcode::G_ATOMICRMW_OR;
break;
case AtomicRMWInst::Xor:
Opcode = TargetOpcode::G_ATOMICRMW_XOR;
break;
case AtomicRMWInst::Max:
Opcode = TargetOpcode::G_ATOMICRMW_MAX;
break;
case AtomicRMWInst::Min:
Opcode = TargetOpcode::G_ATOMICRMW_MIN;
break;
case AtomicRMWInst::UMax:
Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
break;
case AtomicRMWInst::UMin:
Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
break;
}
MIRBuilder.buildAtomicRMW(
Opcode, Res, Addr, Val,
*MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
Flags, DL->getTypeStoreSize(ResType),
getMemOpAlignment(I), AAMDNodes(), nullptr,
I.getSyncScopeID(), I.getOrdering()));
return true;
}
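// A minimal example of the mapping above (illustrative, not from the source):
//   %old = atomicrmw add i32* %p, i32 %v seq_cst
// selects G_ATOMICRMW_ADD, so the translator builds roughly
//   %old:_(s32) = G_ATOMICRMW_ADD %p(p0), %v :: (load store seq_cst 4)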
bool IRTranslator::translateFence(const User &U,
MachineIRBuilder &MIRBuilder) {
const FenceInst &Fence = cast<FenceInst>(U);
MIRBuilder.buildFence(static_cast<unsigned>(Fence.getOrdering()),
Fence.getSyncScopeID());
return true;
}
void IRTranslator::finishPendingPhis() {
#ifndef NDEBUG
DILocationVerifier Verifier;
GISelObserverWrapper WrapperObserver(&Verifier);
RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
#endif // ifndef NDEBUG
for (auto &Phi : PendingPHIs) {
const PHINode *PI = Phi.first;
ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent();
EntryBuilder->setDebugLoc(PI->getDebugLoc());
#ifndef NDEBUG
Verifier.setCurrentInst(PI);
#endif // ifndef NDEBUG
SmallSet<const MachineBasicBlock *, 16> SeenPreds;
for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
auto IRPred = PI->getIncomingBlock(i);
ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
if (SeenPreds.count(Pred) || !PhiMBB->isPredecessor(Pred))
continue;
SeenPreds.insert(Pred);
for (unsigned j = 0; j < ValRegs.size(); ++j) {
MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
MIB.addUse(ValRegs[j]);
MIB.addMBB(Pred);
}
}
}
}
}
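// Why SeenPreds is needed (illustrative): an IR switch with two case edges to
// the same successor yields a single machine predecessor for two IR edges;
// each G_PHI must receive that (value, pred) pair exactly once, so duplicate
// machine predecessors are skipped.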
bool IRTranslator::valueIsSplit(const Value &V,
SmallVectorImpl<uint64_t> *Offsets) {
SmallVector<LLT, 4> SplitTys;
if (Offsets && !Offsets->empty())
Offsets->clear();
computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets);
return SplitTys.size() > 1;
}
bool IRTranslator::translate(const Instruction &Inst) {
CurBuilder->setDebugLoc(Inst.getDebugLoc());
// We only emit constants into the entry block from here. To prevent jumpy
// debug behaviour, set the line to 0.
if (const DebugLoc &DL = Inst.getDebugLoc())
EntryBuilder->setDebugLoc(
DebugLoc::get(0, 0, DL.getScope(), DL.getInlinedAt()));
else
EntryBuilder->setDebugLoc(DebugLoc());
switch (Inst.getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS) \
case Instruction::OPCODE: \
return translate##OPCODE(Inst, *CurBuilder.get());
#include "llvm/IR/Instruction.def"
default:
return false;
}
}
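// For reference, each HANDLE_INST expansion above generates a case of the
// form (preprocessor-output sketch for the Add opcode):
//   case Instruction::Add:
//     return translateAdd(Inst, *CurBuilder.get());
// so every IR opcode dispatches to a corresponding translate<Opcode> helper.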
bool IRTranslator::translate(const Constant &C, Register Reg) {
if (auto CI = dyn_cast<ConstantInt>(&C))
EntryBuilder->buildConstant(Reg, *CI);
else if (auto CF = dyn_cast<ConstantFP>(&C))
EntryBuilder->buildFConstant(Reg, *CF);
else if (isa<UndefValue>(C))
EntryBuilder->buildUndef(Reg);
else if (isa<ConstantPointerNull>(C)) {
// Since we are building a constant value of 0 into a pointer, insert a cast
// so the types are correct.
unsigned NullSize = DL->getTypeSizeInBits(C.getType());
auto *ZeroTy = Type::getIntNTy(C.getContext(), NullSize);
auto *ZeroVal = ConstantInt::get(ZeroTy, 0);
Register ZeroReg = getOrCreateVReg(*ZeroVal);
EntryBuilder->buildCast(Reg, ZeroReg);
} else if (auto GV = dyn_cast<GlobalValue>(&C))
EntryBuilder->buildGlobalValue(Reg, GV);
else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
if (!CAZ->getType()->isVectorTy())
return false;
// Return the scalar if it is a <1 x Ty> vector.
if (CAZ->getNumElements() == 1)
return translate(*CAZ->getElementValue(0u), Reg);
SmallVector<Register, 4> Ops;
for (unsigned i = 0; i < CAZ->getNumElements(); ++i) {
Constant &Elt = *CAZ->getElementValue(i);
Ops.push_back(getOrCreateVReg(Elt));
}
EntryBuilder->buildBuildVector(Reg, Ops);
} else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
// Return the scalar if it is a <1 x Ty> vector.
if (CV->getNumElements() == 1)
return translate(*CV->getElementAsConstant(0), Reg);
SmallVector<Register, 4> Ops;
for (unsigned i = 0; i < CV->getNumElements(); ++i) {
Constant &Elt = *CV->getElementAsConstant(i);
Ops.push_back(getOrCreateVReg(Elt));
}
EntryBuilder->buildBuildVector(Reg, Ops);
} else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
switch(CE->getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS) \
case Instruction::OPCODE: \
return translate##OPCODE(*CE, *EntryBuilder.get());
#include "llvm/IR/Instruction.def"
default:
return false;
}
} else if (auto CV = dyn_cast<ConstantVector>(&C)) {
if (CV->getNumOperands() == 1)
return translate(*CV->getOperand(0), Reg);
SmallVector<Register, 4> Ops;
for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
}
EntryBuilder->buildBuildVector(Reg, Ops);
} else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
EntryBuilder->buildBlockAddress(Reg, BA);
} else
return false;
return true;
}
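// Two illustrative cases of the dispatch above (not part of this change):
//   <2 x i32> zeroinitializer  -> G_BUILD_VECTOR of two G_CONSTANT 0 values
//   <1 x i32> <i32 7>          -> translated as the plain scalar constant 7
// The <1 x Ty> collapse keeps single-element vectors out of the vector path.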
void IRTranslator::finalizeBasicBlock() {
for (auto &JTCase : SL->JTCases) {
// Emit header first, if it wasn't already emitted.
if (!JTCase.first.Emitted)
emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);
emitJumpTable(JTCase.second, JTCase.second.MBB);
}
SL->JTCases.clear();
}
void IRTranslator::finalizeFunction() {
// Release the memory used by the different maps we
// needed during the translation.
PendingPHIs.clear();
VMap.reset();
FrameIndices.clear();
MachinePreds.clear();
// MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
// to avoid accessing free’d memory (in runOnMachineFunction) and to avoid
// destroying it twice (in ~IRTranslator() and ~LLVMContext())
EntryBuilder.reset();
CurBuilder.reset();
FuncInfo.clear();
}
bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
MF = &CurMF;
const Function &F = MF->getFunction();
if (F.empty())
return false;
GISelCSEAnalysisWrapper &Wrapper =
getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
// Set the CSEConfig and run the analysis.
GISelCSEInfo *CSEInfo = nullptr;
TPC = &getAnalysis<TargetPassConfig>();
bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
? EnableCSEInIRTranslator
: TPC->isGISelCSEEnabled();
if (EnableCSE) {
EntryBuilder = make_unique<CSEMIRBuilder>(CurMF);
CSEInfo = &Wrapper.get(TPC->getCSEConfig());
EntryBuilder->setCSEInfo(CSEInfo);
CurBuilder = make_unique<CSEMIRBuilder>(CurMF);
CurBuilder->setCSEInfo(CSEInfo);
} else {
EntryBuilder = make_unique<MachineIRBuilder>();
CurBuilder = make_unique<MachineIRBuilder>();
}
CLI = MF->getSubtarget().getCallLowering();
CurBuilder->setMF(*MF);
EntryBuilder->setMF(*MF);
MRI = &MF->getRegInfo();
DL = &F.getParent()->getDataLayout();
ORE = llvm::make_unique<OptimizationRemarkEmitter>(&F);
FuncInfo.MF = MF;
FuncInfo.BPI = nullptr;
const auto &TLI = *MF->getSubtarget().getTargetLowering();
const TargetMachine &TM = MF->getTarget();
SL = make_unique<GISelSwitchLowering>(this, FuncInfo);
SL->init(TLI, TM, *DL);
EnableOpts = TM.getOptLevel() != CodeGenOpt::None && !skipFunction(F);
assert(PendingPHIs.empty() && "stale PHIs");
if (!DL->isLittleEndian()) {
// Currently we don't properly handle big endian code.
OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
F.getSubprogram(), &F.getEntryBlock());
R << "unable to translate in big endian mode";
reportTranslationError(*MF, *TPC, *ORE, R);
}
// Release the per-function state when we return, whether we succeeded or not.
auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });
// Setup a separate basic-block for the arguments and constants
MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
MF->push_back(EntryBB);
EntryBuilder->setMBB(*EntryBB);
DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHI()->getDebugLoc();
SwiftError.setFunction(CurMF);
SwiftError.createEntriesInEntryBlock(DbgLoc);
// Create all blocks, in IR order, to preserve the layout.
for (const BasicBlock &BB: F) {
auto *&MBB = BBToMBB[&BB];
MBB = MF->CreateMachineBasicBlock(&BB);
MF->push_back(MBB);
if (BB.hasAddressTaken())
MBB->setHasAddressTaken();
}
// Make our arguments/constants entry block fallthrough to the IR entry block.
EntryBB->addSuccessor(&getMBB(F.front()));
// Lower the actual args into this basic block.
SmallVector<ArrayRef<Register>, 8> VRegArgs;
for (const Argument &Arg: F.args()) {
if (DL->getTypeStoreSize(Arg.getType()) == 0)
continue; // Don't handle zero sized types.
ArrayRef<Register> VRegs = getOrCreateVRegs(Arg);
VRegArgs.push_back(VRegs);
if (Arg.hasSwiftErrorAttr()) {
assert(VRegs.size() == 1 && "Too many vregs for Swift error");
SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
}
}
// We don't currently support translating swifterror or swiftself functions.
for (auto &Arg : F.args()) {
if (Arg.hasSwiftSelfAttr()) {
OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
F.getSubprogram(), &F.getEntryBlock());
R << "unable to lower arguments due to swiftself: "
<< ore::NV("Prototype", F.getType());
reportTranslationError(*MF, *TPC, *ORE, R);
return false;
}
}
if (!CLI->lowerFormalArguments(*EntryBuilder.get(), F, VRegArgs)) {
OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
F.getSubprogram(), &F.getEntryBlock());
R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
reportTranslationError(*MF, *TPC, *ORE, R);
return false;
}
// Need to visit defs before uses when translating instructions.
GISelObserverWrapper WrapperObserver;
if (EnableCSE && CSEInfo)
WrapperObserver.addObserver(CSEInfo);
{
ReversePostOrderTraversal<const Function *> RPOT(&F);
#ifndef NDEBUG
DILocationVerifier Verifier;
WrapperObserver.addObserver(&Verifier);
#endif // ifndef NDEBUG
RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
for (const BasicBlock *BB : RPOT) {
MachineBasicBlock &MBB = getMBB(*BB);
// Set the insertion point of all the following translations to
// the end of this basic block.
CurBuilder->setMBB(MBB);
for (const Instruction &Inst : *BB) {
#ifndef NDEBUG
Verifier.setCurrentInst(&Inst);
#endif // ifndef NDEBUG
if (translate(Inst))
continue;
OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
Inst.getDebugLoc(), BB);
R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);
if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
std::string InstStrStorage;
raw_string_ostream InstStr(InstStrStorage);
InstStr << Inst;
R << ": '" << InstStr.str() << "'";
}
reportTranslationError(*MF, *TPC, *ORE, R);
return false;
}
finalizeBasicBlock();
}
#ifndef NDEBUG
WrapperObserver.removeObserver(&Verifier);
#endif
}
finishPendingPhis();
SwiftError.propagateVRegs();
// Merge the argument lowering and constants block with its single
// successor, the LLVM-IR entry block. We want the basic block to
// be maximal.
assert(EntryBB->succ_size() == 1 &&
"Custom BB used for lowering should have only one successor");
// Get the successor of the current entry block.
MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
assert(NewEntryBB.pred_size() == 1 &&
"LLVM-IR entry block has a predecessor!?");
// Move all the instruction from the current entry block to the
// new entry block.
NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
EntryBB->end());
// Update the live-in information for the new entry block.
for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
NewEntryBB.addLiveIn(LiveIn);
NewEntryBB.sortUniqueLiveIns();
// Get rid of the now empty basic block.
EntryBB->removeSuccessor(&NewEntryBB);
MF->remove(EntryBB);
MF->DeleteMachineBasicBlock(EntryBB);
assert(&MF->front() == &NewEntryBB &&
"New entry wasn't next in the list of basic block!");
// Initialize stack protector information.
StackProtector &SP = getAnalysis<StackProtector>();
SP.copyToMachineFrameInfo(MF->getFrameInfo());
return false;
}
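// In summary (an illustrative recap of the body above): the pass (1) picks
// CSE-aware or plain MachineIRBuilders, (2) creates one MBB per IR block plus
// a synthetic entry MBB for arguments and constants, (3) translates the
// instructions in reverse post-order while recording pending PHIs, and
// (4) wires up the PHIs and splices the synthetic entry block into the IR
// entry block so the final entry MBB is maximal.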
Index: stable/12/contrib/llvm-project/llvm/lib/CodeGen/MachineFunction.cpp
===================================================================
--- stable/12/contrib/llvm-project/llvm/lib/CodeGen/MachineFunction.cpp (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/lib/CodeGen/MachineFunction.cpp (revision 356465)
@@ -1,1109 +1,1116 @@
//===- MachineFunction.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Collect native machine code information for a function. This allows
// target-specific information about the generated code to be stored with each
// function.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/WasmEHFuncInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ModuleSlotTracker.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/SectionKind.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DOTGraphTraits.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>
#include <vector>
using namespace llvm;
#define DEBUG_TYPE "codegen"
static cl::opt<unsigned>
AlignAllFunctions("align-all-functions",
cl::desc("Force the alignment of all functions."),
cl::init(0), cl::Hidden);
static const char *getPropertyName(MachineFunctionProperties::Property Prop) {
using P = MachineFunctionProperties::Property;
switch(Prop) {
case P::FailedISel: return "FailedISel";
case P::IsSSA: return "IsSSA";
case P::Legalized: return "Legalized";
case P::NoPHIs: return "NoPHIs";
case P::NoVRegs: return "NoVRegs";
case P::RegBankSelected: return "RegBankSelected";
case P::Selected: return "Selected";
case P::TracksLiveness: return "TracksLiveness";
}
llvm_unreachable("Invalid machine function property");
}
// Pin the vtable to this file.
void MachineFunction::Delegate::anchor() {}
void MachineFunctionProperties::print(raw_ostream &OS) const {
const char *Separator = "";
for (BitVector::size_type I = 0; I < Properties.size(); ++I) {
if (!Properties[I])
continue;
OS << Separator << getPropertyName(static_cast<Property>(I));
Separator = ", ";
}
}
//===----------------------------------------------------------------------===//
// MachineFunction implementation
//===----------------------------------------------------------------------===//
// Out-of-line virtual method.
MachineFunctionInfo::~MachineFunctionInfo() = default;
void ilist_alloc_traits<MachineBasicBlock>::deleteNode(MachineBasicBlock *MBB) {
MBB->getParent()->DeleteMachineBasicBlock(MBB);
}
static inline unsigned getFnStackAlignment(const TargetSubtargetInfo *STI,
const Function &F) {
if (F.hasFnAttribute(Attribute::StackAlignment))
return F.getFnStackAlignment();
return STI->getFrameLowering()->getStackAlignment();
}
MachineFunction::MachineFunction(const Function &F,
const LLVMTargetMachine &Target,
const TargetSubtargetInfo &STI,
unsigned FunctionNum, MachineModuleInfo &mmi)
: F(F), Target(Target), STI(&STI), Ctx(mmi.getContext()), MMI(mmi) {
FunctionNumber = FunctionNum;
init();
}
void MachineFunction::handleInsertion(MachineInstr &MI) {
if (TheDelegate)
TheDelegate->MF_HandleInsertion(MI);
}
void MachineFunction::handleRemoval(MachineInstr &MI) {
if (TheDelegate)
TheDelegate->MF_HandleRemoval(MI);
}
void MachineFunction::init() {
// Assume the function starts in SSA form with correct liveness.
Properties.set(MachineFunctionProperties::Property::IsSSA);
Properties.set(MachineFunctionProperties::Property::TracksLiveness);
if (STI->getRegisterInfo())
RegInfo = new (Allocator) MachineRegisterInfo(this);
else
RegInfo = nullptr;
MFInfo = nullptr;
// We can realign the stack if the target supports it and the user hasn't
// explicitly asked us not to.
bool CanRealignSP = STI->getFrameLowering()->isStackRealignable() &&
!F.hasFnAttribute("no-realign-stack");
FrameInfo = new (Allocator) MachineFrameInfo(
getFnStackAlignment(STI, F), /*StackRealignable=*/CanRealignSP,
/*ForcedRealign=*/CanRealignSP &&
F.hasFnAttribute(Attribute::StackAlignment));
if (F.hasFnAttribute(Attribute::StackAlignment))
FrameInfo->ensureMaxAlignment(F.getFnStackAlignment());
ConstantPool = new (Allocator) MachineConstantPool(getDataLayout());
Alignment = STI->getTargetLowering()->getMinFunctionAlignment();
// FIXME: Shouldn't use pref alignment if explicit alignment is set on F.
// FIXME: Use Function::hasOptSize().
if (!F.hasFnAttribute(Attribute::OptimizeForSize))
Alignment = std::max(Alignment,
STI->getTargetLowering()->getPrefFunctionAlignment());
if (AlignAllFunctions)
Alignment = AlignAllFunctions;
JumpTableInfo = nullptr;
if (isFuncletEHPersonality(classifyEHPersonality(
F.hasPersonalityFn() ? F.getPersonalityFn() : nullptr))) {
WinEHInfo = new (Allocator) WinEHFuncInfo();
}
if (isScopedEHPersonality(classifyEHPersonality(
F.hasPersonalityFn() ? F.getPersonalityFn() : nullptr))) {
WasmEHInfo = new (Allocator) WasmEHFuncInfo();
}
assert(Target.isCompatibleDataLayout(getDataLayout()) &&
"Can't create a MachineFunction using a Module with a "
"Target-incompatible DataLayout attached\n");
PSVManager =
llvm::make_unique<PseudoSourceValueManager>(*(getSubtarget().
getInstrInfo()));
}
MachineFunction::~MachineFunction() {
clear();
}
void MachineFunction::clear() {
Properties.reset();
// Don't call destructors on MachineInstr and MachineOperand. All of their
// memory comes from the BumpPtrAllocator which is about to be purged.
//
// Do call MachineBasicBlock destructors; they contain std::vectors.
for (iterator I = begin(), E = end(); I != E; I = BasicBlocks.erase(I))
I->Insts.clearAndLeakNodesUnsafely();
MBBNumbering.clear();
InstructionRecycler.clear(Allocator);
OperandRecycler.clear(Allocator);
BasicBlockRecycler.clear(Allocator);
CodeViewAnnotations.clear();
VariableDbgInfos.clear();
if (RegInfo) {
RegInfo->~MachineRegisterInfo();
Allocator.Deallocate(RegInfo);
}
if (MFInfo) {
MFInfo->~MachineFunctionInfo();
Allocator.Deallocate(MFInfo);
}
FrameInfo->~MachineFrameInfo();
Allocator.Deallocate(FrameInfo);
ConstantPool->~MachineConstantPool();
Allocator.Deallocate(ConstantPool);
if (JumpTableInfo) {
JumpTableInfo->~MachineJumpTableInfo();
Allocator.Deallocate(JumpTableInfo);
}
if (WinEHInfo) {
WinEHInfo->~WinEHFuncInfo();
Allocator.Deallocate(WinEHInfo);
}
if (WasmEHInfo) {
WasmEHInfo->~WasmEHFuncInfo();
Allocator.Deallocate(WasmEHInfo);
}
}
const DataLayout &MachineFunction::getDataLayout() const {
return F.getParent()->getDataLayout();
}
/// Get the JumpTableInfo for this function.
/// If it does not already exist, allocate one.
MachineJumpTableInfo *MachineFunction::
getOrCreateJumpTableInfo(unsigned EntryKind) {
if (JumpTableInfo) return JumpTableInfo;
JumpTableInfo = new (Allocator)
MachineJumpTableInfo((MachineJumpTableInfo::JTEntryKind)EntryKind);
return JumpTableInfo;
}
/// Should we be emitting segmented stack stuff for the function?
bool MachineFunction::shouldSplitStack() const {
return getFunction().hasFnAttribute("split-stack");
}
LLVM_NODISCARD unsigned
MachineFunction::addFrameInst(const MCCFIInstruction &Inst) {
FrameInstructions.push_back(Inst);
return FrameInstructions.size() - 1;
}
/// This discards all of the MachineBasicBlock numbers and recomputes them.
/// This guarantees that the MBB numbers are sequential, dense, and match the
/// ordering of the blocks within the function. If a specific MachineBasicBlock
/// is specified, only that block and those after it are renumbered.
void MachineFunction::RenumberBlocks(MachineBasicBlock *MBB) {
if (empty()) { MBBNumbering.clear(); return; }
MachineFunction::iterator MBBI, E = end();
if (MBB == nullptr)
MBBI = begin();
else
MBBI = MBB->getIterator();
// Figure out the block number this should have.
unsigned BlockNo = 0;
if (MBBI != begin())
BlockNo = std::prev(MBBI)->getNumber() + 1;
for (; MBBI != E; ++MBBI, ++BlockNo) {
if (MBBI->getNumber() != (int)BlockNo) {
// Remove use of the old number.
if (MBBI->getNumber() != -1) {
assert(MBBNumbering[MBBI->getNumber()] == &*MBBI &&
"MBB number mismatch!");
MBBNumbering[MBBI->getNumber()] = nullptr;
}
// If BlockNo is already taken, set that block's number to -1.
if (MBBNumbering[BlockNo])
MBBNumbering[BlockNo]->setNumber(-1);
MBBNumbering[BlockNo] = &*MBBI;
MBBI->setNumber(BlockNo);
}
}
// Okay, all the blocks are renumbered. If we have compactified the block
// numbering, shrink MBBNumbering now.
assert(BlockNo <= MBBNumbering.size() && "Mismatch!");
MBBNumbering.resize(BlockNo);
}
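// Renumbering example (illustrative): with blocks numbered {0, 1, 2}, erasing
// block 1 and calling RenumberBlocks() reassigns the remaining blocks to the
// dense sequence {0, 1} and shrinks MBBNumbering accordingly.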
/// Allocate a new MachineInstr. Use this instead of `new MachineInstr'.
MachineInstr *MachineFunction::CreateMachineInstr(const MCInstrDesc &MCID,
const DebugLoc &DL,
bool NoImp) {
return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
MachineInstr(*this, MCID, DL, NoImp);
}
/// Create a new MachineInstr which is a copy of the 'Orig' instruction,
/// identical in all ways except the instruction has no parent, prev, or next.
MachineInstr *
MachineFunction::CloneMachineInstr(const MachineInstr *Orig) {
return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
MachineInstr(*this, *Orig);
}
MachineInstr &MachineFunction::CloneMachineInstrBundle(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) {
MachineInstr *FirstClone = nullptr;
MachineBasicBlock::const_instr_iterator I = Orig.getIterator();
while (true) {
MachineInstr *Cloned = CloneMachineInstr(&*I);
MBB.insert(InsertBefore, Cloned);
if (FirstClone == nullptr) {
FirstClone = Cloned;
} else {
Cloned->bundleWithPred();
}
if (!I->isBundledWithSucc())
break;
++I;
}
return *FirstClone;
}
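// Bundle-cloning sketch: for a bundle I0..I2 (I1 and I2 bundled to their
// predecessors), the loop clones each instruction in turn, re-links the
// clones with bundleWithPred(), and returns the clone of I0.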
/// Delete the given MachineInstr.
///
/// This function also serves as the MachineInstr destructor - the real
/// ~MachineInstr() destructor must be empty.
void
MachineFunction::DeleteMachineInstr(MachineInstr *MI) {
// Verify that the call site info is in a valid state. This assertion should
// be triggered during the implementation of call site info support for a
// new architecture. If the assertion fires, the backtrace will tell where
// to insert a call to updateCallSiteInfo().
assert((!MI->isCall(MachineInstr::IgnoreBundle) ||
CallSitesInfo.find(MI) == CallSitesInfo.end()) &&
"Call site info was not updated!");
// Strip it for parts. The operand array and the MI object itself are
// independently recyclable.
if (MI->Operands)
deallocateOperandArray(MI->CapOperands, MI->Operands);
// Don't call ~MachineInstr() which must be trivial anyway because
// ~MachineFunction drops whole lists of MachineInstrs without calling their
// destructors.
InstructionRecycler.Deallocate(Allocator, MI);
}
/// Allocate a new MachineBasicBlock. Use this instead of
/// `new MachineBasicBlock'.
MachineBasicBlock *
MachineFunction::CreateMachineBasicBlock(const BasicBlock *bb) {
return new (BasicBlockRecycler.Allocate<MachineBasicBlock>(Allocator))
MachineBasicBlock(*this, bb);
}
/// Delete the given MachineBasicBlock.
void
MachineFunction::DeleteMachineBasicBlock(MachineBasicBlock *MBB) {
assert(MBB->getParent() == this && "MBB parent mismatch!");
MBB->~MachineBasicBlock();
BasicBlockRecycler.Deallocate(Allocator, MBB);
}
MachineMemOperand *MachineFunction::getMachineMemOperand(
MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s,
unsigned base_alignment, const AAMDNodes &AAInfo, const MDNode *Ranges,
SyncScope::ID SSID, AtomicOrdering Ordering,
AtomicOrdering FailureOrdering) {
return new (Allocator)
MachineMemOperand(PtrInfo, f, s, base_alignment, AAInfo, Ranges,
SSID, Ordering, FailureOrdering);
}
MachineMemOperand *
MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
int64_t Offset, uint64_t Size) {
const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
// If there is no pointer value, the offset isn't tracked, so we need to adjust
// the base alignment.
unsigned Align = PtrInfo.V.isNull()
? MinAlign(MMO->getBaseAlignment(), Offset)
: MMO->getBaseAlignment();
return new (Allocator)
MachineMemOperand(PtrInfo.getWithOffset(Offset), MMO->getFlags(), Size,
Align, AAMDNodes(), nullptr, MMO->getSyncScopeID(),
MMO->getOrdering(), MMO->getFailureOrdering());
}
MachineMemOperand *
MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
const AAMDNodes &AAInfo) {
MachinePointerInfo MPI = MMO->getValue() ?
MachinePointerInfo(MMO->getValue(), MMO->getOffset()) :
MachinePointerInfo(MMO->getPseudoValue(), MMO->getOffset());
return new (Allocator)
MachineMemOperand(MPI, MMO->getFlags(), MMO->getSize(),
MMO->getBaseAlignment(), AAInfo,
MMO->getRanges(), MMO->getSyncScopeID(),
MMO->getOrdering(), MMO->getFailureOrdering());
}
MachineMemOperand *
MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
MachineMemOperand::Flags Flags) {
return new (Allocator) MachineMemOperand(
MMO->getPointerInfo(), Flags, MMO->getSize(), MMO->getBaseAlignment(),
MMO->getAAInfo(), MMO->getRanges(), MMO->getSyncScopeID(),
MMO->getOrdering(), MMO->getFailureOrdering());
}
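// Offset-derivation example (illustrative): when legalization splits an
// 8-byte load into two 4-byte loads, the second half's MMO can come from
// getMachineMemOperand(MMO, /*Offset=*/4, /*Size=*/4). If the MMO has no
// underlying value, the base alignment is conservatively clamped via
// MinAlign(Align, 4), since the offset can no longer be tracked against a
// known base.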
MachineInstr::ExtraInfo *
MachineFunction::createMIExtraInfo(ArrayRef<MachineMemOperand *> MMOs,
MCSymbol *PreInstrSymbol,
MCSymbol *PostInstrSymbol) {
return MachineInstr::ExtraInfo::create(Allocator, MMOs, PreInstrSymbol,
- PostInstrSymbol);
+ PostInstrSymbol, nullptr);
+}
+
+MachineInstr::ExtraInfo *MachineFunction::createMIExtraInfoWithMarker(
+ ArrayRef<MachineMemOperand *> MMOs, MCSymbol *PreInstrSymbol,
+ MCSymbol *PostInstrSymbol, MDNode *HeapAllocMarker) {
+ return MachineInstr::ExtraInfo::create(Allocator, MMOs, PreInstrSymbol,
+ PostInstrSymbol, HeapAllocMarker);
}
const char *MachineFunction::createExternalSymbolName(StringRef Name) {
char *Dest = Allocator.Allocate<char>(Name.size() + 1);
llvm::copy(Name, Dest);
Dest[Name.size()] = 0;
return Dest;
}
uint32_t *MachineFunction::allocateRegMask() {
unsigned NumRegs = getSubtarget().getRegisterInfo()->getNumRegs();
unsigned Size = MachineOperand::getRegMaskSize(NumRegs);
uint32_t *Mask = Allocator.Allocate<uint32_t>(Size);
memset(Mask, 0, Size * sizeof(Mask[0]));
return Mask;
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MachineFunction::dump() const {
print(dbgs());
}
#endif
StringRef MachineFunction::getName() const {
return getFunction().getName();
}
void MachineFunction::print(raw_ostream &OS, const SlotIndexes *Indexes) const {
OS << "# Machine code for function " << getName() << ": ";
getProperties().print(OS);
OS << '\n';
// Print Frame Information
FrameInfo->print(*this, OS);
// Print JumpTable Information
if (JumpTableInfo)
JumpTableInfo->print(OS);
// Print Constant Pool
ConstantPool->print(OS);
const TargetRegisterInfo *TRI = getSubtarget().getRegisterInfo();
if (RegInfo && !RegInfo->livein_empty()) {
OS << "Function Live Ins: ";
for (MachineRegisterInfo::livein_iterator
I = RegInfo->livein_begin(), E = RegInfo->livein_end(); I != E; ++I) {
OS << printReg(I->first, TRI);
if (I->second)
OS << " in " << printReg(I->second, TRI);
if (std::next(I) != E)
OS << ", ";
}
OS << '\n';
}
ModuleSlotTracker MST(getFunction().getParent());
MST.incorporateFunction(getFunction());
for (const auto &BB : *this) {
OS << '\n';
// If we print the whole function, print it at its most verbose level.
BB.print(OS, MST, Indexes, /*IsStandalone=*/true);
}
OS << "\n# End machine code for function " << getName() << ".\n\n";
}
namespace llvm {
template<>
struct DOTGraphTraits<const MachineFunction*> : public DefaultDOTGraphTraits {
DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}
static std::string getGraphName(const MachineFunction *F) {
return ("CFG for '" + F->getName() + "' function").str();
}
std::string getNodeLabel(const MachineBasicBlock *Node,
const MachineFunction *Graph) {
std::string OutStr;
{
raw_string_ostream OSS(OutStr);
if (isSimple()) {
OSS << printMBBReference(*Node);
if (const BasicBlock *BB = Node->getBasicBlock())
OSS << ": " << BB->getName();
} else
Node->print(OSS);
}
if (OutStr[0] == '\n') OutStr.erase(OutStr.begin());
// Process string output to make it nicer...
for (unsigned i = 0; i != OutStr.length(); ++i)
if (OutStr[i] == '\n') { // Left justify
OutStr[i] = '\\';
OutStr.insert(OutStr.begin()+i+1, 'l');
}
return OutStr;
}
};
} // end namespace llvm
void MachineFunction::viewCFG() const
{
#ifndef NDEBUG
ViewGraph(this, "mf" + getName());
#else
errs() << "MachineFunction::viewCFG is only available in debug builds on "
<< "systems with Graphviz or gv!\n";
#endif // NDEBUG
}
void MachineFunction::viewCFGOnly() const
{
#ifndef NDEBUG
ViewGraph(this, "mf" + getName(), true);
#else
errs() << "MachineFunction::viewCFGOnly is only available in debug builds on "
<< "systems with Graphviz or gv!\n";
#endif // NDEBUG
}
/// Add the specified physical register as a live-in value and
/// create a corresponding virtual register for it.
unsigned MachineFunction::addLiveIn(unsigned PReg,
const TargetRegisterClass *RC) {
MachineRegisterInfo &MRI = getRegInfo();
unsigned VReg = MRI.getLiveInVirtReg(PReg);
if (VReg) {
const TargetRegisterClass *VRegRC = MRI.getRegClass(VReg);
(void)VRegRC;
// A physical register can be added several times.
// Between two calls, the register class of the related virtual register
// may have been constrained to match some operation constraints.
// In that case, check that the current register class includes the
// physical register and is a sub class of the specified RC.
assert((VRegRC == RC || (VRegRC->contains(PReg) &&
RC->hasSubClassEq(VRegRC))) &&
"Register class mismatch!");
return VReg;
}
VReg = MRI.createVirtualRegister(RC);
MRI.addLiveIn(PReg, VReg);
return VReg;
}
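// Typical use during formal-argument lowering (a sketch; the target registers
// are illustrative):
//   unsigned VReg = MF.addLiveIn(X86::EDI, &X86::GR32RegClass);
// Repeating the call for the same physical register returns the same vreg,
// subject to the register-class compatibility assert above.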
/// Return the MCSymbol for the specified non-empty jump table.
/// If isLinkerPrivate is specified, an 'l' label is returned, otherwise a
/// normal 'L' label is returned.
MCSymbol *MachineFunction::getJTISymbol(unsigned JTI, MCContext &Ctx,
bool isLinkerPrivate) const {
const DataLayout &DL = getDataLayout();
assert(JumpTableInfo && "No jump tables");
assert(JTI < JumpTableInfo->getJumpTables().size() && "Invalid JTI!");
StringRef Prefix = isLinkerPrivate ? DL.getLinkerPrivateGlobalPrefix()
: DL.getPrivateGlobalPrefix();
SmallString<60> Name;
raw_svector_ostream(Name)
<< Prefix << "JTI" << getFunctionNumber() << '_' << JTI;
return Ctx.getOrCreateSymbol(Name);
}
/// Return a function-local symbol to represent the PIC base.
MCSymbol *MachineFunction::getPICBaseSymbol() const {
const DataLayout &DL = getDataLayout();
return Ctx.getOrCreateSymbol(Twine(DL.getPrivateGlobalPrefix()) +
Twine(getFunctionNumber()) + "$pb");
}
/// \name Exception Handling
/// \{
LandingPadInfo &
MachineFunction::getOrCreateLandingPadInfo(MachineBasicBlock *LandingPad) {
unsigned N = LandingPads.size();
for (unsigned i = 0; i < N; ++i) {
LandingPadInfo &LP = LandingPads[i];
if (LP.LandingPadBlock == LandingPad)
return LP;
}
LandingPads.push_back(LandingPadInfo(LandingPad));
return LandingPads[N];
}
void MachineFunction::addInvoke(MachineBasicBlock *LandingPad,
MCSymbol *BeginLabel, MCSymbol *EndLabel) {
LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
LP.BeginLabels.push_back(BeginLabel);
LP.EndLabels.push_back(EndLabel);
}
MCSymbol *MachineFunction::addLandingPad(MachineBasicBlock *LandingPad) {
MCSymbol *LandingPadLabel = Ctx.createTempSymbol();
LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
LP.LandingPadLabel = LandingPadLabel;
const Instruction *FirstI = LandingPad->getBasicBlock()->getFirstNonPHI();
if (const auto *LPI = dyn_cast<LandingPadInst>(FirstI)) {
if (const auto *PF =
dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts()))
getMMI().addPersonality(PF);
if (LPI->isCleanup())
addCleanup(LandingPad);
// FIXME: New EH - Add the clauses in reverse order. This isn't 100%
// correct, but we need to do it this way because of how the DWARF EH
// emitter processes the clauses.
for (unsigned I = LPI->getNumClauses(); I != 0; --I) {
Value *Val = LPI->getClause(I - 1);
if (LPI->isCatch(I - 1)) {
addCatchTypeInfo(LandingPad,
dyn_cast<GlobalValue>(Val->stripPointerCasts()));
} else {
// Add filters in a list.
auto *CVal = cast<Constant>(Val);
SmallVector<const GlobalValue *, 4> FilterList;
for (User::op_iterator II = CVal->op_begin(), IE = CVal->op_end();
II != IE; ++II)
FilterList.push_back(cast<GlobalValue>((*II)->stripPointerCasts()));
addFilterTypeInfo(LandingPad, FilterList);
}
}
} else if (const auto *CPI = dyn_cast<CatchPadInst>(FirstI)) {
for (unsigned I = CPI->getNumArgOperands(); I != 0; --I) {
Value *TypeInfo = CPI->getArgOperand(I - 1)->stripPointerCasts();
addCatchTypeInfo(LandingPad, dyn_cast<GlobalValue>(TypeInfo));
}
} else {
assert(isa<CleanupPadInst>(FirstI) && "Invalid landingpad!");
}
return LandingPadLabel;
}
void MachineFunction::addCatchTypeInfo(MachineBasicBlock *LandingPad,
ArrayRef<const GlobalValue *> TyInfo) {
LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
for (unsigned N = TyInfo.size(); N; --N)
LP.TypeIds.push_back(getTypeIDFor(TyInfo[N - 1]));
}
void MachineFunction::addFilterTypeInfo(MachineBasicBlock *LandingPad,
ArrayRef<const GlobalValue *> TyInfo) {
LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
std::vector<unsigned> IdsInFilter(TyInfo.size());
for (unsigned I = 0, E = TyInfo.size(); I != E; ++I)
IdsInFilter[I] = getTypeIDFor(TyInfo[I]);
LP.TypeIds.push_back(getFilterIDFor(IdsInFilter));
}
void MachineFunction::tidyLandingPads(DenseMap<MCSymbol *, uintptr_t> *LPMap,
bool TidyIfNoBeginLabels) {
for (unsigned i = 0; i != LandingPads.size(); ) {
LandingPadInfo &LandingPad = LandingPads[i];
if (LandingPad.LandingPadLabel &&
!LandingPad.LandingPadLabel->isDefined() &&
(!LPMap || (*LPMap)[LandingPad.LandingPadLabel] == 0))
LandingPad.LandingPadLabel = nullptr;
// Special case: we *should* emit LPs with a null LP MBB. This indicates the
// "nounwind" case.
if (!LandingPad.LandingPadLabel && LandingPad.LandingPadBlock) {
LandingPads.erase(LandingPads.begin() + i);
continue;
}
if (TidyIfNoBeginLabels) {
for (unsigned j = 0, e = LandingPads[i].BeginLabels.size(); j != e; ++j) {
MCSymbol *BeginLabel = LandingPad.BeginLabels[j];
MCSymbol *EndLabel = LandingPad.EndLabels[j];
if ((BeginLabel->isDefined() || (LPMap && (*LPMap)[BeginLabel] != 0)) &&
(EndLabel->isDefined() || (LPMap && (*LPMap)[EndLabel] != 0)))
continue;
LandingPad.BeginLabels.erase(LandingPad.BeginLabels.begin() + j);
LandingPad.EndLabels.erase(LandingPad.EndLabels.begin() + j);
--j;
--e;
}
// Remove landing pads with no try-ranges.
if (LandingPads[i].BeginLabels.empty()) {
LandingPads.erase(LandingPads.begin() + i);
continue;
}
}
// If there is no landing pad, ensure that the list of typeids is empty.
// If the only typeid is a cleanup, this is the same as having no typeids.
if (!LandingPad.LandingPadBlock ||
(LandingPad.TypeIds.size() == 1 && !LandingPad.TypeIds[0]))
LandingPad.TypeIds.clear();
++i;
}
}
void MachineFunction::addCleanup(MachineBasicBlock *LandingPad) {
LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
LP.TypeIds.push_back(0);
}
void MachineFunction::addSEHCatchHandler(MachineBasicBlock *LandingPad,
const Function *Filter,
const BlockAddress *RecoverBA) {
LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
SEHHandler Handler;
Handler.FilterOrFinally = Filter;
Handler.RecoverBA = RecoverBA;
LP.SEHHandlers.push_back(Handler);
}
void MachineFunction::addSEHCleanupHandler(MachineBasicBlock *LandingPad,
const Function *Cleanup) {
LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
SEHHandler Handler;
Handler.FilterOrFinally = Cleanup;
Handler.RecoverBA = nullptr;
LP.SEHHandlers.push_back(Handler);
}
void MachineFunction::setCallSiteLandingPad(MCSymbol *Sym,
ArrayRef<unsigned> Sites) {
LPadToCallSiteMap[Sym].append(Sites.begin(), Sites.end());
}
unsigned MachineFunction::getTypeIDFor(const GlobalValue *TI) {
for (unsigned i = 0, N = TypeInfos.size(); i != N; ++i)
if (TypeInfos[i] == TI) return i + 1;
TypeInfos.push_back(TI);
return TypeInfos.size();
}
int MachineFunction::getFilterIDFor(std::vector<unsigned> &TyIds) {
// If the new filter coincides with the tail of an existing filter, then
// re-use the existing filter. Folding filters more than this requires
// re-ordering filters and/or their elements - probably not worth it.
for (std::vector<unsigned>::iterator I = FilterEnds.begin(),
E = FilterEnds.end(); I != E; ++I) {
unsigned i = *I, j = TyIds.size();
while (i && j)
if (FilterIds[--i] != TyIds[--j])
goto try_next;
if (!j)
// The new filter coincides with range [i, end) of the existing filter.
return -(1 + i);
try_next:;
}
// Add the new filter.
int FilterID = -(1 + FilterIds.size());
FilterIds.reserve(FilterIds.size() + TyIds.size() + 1);
FilterIds.insert(FilterIds.end(), TyIds.begin(), TyIds.end());
FilterEnds.push_back(FilterIds.size());
FilterIds.push_back(0); // terminator
return FilterID;
}
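// Worked example of the encoding above (illustrative): adding filter {2, 3}
// to an empty pool returns -1 and leaves FilterIds = [2, 3, 0]. Adding {3}
// afterwards matches the tail [3] at index 1, so it returns -(1 + 1) = -2
// without growing FilterIds.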
void MachineFunction::addCodeViewHeapAllocSite(MachineInstr *I, MDNode *MD) {
MCSymbol *BeginLabel = Ctx.createTempSymbol("heapallocsite", true);
MCSymbol *EndLabel = Ctx.createTempSymbol("heapallocsite", true);
I->setPreInstrSymbol(*this, BeginLabel);
I->setPostInstrSymbol(*this, EndLabel);
DIType *DI = dyn_cast<DIType>(MD);
CodeViewHeapAllocSites.push_back(std::make_tuple(BeginLabel, EndLabel, DI));
}
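// Illustrative effect: the call instruction is bracketed by two temporary
// "heapallocsite" labels, and the (begin, end, DIType) triple is later used
// when emitting CodeView heap-allocation-site (S_HEAPALLOCSITE) debug
// records describing the allocation.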
void MachineFunction::updateCallSiteInfo(const MachineInstr *Old,
const MachineInstr *New) {
if (!Target.Options.EnableDebugEntryValues || Old == New)
return;
assert(Old->isCall() && (!New || New->isCall()) &&
"Call site info referes only to call instructions!");
CallSiteInfoMap::iterator CSIt = CallSitesInfo.find(Old);
if (CSIt == CallSitesInfo.end())
return;
CallSiteInfo CSInfo = std::move(CSIt->second);
CallSitesInfo.erase(CSIt);
if (New)
CallSitesInfo[New] = CSInfo;
}
/// \}
//===----------------------------------------------------------------------===//
// MachineJumpTableInfo implementation
//===----------------------------------------------------------------------===//
/// Return the size of each entry in the jump table.
unsigned MachineJumpTableInfo::getEntrySize(const DataLayout &TD) const {
// The size of a jump table entry is 4 bytes unless the entry is just the
// address of a block, in which case it is the pointer size.
switch (getEntryKind()) {
case MachineJumpTableInfo::EK_BlockAddress:
return TD.getPointerSize();
case MachineJumpTableInfo::EK_GPRel64BlockAddress:
return 8;
case MachineJumpTableInfo::EK_GPRel32BlockAddress:
case MachineJumpTableInfo::EK_LabelDifference32:
case MachineJumpTableInfo::EK_Custom32:
return 4;
case MachineJumpTableInfo::EK_Inline:
return 0;
}
llvm_unreachable("Unknown jump table encoding!");
}
/// Return the alignment of each entry in the jump table.
unsigned MachineJumpTableInfo::getEntryAlignment(const DataLayout &TD) const {
// The alignment of a jump table entry is the alignment of int32 unless the
// entry is just the address of a block, in which case it is the pointer
// alignment.
switch (getEntryKind()) {
case MachineJumpTableInfo::EK_BlockAddress:
return TD.getPointerABIAlignment(0);
case MachineJumpTableInfo::EK_GPRel64BlockAddress:
return TD.getABIIntegerTypeAlignment(64);
case MachineJumpTableInfo::EK_GPRel32BlockAddress:
case MachineJumpTableInfo::EK_LabelDifference32:
case MachineJumpTableInfo::EK_Custom32:
return TD.getABIIntegerTypeAlignment(32);
case MachineJumpTableInfo::EK_Inline:
return 1;
}
llvm_unreachable("Unknown jump table encoding!");
}
/// Create a new jump table entry in the jump table info.
unsigned MachineJumpTableInfo::createJumpTableIndex(
const std::vector<MachineBasicBlock*> &DestBBs) {
assert(!DestBBs.empty() && "Cannot create an empty jump table!");
JumpTables.push_back(MachineJumpTableEntry(DestBBs));
return JumpTables.size()-1;
}
/// If Old is the target of any jump tables, update the jump tables to branch
/// to New instead.
bool MachineJumpTableInfo::ReplaceMBBInJumpTables(MachineBasicBlock *Old,
MachineBasicBlock *New) {
assert(Old != New && "Not making a change?");
bool MadeChange = false;
for (size_t i = 0, e = JumpTables.size(); i != e; ++i)
ReplaceMBBInJumpTable(i, Old, New);
return MadeChange;
}
/// If Old is a target of the jump tables, update the jump table to branch to
/// New instead.
bool MachineJumpTableInfo::ReplaceMBBInJumpTable(unsigned Idx,
MachineBasicBlock *Old,
MachineBasicBlock *New) {
assert(Old != New && "Not making a change?");
bool MadeChange = false;
MachineJumpTableEntry &JTE = JumpTables[Idx];
for (size_t j = 0, e = JTE.MBBs.size(); j != e; ++j)
if (JTE.MBBs[j] == Old) {
JTE.MBBs[j] = New;
MadeChange = true;
}
return MadeChange;
}
void MachineJumpTableInfo::print(raw_ostream &OS) const {
if (JumpTables.empty()) return;
OS << "Jump Tables:\n";
for (unsigned i = 0, e = JumpTables.size(); i != e; ++i) {
OS << printJumpTableEntryReference(i) << ':';
for (unsigned j = 0, f = JumpTables[i].MBBs.size(); j != f; ++j)
OS << ' ' << printMBBReference(*JumpTables[i].MBBs[j]);
if (i != e)
OS << '\n';
}
OS << '\n';
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MachineJumpTableInfo::dump() const { print(dbgs()); }
#endif
Printable llvm::printJumpTableEntryReference(unsigned Idx) {
return Printable([Idx](raw_ostream &OS) { OS << "%jump-table." << Idx; });
}
//===----------------------------------------------------------------------===//
// MachineConstantPool implementation
//===----------------------------------------------------------------------===//
void MachineConstantPoolValue::anchor() {}
Type *MachineConstantPoolEntry::getType() const {
if (isMachineConstantPoolEntry())
return Val.MachineCPVal->getType();
return Val.ConstVal->getType();
}
bool MachineConstantPoolEntry::needsRelocation() const {
if (isMachineConstantPoolEntry())
return true;
return Val.ConstVal->needsRelocation();
}
SectionKind
MachineConstantPoolEntry::getSectionKind(const DataLayout *DL) const {
if (needsRelocation())
return SectionKind::getReadOnlyWithRel();
switch (DL->getTypeAllocSize(getType())) {
case 4:
return SectionKind::getMergeableConst4();
case 8:
return SectionKind::getMergeableConst8();
case 16:
return SectionKind::getMergeableConst16();
case 32:
return SectionKind::getMergeableConst32();
default:
return SectionKind::getReadOnly();
}
}
MachineConstantPool::~MachineConstantPool() {
// A constant may be a member of both Constants and MachineCPVsSharingEntries,
// so keep track of which we've deleted to avoid double deletions.
DenseSet<MachineConstantPoolValue*> Deleted;
for (unsigned i = 0, e = Constants.size(); i != e; ++i)
if (Constants[i].isMachineConstantPoolEntry()) {
Deleted.insert(Constants[i].Val.MachineCPVal);
delete Constants[i].Val.MachineCPVal;
}
for (DenseSet<MachineConstantPoolValue*>::iterator I =
MachineCPVsSharingEntries.begin(), E = MachineCPVsSharingEntries.end();
I != E; ++I) {
if (Deleted.count(*I) == 0)
delete *I;
}
}
/// Test whether the given two constants can be allocated the same constant pool
/// entry.
static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
const DataLayout &DL) {
// Handle the trivial case quickly.
if (A == B) return true;
// If they have the same type but weren't the same constant, quickly
// reject them.
if (A->getType() == B->getType()) return false;
// We can't handle structs or arrays.
if (isa<StructType>(A->getType()) || isa<ArrayType>(A->getType()) ||
isa<StructType>(B->getType()) || isa<ArrayType>(B->getType()))
return false;
// For now, only support constants with the same size.
uint64_t StoreSize = DL.getTypeStoreSize(A->getType());
if (StoreSize != DL.getTypeStoreSize(B->getType()) || StoreSize > 128)
return false;
Type *IntTy = IntegerType::get(A->getContext(), StoreSize*8);
// Try constant folding a bitcast of both instructions to an integer. If we
// get two identical ConstantInt's, then we are good to share them. We use
// the constant folding APIs to do this so that we get the benefit of
// DataLayout.
if (isa<PointerType>(A->getType()))
A = ConstantFoldCastOperand(Instruction::PtrToInt,
const_cast<Constant *>(A), IntTy, DL);
else if (A->getType() != IntTy)
A = ConstantFoldCastOperand(Instruction::BitCast, const_cast<Constant *>(A),
IntTy, DL);
if (isa<PointerType>(B->getType()))
B = ConstantFoldCastOperand(Instruction::PtrToInt,
const_cast<Constant *>(B), IntTy, DL);
else if (B->getType() != IntTy)
B = ConstantFoldCastOperand(Instruction::BitCast, const_cast<Constant *>(B),
IntTy, DL);
return A == B;
}
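// Sharing example (illustrative): float 1.0 and i32 1065353216 (0x3f800000)
// have different types but identical 4-byte store images; both bitcast-fold
// to the same ConstantInt, so they may share one constant-pool entry.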
/// Create a new entry in the constant pool or return an existing one.
/// User must specify the minimum required alignment (in bytes) for the object.
unsigned MachineConstantPool::getConstantPoolIndex(const Constant *C,
unsigned Alignment) {
assert(Alignment && "Alignment must be specified!");
if (Alignment > PoolAlignment) PoolAlignment = Alignment;
// Check to see if we already have this constant.
//
// FIXME, this could be made much more efficient for large constant pools.
for (unsigned i = 0, e = Constants.size(); i != e; ++i)
if (!Constants[i].isMachineConstantPoolEntry() &&
CanShareConstantPoolEntry(Constants[i].Val.ConstVal, C, DL)) {
if ((unsigned)Constants[i].getAlignment() < Alignment)
Constants[i].Alignment = Alignment;
return i;
}
Constants.push_back(MachineConstantPoolEntry(C, Alignment));
return Constants.size()-1;
}
unsigned MachineConstantPool::getConstantPoolIndex(MachineConstantPoolValue *V,
unsigned Alignment) {
assert(Alignment && "Alignment must be specified!");
if (Alignment > PoolAlignment) PoolAlignment = Alignment;
// Check to see if we already have this constant.
//
// FIXME, this could be made much more efficient for large constant pools.
int Idx = V->getExistingMachineCPValue(this, Alignment);
if (Idx != -1) {
MachineCPVsSharingEntries.insert(V);
return (unsigned)Idx;
}
Constants.push_back(MachineConstantPoolEntry(V, Alignment));
return Constants.size()-1;
}
void MachineConstantPool::print(raw_ostream &OS) const {
if (Constants.empty()) return;
OS << "Constant Pool:\n";
for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
OS << " cp#" << i << ": ";
if (Constants[i].isMachineConstantPoolEntry())
Constants[i].Val.MachineCPVal->print(OS);
else
Constants[i].Val.ConstVal->printAsOperand(OS, /*PrintType=*/false);
OS << ", align=" << Constants[i].getAlignment();
OS << "\n";
}
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MachineConstantPool::dump() const { print(dbgs()); }
#endif
Index: stable/12/contrib/llvm-project/llvm/lib/CodeGen/MachineInstr.cpp
===================================================================
--- stable/12/contrib/llvm-project/llvm/lib/CodeGen/MachineInstr.cpp (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/lib/CodeGen/MachineInstr.cpp (revision 356465)
@@ -1,2180 +1,2178 @@
//===- lib/CodeGen/MachineInstr.cpp ---------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Methods common to all machine instructions.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ModuleSlotTracker.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <utility>
using namespace llvm;
static const MachineFunction *getMFIfAvailable(const MachineInstr &MI) {
if (const MachineBasicBlock *MBB = MI.getParent())
if (const MachineFunction *MF = MBB->getParent())
return MF;
return nullptr;
}
// Try to crawl up to the machine function and get TRI, MRI, IntrinsicInfo,
// and TII from it.
static void tryToGetTargetInfo(const MachineInstr &MI,
const TargetRegisterInfo *&TRI,
const MachineRegisterInfo *&MRI,
const TargetIntrinsicInfo *&IntrinsicInfo,
const TargetInstrInfo *&TII) {
if (const MachineFunction *MF = getMFIfAvailable(MI)) {
TRI = MF->getSubtarget().getRegisterInfo();
MRI = &MF->getRegInfo();
IntrinsicInfo = MF->getTarget().getIntrinsicInfo();
TII = MF->getSubtarget().getInstrInfo();
}
}
void MachineInstr::addImplicitDefUseOperands(MachineFunction &MF) {
if (MCID->ImplicitDefs)
for (const MCPhysReg *ImpDefs = MCID->getImplicitDefs(); *ImpDefs;
++ImpDefs)
addOperand(MF, MachineOperand::CreateReg(*ImpDefs, true, true));
if (MCID->ImplicitUses)
for (const MCPhysReg *ImpUses = MCID->getImplicitUses(); *ImpUses;
++ImpUses)
addOperand(MF, MachineOperand::CreateReg(*ImpUses, false, true));
}
/// MachineInstr ctor - This constructor creates a MachineInstr and adds the
/// implicit operands. It reserves space for the number of operands specified by
/// the MCInstrDesc.
MachineInstr::MachineInstr(MachineFunction &MF, const MCInstrDesc &tid,
DebugLoc dl, bool NoImp)
: MCID(&tid), debugLoc(std::move(dl)) {
assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
// Reserve space for the expected number of operands.
if (unsigned NumOps = MCID->getNumOperands() +
MCID->getNumImplicitDefs() + MCID->getNumImplicitUses()) {
CapOperands = OperandCapacity::get(NumOps);
Operands = MF.allocateOperandArray(CapOperands);
}
if (!NoImp)
addImplicitDefUseOperands(MF);
}
/// MachineInstr ctor - Copies MachineInstr arg exactly
///
MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
: MCID(&MI.getDesc()), Info(MI.Info), debugLoc(MI.getDebugLoc()) {
assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
CapOperands = OperandCapacity::get(MI.getNumOperands());
Operands = MF.allocateOperandArray(CapOperands);
// Copy operands.
for (const MachineOperand &MO : MI.operands())
addOperand(MF, MO);
// Copy all the sensible flags.
setFlags(MI.Flags);
}
/// getRegInfo - If this instruction is embedded into a MachineFunction,
/// return the MachineRegisterInfo object for the current function, otherwise
/// return null.
MachineRegisterInfo *MachineInstr::getRegInfo() {
if (MachineBasicBlock *MBB = getParent())
return &MBB->getParent()->getRegInfo();
return nullptr;
}
/// RemoveRegOperandsFromUseLists - Unlink all of the register operands in
/// this instruction from their respective use lists. This requires that the
/// operands already be on their use lists.
void MachineInstr::RemoveRegOperandsFromUseLists(MachineRegisterInfo &MRI) {
for (MachineOperand &MO : operands())
if (MO.isReg())
MRI.removeRegOperandFromUseList(&MO);
}
/// AddRegOperandsToUseLists - Add all of the register operands in
/// this instruction to their respective use lists. This requires that the
/// operands not be on their use lists yet.
void MachineInstr::AddRegOperandsToUseLists(MachineRegisterInfo &MRI) {
for (MachineOperand &MO : operands())
if (MO.isReg())
MRI.addRegOperandToUseList(&MO);
}
void MachineInstr::addOperand(const MachineOperand &Op) {
MachineBasicBlock *MBB = getParent();
assert(MBB && "Use MachineInstrBuilder to add operands to dangling instrs");
MachineFunction *MF = MBB->getParent();
assert(MF && "Use MachineInstrBuilder to add operands to dangling instrs");
addOperand(*MF, Op);
}
/// Move NumOps MachineOperands from Src to Dst, with support for overlapping
/// ranges. If MRI is non-null also update use-def chains.
static void moveOperands(MachineOperand *Dst, MachineOperand *Src,
unsigned NumOps, MachineRegisterInfo *MRI) {
if (MRI)
return MRI->moveOperands(Dst, Src, NumOps);
// MachineOperand is a trivially copyable type so we can just use memmove.
std::memmove(Dst, Src, NumOps * sizeof(MachineOperand));
}
/// addOperand - Add the specified operand to the instruction. If it is an
/// implicit operand, it is added to the end of the operand list. If it is
/// an explicit operand it is added at the end of the explicit operand list
/// (before the first implicit operand).
void MachineInstr::addOperand(MachineFunction &MF, const MachineOperand &Op) {
assert(MCID && "Cannot add operands before providing an instr descriptor");
// Check if we're adding one of our existing operands.
if (&Op >= Operands && &Op < Operands + NumOperands) {
// This is unusual: MI->addOperand(MI->getOperand(i)).
// If adding Op requires reallocating or moving existing operands around,
// the Op reference could go stale. Support it by copying Op.
MachineOperand CopyOp(Op);
return addOperand(MF, CopyOp);
}
// Find the insert location for the new operand. Implicit registers go at
// the end, everything else goes before the implicit regs.
//
// FIXME: Allow mixed explicit and implicit operands on inline asm.
// InstrEmitter::EmitSpecialNode() is marking inline asm clobbers as
// implicit-defs, but they must not be moved around. See the FIXME in
// InstrEmitter.cpp.
unsigned OpNo = getNumOperands();
bool isImpReg = Op.isReg() && Op.isImplicit();
if (!isImpReg && !isInlineAsm()) {
while (OpNo && Operands[OpNo-1].isReg() && Operands[OpNo-1].isImplicit()) {
--OpNo;
assert(!Operands[OpNo].isTied() && "Cannot move tied operands");
}
}
#ifndef NDEBUG
bool isDebugOp = Op.getType() == MachineOperand::MO_Metadata ||
Op.getType() == MachineOperand::MO_MCSymbol;
// OpNo now points to the desired insertion point. Unless this is a variadic
// instruction, only implicit regs are allowed beyond MCID->getNumOperands().
// RegMask operands go between the explicit and implicit operands.
assert((isImpReg || Op.isRegMask() || MCID->isVariadic() ||
OpNo < MCID->getNumOperands() || isDebugOp) &&
"Trying to add an operand to a machine instr that is already done!");
#endif
MachineRegisterInfo *MRI = getRegInfo();
// Determine if the Operands array needs to be reallocated.
// Save the old capacity and operand array.
OperandCapacity OldCap = CapOperands;
MachineOperand *OldOperands = Operands;
if (!OldOperands || OldCap.getSize() == getNumOperands()) {
CapOperands = OldOperands ? OldCap.getNext() : OldCap.get(1);
Operands = MF.allocateOperandArray(CapOperands);
// Move the operands before the insertion point.
if (OpNo)
moveOperands(Operands, OldOperands, OpNo, MRI);
}
// Move the operands following the insertion point.
if (OpNo != NumOperands)
moveOperands(Operands + OpNo + 1, OldOperands + OpNo, NumOperands - OpNo,
MRI);
++NumOperands;
// Deallocate the old operand array.
if (OldOperands != Operands && OldOperands)
MF.deallocateOperandArray(OldCap, OldOperands);
// Copy Op into place. It still needs to be inserted into the MRI use lists.
MachineOperand *NewMO = new (Operands + OpNo) MachineOperand(Op);
NewMO->ParentMI = this;
// When adding a register operand, tell MRI about it.
if (NewMO->isReg()) {
// Ensure isOnRegUseList() returns false, regardless of Op's status.
NewMO->Contents.Reg.Prev = nullptr;
// Ignore existing ties. This is not a property that can be copied.
NewMO->TiedTo = 0;
// Add the new operand to MRI, but only for instructions in an MBB.
if (MRI)
MRI->addRegOperandToUseList(NewMO);
// The MCID operand information isn't accurate until we start adding
// explicit operands. The implicit operands are added first, then the
// explicits are inserted before them.
if (!isImpReg) {
// Tie uses to defs as indicated in MCInstrDesc.
if (NewMO->isUse()) {
int DefIdx = MCID->getOperandConstraint(OpNo, MCOI::TIED_TO);
if (DefIdx != -1)
tieOperands(DefIdx, OpNo);
}
// If the register operand is flagged as early, mark the operand as such.
if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
NewMO->setIsEarlyClobber(true);
}
}
}
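// A minimal illustrative sketch (hypothetical opcode and registers, not
// taken from this file): the insertion rule above means explicit operands
// land before any implicit registers already present, while implicit
// registers are appended at the end.
//
//   MachineInstr *MI = MF.CreateMachineInstr(TII->get(Opcode), DL);
//   MI->addOperand(MF, MachineOperand::CreateReg(ImpReg, /*isDef=*/false,
//                                                /*isImp=*/true));
//   MI->addOperand(MF, MachineOperand::CreateReg(UseReg, /*isDef=*/false));
//   // Operand order is now: UseReg (explicit), then ImpReg (implicit).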
/// RemoveOperand - Erase an operand from an instruction, leaving it with one
/// fewer operand than it started with.
///
void MachineInstr::RemoveOperand(unsigned OpNo) {
assert(OpNo < getNumOperands() && "Invalid operand number");
untieRegOperand(OpNo);
#ifndef NDEBUG
// Moving tied operands would break the ties.
for (unsigned i = OpNo + 1, e = getNumOperands(); i != e; ++i)
if (Operands[i].isReg())
assert(!Operands[i].isTied() && "Cannot move tied operands");
#endif
MachineRegisterInfo *MRI = getRegInfo();
if (MRI && Operands[OpNo].isReg())
MRI->removeRegOperandFromUseList(Operands + OpNo);
// Don't call the MachineOperand destructor. A lot of this code depends on
// MachineOperand having a trivial destructor anyway, and adding a call here
// wouldn't make it 'destructor-correct'.
if (unsigned N = NumOperands - 1 - OpNo)
moveOperands(Operands + OpNo, Operands + OpNo + 1, N, MRI);
--NumOperands;
}
-void MachineInstr::dropMemRefs(MachineFunction &MF) {
- if (memoperands_empty())
- return;
+void MachineInstr::setExtraInfo(MachineFunction &MF,
+ ArrayRef<MachineMemOperand *> MMOs,
+ MCSymbol *PreInstrSymbol,
+ MCSymbol *PostInstrSymbol,
+ MDNode *HeapAllocMarker) {
+ bool HasPreInstrSymbol = PreInstrSymbol != nullptr;
+ bool HasPostInstrSymbol = PostInstrSymbol != nullptr;
+ bool HasHeapAllocMarker = HeapAllocMarker != nullptr;
+ int NumPointers =
+ MMOs.size() + HasPreInstrSymbol + HasPostInstrSymbol + HasHeapAllocMarker;
- // See if we can just drop all of our extra info.
- if (!getPreInstrSymbol() && !getPostInstrSymbol()) {
+ // Drop all extra info if there is none.
+ if (NumPointers <= 0) {
Info.clear();
return;
}
- if (!getPostInstrSymbol()) {
- Info.set<EIIK_PreInstrSymbol>(getPreInstrSymbol());
+
+ // If more than one pointer, then store out of line. Store heap alloc markers
+ // out of line because PointerSumType cannot hold more than 4 tag types with
+ // 32-bit pointers.
+ // FIXME: Maybe we should make the symbols in the extra info mutable?
+ else if (NumPointers > 1 || HasHeapAllocMarker) {
+ Info.set<EIIK_OutOfLine>(MF.createMIExtraInfoWithMarker(
+ MMOs, PreInstrSymbol, PostInstrSymbol, HeapAllocMarker));
return;
}
- if (!getPreInstrSymbol()) {
- Info.set<EIIK_PostInstrSymbol>(getPostInstrSymbol());
+
+ // Otherwise store the single pointer inline.
+ if (HasPreInstrSymbol)
+ Info.set<EIIK_PreInstrSymbol>(PreInstrSymbol);
+ else if (HasPostInstrSymbol)
+ Info.set<EIIK_PostInstrSymbol>(PostInstrSymbol);
+ else
+ Info.set<EIIK_MMO>(MMOs[0]);
+}
+
+void MachineInstr::dropMemRefs(MachineFunction &MF) {
+ if (memoperands_empty())
return;
- }
- // Otherwise allocate a fresh extra info with just these symbols.
- Info.set<EIIK_OutOfLine>(
- MF.createMIExtraInfo({}, getPreInstrSymbol(), getPostInstrSymbol()));
+ setExtraInfo(MF, {}, getPreInstrSymbol(), getPostInstrSymbol(),
+ getHeapAllocMarker());
}
void MachineInstr::setMemRefs(MachineFunction &MF,
ArrayRef<MachineMemOperand *> MMOs) {
if (MMOs.empty()) {
dropMemRefs(MF);
return;
}
- // Try to store a single MMO inline.
- if (MMOs.size() == 1 && !getPreInstrSymbol() && !getPostInstrSymbol()) {
- Info.set<EIIK_MMO>(MMOs[0]);
- return;
- }
-
- // Otherwise create an extra info struct with all of our info.
- Info.set<EIIK_OutOfLine>(
- MF.createMIExtraInfo(MMOs, getPreInstrSymbol(), getPostInstrSymbol()));
+ setExtraInfo(MF, MMOs, getPreInstrSymbol(), getPostInstrSymbol(),
+ getHeapAllocMarker());
}
void MachineInstr::addMemOperand(MachineFunction &MF,
MachineMemOperand *MO) {
SmallVector<MachineMemOperand *, 2> MMOs;
MMOs.append(memoperands_begin(), memoperands_end());
MMOs.push_back(MO);
setMemRefs(MF, MMOs);
}
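// A hedged usage sketch (the pointer value and sizes are assumptions):
// giving a load a MachineMemOperand so the alias queries below can see what
// it touches.
//
//   MachineMemOperand *MMO = MF.getMachineMemOperand(
//       MachinePointerInfo(PtrV), MachineMemOperand::MOLoad,
//       /*Size=*/4, /*Alignment=*/4);
//   LoadMI->addMemOperand(MF, MMO);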
void MachineInstr::cloneMemRefs(MachineFunction &MF, const MachineInstr &MI) {
if (this == &MI)
// Nothing to do for a self-clone!
return;
assert(&MF == MI.getMF() &&
"Invalid machine functions when cloning memory refrences!");
// See if we can just steal the extra info already allocated for the
// instruction. We can do this whenever the pre- and post-instruction symbols
// and the heap alloc marker are the same (including null).
if (getPreInstrSymbol() == MI.getPreInstrSymbol() &&
- getPostInstrSymbol() == MI.getPostInstrSymbol()) {
+ getPostInstrSymbol() == MI.getPostInstrSymbol() &&
+ getHeapAllocMarker() == MI.getHeapAllocMarker()) {
Info = MI.Info;
return;
}
// Otherwise, fall back on a copy-based clone.
setMemRefs(MF, MI.memoperands());
}
/// Check to see if the MMOs pointed to by the two MemRefs arrays are
/// identical.
static bool hasIdenticalMMOs(ArrayRef<MachineMemOperand *> LHS,
ArrayRef<MachineMemOperand *> RHS) {
if (LHS.size() != RHS.size())
return false;
auto LHSPointees = make_pointee_range(LHS);
auto RHSPointees = make_pointee_range(RHS);
return std::equal(LHSPointees.begin(), LHSPointees.end(),
RHSPointees.begin());
}
void MachineInstr::cloneMergedMemRefs(MachineFunction &MF,
ArrayRef<const MachineInstr *> MIs) {
// Try handling easy numbers of MIs with simpler mechanisms.
if (MIs.empty()) {
dropMemRefs(MF);
return;
}
if (MIs.size() == 1) {
cloneMemRefs(MF, *MIs[0]);
return;
}
// Because an empty memoperands list provides *no* information and must be
// handled conservatively (assuming the instruction can do anything), the only
// way to merge with it is to drop all other memoperands.
if (MIs[0]->memoperands_empty()) {
dropMemRefs(MF);
return;
}
// Handle the general case.
SmallVector<MachineMemOperand *, 2> MergedMMOs;
// Start with the first instruction.
assert(&MF == MIs[0]->getMF() &&
"Invalid machine functions when cloning memory references!");
MergedMMOs.append(MIs[0]->memoperands_begin(), MIs[0]->memoperands_end());
// Now walk all the other instructions and accumulate any different MMOs.
for (const MachineInstr &MI : make_pointee_range(MIs.slice(1))) {
assert(&MF == MI.getMF() &&
"Invalid machine functions when cloning memory references!");
// Skip MIs with identical operands to the first. This is a somewhat
// arbitrary hack but will catch common cases without being quadratic.
// TODO: We could fully implement merge semantics here if needed.
if (hasIdenticalMMOs(MIs[0]->memoperands(), MI.memoperands()))
continue;
// Because an empty memoperands list provides *no* information and must be
// handled conservatively (assuming the instruction can do anything), the
// only way to merge with it is to drop all other memoperands.
if (MI.memoperands_empty()) {
dropMemRefs(MF);
return;
}
// Otherwise accumulate these into our temporary buffer of the merged state.
MergedMMOs.append(MI.memoperands_begin(), MI.memoperands_end());
}
setMemRefs(MF, MergedMMOs);
}
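// Worked example of the merge semantics above (purely illustrative):
// merging MI0 with list {MMOa} and MI1 with {MMOa, MMOb} yields
// {MMOa, MMOa, MMOb}, since the identical-list shortcut only skips exact
// matches of the whole list; merging with an MI2 that has no memoperands
// drops everything, because an empty list means "may access anything".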
void MachineInstr::setPreInstrSymbol(MachineFunction &MF, MCSymbol *Symbol) {
- MCSymbol *OldSymbol = getPreInstrSymbol();
- if (OldSymbol == Symbol)
+ // Do nothing if old and new symbols are the same.
+ if (Symbol == getPreInstrSymbol())
return;
- if (OldSymbol && !Symbol) {
- // We're removing a symbol rather than adding one. Try to clean up any
- // extra info carried around.
- if (Info.is<EIIK_PreInstrSymbol>()) {
- Info.clear();
- return;
- }
- if (memoperands_empty()) {
- assert(getPostInstrSymbol() &&
- "Should never have only a single symbol allocated out-of-line!");
- Info.set<EIIK_PostInstrSymbol>(getPostInstrSymbol());
- return;
- }
-
- // Otherwise fallback on the generic update.
- } else if (!Info || Info.is<EIIK_PreInstrSymbol>()) {
- // If we don't have any other extra info, we can store this inline.
- Info.set<EIIK_PreInstrSymbol>(Symbol);
+ // If there was only one symbol and we're removing it, just clear info.
+ if (!Symbol && Info.is<EIIK_PreInstrSymbol>()) {
+ Info.clear();
return;
}
- // Otherwise, allocate a full new set of extra info.
- // FIXME: Maybe we should make the symbols in the extra info mutable?
- Info.set<EIIK_OutOfLine>(
- MF.createMIExtraInfo(memoperands(), Symbol, getPostInstrSymbol()));
+ setExtraInfo(MF, memoperands(), Symbol, getPostInstrSymbol(),
+ getHeapAllocMarker());
}
void MachineInstr::setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol) {
- MCSymbol *OldSymbol = getPostInstrSymbol();
- if (OldSymbol == Symbol)
+ // Do nothing if old and new symbols are the same.
+ if (Symbol == getPostInstrSymbol())
return;
- if (OldSymbol && !Symbol) {
- // We're removing a symbol rather than adding one. Try to clean up any
- // extra info carried around.
- if (Info.is<EIIK_PostInstrSymbol>()) {
- Info.clear();
- return;
- }
- if (memoperands_empty()) {
- assert(getPreInstrSymbol() &&
- "Should never have only a single symbol allocated out-of-line!");
- Info.set<EIIK_PreInstrSymbol>(getPreInstrSymbol());
- return;
- }
-
- // Otherwise fallback on the generic update.
- } else if (!Info || Info.is<EIIK_PostInstrSymbol>()) {
- // If we don't have any other extra info, we can store this inline.
- Info.set<EIIK_PostInstrSymbol>(Symbol);
+ // If there was only one symbol and we're removing it, just clear info.
+ if (!Symbol && Info.is<EIIK_PostInstrSymbol>()) {
+ Info.clear();
return;
}
- // Otherwise, allocate a full new set of extra info.
- // FIXME: Maybe we should make the symbols in the extra info mutable?
- Info.set<EIIK_OutOfLine>(
- MF.createMIExtraInfo(memoperands(), getPreInstrSymbol(), Symbol));
+ setExtraInfo(MF, memoperands(), getPreInstrSymbol(), Symbol,
+ getHeapAllocMarker());
}
+void MachineInstr::setHeapAllocMarker(MachineFunction &MF, MDNode *Marker) {
+ // Do nothing if old and new markers are the same.
+ if (Marker == getHeapAllocMarker())
+ return;
+
+ setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
+ Marker);
+}
+
void MachineInstr::cloneInstrSymbols(MachineFunction &MF,
const MachineInstr &MI) {
if (this == &MI)
// Nothing to do for a self-clone!
return;
assert(&MF == MI.getMF() &&
"Invalid machine functions when cloning instruction symbols!");
setPreInstrSymbol(MF, MI.getPreInstrSymbol());
setPostInstrSymbol(MF, MI.getPostInstrSymbol());
+ setHeapAllocMarker(MF, MI.getHeapAllocMarker());
}
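// Illustrative sketch (symbol names are assumptions): labels emitted
// around a call, stored through the extra-info machinery above.
//
//   MCSymbol *Before = MF.getContext().createTempSymbol();
//   MCSymbol *After = MF.getContext().createTempSymbol();
//   CallMI->setPreInstrSymbol(MF, Before);
//   CallMI->setPostInstrSymbol(MF, After);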
uint16_t MachineInstr::mergeFlagsWith(const MachineInstr &Other) const {
// For now, just return the union of the flags. If the flags get more
// complicated over time, we might need more logic here.
return getFlags() | Other.getFlags();
}
uint16_t MachineInstr::copyFlagsFromInstruction(const Instruction &I) {
uint16_t MIFlags = 0;
// Copy the wrapping flags.
if (const OverflowingBinaryOperator *OB =
dyn_cast<OverflowingBinaryOperator>(&I)) {
if (OB->hasNoSignedWrap())
MIFlags |= MachineInstr::MIFlag::NoSWrap;
if (OB->hasNoUnsignedWrap())
MIFlags |= MachineInstr::MIFlag::NoUWrap;
}
// Copy the exact flag.
if (const PossiblyExactOperator *PE = dyn_cast<PossiblyExactOperator>(&I))
if (PE->isExact())
MIFlags |= MachineInstr::MIFlag::IsExact;
// Copy the fast-math flags.
if (const FPMathOperator *FP = dyn_cast<FPMathOperator>(&I)) {
const FastMathFlags Flags = FP->getFastMathFlags();
if (Flags.noNaNs())
MIFlags |= MachineInstr::MIFlag::FmNoNans;
if (Flags.noInfs())
MIFlags |= MachineInstr::MIFlag::FmNoInfs;
if (Flags.noSignedZeros())
MIFlags |= MachineInstr::MIFlag::FmNsz;
if (Flags.allowReciprocal())
MIFlags |= MachineInstr::MIFlag::FmArcp;
if (Flags.allowContract())
MIFlags |= MachineInstr::MIFlag::FmContract;
if (Flags.approxFunc())
MIFlags |= MachineInstr::MIFlag::FmAfn;
if (Flags.allowReassoc())
MIFlags |= MachineInstr::MIFlag::FmReassoc;
}
return MIFlags;
}
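// Hedged sketch of the intended use (the IR instruction I is assumed to be
// in scope): carry wrap/exact/fast-math flags from IR onto the selected MI.
//
//   MIB->setFlags(MachineInstr::copyFlagsFromInstruction(I));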
void MachineInstr::copyIRFlags(const Instruction &I) {
Flags = copyFlagsFromInstruction(I);
}
bool MachineInstr::hasPropertyInBundle(uint64_t Mask, QueryType Type) const {
assert(!isBundledWithPred() && "Must be called on bundle header");
for (MachineBasicBlock::const_instr_iterator MII = getIterator();; ++MII) {
if (MII->getDesc().getFlags() & Mask) {
if (Type == AnyInBundle)
return true;
} else {
if (Type == AllInBundle && !MII->isBundle())
return false;
}
// This was the last instruction in the bundle.
if (!MII->isBundledWithSucc())
return Type == AllInBundle;
}
}
bool MachineInstr::isIdenticalTo(const MachineInstr &Other,
MICheckType Check) const {
// If opcodes or number of operands are not the same then the two
// instructions are obviously not identical.
if (Other.getOpcode() != getOpcode() ||
Other.getNumOperands() != getNumOperands())
return false;
if (isBundle()) {
// We have passed the test above that both instructions have the same
// opcode, so we know that both instructions are bundles here. Let's compare
// MIs inside the bundle.
assert(Other.isBundle() && "Expected that both instructions are bundles.");
MachineBasicBlock::const_instr_iterator I1 = getIterator();
MachineBasicBlock::const_instr_iterator I2 = Other.getIterator();
// Loop until we have analyzed the last instruction inside at least one of the
// bundles.
while (I1->isBundledWithSucc() && I2->isBundledWithSucc()) {
++I1;
++I2;
if (!I1->isIdenticalTo(*I2, Check))
return false;
}
// If we've reached the end of just one of the two bundles, but not both,
// the instructions are not identical.
if (I1->isBundledWithSucc() || I2->isBundledWithSucc())
return false;
}
// Check operands to make sure they match.
for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
const MachineOperand &MO = getOperand(i);
const MachineOperand &OMO = Other.getOperand(i);
if (!MO.isReg()) {
if (!MO.isIdenticalTo(OMO))
return false;
continue;
}
// Clients may or may not want to ignore defs when testing for equality.
// For example, machine CSE pass only cares about finding common
// subexpressions, so it's safe to ignore virtual register defs.
if (MO.isDef()) {
if (Check == IgnoreDefs)
continue;
else if (Check == IgnoreVRegDefs) {
if (!TargetRegisterInfo::isVirtualRegister(MO.getReg()) ||
!TargetRegisterInfo::isVirtualRegister(OMO.getReg()))
if (!MO.isIdenticalTo(OMO))
return false;
} else {
if (!MO.isIdenticalTo(OMO))
return false;
if (Check == CheckKillDead && MO.isDead() != OMO.isDead())
return false;
}
} else {
if (!MO.isIdenticalTo(OMO))
return false;
if (Check == CheckKillDead && MO.isKill() != OMO.isKill())
return false;
}
}
// If DebugLoc does not match then two debug instructions are not identical.
if (isDebugInstr())
if (getDebugLoc() && Other.getDebugLoc() &&
getDebugLoc() != Other.getDebugLoc())
return false;
return true;
}
const MachineFunction *MachineInstr::getMF() const {
return getParent()->getParent();
}
MachineInstr *MachineInstr::removeFromParent() {
assert(getParent() && "Not embedded in a basic block!");
return getParent()->remove(this);
}
MachineInstr *MachineInstr::removeFromBundle() {
assert(getParent() && "Not embedded in a basic block!");
return getParent()->remove_instr(this);
}
void MachineInstr::eraseFromParent() {
assert(getParent() && "Not embedded in a basic block!");
getParent()->erase(this);
}
void MachineInstr::eraseFromParentAndMarkDBGValuesForRemoval() {
assert(getParent() && "Not embedded in a basic block!");
MachineBasicBlock *MBB = getParent();
MachineFunction *MF = MBB->getParent();
assert(MF && "Not embedded in a function!");
MachineInstr *MI = (MachineInstr *)this;
MachineRegisterInfo &MRI = MF->getRegInfo();
for (const MachineOperand &MO : MI->operands()) {
if (!MO.isReg() || !MO.isDef())
continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg))
continue;
MRI.markUsesInDebugValueAsUndef(Reg);
}
MI->eraseFromParent();
}
void MachineInstr::eraseFromBundle() {
assert(getParent() && "Not embedded in a basic block!");
getParent()->erase_instr(this);
}
unsigned MachineInstr::getNumExplicitOperands() const {
unsigned NumOperands = MCID->getNumOperands();
if (!MCID->isVariadic())
return NumOperands;
for (unsigned I = NumOperands, E = getNumOperands(); I != E; ++I) {
const MachineOperand &MO = getOperand(I);
// The operands must always be in the following order:
// - explicit reg defs,
// - other explicit operands (reg uses, immediates, etc.),
// - implicit reg defs
// - implicit reg uses
if (MO.isReg() && MO.isImplicit())
break;
++NumOperands;
}
return NumOperands;
}
unsigned MachineInstr::getNumExplicitDefs() const {
unsigned NumDefs = MCID->getNumDefs();
if (!MCID->isVariadic())
return NumDefs;
for (unsigned I = NumDefs, E = getNumOperands(); I != E; ++I) {
const MachineOperand &MO = getOperand(I);
if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
break;
++NumDefs;
}
return NumDefs;
}
void MachineInstr::bundleWithPred() {
assert(!isBundledWithPred() && "MI is already bundled with its predecessor");
setFlag(BundledPred);
MachineBasicBlock::instr_iterator Pred = getIterator();
--Pred;
assert(!Pred->isBundledWithSucc() && "Inconsistent bundle flags");
Pred->setFlag(BundledSucc);
}
void MachineInstr::bundleWithSucc() {
assert(!isBundledWithSucc() && "MI is already bundled with its successor");
setFlag(BundledSucc);
MachineBasicBlock::instr_iterator Succ = getIterator();
++Succ;
assert(!Succ->isBundledWithPred() && "Inconsistent bundle flags");
Succ->setFlag(BundledPred);
}
void MachineInstr::unbundleFromPred() {
assert(isBundledWithPred() && "MI isn't bundled with its predecessor");
clearFlag(BundledPred);
MachineBasicBlock::instr_iterator Pred = getIterator();
--Pred;
assert(Pred->isBundledWithSucc() && "Inconsistent bundle flags");
Pred->clearFlag(BundledSucc);
}
void MachineInstr::unbundleFromSucc() {
assert(isBundledWithSucc() && "MI isn't bundled with its successor");
clearFlag(BundledSucc);
MachineBasicBlock::instr_iterator Succ = getIterator();
++Succ;
assert(Succ->isBundledWithPred() && "Inconsistent bundle flags");
Succ->clearFlag(BundledPred);
}
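// A small sketch of the flag symmetry above (hypothetical adjacent
// instructions A and B in one MBB): bundling is always recorded on both
// sides.
//
//   B->bundleWithPred();            // sets BundledPred on B...
//   assert(A->isBundledWithSucc()); // ...and BundledSucc on A.
//   B->unbundleFromPred();          // clears both flags again.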
bool MachineInstr::isStackAligningInlineAsm() const {
if (isInlineAsm()) {
unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
return true;
}
return false;
}
InlineAsm::AsmDialect MachineInstr::getInlineAsmDialect() const {
assert(isInlineAsm() && "getInlineAsmDialect() only works for inline asms!");
unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
return InlineAsm::AsmDialect((ExtraInfo & InlineAsm::Extra_AsmDialect) != 0);
}
int MachineInstr::findInlineAsmFlagIdx(unsigned OpIdx,
unsigned *GroupNo) const {
assert(isInlineAsm() && "Expected an inline asm instruction");
assert(OpIdx < getNumOperands() && "OpIdx out of range");
// Ignore queries about the initial operands.
if (OpIdx < InlineAsm::MIOp_FirstOperand)
return -1;
unsigned Group = 0;
unsigned NumOps;
for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
i += NumOps) {
const MachineOperand &FlagMO = getOperand(i);
// If we reach the implicit register operands, stop looking.
if (!FlagMO.isImm())
return -1;
NumOps = 1 + InlineAsm::getNumOperandRegisters(FlagMO.getImm());
if (i + NumOps > OpIdx) {
if (GroupNo)
*GroupNo = Group;
return i;
}
++Group;
}
return -1;
}
const DILabel *MachineInstr::getDebugLabel() const {
assert(isDebugLabel() && "not a DBG_LABEL");
return cast<DILabel>(getOperand(0).getMetadata());
}
const DILocalVariable *MachineInstr::getDebugVariable() const {
assert(isDebugValue() && "not a DBG_VALUE");
return cast<DILocalVariable>(getOperand(2).getMetadata());
}
const DIExpression *MachineInstr::getDebugExpression() const {
assert(isDebugValue() && "not a DBG_VALUE");
return cast<DIExpression>(getOperand(3).getMetadata());
}
const TargetRegisterClass*
MachineInstr::getRegClassConstraint(unsigned OpIdx,
const TargetInstrInfo *TII,
const TargetRegisterInfo *TRI) const {
assert(getParent() && "Can't have an MBB reference here!");
assert(getMF() && "Can't have an MF reference here!");
const MachineFunction &MF = *getMF();
// Most opcodes have fixed constraints in their MCInstrDesc.
if (!isInlineAsm())
return TII->getRegClass(getDesc(), OpIdx, TRI, MF);
if (!getOperand(OpIdx).isReg())
return nullptr;
// For tied uses on inline asm, get the constraint from the def.
unsigned DefIdx;
if (getOperand(OpIdx).isUse() && isRegTiedToDefOperand(OpIdx, &DefIdx))
OpIdx = DefIdx;
// Inline asm stores register class constraints in the flag word.
int FlagIdx = findInlineAsmFlagIdx(OpIdx);
if (FlagIdx < 0)
return nullptr;
unsigned Flag = getOperand(FlagIdx).getImm();
unsigned RCID;
if ((InlineAsm::getKind(Flag) == InlineAsm::Kind_RegUse ||
InlineAsm::getKind(Flag) == InlineAsm::Kind_RegDef ||
InlineAsm::getKind(Flag) == InlineAsm::Kind_RegDefEarlyClobber) &&
InlineAsm::hasRegClassConstraint(Flag, RCID))
return TRI->getRegClass(RCID);
// Assume that all registers in a memory operand are pointers.
if (InlineAsm::getKind(Flag) == InlineAsm::Kind_Mem)
return TRI->getPointerRegClass(MF);
return nullptr;
}
const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVReg(
unsigned Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII,
const TargetRegisterInfo *TRI, bool ExploreBundle) const {
// Check every operand inside the bundle if we have been asked to.
if (ExploreBundle)
for (ConstMIBundleOperands OpndIt(*this); OpndIt.isValid() && CurRC;
++OpndIt)
CurRC = OpndIt->getParent()->getRegClassConstraintEffectForVRegImpl(
OpndIt.getOperandNo(), Reg, CurRC, TII, TRI);
else
// Otherwise, just check the current operands.
for (unsigned i = 0, e = NumOperands; i < e && CurRC; ++i)
CurRC = getRegClassConstraintEffectForVRegImpl(i, Reg, CurRC, TII, TRI);
return CurRC;
}
const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVRegImpl(
unsigned OpIdx, unsigned Reg, const TargetRegisterClass *CurRC,
const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
assert(CurRC && "Invalid initial register class");
// Check if Reg is constrained by some of its use/def from MI.
const MachineOperand &MO = getOperand(OpIdx);
if (!MO.isReg() || MO.getReg() != Reg)
return CurRC;
// If yes, accumulate the constraints through the operand.
return getRegClassConstraintEffect(OpIdx, CurRC, TII, TRI);
}
const TargetRegisterClass *MachineInstr::getRegClassConstraintEffect(
unsigned OpIdx, const TargetRegisterClass *CurRC,
const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
const TargetRegisterClass *OpRC = getRegClassConstraint(OpIdx, TII, TRI);
const MachineOperand &MO = getOperand(OpIdx);
assert(MO.isReg() &&
"Cannot get register constraints for non-register operand");
assert(CurRC && "Invalid initial register class");
if (unsigned SubIdx = MO.getSubReg()) {
if (OpRC)
CurRC = TRI->getMatchingSuperRegClass(CurRC, OpRC, SubIdx);
else
CurRC = TRI->getSubClassWithSubReg(CurRC, SubIdx);
} else if (OpRC)
CurRC = TRI->getCommonSubClass(CurRC, OpRC);
return CurRC;
}
/// Return the number of instructions inside the MI bundle, not counting the
/// header instruction.
unsigned MachineInstr::getBundleSize() const {
MachineBasicBlock::const_instr_iterator I = getIterator();
unsigned Size = 0;
while (I->isBundledWithSucc()) {
++Size;
++I;
}
return Size;
}
/// Returns true if the MachineInstr has an implicit-use operand of exactly
/// the given register (not considering sub/super-registers).
bool MachineInstr::hasRegisterImplicitUseOperand(unsigned Reg) const {
for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
const MachineOperand &MO = getOperand(i);
if (MO.isReg() && MO.isUse() && MO.isImplicit() && MO.getReg() == Reg)
return true;
}
return false;
}
/// findRegisterUseOperandIdx() - Returns the operand index that is a use of
/// the specified register or -1 if it is not found. It further tightens
/// the search criteria to a use that kills the register if isKill is true.
int MachineInstr::findRegisterUseOperandIdx(
unsigned Reg, bool isKill, const TargetRegisterInfo *TRI) const {
for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
const MachineOperand &MO = getOperand(i);
if (!MO.isReg() || !MO.isUse())
continue;
unsigned MOReg = MO.getReg();
if (!MOReg)
continue;
if (MOReg == Reg || (TRI && Reg && MOReg && TRI->regsOverlap(MOReg, Reg)))
if (!isKill || MO.isKill())
return i;
}
return -1;
}
/// readsWritesVirtualRegister - Return a pair of bools (reads, writes)
/// indicating if this instruction reads or writes Reg. This also considers
/// partial defines.
std::pair<bool,bool>
MachineInstr::readsWritesVirtualRegister(unsigned Reg,
SmallVectorImpl<unsigned> *Ops) const {
bool PartDef = false; // Partial redefine.
bool FullDef = false; // Full define.
bool Use = false;
for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
const MachineOperand &MO = getOperand(i);
if (!MO.isReg() || MO.getReg() != Reg)
continue;
if (Ops)
Ops->push_back(i);
if (MO.isUse())
Use |= !MO.isUndef();
else if (MO.getSubReg() && !MO.isUndef())
// A partial def undef doesn't count as reading the register.
PartDef = true;
else
FullDef = true;
}
// A partial redefine uses Reg unless there is also a full define.
return std::make_pair(Use || (PartDef && !FullDef), PartDef || FullDef);
}
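// Worked example (illustrative): for an instruction whose only operand for
// %0 is a subregister def such as %0.sub_lo = ... (not marked undef), the
// loop records a partial def and no full def, so the returned pair is
// (reads = true, writes = true): the partial redefine reads the untouched
// lanes of %0.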
/// findRegisterDefOperandIdx() - Returns the operand index that is a def of
/// the specified register or -1 if it is not found. If isDead is true, defs
/// that are not dead are skipped. If TargetRegisterInfo is non-null, then it
/// also checks if there is a def of a super-register.
int
MachineInstr::findRegisterDefOperandIdx(unsigned Reg, bool isDead, bool Overlap,
const TargetRegisterInfo *TRI) const {
bool isPhys = TargetRegisterInfo::isPhysicalRegister(Reg);
for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
const MachineOperand &MO = getOperand(i);
// Accept regmask operands when Overlap is set.
// Ignore them when looking for a specific def operand (Overlap == false).
if (isPhys && Overlap && MO.isRegMask() && MO.clobbersPhysReg(Reg))
return i;
if (!MO.isReg() || !MO.isDef())
continue;
unsigned MOReg = MO.getReg();
bool Found = (MOReg == Reg);
if (!Found && TRI && isPhys &&
TargetRegisterInfo::isPhysicalRegister(MOReg)) {
if (Overlap)
Found = TRI->regsOverlap(MOReg, Reg);
else
Found = TRI->isSubRegister(MOReg, Reg);
}
if (Found && (!isDead || MO.isDead()))
return i;
}
return -1;
}
/// findFirstPredOperandIdx() - Find the index of the first operand in the
/// operand list that is used to represent the predicate. It returns -1 if
/// none is found.
int MachineInstr::findFirstPredOperandIdx() const {
// Don't call MCID.findFirstPredOperandIdx() because this variant
// is sometimes called on an instruction that's not yet complete, and
// so the number of operands is less than the MCID indicates. In
// particular, the PTX target does this.
const MCInstrDesc &MCID = getDesc();
if (MCID.isPredicable()) {
for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
if (MCID.OpInfo[i].isPredicate())
return i;
}
return -1;
}
// MachineOperand::TiedTo is 4 bits wide.
const unsigned TiedMax = 15;
/// tieOperands - Mark operands at DefIdx and UseIdx as tied to each other.
///
/// Use and def operands can be tied together, indicated by a non-zero TiedTo
/// field. TiedTo can have these values:
///
/// 0: Operand is not tied to anything.
/// 1 to TiedMax-1: Tied to getOperand(TiedTo-1).
/// TiedMax: Tied to an operand >= TiedMax-1.
///
/// The tied def must be one of the first TiedMax operands on a normal
/// instruction. INLINEASM instructions allow more tied defs.
///
void MachineInstr::tieOperands(unsigned DefIdx, unsigned UseIdx) {
MachineOperand &DefMO = getOperand(DefIdx);
MachineOperand &UseMO = getOperand(UseIdx);
assert(DefMO.isDef() && "DefIdx must be a def operand");
assert(UseMO.isUse() && "UseIdx must be a use operand");
assert(!DefMO.isTied() && "Def is already tied to another use");
assert(!UseMO.isTied() && "Use is already tied to another def");
if (DefIdx < TiedMax)
UseMO.TiedTo = DefIdx + 1;
else {
// Inline asm can use the group descriptors to find tied operands, but on
// normal instruction, the tied def must be within the first TiedMax
// operands.
assert(isInlineAsm() && "DefIdx out of range");
UseMO.TiedTo = TiedMax;
}
// UseIdx can be out of range, we'll search for it in findTiedOperandIdx().
DefMO.TiedTo = std::min(UseIdx + 1, TiedMax);
}
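// Worked example of the encoding (illustrative): after
//   MI->tieOperands(/*DefIdx=*/0, /*UseIdx=*/1);
// the fields are UseMO.TiedTo == 1 (operand 0 plus one) and
// DefMO.TiedTo == 2 (operand 1 plus one), so findTiedOperandIdx(1) == 0
// and findTiedOperandIdx(0) == 1.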
/// Given the index of a tied register operand, find the operand it is tied to.
/// Defs are tied to uses and vice versa. Returns the index of the tied operand
/// which must exist.
unsigned MachineInstr::findTiedOperandIdx(unsigned OpIdx) const {
const MachineOperand &MO = getOperand(OpIdx);
assert(MO.isTied() && "Operand isn't tied");
// Normally TiedTo is in range.
if (MO.TiedTo < TiedMax)
return MO.TiedTo - 1;
// Uses on normal instructions can be out of range.
if (!isInlineAsm()) {
// Normal tied defs must be in the 0..TiedMax-1 range.
if (MO.isUse())
return TiedMax - 1;
// MO is a def. Search for the tied use.
for (unsigned i = TiedMax - 1, e = getNumOperands(); i != e; ++i) {
const MachineOperand &UseMO = getOperand(i);
if (UseMO.isReg() && UseMO.isUse() && UseMO.TiedTo == OpIdx + 1)
return i;
}
llvm_unreachable("Can't find tied use");
}
// Now deal with inline asm by parsing the operand group descriptor flags.
// Find the beginning of each operand group.
SmallVector<unsigned, 8> GroupIdx;
unsigned OpIdxGroup = ~0u;
unsigned NumOps;
for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
i += NumOps) {
const MachineOperand &FlagMO = getOperand(i);
assert(FlagMO.isImm() && "Invalid tied operand on inline asm");
unsigned CurGroup = GroupIdx.size();
GroupIdx.push_back(i);
NumOps = 1 + InlineAsm::getNumOperandRegisters(FlagMO.getImm());
// OpIdx belongs to this operand group.
if (OpIdx > i && OpIdx < i + NumOps)
OpIdxGroup = CurGroup;
unsigned TiedGroup;
if (!InlineAsm::isUseOperandTiedToDef(FlagMO.getImm(), TiedGroup))
continue;
// Operands in this group are tied to operands in TiedGroup which must be
// earlier. Find the number of operands between the two groups.
unsigned Delta = i - GroupIdx[TiedGroup];
// OpIdx is a use tied to TiedGroup.
if (OpIdxGroup == CurGroup)
return OpIdx - Delta;
// OpIdx is a def tied to this use group.
if (OpIdxGroup == TiedGroup)
return OpIdx + Delta;
}
llvm_unreachable("Invalid tied operand on inline asm");
}
/// clearKillInfo - Clears kill flags on all operands.
///
void MachineInstr::clearKillInfo() {
for (MachineOperand &MO : operands()) {
if (MO.isReg() && MO.isUse())
MO.setIsKill(false);
}
}
void MachineInstr::substituteRegister(unsigned FromReg, unsigned ToReg,
unsigned SubIdx,
const TargetRegisterInfo &RegInfo) {
if (TargetRegisterInfo::isPhysicalRegister(ToReg)) {
if (SubIdx)
ToReg = RegInfo.getSubReg(ToReg, SubIdx);
for (MachineOperand &MO : operands()) {
if (!MO.isReg() || MO.getReg() != FromReg)
continue;
MO.substPhysReg(ToReg, RegInfo);
}
} else {
for (MachineOperand &MO : operands()) {
if (!MO.isReg() || MO.getReg() != FromReg)
continue;
MO.substVirtReg(ToReg, SubIdx, RegInfo);
}
}
}
/// isSafeToMove - Return true if it is safe to move this instruction. If
/// SawStore is set to true, it means that there is a store (or call) between
/// the instruction's location and its intended destination.
bool MachineInstr::isSafeToMove(AliasAnalysis *AA, bool &SawStore) const {
// Ignore stuff that we obviously can't move.
//
// Treat volatile loads as stores. This is not strictly necessary for
// volatiles, but it is required for atomic loads. It is not allowed to move
// a load across an atomic load with Ordering > Monotonic.
if (mayStore() || isCall() || isPHI() ||
(mayLoad() && hasOrderedMemoryRef())) {
SawStore = true;
return false;
}
if (isPosition() || isDebugInstr() || isTerminator() ||
mayRaiseFPException() || hasUnmodeledSideEffects())
return false;
// See if this instruction does a load. If so, we have to guarantee that the
// loaded value doesn't change between the load and its intended
// destination. The check for isInvariantLoad gives the target the chance to
// classify the load as always returning a constant, e.g. a constant pool
// load.
if (mayLoad() && !isDereferenceableInvariantLoad(AA))
// Otherwise, this is a real load. If there is a store between the load and
// end of block, we can't move it.
return !SawStore;
return true;
}
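// Hedged usage sketch (one common pattern, not the only caller shape):
// SawStore accumulates whether a store or call sits between MI and its
// destination, which is exactly what makes a trailing load unsafe to move.
//
//   bool SawStore = false;
//   for (MachineInstr &Cur : make_range(MBB.begin(), MI.getIterator()))
//     if (Cur.mayStore() || Cur.isCall())
//       SawStore = true;
//   bool CanMove = MI.isSafeToMove(AA, SawStore);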
bool MachineInstr::mayAlias(AliasAnalysis *AA, const MachineInstr &Other,
bool UseTBAA) const {
const MachineFunction *MF = getMF();
const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
const MachineFrameInfo &MFI = MF->getFrameInfo();
// If neither instruction stores to memory, they can't alias in any
// meaningful way, even if they read from the same address.
if (!mayStore() && !Other.mayStore())
return false;
// Let the target decide if memory accesses cannot possibly overlap.
if (TII->areMemAccessesTriviallyDisjoint(*this, Other, AA))
return false;
// FIXME: Need to handle multiple memory operands to support all targets.
if (!hasOneMemOperand() || !Other.hasOneMemOperand())
return true;
MachineMemOperand *MMOa = *memoperands_begin();
MachineMemOperand *MMOb = *Other.memoperands_begin();
// The following interface to AA is fashioned after DAGCombiner::isAlias
// and operates with MachineMemOperand offset with some important
// assumptions:
// - LLVM fundamentally assumes flat address spaces.
// - MachineOperand offset can *only* result from legalization and
// cannot affect queries other than the trivial case of overlap
// checking.
// - These offsets never wrap and never step outside
// of allocated objects.
// - There should never be any negative offsets here.
//
// FIXME: Modify API to hide this math from "user"
// Even before we go to AA we can reason locally about some
// memory objects. It can save compile time, and possibly catch some
// corner cases not currently covered.
int64_t OffsetA = MMOa->getOffset();
int64_t OffsetB = MMOb->getOffset();
int64_t MinOffset = std::min(OffsetA, OffsetB);
uint64_t WidthA = MMOa->getSize();
uint64_t WidthB = MMOb->getSize();
bool KnownWidthA = WidthA != MemoryLocation::UnknownSize;
bool KnownWidthB = WidthB != MemoryLocation::UnknownSize;
const Value *ValA = MMOa->getValue();
const Value *ValB = MMOb->getValue();
bool SameVal = (ValA && ValB && (ValA == ValB));
if (!SameVal) {
const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
if (PSVa && ValB && !PSVa->mayAlias(&MFI))
return false;
if (PSVb && ValA && !PSVb->mayAlias(&MFI))
return false;
if (PSVa && PSVb && (PSVa == PSVb))
SameVal = true;
}
if (SameVal) {
if (!KnownWidthA || !KnownWidthB)
return true;
int64_t MaxOffset = std::max(OffsetA, OffsetB);
int64_t LowWidth = (MinOffset == OffsetA) ? WidthA : WidthB;
return (MinOffset + LowWidth > MaxOffset);
}
if (!AA)
return true;
if (!ValA || !ValB)
return true;
assert((OffsetA >= 0) && "Negative MachineMemOperand offset");
assert((OffsetB >= 0) && "Negative MachineMemOperand offset");
int64_t OverlapA = KnownWidthA ? WidthA + OffsetA - MinOffset
: MemoryLocation::UnknownSize;
int64_t OverlapB = KnownWidthB ? WidthB + OffsetB - MinOffset
: MemoryLocation::UnknownSize;
AliasResult AAResult = AA->alias(
MemoryLocation(ValA, OverlapA,
UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
MemoryLocation(ValB, OverlapB,
UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));
return (AAResult != NoAlias);
}
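// Worked overlap check (illustrative numbers): with OffsetA = 8, WidthA = 4
// and OffsetB = 4, WidthB = 4 on the same value, MinOffset = 4,
// MaxOffset = 8 and LowWidth = WidthB = 4, so
//   MinOffset + LowWidth > MaxOffset  =>  4 + 4 > 8  =>  false
// and the accesses [8,12) and [4,8) are correctly reported disjoint.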
/// hasOrderedMemoryRef - Return true if this instruction may have an ordered
/// or volatile memory reference, or if the information describing the memory
/// reference is not available. Return false if it is known to have no ordered
/// memory references.
bool MachineInstr::hasOrderedMemoryRef() const {
// An instruction known never to access memory won't have a volatile access.
if (!mayStore() &&
!mayLoad() &&
!isCall() &&
!hasUnmodeledSideEffects())
return false;
// Otherwise, if the instruction has no memory reference information,
// conservatively assume it wasn't preserved.
if (memoperands_empty())
return true;
// Check if any of our memory operands are ordered.
return llvm::any_of(memoperands(), [](const MachineMemOperand *MMO) {
return !MMO->isUnordered();
});
}
/// isDereferenceableInvariantLoad - Return true if this instruction will never
/// trap and is loading from a location whose value is invariant across a run of
/// this function.
bool MachineInstr::isDereferenceableInvariantLoad(AliasAnalysis *AA) const {
// If the instruction doesn't load at all, it isn't an invariant load.
if (!mayLoad())
return false;
// If the instruction has lost its memoperands, conservatively assume that
// it may not be an invariant load.
if (memoperands_empty())
return false;
const MachineFrameInfo &MFI = getParent()->getParent()->getFrameInfo();
for (MachineMemOperand *MMO : memoperands()) {
if (!MMO->isUnordered())
// If the memory operand has ordering side effects, we can't move the
// instruction. Such an instruction is technically an invariant load,
// but the caller code would need to be updated to expect that.
return false;
if (MMO->isStore()) return false;
if (MMO->isInvariant() && MMO->isDereferenceable())
continue;
// A load from a constant PseudoSourceValue is invariant.
if (const PseudoSourceValue *PSV = MMO->getPseudoValue())
if (PSV->isConstant(&MFI))
continue;
if (const Value *V = MMO->getValue()) {
// If we have an AliasAnalysis, ask it whether the memory is constant.
if (AA &&
AA->pointsToConstantMemory(
MemoryLocation(V, MMO->getSize(), MMO->getAAInfo())))
continue;
}
// Otherwise assume conservatively.
return false;
}
// Everything checks out.
return true;
}
/// isConstantValuePHI - If the specified instruction is a PHI that always
/// merges together the same virtual register, return the register, otherwise
/// return 0.
unsigned MachineInstr::isConstantValuePHI() const {
if (!isPHI())
return 0;
assert(getNumOperands() >= 3 &&
"It's illegal to have a PHI without source operands");
unsigned Reg = getOperand(1).getReg();
for (unsigned i = 3, e = getNumOperands(); i < e; i += 2)
if (getOperand(i).getReg() != Reg)
return 0;
return Reg;
}
bool MachineInstr::hasUnmodeledSideEffects() const {
if (hasProperty(MCID::UnmodeledSideEffects))
return true;
if (isInlineAsm()) {
unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
return true;
}
return false;
}
bool MachineInstr::isLoadFoldBarrier() const {
return mayStore() || isCall() || hasUnmodeledSideEffects();
}
/// allDefsAreDead - Return true if all the defs of this instruction are dead.
///
bool MachineInstr::allDefsAreDead() const {
for (const MachineOperand &MO : operands()) {
if (!MO.isReg() || MO.isUse())
continue;
if (!MO.isDead())
return false;
}
return true;
}
/// copyImplicitOps - Copy implicit register operands from specified
/// instruction to this instruction.
void MachineInstr::copyImplicitOps(MachineFunction &MF,
const MachineInstr &MI) {
for (unsigned i = MI.getDesc().getNumOperands(), e = MI.getNumOperands();
i != e; ++i) {
const MachineOperand &MO = MI.getOperand(i);
if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
addOperand(MF, MO);
}
}
bool MachineInstr::hasComplexRegisterTies() const {
const MCInstrDesc &MCID = getDesc();
for (unsigned I = 0, E = getNumOperands(); I < E; ++I) {
const auto &Operand = getOperand(I);
if (!Operand.isReg() || Operand.isDef())
// Ignore the defined registers as MCID marks only the uses as tied.
continue;
int ExpectedTiedIdx = MCID.getOperandConstraint(I, MCOI::TIED_TO);
int TiedIdx = Operand.isTied() ? int(findTiedOperandIdx(I)) : -1;
if (ExpectedTiedIdx != TiedIdx)
return true;
}
return false;
}
LLT MachineInstr::getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes,
const MachineRegisterInfo &MRI) const {
const MachineOperand &Op = getOperand(OpIdx);
if (!Op.isReg())
return LLT{};
if (isVariadic() || OpIdx >= getNumExplicitOperands())
return MRI.getType(Op.getReg());
auto &OpInfo = getDesc().OpInfo[OpIdx];
if (!OpInfo.isGenericType())
return MRI.getType(Op.getReg());
if (PrintedTypes[OpInfo.getGenericTypeIndex()])
return LLT{};
LLT TypeToPrint = MRI.getType(Op.getReg());
// Don't mark the type index printed if it wasn't actually printed: maybe
// another operand with the same type index has an actual type attached:
if (TypeToPrint.isValid())
PrintedTypes.set(OpInfo.getGenericTypeIndex());
return TypeToPrint;
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MachineInstr::dump() const {
dbgs() << " ";
print(dbgs());
}
#endif
void MachineInstr::print(raw_ostream &OS, bool IsStandalone, bool SkipOpers,
bool SkipDebugLoc, bool AddNewLine,
const TargetInstrInfo *TII) const {
const Module *M = nullptr;
const Function *F = nullptr;
if (const MachineFunction *MF = getMFIfAvailable(*this)) {
F = &MF->getFunction();
M = F->getParent();
if (!TII)
TII = MF->getSubtarget().getInstrInfo();
}
ModuleSlotTracker MST(M);
if (F)
MST.incorporateFunction(*F);
print(OS, MST, IsStandalone, SkipOpers, SkipDebugLoc, AddNewLine, TII);
}
void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
bool IsStandalone, bool SkipOpers, bool SkipDebugLoc,
bool AddNewLine, const TargetInstrInfo *TII) const {
// We can be a bit tidier if we know the MachineFunction.
const MachineFunction *MF = nullptr;
const TargetRegisterInfo *TRI = nullptr;
const MachineRegisterInfo *MRI = nullptr;
const TargetIntrinsicInfo *IntrinsicInfo = nullptr;
tryToGetTargetInfo(*this, TRI, MRI, IntrinsicInfo, TII);
if (isCFIInstruction())
assert(getNumOperands() == 1 && "Expected 1 operand in CFI instruction");
SmallBitVector PrintedTypes(8);
bool ShouldPrintRegisterTies = IsStandalone || hasComplexRegisterTies();
auto getTiedOperandIdx = [&](unsigned OpIdx) {
if (!ShouldPrintRegisterTies)
return 0U;
const MachineOperand &MO = getOperand(OpIdx);
if (MO.isReg() && MO.isTied() && !MO.isDef())
return findTiedOperandIdx(OpIdx);
return 0U;
};
unsigned StartOp = 0;
unsigned e = getNumOperands();
// Print explicitly defined operands on the left of an assignment syntax.
while (StartOp < e) {
const MachineOperand &MO = getOperand(StartOp);
if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
break;
if (StartOp != 0)
OS << ", ";
LLT TypeToPrint = MRI ? getTypeToPrint(StartOp, PrintedTypes, *MRI) : LLT{};
unsigned TiedOperandIdx = getTiedOperandIdx(StartOp);
MO.print(OS, MST, TypeToPrint, /*PrintDef=*/false, IsStandalone,
ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
++StartOp;
}
if (StartOp != 0)
OS << " = ";
if (getFlag(MachineInstr::FrameSetup))
OS << "frame-setup ";
if (getFlag(MachineInstr::FrameDestroy))
OS << "frame-destroy ";
if (getFlag(MachineInstr::FmNoNans))
OS << "nnan ";
if (getFlag(MachineInstr::FmNoInfs))
OS << "ninf ";
if (getFlag(MachineInstr::FmNsz))
OS << "nsz ";
if (getFlag(MachineInstr::FmArcp))
OS << "arcp ";
if (getFlag(MachineInstr::FmContract))
OS << "contract ";
if (getFlag(MachineInstr::FmAfn))
OS << "afn ";
if (getFlag(MachineInstr::FmReassoc))
OS << "reassoc ";
if (getFlag(MachineInstr::NoUWrap))
OS << "nuw ";
if (getFlag(MachineInstr::NoSWrap))
OS << "nsw ";
if (getFlag(MachineInstr::IsExact))
OS << "exact ";
if (getFlag(MachineInstr::FPExcept))
OS << "fpexcept ";
// Print the opcode name.
if (TII)
OS << TII->getName(getOpcode());
else
OS << "UNKNOWN";
if (SkipOpers)
return;
// Print the rest of the operands.
bool FirstOp = true;
unsigned AsmDescOp = ~0u;
unsigned AsmOpCount = 0;
if (isInlineAsm() && e >= InlineAsm::MIOp_FirstOperand) {
// Print asm string.
OS << " ";
const unsigned OpIdx = InlineAsm::MIOp_AsmString;
LLT TypeToPrint = MRI ? getTypeToPrint(OpIdx, PrintedTypes, *MRI) : LLT{};
unsigned TiedOperandIdx = getTiedOperandIdx(OpIdx);
getOperand(OpIdx).print(OS, MST, TypeToPrint, /*PrintDef=*/true, IsStandalone,
ShouldPrintRegisterTies, TiedOperandIdx, TRI,
IntrinsicInfo);
// Print HasSideEffects, MayLoad, MayStore, IsAlignStack
unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
OS << " [sideeffect]";
if (ExtraInfo & InlineAsm::Extra_MayLoad)
OS << " [mayload]";
if (ExtraInfo & InlineAsm::Extra_MayStore)
OS << " [maystore]";
if (ExtraInfo & InlineAsm::Extra_IsConvergent)
OS << " [isconvergent]";
if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
OS << " [alignstack]";
if (getInlineAsmDialect() == InlineAsm::AD_ATT)
OS << " [attdialect]";
if (getInlineAsmDialect() == InlineAsm::AD_Intel)
OS << " [inteldialect]";
StartOp = AsmDescOp = InlineAsm::MIOp_FirstOperand;
FirstOp = false;
}
for (unsigned i = StartOp, e = getNumOperands(); i != e; ++i) {
const MachineOperand &MO = getOperand(i);
if (FirstOp) FirstOp = false; else OS << ",";
OS << " ";
if (isDebugValue() && MO.isMetadata()) {
// Pretty print DBG_VALUE instructions.
auto *DIV = dyn_cast<DILocalVariable>(MO.getMetadata());
if (DIV && !DIV->getName().empty())
OS << "!\"" << DIV->getName() << '\"';
else {
LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
unsigned TiedOperandIdx = getTiedOperandIdx(i);
MO.print(OS, MST, TypeToPrint, /*PrintDef=*/true, IsStandalone,
ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
}
} else if (isDebugLabel() && MO.isMetadata()) {
// Pretty print DBG_LABEL instructions.
auto *DIL = dyn_cast<DILabel>(MO.getMetadata());
if (DIL && !DIL->getName().empty())
OS << "\"" << DIL->getName() << '\"';
else {
LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
unsigned TiedOperandIdx = getTiedOperandIdx(i);
MO.print(OS, MST, TypeToPrint, /*PrintDef=*/true, IsStandalone,
ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
}
} else if (i == AsmDescOp && MO.isImm()) {
// Pretty print the inline asm operand descriptor.
OS << '$' << AsmOpCount++;
unsigned Flag = MO.getImm();
switch (InlineAsm::getKind(Flag)) {
case InlineAsm::Kind_RegUse: OS << ":[reguse"; break;
case InlineAsm::Kind_RegDef: OS << ":[regdef"; break;
case InlineAsm::Kind_RegDefEarlyClobber: OS << ":[regdef-ec"; break;
case InlineAsm::Kind_Clobber: OS << ":[clobber"; break;
case InlineAsm::Kind_Imm: OS << ":[imm"; break;
case InlineAsm::Kind_Mem: OS << ":[mem"; break;
default: OS << ":[??" << InlineAsm::getKind(Flag); break;
}
unsigned RCID = 0;
if (!InlineAsm::isImmKind(Flag) && !InlineAsm::isMemKind(Flag) &&
InlineAsm::hasRegClassConstraint(Flag, RCID)) {
if (TRI) {
OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
} else
OS << ":RC" << RCID;
}
if (InlineAsm::isMemKind(Flag)) {
unsigned MCID = InlineAsm::getMemoryConstraintID(Flag);
switch (MCID) {
case InlineAsm::Constraint_es: OS << ":es"; break;
case InlineAsm::Constraint_i: OS << ":i"; break;
case InlineAsm::Constraint_m: OS << ":m"; break;
case InlineAsm::Constraint_o: OS << ":o"; break;
case InlineAsm::Constraint_v: OS << ":v"; break;
case InlineAsm::Constraint_Q: OS << ":Q"; break;
case InlineAsm::Constraint_R: OS << ":R"; break;
case InlineAsm::Constraint_S: OS << ":S"; break;
case InlineAsm::Constraint_T: OS << ":T"; break;
case InlineAsm::Constraint_Um: OS << ":Um"; break;
case InlineAsm::Constraint_Un: OS << ":Un"; break;
case InlineAsm::Constraint_Uq: OS << ":Uq"; break;
case InlineAsm::Constraint_Us: OS << ":Us"; break;
case InlineAsm::Constraint_Ut: OS << ":Ut"; break;
case InlineAsm::Constraint_Uv: OS << ":Uv"; break;
case InlineAsm::Constraint_Uy: OS << ":Uy"; break;
case InlineAsm::Constraint_X: OS << ":X"; break;
case InlineAsm::Constraint_Z: OS << ":Z"; break;
case InlineAsm::Constraint_ZC: OS << ":ZC"; break;
case InlineAsm::Constraint_Zy: OS << ":Zy"; break;
default: OS << ":?"; break;
}
}
unsigned TiedTo = 0;
if (InlineAsm::isUseOperandTiedToDef(Flag, TiedTo))
OS << " tiedto:$" << TiedTo;
OS << ']';
// Compute the index of the next operand descriptor.
AsmDescOp += 1 + InlineAsm::getNumOperandRegisters(Flag);
} else {
LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
unsigned TiedOperandIdx = getTiedOperandIdx(i);
if (MO.isImm() && isOperandSubregIdx(i))
MachineOperand::printSubRegIdx(OS, MO.getImm(), TRI);
else
MO.print(OS, MST, TypeToPrint, /*PrintDef=*/true, IsStandalone,
ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
}
}
// Print any optional symbols attached to this instruction as-if they were
// operands.
if (MCSymbol *PreInstrSymbol = getPreInstrSymbol()) {
if (!FirstOp) {
FirstOp = false;
OS << ',';
}
OS << " pre-instr-symbol ";
MachineOperand::printSymbol(OS, *PreInstrSymbol);
}
if (MCSymbol *PostInstrSymbol = getPostInstrSymbol()) {
if (!FirstOp) {
FirstOp = false;
OS << ',';
}
OS << " post-instr-symbol ";
MachineOperand::printSymbol(OS, *PostInstrSymbol);
+ }
+ if (MDNode *HeapAllocMarker = getHeapAllocMarker()) {
+ if (!FirstOp) {
+ FirstOp = false;
+ OS << ',';
+ }
+ OS << " heap-alloc-marker";
}
if (!SkipDebugLoc) {
if (const DebugLoc &DL = getDebugLoc()) {
if (!FirstOp)
OS << ',';
OS << " debug-location ";
DL->printAsOperand(OS, MST);
}
}
if (!memoperands_empty()) {
SmallVector<StringRef, 0> SSNs;
const LLVMContext *Context = nullptr;
std::unique_ptr<LLVMContext> CtxPtr;
const MachineFrameInfo *MFI = nullptr;
if (const MachineFunction *MF = getMFIfAvailable(*this)) {
MFI = &MF->getFrameInfo();
Context = &MF->getFunction().getContext();
} else {
CtxPtr = llvm::make_unique<LLVMContext>();
Context = CtxPtr.get();
}
OS << " :: ";
bool NeedComma = false;
for (const MachineMemOperand *Op : memoperands()) {
if (NeedComma)
OS << ", ";
Op->print(OS, MST, SSNs, *Context, MFI, TII);
NeedComma = true;
}
}
if (SkipDebugLoc)
return;
bool HaveSemi = false;
// Print debug location information.
if (const DebugLoc &DL = getDebugLoc()) {
if (!HaveSemi) {
OS << ';';
HaveSemi = true;
}
OS << ' ';
DL.print(OS);
}
// Print extra comments for DEBUG_VALUE.
if (isDebugValue() && getOperand(e - 2).isMetadata()) {
if (!HaveSemi) {
OS << ";";
HaveSemi = true;
}
auto *DV = cast<DILocalVariable>(getOperand(e - 2).getMetadata());
OS << " line no:" << DV->getLine();
if (auto *InlinedAt = debugLoc->getInlinedAt()) {
DebugLoc InlinedAtDL(InlinedAt);
if (InlinedAtDL && MF) {
OS << " inlined @[ ";
InlinedAtDL.print(OS);
OS << " ]";
}
}
if (isIndirectDebugValue())
OS << " indirect";
}
// TODO: DBG_LABEL
if (AddNewLine)
OS << '\n';
}
bool MachineInstr::addRegisterKilled(unsigned IncomingReg,
const TargetRegisterInfo *RegInfo,
bool AddIfNotFound) {
bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(IncomingReg);
bool hasAliases = isPhysReg &&
MCRegAliasIterator(IncomingReg, RegInfo, false).isValid();
bool Found = false;
SmallVector<unsigned,4> DeadOps;
for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
MachineOperand &MO = getOperand(i);
if (!MO.isReg() || !MO.isUse() || MO.isUndef())
continue;
// DEBUG_VALUE nodes do not contribute to code generation and should
// always be ignored. Failure to do so may result in trying to modify
// KILL flags on DEBUG_VALUE nodes.
if (MO.isDebug())
continue;
unsigned Reg = MO.getReg();
if (!Reg)
continue;
if (Reg == IncomingReg) {
if (!Found) {
if (MO.isKill())
// The register is already marked kill.
return true;
if (isPhysReg && isRegTiedToDefOperand(i))
// Two-address uses of physregs must not be marked kill.
return true;
MO.setIsKill();
Found = true;
}
} else if (hasAliases && MO.isKill() &&
TargetRegisterInfo::isPhysicalRegister(Reg)) {
// A super-register kill already exists.
if (RegInfo->isSuperRegister(IncomingReg, Reg))
return true;
if (RegInfo->isSubRegister(IncomingReg, Reg))
DeadOps.push_back(i);
}
}
// Trim unneeded kill operands.
while (!DeadOps.empty()) {
unsigned OpIdx = DeadOps.back();
if (getOperand(OpIdx).isImplicit() &&
(!isInlineAsm() || findInlineAsmFlagIdx(OpIdx) < 0))
RemoveOperand(OpIdx);
else
getOperand(OpIdx).setIsKill(false);
DeadOps.pop_back();
}
// If not found, this means an alias of one of the operands is killed. Add a
// new implicit operand if required.
if (!Found && AddIfNotFound) {
addOperand(MachineOperand::CreateReg(IncomingReg,
false /*IsDef*/,
true /*IsImp*/,
true /*IsKill*/));
return true;
}
return Found;
}
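// Hedged sketch (register and instruction names are assumptions): marking
// the final use of a physreg, letting the routine above synthesize an
// implicit kill operand when no explicit use of that exact register exists.
//
//   LastUseMI->addRegisterKilled(PhysReg, TRI, /*AddIfNotFound=*/true);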
void MachineInstr::clearRegisterKills(unsigned Reg,
const TargetRegisterInfo *RegInfo) {
if (!TargetRegisterInfo::isPhysicalRegister(Reg))
RegInfo = nullptr;
for (MachineOperand &MO : operands()) {
if (!MO.isReg() || !MO.isUse() || !MO.isKill())
continue;
unsigned OpReg = MO.getReg();
if ((RegInfo && RegInfo->regsOverlap(Reg, OpReg)) || Reg == OpReg)
MO.setIsKill(false);
}
}
bool MachineInstr::addRegisterDead(unsigned Reg,
const TargetRegisterInfo *RegInfo,
bool AddIfNotFound) {
bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(Reg);
bool hasAliases = isPhysReg &&
MCRegAliasIterator(Reg, RegInfo, false).isValid();
bool Found = false;
SmallVector<unsigned,4> DeadOps;
for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
MachineOperand &MO = getOperand(i);
if (!MO.isReg() || !MO.isDef())
continue;
unsigned MOReg = MO.getReg();
if (!MOReg)
continue;
if (MOReg == Reg) {
MO.setIsDead();
Found = true;
} else if (hasAliases && MO.isDead() &&
TargetRegisterInfo::isPhysicalRegister(MOReg)) {
// There exists a super-register that's marked dead.
if (RegInfo->isSuperRegister(Reg, MOReg))
return true;
if (RegInfo->isSubRegister(Reg, MOReg))
DeadOps.push_back(i);
}
}
// Trim unneeded dead operands.
while (!DeadOps.empty()) {
unsigned OpIdx = DeadOps.back();
if (getOperand(OpIdx).isImplicit() &&
(!isInlineAsm() || findInlineAsmFlagIdx(OpIdx) < 0))
RemoveOperand(OpIdx);
else
getOperand(OpIdx).setIsDead(false);
DeadOps.pop_back();
}
// If not found, this means an alias of one of the operands is dead. Add a
// new implicit operand if required.
if (Found || !AddIfNotFound)
return Found;
addOperand(MachineOperand::CreateReg(Reg,
true /*IsDef*/,
true /*IsImp*/,
false /*IsKill*/,
true /*IsDead*/));
return true;
}
void MachineInstr::clearRegisterDeads(unsigned Reg) {
for (MachineOperand &MO : operands()) {
if (!MO.isReg() || !MO.isDef() || MO.getReg() != Reg)
continue;
MO.setIsDead(false);
}
}
void MachineInstr::setRegisterDefReadUndef(unsigned Reg, bool IsUndef) {
for (MachineOperand &MO : operands()) {
if (!MO.isReg() || !MO.isDef() || MO.getReg() != Reg || MO.getSubReg() == 0)
continue;
MO.setIsUndef(IsUndef);
}
}
void MachineInstr::addRegisterDefined(unsigned Reg,
const TargetRegisterInfo *RegInfo) {
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
MachineOperand *MO = findRegisterDefOperand(Reg, false, false, RegInfo);
if (MO)
return;
} else {
for (const MachineOperand &MO : operands()) {
if (MO.isReg() && MO.getReg() == Reg && MO.isDef() &&
MO.getSubReg() == 0)
return;
}
}
addOperand(MachineOperand::CreateReg(Reg,
true /*IsDef*/,
true /*IsImp*/));
}
void MachineInstr::setPhysRegsDeadExcept(ArrayRef<unsigned> UsedRegs,
const TargetRegisterInfo &TRI) {
bool HasRegMask = false;
for (MachineOperand &MO : operands()) {
if (MO.isRegMask()) {
HasRegMask = true;
continue;
}
if (!MO.isReg() || !MO.isDef()) continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
// If there are no uses, including partial uses, the def is dead.
if (llvm::none_of(UsedRegs,
[&](unsigned Use) { return TRI.regsOverlap(Use, Reg); }))
MO.setIsDead();
}
// This is a call with a register mask operand.
// Mask clobbers are always dead, so add defs for the non-dead defines.
if (HasRegMask)
for (ArrayRef<unsigned>::iterator I = UsedRegs.begin(), E = UsedRegs.end();
I != E; ++I)
addRegisterDefined(*I, &TRI);
}
unsigned
MachineInstrExpressionTrait::getHashValue(const MachineInstr* const &MI) {
// Build up a buffer of hash code components.
SmallVector<size_t, 8> HashComponents;
HashComponents.reserve(MI->getNumOperands() + 1);
HashComponents.push_back(MI->getOpcode());
for (const MachineOperand &MO : MI->operands()) {
if (MO.isReg() && MO.isDef() &&
TargetRegisterInfo::isVirtualRegister(MO.getReg()))
continue; // Skip virtual register defs.
HashComponents.push_back(hash_value(MO));
}
return hash_combine_range(HashComponents.begin(), HashComponents.end());
}
void MachineInstr::emitError(StringRef Msg) const {
// Find the source location cookie.
unsigned LocCookie = 0;
const MDNode *LocMD = nullptr;
for (unsigned i = getNumOperands(); i != 0; --i) {
if (getOperand(i-1).isMetadata() &&
(LocMD = getOperand(i-1).getMetadata()) &&
LocMD->getNumOperands() != 0) {
if (const ConstantInt *CI =
mdconst::dyn_extract<ConstantInt>(LocMD->getOperand(0))) {
LocCookie = CI->getZExtValue();
break;
}
}
}
if (const MachineBasicBlock *MBB = getParent())
if (const MachineFunction *MF = MBB->getParent())
return MF->getMMI().getModule()->getContext().emitError(LocCookie, Msg);
report_fatal_error(Msg);
}
MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
const MCInstrDesc &MCID, bool IsIndirect,
unsigned Reg, const MDNode *Variable,
const MDNode *Expr) {
assert(isa<DILocalVariable>(Variable) && "not a variable");
assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
"Expected inlined-at fields to agree");
auto MIB = BuildMI(MF, DL, MCID).addReg(Reg, RegState::Debug);
if (IsIndirect)
MIB.addImm(0U);
else
MIB.addReg(0U, RegState::Debug);
return MIB.addMetadata(Variable).addMetadata(Expr);
}
MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
const MCInstrDesc &MCID, bool IsIndirect,
MachineOperand &MO, const MDNode *Variable,
const MDNode *Expr) {
assert(isa<DILocalVariable>(Variable) && "not a variable");
assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
"Expected inlined-at fields to agree");
if (MO.isReg())
return BuildMI(MF, DL, MCID, IsIndirect, MO.getReg(), Variable, Expr);
auto MIB = BuildMI(MF, DL, MCID).add(MO);
if (IsIndirect)
MIB.addImm(0U);
else
MIB.addReg(0U, RegState::Debug);
return MIB.addMetadata(Variable).addMetadata(Expr);
}
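// For reference (editor's note), the DBG_VALUEs built above have the layout
//   DBG_VALUE <loc>, <0 or $noreg>, !DILocalVariable, !DIExpression
// where operand 1 is the immediate 0 for indirect locations and a debug use
// of $noreg otherwise. A hedged construction sketch, with `MBB`, `InsertPt`,
// `TII`, `VReg`, `Var`, and `Expr` assumed to be in scope:
//   BuildMI(MBB, InsertPt, DL, TII->get(TargetOpcode::DBG_VALUE),
//           /*IsIndirect=*/false, VReg, Var, Expr);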
MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
MachineBasicBlock::iterator I,
const DebugLoc &DL, const MCInstrDesc &MCID,
bool IsIndirect, unsigned Reg,
const MDNode *Variable, const MDNode *Expr) {
MachineFunction &MF = *BB.getParent();
MachineInstr *MI = BuildMI(MF, DL, MCID, IsIndirect, Reg, Variable, Expr);
BB.insert(I, MI);
return MachineInstrBuilder(MF, MI);
}
MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
MachineBasicBlock::iterator I,
const DebugLoc &DL, const MCInstrDesc &MCID,
bool IsIndirect, MachineOperand &MO,
const MDNode *Variable, const MDNode *Expr) {
MachineFunction &MF = *BB.getParent();
MachineInstr *MI = BuildMI(MF, DL, MCID, IsIndirect, MO, Variable, Expr);
BB.insert(I, MI);
return MachineInstrBuilder(MF, *MI);
}
/// Compute the new DIExpression to use with a DBG_VALUE for a spill slot.
/// This prepends DW_OP_deref when spilling an indirect DBG_VALUE.
static const DIExpression *computeExprForSpill(const MachineInstr &MI) {
assert(MI.getOperand(0).isReg() && "can't spill non-register");
assert(MI.getDebugVariable()->isValidLocationForIntrinsic(MI.getDebugLoc()) &&
"Expected inlined-at fields to agree");
const DIExpression *Expr = MI.getDebugExpression();
if (MI.isIndirectDebugValue()) {
assert(MI.getOperand(1).getImm() == 0 && "DBG_VALUE with nonzero offset");
Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
}
return Expr;
}
MachineInstr *llvm::buildDbgValueForSpill(MachineBasicBlock &BB,
MachineBasicBlock::iterator I,
const MachineInstr &Orig,
int FrameIndex) {
const DIExpression *Expr = computeExprForSpill(Orig);
return BuildMI(BB, I, Orig.getDebugLoc(), Orig.getDesc())
.addFrameIndex(FrameIndex)
.addImm(0U)
.addMetadata(Orig.getDebugVariable())
.addMetadata(Expr);
}
void llvm::updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex) {
const DIExpression *Expr = computeExprForSpill(Orig);
Orig.getOperand(0).ChangeToFrameIndex(FrameIndex);
Orig.getOperand(1).ChangeToImmediate(0U);
Orig.getOperand(3).setMetadata(Expr);
}
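// Worked example (editor's illustration): spilling the direct location
//   DBG_VALUE $rbx, $noreg, !var, !DIExpression()
// to frame index 1 rewrites it in place to
//   DBG_VALUE %stack.1, 0, !var, !DIExpression()
// while an already-indirect DBG_VALUE additionally gets DW_OP_deref
// prepended by computeExprForSpill above.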
void MachineInstr::collectDebugValues(
SmallVectorImpl<MachineInstr *> &DbgValues) {
MachineInstr &MI = *this;
if (!MI.getOperand(0).isReg())
return;
MachineBasicBlock::iterator DI = MI; ++DI;
for (MachineBasicBlock::iterator DE = MI.getParent()->end();
DI != DE; ++DI) {
if (!DI->isDebugValue())
return;
if (DI->getOperand(0).isReg() &&
DI->getOperand(0).getReg() == MI.getOperand(0).getReg())
DbgValues.push_back(&*DI);
}
}
void MachineInstr::changeDebugValuesDefReg(unsigned Reg) {
// Collect matching debug values.
SmallVector<MachineInstr *, 2> DbgValues;
collectDebugValues(DbgValues);
// Propagate Reg to debug value instructions.
for (auto *DBI : DbgValues)
DBI->getOperand(0).setReg(Reg);
}
using MMOList = SmallVector<const MachineMemOperand *, 2>;
static unsigned getSpillSlotSize(MMOList &Accesses,
const MachineFrameInfo &MFI) {
unsigned Size = 0;
for (auto A : Accesses)
if (MFI.isSpillSlotObjectIndex(
cast<FixedStackPseudoSourceValue>(A->getPseudoValue())
->getFrameIndex()))
Size += A->getSize();
return Size;
}
Optional<unsigned>
MachineInstr::getSpillSize(const TargetInstrInfo *TII) const {
int FI;
if (TII->isStoreToStackSlotPostFE(*this, FI)) {
const MachineFrameInfo &MFI = getMF()->getFrameInfo();
if (MFI.isSpillSlotObjectIndex(FI))
return (*memoperands_begin())->getSize();
}
return None;
}
Optional<unsigned>
MachineInstr::getFoldedSpillSize(const TargetInstrInfo *TII) const {
MMOList Accesses;
if (TII->hasStoreToStackSlot(*this, Accesses))
return getSpillSlotSize(Accesses, getMF()->getFrameInfo());
return None;
}
Optional<unsigned>
MachineInstr::getRestoreSize(const TargetInstrInfo *TII) const {
int FI;
if (TII->isLoadFromStackSlotPostFE(*this, FI)) {
const MachineFrameInfo &MFI = getMF()->getFrameInfo();
if (MFI.isSpillSlotObjectIndex(FI))
return (*memoperands_begin())->getSize();
}
return None;
}
Optional<unsigned>
MachineInstr::getFoldedRestoreSize(const TargetInstrInfo *TII) const {
MMOList Accesses;
if (TII->hasLoadFromStackSlot(*this, Accesses))
return getSpillSlotSize(Accesses, getMF()->getFrameInfo());
return None;
}
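// Hedged usage sketch (editor's example; `MI`, `TII`, and `SpillBytes` are
// assumed to be in scope): a statistics pass could total spill traffic with
//   if (Optional<unsigned> Sz = MI.getSpillSize(TII))
//     SpillBytes += *Sz;
//   if (Optional<unsigned> Sz = MI.getFoldedSpillSize(TII))
//     SpillBytes += *Sz;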
Index: stable/12/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
===================================================================
--- stable/12/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (revision 356465)
@@ -1,20838 +1,20838 @@
//===- DAGCombiner.cpp - Implement a DAG node combiner --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass combines dag nodes to form fewer, simpler DAG nodes. It can be run
// both before and after the DAG is legalized.
//
// This pass is not a substitute for the LLVM IR instcombine pass. This pass is
// primarily intended to handle simplification opportunities that are implicit
// in the LLVM IR and exposed by the various codegen lowering phases.
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IntervalMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <iterator>
#include <string>
#include <tuple>
#include <utility>
using namespace llvm;
#define DEBUG_TYPE "dagcombine"
STATISTIC(NodesCombined , "Number of dag nodes combined");
STATISTIC(PreIndexedNodes , "Number of pre-indexed nodes created");
STATISTIC(PostIndexedNodes, "Number of post-indexed nodes created");
STATISTIC(OpsNarrowed , "Number of load/op/store narrowed");
STATISTIC(LdStFP2Int , "Number of fp load/store pairs transformed to int");
STATISTIC(SlicedLoads, "Number of load sliced");
STATISTIC(NumFPLogicOpsConv, "Number of logic ops converted to fp ops");
static cl::opt<bool>
CombinerGlobalAA("combiner-global-alias-analysis", cl::Hidden,
cl::desc("Enable DAG combiner's use of IR alias analysis"));
static cl::opt<bool>
UseTBAA("combiner-use-tbaa", cl::Hidden, cl::init(true),
cl::desc("Enable DAG combiner's use of TBAA"));
#ifndef NDEBUG
static cl::opt<std::string>
CombinerAAOnlyFunc("combiner-aa-only-func", cl::Hidden,
cl::desc("Only use DAG-combiner alias analysis in this"
" function"));
#endif
/// Hidden option to stress test load slicing, i.e., when this option
/// is enabled, load slicing bypasses most of its profitability guards.
static cl::opt<bool>
StressLoadSlicing("combiner-stress-load-slicing", cl::Hidden,
cl::desc("Bypass the profitability model of load slicing"),
cl::init(false));
static cl::opt<bool>
MaySplitLoadIndex("combiner-split-load-index", cl::Hidden, cl::init(true),
cl::desc("DAG combiner may split indexing from loads"));
static cl::opt<unsigned> TokenFactorInlineLimit(
"combiner-tokenfactor-inline-limit", cl::Hidden, cl::init(2048),
cl::desc("Limit the number of operands to inline for Token Factors"));
namespace {
class DAGCombiner {
SelectionDAG &DAG;
const TargetLowering &TLI;
CombineLevel Level;
CodeGenOpt::Level OptLevel;
bool LegalOperations = false;
bool LegalTypes = false;
bool ForCodeSize;
/// Worklist of all of the nodes that need to be simplified.
///
/// This must behave as a stack -- new nodes to process are pushed onto the
/// back and when processing we pop off of the back.
///
/// The worklist will not contain duplicates but may contain null entries
/// due to nodes being deleted from the underlying DAG.
SmallVector<SDNode *, 64> Worklist;
/// Mapping from an SDNode to its position on the worklist.
///
/// This is used to find and remove nodes from the worklist (by nulling
/// them) when they are deleted from the underlying DAG. It relies on
/// stable indices of nodes within the worklist.
DenseMap<SDNode *, unsigned> WorklistMap;
/// This records all nodes attempted to be added to the worklist since we
/// considered a new worklist entry. Because we do not add duplicate nodes
/// to the worklist, this is different from the tail of the worklist.
SmallSetVector<SDNode *, 32> PruningList;
/// Set of nodes which have been combined (at least once).
///
/// This is used to allow us to reliably add any operands of a DAG node
/// which have not yet been combined to the worklist.
SmallPtrSet<SDNode *, 32> CombinedNodes;
// AA - Used for DAG load/store alias analysis.
AliasAnalysis *AA;
/// When an instruction is simplified, add all users of the instruction to
/// the work lists because they might get more simplified now.
void AddUsersToWorklist(SDNode *N) {
for (SDNode *Node : N->uses())
AddToWorklist(Node);
}
// Prune potentially dangling nodes. This is called after
// any visit to a node, but should also be called during a visit after any
// failed combine which may have created a DAG node.
void clearAddedDanglingWorklistEntries() {
// Check any nodes added to the worklist to see if they are prunable.
while (!PruningList.empty()) {
auto *N = PruningList.pop_back_val();
if (N->use_empty())
recursivelyDeleteUnusedNodes(N);
}
}
SDNode *getNextWorklistEntry() {
// Before we do any work, remove nodes that are not in use.
clearAddedDanglingWorklistEntries();
SDNode *N = nullptr;
// The Worklist holds the SDNodes in order, but it may contain null
// entries.
while (!N && !Worklist.empty()) {
N = Worklist.pop_back_val();
}
if (N) {
bool GoodWorklistEntry = WorklistMap.erase(N);
(void)GoodWorklistEntry;
assert(GoodWorklistEntry &&
"Found a worklist entry without a corresponding map entry!");
}
return N;
}
/// Call the node-specific routine that folds each particular type of node.
SDValue visit(SDNode *N);
public:
DAGCombiner(SelectionDAG &D, AliasAnalysis *AA, CodeGenOpt::Level OL)
: DAG(D), TLI(D.getTargetLoweringInfo()), Level(BeforeLegalizeTypes),
OptLevel(OL), AA(AA) {
ForCodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
MaximumLegalStoreInBits = 0;
for (MVT VT : MVT::all_valuetypes())
if (EVT(VT).isSimple() && VT != MVT::Other &&
TLI.isTypeLegal(EVT(VT)) &&
VT.getSizeInBits() >= MaximumLegalStoreInBits)
MaximumLegalStoreInBits = VT.getSizeInBits();
}
void ConsiderForPruning(SDNode *N) {
// Mark this for potential pruning.
PruningList.insert(N);
}
/// Add to the worklist, making sure its instance is at the back (next to be
/// processed).
void AddToWorklist(SDNode *N) {
assert(N->getOpcode() != ISD::DELETED_NODE &&
"Deleted Node added to Worklist");
// Skip handle nodes as they can't usefully be combined and confuse the
// zero-use deletion strategy.
if (N->getOpcode() == ISD::HANDLENODE)
return;
ConsiderForPruning(N);
if (WorklistMap.insert(std::make_pair(N, Worklist.size())).second)
Worklist.push_back(N);
}
/// Remove all instances of N from the worklist.
void removeFromWorklist(SDNode *N) {
CombinedNodes.erase(N);
PruningList.remove(N);
auto It = WorklistMap.find(N);
if (It == WorklistMap.end())
return; // Not in the worklist.
// Null out the entry rather than erasing it to avoid a linear operation.
Worklist[It->second] = nullptr;
WorklistMap.erase(It);
}
void deleteAndRecombine(SDNode *N);
bool recursivelyDeleteUnusedNodes(SDNode *N);
/// Replaces all uses of the results of one DAG node with new values.
SDValue CombineTo(SDNode *N, const SDValue *To, unsigned NumTo,
bool AddTo = true);
/// Replaces all uses of the results of one DAG node with new values.
SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true) {
return CombineTo(N, &Res, 1, AddTo);
}
/// Replaces all uses of the results of one DAG node with new values.
SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1,
bool AddTo = true) {
SDValue To[] = { Res0, Res1 };
return CombineTo(N, To, 2, AddTo);
}
void CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO);
private:
unsigned MaximumLegalStoreInBits;
/// Check the specified integer node value to see if it can be simplified or
/// if things it uses can be simplified by bit propagation.
/// If so, return true.
bool SimplifyDemandedBits(SDValue Op) {
unsigned BitWidth = Op.getScalarValueSizeInBits();
APInt DemandedBits = APInt::getAllOnesValue(BitWidth);
return SimplifyDemandedBits(Op, DemandedBits);
}
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits) {
EVT VT = Op.getValueType();
unsigned NumElts = VT.isVector() ? VT.getVectorNumElements() : 1;
APInt DemandedElts = APInt::getAllOnesValue(NumElts);
return SimplifyDemandedBits(Op, DemandedBits, DemandedElts);
}
/// Check the specified vector node value to see if it can be simplified or
/// if things it uses can be simplified as it only uses some of the
/// elements. If so, return true.
bool SimplifyDemandedVectorElts(SDValue Op) {
unsigned NumElts = Op.getValueType().getVectorNumElements();
APInt DemandedElts = APInt::getAllOnesValue(NumElts);
return SimplifyDemandedVectorElts(Op, DemandedElts);
}
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
const APInt &DemandedElts);
bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedElts,
bool AssumeSingleUse = false);
bool CombineToPreIndexedLoadStore(SDNode *N);
bool CombineToPostIndexedLoadStore(SDNode *N);
SDValue SplitIndexingFromLoad(LoadSDNode *LD);
bool SliceUpLoad(SDNode *N);
// Scalars have size 0 to distinguish from singleton vectors.
SDValue ForwardStoreValueToDirectLoad(LoadSDNode *LD);
bool getTruncatedStoreValue(StoreSDNode *ST, SDValue &Val);
bool extendLoadedValueToExtension(LoadSDNode *LD, SDValue &Val);
/// Replace an ISD::EXTRACT_VECTOR_ELT of a load with a narrowed
/// load.
///
/// \param EVE ISD::EXTRACT_VECTOR_ELT to be replaced.
/// \param InVecVT type of the input vector to EVE with bitcasts resolved.
/// \param EltNo index of the vector element to load.
/// \param OriginalLoad load that EVE came from to be replaced.
/// \returns EVE on success SDValue() on failure.
SDValue scalarizeExtractedVectorLoad(SDNode *EVE, EVT InVecVT,
SDValue EltNo,
LoadSDNode *OriginalLoad);
void ReplaceLoadWithPromotedLoad(SDNode *Load, SDNode *ExtLoad);
SDValue PromoteOperand(SDValue Op, EVT PVT, bool &Replace);
SDValue SExtPromoteOperand(SDValue Op, EVT PVT);
SDValue ZExtPromoteOperand(SDValue Op, EVT PVT);
SDValue PromoteIntBinOp(SDValue Op);
SDValue PromoteIntShiftOp(SDValue Op);
SDValue PromoteExtend(SDValue Op);
bool PromoteLoad(SDValue Op);
/// Call the node-specific routine that knows how to fold each
/// particular type of node. If that doesn't do anything, try the
/// target-specific DAG combines.
SDValue combine(SDNode *N);
// Visitation implementation - Implement dag node combining for different
// node types. The semantics are as follows:
// Return Value:
// SDValue.getNode() == 0 - No change was made
// SDValue.getNode() == N - N was replaced, is dead and has been handled.
// otherwise - N should be replaced by the returned Operand.
//
SDValue visitTokenFactor(SDNode *N);
SDValue visitMERGE_VALUES(SDNode *N);
SDValue visitADD(SDNode *N);
SDValue visitADDLike(SDNode *N);
SDValue visitADDLikeCommutative(SDValue N0, SDValue N1, SDNode *LocReference);
SDValue visitSUB(SDNode *N);
SDValue visitADDSAT(SDNode *N);
SDValue visitSUBSAT(SDNode *N);
SDValue visitADDC(SDNode *N);
SDValue visitADDO(SDNode *N);
SDValue visitUADDOLike(SDValue N0, SDValue N1, SDNode *N);
SDValue visitSUBC(SDNode *N);
SDValue visitSUBO(SDNode *N);
SDValue visitADDE(SDNode *N);
SDValue visitADDCARRY(SDNode *N);
SDValue visitADDCARRYLike(SDValue N0, SDValue N1, SDValue CarryIn, SDNode *N);
SDValue visitSUBE(SDNode *N);
SDValue visitSUBCARRY(SDNode *N);
SDValue visitMUL(SDNode *N);
SDValue useDivRem(SDNode *N);
SDValue visitSDIV(SDNode *N);
SDValue visitSDIVLike(SDValue N0, SDValue N1, SDNode *N);
SDValue visitUDIV(SDNode *N);
SDValue visitUDIVLike(SDValue N0, SDValue N1, SDNode *N);
SDValue visitREM(SDNode *N);
SDValue visitMULHU(SDNode *N);
SDValue visitMULHS(SDNode *N);
SDValue visitSMUL_LOHI(SDNode *N);
SDValue visitUMUL_LOHI(SDNode *N);
SDValue visitMULO(SDNode *N);
SDValue visitIMINMAX(SDNode *N);
SDValue visitAND(SDNode *N);
SDValue visitANDLike(SDValue N0, SDValue N1, SDNode *N);
SDValue visitOR(SDNode *N);
SDValue visitORLike(SDValue N0, SDValue N1, SDNode *N);
SDValue visitXOR(SDNode *N);
SDValue SimplifyVBinOp(SDNode *N);
SDValue visitSHL(SDNode *N);
SDValue visitSRA(SDNode *N);
SDValue visitSRL(SDNode *N);
SDValue visitFunnelShift(SDNode *N);
SDValue visitRotate(SDNode *N);
SDValue visitABS(SDNode *N);
SDValue visitBSWAP(SDNode *N);
SDValue visitBITREVERSE(SDNode *N);
SDValue visitCTLZ(SDNode *N);
SDValue visitCTLZ_ZERO_UNDEF(SDNode *N);
SDValue visitCTTZ(SDNode *N);
SDValue visitCTTZ_ZERO_UNDEF(SDNode *N);
SDValue visitCTPOP(SDNode *N);
SDValue visitSELECT(SDNode *N);
SDValue visitVSELECT(SDNode *N);
SDValue visitSELECT_CC(SDNode *N);
SDValue visitSETCC(SDNode *N);
SDValue visitSETCCCARRY(SDNode *N);
SDValue visitSIGN_EXTEND(SDNode *N);
SDValue visitZERO_EXTEND(SDNode *N);
SDValue visitANY_EXTEND(SDNode *N);
SDValue visitAssertExt(SDNode *N);
SDValue visitSIGN_EXTEND_INREG(SDNode *N);
SDValue visitSIGN_EXTEND_VECTOR_INREG(SDNode *N);
SDValue visitZERO_EXTEND_VECTOR_INREG(SDNode *N);
SDValue visitTRUNCATE(SDNode *N);
SDValue visitBITCAST(SDNode *N);
SDValue visitBUILD_PAIR(SDNode *N);
SDValue visitFADD(SDNode *N);
SDValue visitFSUB(SDNode *N);
SDValue visitFMUL(SDNode *N);
SDValue visitFMA(SDNode *N);
SDValue visitFDIV(SDNode *N);
SDValue visitFREM(SDNode *N);
SDValue visitFSQRT(SDNode *N);
SDValue visitFCOPYSIGN(SDNode *N);
SDValue visitFPOW(SDNode *N);
SDValue visitSINT_TO_FP(SDNode *N);
SDValue visitUINT_TO_FP(SDNode *N);
SDValue visitFP_TO_SINT(SDNode *N);
SDValue visitFP_TO_UINT(SDNode *N);
SDValue visitFP_ROUND(SDNode *N);
SDValue visitFP_ROUND_INREG(SDNode *N);
SDValue visitFP_EXTEND(SDNode *N);
SDValue visitFNEG(SDNode *N);
SDValue visitFABS(SDNode *N);
SDValue visitFCEIL(SDNode *N);
SDValue visitFTRUNC(SDNode *N);
SDValue visitFFLOOR(SDNode *N);
SDValue visitFMINNUM(SDNode *N);
SDValue visitFMAXNUM(SDNode *N);
SDValue visitFMINIMUM(SDNode *N);
SDValue visitFMAXIMUM(SDNode *N);
SDValue visitBRCOND(SDNode *N);
SDValue visitBR_CC(SDNode *N);
SDValue visitLOAD(SDNode *N);
SDValue replaceStoreChain(StoreSDNode *ST, SDValue BetterChain);
SDValue replaceStoreOfFPConstant(StoreSDNode *ST);
SDValue visitSTORE(SDNode *N);
SDValue visitLIFETIME_END(SDNode *N);
SDValue visitINSERT_VECTOR_ELT(SDNode *N);
SDValue visitEXTRACT_VECTOR_ELT(SDNode *N);
SDValue visitBUILD_VECTOR(SDNode *N);
SDValue visitCONCAT_VECTORS(SDNode *N);
SDValue visitEXTRACT_SUBVECTOR(SDNode *N);
SDValue visitVECTOR_SHUFFLE(SDNode *N);
SDValue visitSCALAR_TO_VECTOR(SDNode *N);
SDValue visitINSERT_SUBVECTOR(SDNode *N);
SDValue visitMLOAD(SDNode *N);
SDValue visitMSTORE(SDNode *N);
SDValue visitMGATHER(SDNode *N);
SDValue visitMSCATTER(SDNode *N);
SDValue visitFP_TO_FP16(SDNode *N);
SDValue visitFP16_TO_FP(SDNode *N);
SDValue visitVECREDUCE(SDNode *N);
SDValue visitFADDForFMACombine(SDNode *N);
SDValue visitFSUBForFMACombine(SDNode *N);
SDValue visitFMULForFMADistributiveCombine(SDNode *N);
SDValue XformToShuffleWithZero(SDNode *N);
bool reassociationCanBreakAddressingModePattern(unsigned Opc,
const SDLoc &DL, SDValue N0,
SDValue N1);
SDValue reassociateOpsCommutative(unsigned Opc, const SDLoc &DL, SDValue N0,
SDValue N1);
SDValue reassociateOps(unsigned Opc, const SDLoc &DL, SDValue N0,
SDValue N1, SDNodeFlags Flags);
SDValue visitShiftByConstant(SDNode *N, ConstantSDNode *Amt);
SDValue foldSelectOfConstants(SDNode *N);
SDValue foldVSelectOfConstants(SDNode *N);
SDValue foldBinOpIntoSelect(SDNode *BO);
bool SimplifySelectOps(SDNode *SELECT, SDValue LHS, SDValue RHS);
SDValue hoistLogicOpWithSameOpcodeHands(SDNode *N);
SDValue SimplifySelect(const SDLoc &DL, SDValue N0, SDValue N1, SDValue N2);
SDValue SimplifySelectCC(const SDLoc &DL, SDValue N0, SDValue N1,
SDValue N2, SDValue N3, ISD::CondCode CC,
bool NotExtCompare = false);
SDValue convertSelectOfFPConstantsToLoadOffset(
const SDLoc &DL, SDValue N0, SDValue N1, SDValue N2, SDValue N3,
ISD::CondCode CC);
SDValue foldSelectCCToShiftAnd(const SDLoc &DL, SDValue N0, SDValue N1,
SDValue N2, SDValue N3, ISD::CondCode CC);
SDValue foldLogicOfSetCCs(bool IsAnd, SDValue N0, SDValue N1,
const SDLoc &DL);
SDValue unfoldMaskedMerge(SDNode *N);
SDValue unfoldExtremeBitClearingToShifts(SDNode *N);
SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
const SDLoc &DL, bool foldBooleans);
SDValue rebuildSetCC(SDValue N);
bool isSetCCEquivalent(SDValue N, SDValue &LHS, SDValue &RHS,
SDValue &CC) const;
bool isOneUseSetCC(SDValue N) const;
SDValue SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
unsigned HiOp);
SDValue CombineConsecutiveLoads(SDNode *N, EVT VT);
SDValue CombineExtLoad(SDNode *N);
SDValue CombineZExtLogicopShiftLoad(SDNode *N);
SDValue combineRepeatedFPDivisors(SDNode *N);
SDValue combineInsertEltToShuffle(SDNode *N, unsigned InsIndex);
SDValue ConstantFoldBITCASTofBUILD_VECTOR(SDNode *, EVT);
SDValue BuildSDIV(SDNode *N);
SDValue BuildSDIVPow2(SDNode *N);
SDValue BuildUDIV(SDNode *N);
SDValue BuildLogBase2(SDValue V, const SDLoc &DL);
SDValue BuildReciprocalEstimate(SDValue Op, SDNodeFlags Flags);
SDValue buildRsqrtEstimate(SDValue Op, SDNodeFlags Flags);
SDValue buildSqrtEstimate(SDValue Op, SDNodeFlags Flags);
SDValue buildSqrtEstimateImpl(SDValue Op, SDNodeFlags Flags, bool Recip);
SDValue buildSqrtNROneConst(SDValue Arg, SDValue Est, unsigned Iterations,
SDNodeFlags Flags, bool Reciprocal);
SDValue buildSqrtNRTwoConst(SDValue Arg, SDValue Est, unsigned Iterations,
SDNodeFlags Flags, bool Reciprocal);
SDValue MatchBSwapHWordLow(SDNode *N, SDValue N0, SDValue N1,
bool DemandHighBits = true);
SDValue MatchBSwapHWord(SDNode *N, SDValue N0, SDValue N1);
SDNode *MatchRotatePosNeg(SDValue Shifted, SDValue Pos, SDValue Neg,
SDValue InnerPos, SDValue InnerNeg,
unsigned PosOpcode, unsigned NegOpcode,
const SDLoc &DL);
SDNode *MatchRotate(SDValue LHS, SDValue RHS, const SDLoc &DL);
SDValue MatchLoadCombine(SDNode *N);
SDValue MatchStoreCombine(StoreSDNode *N);
SDValue ReduceLoadWidth(SDNode *N);
SDValue ReduceLoadOpStoreWidth(SDNode *N);
SDValue splitMergedValStore(StoreSDNode *ST);
SDValue TransformFPLoadStorePair(SDNode *N);
SDValue convertBuildVecZextToZext(SDNode *N);
SDValue reduceBuildVecExtToExtBuildVec(SDNode *N);
SDValue reduceBuildVecToShuffle(SDNode *N);
SDValue createBuildVecShuffle(const SDLoc &DL, SDNode *N,
ArrayRef<int> VectorMask, SDValue VecIn1,
SDValue VecIn2, unsigned LeftIdx,
bool DidSplitVec);
SDValue matchVSelectOpSizesWithSetCC(SDNode *Cast);
/// Walk up chain skipping non-aliasing memory nodes,
/// looking for aliasing nodes and adding them to the Aliases vector.
void GatherAllAliases(SDNode *N, SDValue OriginalChain,
SmallVectorImpl<SDValue> &Aliases);
/// Return true if there is any possibility that the two addresses overlap.
bool isAlias(SDNode *Op0, SDNode *Op1) const;
/// Walk up chain skipping non-aliasing memory nodes, looking for a better
/// chain (aliasing node.)
SDValue FindBetterChain(SDNode *N, SDValue Chain);
/// Try to replace a store and any possibly adjacent stores on
/// consecutive chains with better chains. Return true only if St is
/// replaced.
///
/// Notice that other chains may still be replaced even if the function
/// returns false.
bool findBetterNeighborChains(StoreSDNode *St);
// Helper for findBetterNeighborChains. Walk up the store chain and add
// additional chained stores that do not overlap and can be parallelized.
bool parallelizeChainedStores(StoreSDNode *St);
/// Holds a pointer to an LSBaseSDNode as well as information on where it
/// is located in a sequence of memory operations connected by a chain.
struct MemOpLink {
// Ptr to the mem node.
LSBaseSDNode *MemNode;
// Offset from the base ptr.
int64_t OffsetFromBase;
MemOpLink(LSBaseSDNode *N, int64_t Offset)
: MemNode(N), OffsetFromBase(Offset) {}
};
/// This is a helper function for visitMUL to check the profitability
/// of folding (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2).
/// MulNode is the original multiply, AddNode is (add x, c1),
/// and ConstNode is c2.
bool isMulAddWithConstProfitable(SDNode *MulNode,
SDValue &AddNode,
SDValue &ConstNode);
/// This is a helper function for visitAND and visitZERO_EXTEND. Returns
/// true if the (and (load x) c) pattern matches an extload. ExtVT returns
/// the type of the loaded value to be extended.
bool isAndLoadExtLoad(ConstantSDNode *AndC, LoadSDNode *LoadN,
EVT LoadResultTy, EVT &ExtVT);
/// Helper function to calculate whether the given Load/Store can have its
/// width reduced to ExtVT.
bool isLegalNarrowLdSt(LSBaseSDNode *LDSTN, ISD::LoadExtType ExtType,
EVT &MemVT, unsigned ShAmt = 0);
/// Used by BackwardsPropagateMask to find suitable loads.
bool SearchForAndLoads(SDNode *N, SmallVectorImpl<LoadSDNode*> &Loads,
SmallPtrSetImpl<SDNode*> &NodesWithConsts,
ConstantSDNode *Mask, SDNode *&NodeToMask);
/// Attempt to propagate a given AND node back to load leaves so that they
/// can be combined into narrow loads.
bool BackwardsPropagateMask(SDNode *N, SelectionDAG &DAG);
/// Helper function for MergeConsecutiveStores which merges the
/// component store chains.
SDValue getMergeStoreChains(SmallVectorImpl<MemOpLink> &StoreNodes,
unsigned NumStores);
/// This is a helper function for MergeConsecutiveStores. When the
/// source elements of the consecutive stores are all constants or
/// all extracted vector elements, try to merge them into one
/// larger store introducing bitcasts if necessary. \return True
/// if a merged store was created.
bool MergeStoresOfConstantsOrVecElts(SmallVectorImpl<MemOpLink> &StoreNodes,
EVT MemVT, unsigned NumStores,
bool IsConstantSrc, bool UseVector,
bool UseTrunc);
/// This is a helper function for MergeConsecutiveStores. Stores
/// that potentially may be merged with St are placed in
/// StoreNodes. RootNode is a chain predecessor to all store
/// candidates.
void getStoreMergeCandidates(StoreSDNode *St,
SmallVectorImpl<MemOpLink> &StoreNodes,
SDNode *&Root);
/// Helper function for MergeConsecutiveStores. Checks if
/// candidate stores have indirect dependency through their
/// operands. RootNode is the predecessor to all stores calculated
/// by getStoreMergeCandidates and is used to prune the dependency check.
/// \return True if safe to merge.
bool checkMergeStoreCandidatesForDependencies(
SmallVectorImpl<MemOpLink> &StoreNodes, unsigned NumStores,
SDNode *RootNode);
/// Merge consecutive store operations into a wide store.
/// This optimization uses wide integers or vectors when possible.
/// \return true if stores were merged into one or more wider stores.
bool MergeConsecutiveStores(StoreSDNode *St);
/// Try to transform a truncation where C is a constant:
/// (trunc (and X, C)) -> (and (trunc X), (trunc C))
///
/// \p N needs to be a truncation and its first operand an AND. Other
/// requirements are checked by the function (e.g. that trunc is
/// single-use) and if missed an empty SDValue is returned.
SDValue distributeTruncateThroughAnd(SDNode *N);
/// Helper function to determine whether the target supports operation
/// given by \p Opcode for type \p VT, that is, whether the operation
/// is legal or custom before legalizing operations, and whether is
/// legal (but not custom) after legalization.
bool hasOperation(unsigned Opcode, EVT VT) {
if (LegalOperations)
return TLI.isOperationLegal(Opcode, VT);
return TLI.isOperationLegalOrCustom(Opcode, VT);
}
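// Editor's example: before operation legalization, hasOperation(ISD::FSHL, VT)
// is true when the target marks funnel shifts either Legal or Custom for VT,
// so combines may form them; after legalization only fully Legal ops qualify.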
public:
/// Runs the dag combiner on all nodes in the work list
void Run(CombineLevel AtLevel);
SelectionDAG &getDAG() const { return DAG; }
/// Returns a type large enough to hold any valid shift amount - before type
/// legalization these can be huge.
EVT getShiftAmountTy(EVT LHSTy) {
assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
return TLI.getShiftAmountTy(LHSTy, DAG.getDataLayout(), LegalTypes);
}
/// This method returns true if we are running before type legalization or
/// if the specified VT is legal.
bool isTypeLegal(const EVT &VT) {
if (!LegalTypes) return true;
return TLI.isTypeLegal(VT);
}
/// Convenience wrapper around TargetLowering::getSetCCResultType
EVT getSetCCResultType(EVT VT) const {
return TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
}
void ExtendSetCCUses(const SmallVectorImpl<SDNode *> &SetCCs,
SDValue OrigLoad, SDValue ExtLoad,
ISD::NodeType ExtType);
};
/// This class is a DAGUpdateListener that removes any deleted
/// nodes from the worklist.
class WorklistRemover : public SelectionDAG::DAGUpdateListener {
DAGCombiner &DC;
public:
explicit WorklistRemover(DAGCombiner &dc)
: SelectionDAG::DAGUpdateListener(dc.getDAG()), DC(dc) {}
void NodeDeleted(SDNode *N, SDNode *E) override {
DC.removeFromWorklist(N);
}
};
class WorklistInserter : public SelectionDAG::DAGUpdateListener {
DAGCombiner &DC;
public:
explicit WorklistInserter(DAGCombiner &dc)
: SelectionDAG::DAGUpdateListener(dc.getDAG()), DC(dc) {}
// FIXME: Ideally we could add N to the worklist, but this causes exponential
// compile time costs in large DAGs, e.g. Halide.
void NodeInserted(SDNode *N) override { DC.ConsiderForPruning(N); }
};
} // end anonymous namespace
//===----------------------------------------------------------------------===//
// TargetLowering::DAGCombinerInfo implementation
//===----------------------------------------------------------------------===//
void TargetLowering::DAGCombinerInfo::AddToWorklist(SDNode *N) {
((DAGCombiner*)DC)->AddToWorklist(N);
}
SDValue TargetLowering::DAGCombinerInfo::
CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo) {
return ((DAGCombiner*)DC)->CombineTo(N, &To[0], To.size(), AddTo);
}
SDValue TargetLowering::DAGCombinerInfo::
CombineTo(SDNode *N, SDValue Res, bool AddTo) {
return ((DAGCombiner*)DC)->CombineTo(N, Res, AddTo);
}
SDValue TargetLowering::DAGCombinerInfo::
CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo) {
return ((DAGCombiner*)DC)->CombineTo(N, Res0, Res1, AddTo);
}
void TargetLowering::DAGCombinerInfo::
CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) {
return ((DAGCombiner*)DC)->CommitTargetLoweringOpt(TLO);
}
//===----------------------------------------------------------------------===//
// Helper Functions
//===----------------------------------------------------------------------===//
void DAGCombiner::deleteAndRecombine(SDNode *N) {
removeFromWorklist(N);
// If the operands of this node are only used by the node, they will now be
// dead. Make sure to re-visit them and recursively delete dead nodes.
for (const SDValue &Op : N->ops())
// For an operand generating multiple values, one of the values may
// become dead allowing further simplification (e.g. split index
// arithmetic from an indexed load).
if (Op->hasOneUse() || Op->getNumValues() > 1)
AddToWorklist(Op.getNode());
DAG.DeleteNode(N);
}
/// Return 1 if we can compute the negated form of the specified expression
/// for the same cost as the expression itself, 2 if we can compute it more
/// cheaply than the expression itself, and 0 if negation is not free.
static char isNegatibleForFree(SDValue Op, bool LegalOperations,
const TargetLowering &TLI,
const TargetOptions *Options,
bool ForCodeSize,
unsigned Depth = 0) {
// fneg is removable even if it has multiple uses.
if (Op.getOpcode() == ISD::FNEG)
return 2;
// Don't allow anything with multiple uses unless we know it is free.
EVT VT = Op.getValueType();
const SDNodeFlags Flags = Op->getFlags();
if (!Op.hasOneUse() &&
!(Op.getOpcode() == ISD::FP_EXTEND &&
TLI.isFPExtFree(VT, Op.getOperand(0).getValueType())))
return 0;
// Don't recurse exponentially.
if (Depth > 6)
return 0;
switch (Op.getOpcode()) {
default: return 0;
case ISD::ConstantFP: {
if (!LegalOperations)
return 1;
// Don't invert constant FP values after legalization unless the target says
// the negated constant is legal.
return TLI.isOperationLegal(ISD::ConstantFP, VT) ||
TLI.isFPImmLegal(neg(cast<ConstantFPSDNode>(Op)->getValueAPF()), VT,
ForCodeSize);
}
case ISD::BUILD_VECTOR: {
// Only permit BUILD_VECTOR of constants.
if (llvm::any_of(Op->op_values(), [&](SDValue N) {
return !N.isUndef() && !isa<ConstantFPSDNode>(N);
}))
return 0;
if (!LegalOperations)
return 1;
if (TLI.isOperationLegal(ISD::ConstantFP, VT) &&
TLI.isOperationLegal(ISD::BUILD_VECTOR, VT))
return 1;
return llvm::all_of(Op->op_values(), [&](SDValue N) {
return N.isUndef() ||
TLI.isFPImmLegal(neg(cast<ConstantFPSDNode>(N)->getValueAPF()), VT,
ForCodeSize);
});
}
case ISD::FADD:
if (!Options->UnsafeFPMath && !Flags.hasNoSignedZeros())
return 0;
// After operation legalization, it might not be legal to create new FSUBs.
if (LegalOperations && !TLI.isOperationLegalOrCustom(ISD::FSUB, VT))
return 0;
// fold (fneg (fadd A, B)) -> (fsub (fneg A), B)
if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, TLI,
Options, ForCodeSize, Depth + 1))
return V;
// fold (fneg (fadd A, B)) -> (fsub (fneg B), A)
return isNegatibleForFree(Op.getOperand(1), LegalOperations, TLI, Options,
ForCodeSize, Depth + 1);
case ISD::FSUB:
// We can't turn -(A-B) into B-A when we honor signed zeros.
if (!Options->NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
return 0;
// fold (fneg (fsub A, B)) -> (fsub B, A)
return 1;
case ISD::FMUL:
case ISD::FDIV:
// fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y) or (fmul X, (fneg Y))
if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, TLI,
Options, ForCodeSize, Depth + 1))
return V;
return isNegatibleForFree(Op.getOperand(1), LegalOperations, TLI, Options,
ForCodeSize, Depth + 1);
case ISD::FP_EXTEND:
case ISD::FP_ROUND:
case ISD::FSIN:
return isNegatibleForFree(Op.getOperand(0), LegalOperations, TLI, Options,
ForCodeSize, Depth + 1);
}
}
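// Example of the cost codes above (editor's note): (fneg X) yields 2 because
// negating it simply strips the FNEG, while (fsub A, B) yields 1 when signed
// zeros can be ignored, since the negation (fsub B, A) costs the same.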
/// If isNegatibleForFree returns a nonzero value, return the newly negated expression.
static SDValue GetNegatedExpression(SDValue Op, SelectionDAG &DAG,
bool LegalOperations, bool ForCodeSize,
unsigned Depth = 0) {
// fneg is removable even if it has multiple uses.
if (Op.getOpcode() == ISD::FNEG)
return Op.getOperand(0);
assert(Depth <= 6 && "GetNegatedExpression doesn't match isNegatibleForFree");
const TargetOptions &Options = DAG.getTarget().Options;
const SDNodeFlags Flags = Op->getFlags();
switch (Op.getOpcode()) {
default: llvm_unreachable("Unknown code");
case ISD::ConstantFP: {
APFloat V = cast<ConstantFPSDNode>(Op)->getValueAPF();
V.changeSign();
return DAG.getConstantFP(V, SDLoc(Op), Op.getValueType());
}
case ISD::BUILD_VECTOR: {
SmallVector<SDValue, 4> Ops;
for (SDValue C : Op->op_values()) {
if (C.isUndef()) {
Ops.push_back(C);
continue;
}
APFloat V = cast<ConstantFPSDNode>(C)->getValueAPF();
V.changeSign();
Ops.push_back(DAG.getConstantFP(V, SDLoc(Op), C.getValueType()));
}
return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Ops);
}
case ISD::FADD:
assert(Options.UnsafeFPMath || Flags.hasNoSignedZeros());
// fold (fneg (fadd A, B)) -> (fsub (fneg A), B)
if (isNegatibleForFree(Op.getOperand(0), LegalOperations,
DAG.getTargetLoweringInfo(), &Options, ForCodeSize,
Depth + 1))
return DAG.getNode(ISD::FSUB, SDLoc(Op), Op.getValueType(),
GetNegatedExpression(Op.getOperand(0), DAG,
LegalOperations, ForCodeSize,
Depth + 1),
Op.getOperand(1), Flags);
// fold (fneg (fadd A, B)) -> (fsub (fneg B), A)
return DAG.getNode(ISD::FSUB, SDLoc(Op), Op.getValueType(),
GetNegatedExpression(Op.getOperand(1), DAG,
LegalOperations, ForCodeSize,
Depth + 1),
Op.getOperand(0), Flags);
case ISD::FSUB:
// fold (fneg (fsub 0, B)) -> B
if (ConstantFPSDNode *N0CFP =
isConstOrConstSplatFP(Op.getOperand(0), /*AllowUndefs*/ true))
if (N0CFP->isZero())
return Op.getOperand(1);
// fold (fneg (fsub A, B)) -> (fsub B, A)
return DAG.getNode(ISD::FSUB, SDLoc(Op), Op.getValueType(),
Op.getOperand(1), Op.getOperand(0), Flags);
case ISD::FMUL:
case ISD::FDIV:
// fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y)
if (isNegatibleForFree(Op.getOperand(0), LegalOperations,
DAG.getTargetLoweringInfo(), &Options, ForCodeSize,
Depth + 1))
return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(),
GetNegatedExpression(Op.getOperand(0), DAG,
LegalOperations, ForCodeSize,
Depth + 1),
Op.getOperand(1), Flags);
// fold (fneg (fmul X, Y)) -> (fmul X, (fneg Y))
return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(),
Op.getOperand(0),
GetNegatedExpression(Op.getOperand(1), DAG,
LegalOperations, ForCodeSize,
Depth + 1), Flags);
case ISD::FP_EXTEND:
case ISD::FSIN:
return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(),
GetNegatedExpression(Op.getOperand(0), DAG,
LegalOperations, ForCodeSize,
Depth + 1));
case ISD::FP_ROUND:
return DAG.getNode(ISD::FP_ROUND, SDLoc(Op), Op.getValueType(),
GetNegatedExpression(Op.getOperand(0), DAG,
LegalOperations, ForCodeSize,
Depth + 1),
Op.getOperand(1));
}
}
// APInts must be the same size for most operations; this helper function
// zero-extends the shorter of the pair so that they match.
// We provide an Offset so that we can create bit widths that won't overflow.
static void zeroExtendToMatch(APInt &LHS, APInt &RHS, unsigned Offset = 0) {
unsigned Bits = Offset + std::max(LHS.getBitWidth(), RHS.getBitWidth());
LHS = LHS.zextOrSelf(Bits);
RHS = RHS.zextOrSelf(Bits);
}
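// Worked example (editor's illustration): with an 8-bit LHS, a 16-bit RHS,
// and Offset = 1, both values are zero-extended to 17 bits, leaving one bit
// of headroom so a following shift or add cannot overflow.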
// Return true if this node is a setcc, or is a select_cc
// that selects between the target values used for true and false, making it
// equivalent to a setcc. Also, set the incoming LHS, RHS, and CC references to
// the appropriate nodes based on the type of node we are checking. This
// simplifies life a bit for the callers.
bool DAGCombiner::isSetCCEquivalent(SDValue N, SDValue &LHS, SDValue &RHS,
SDValue &CC) const {
if (N.getOpcode() == ISD::SETCC) {
LHS = N.getOperand(0);
RHS = N.getOperand(1);
CC = N.getOperand(2);
return true;
}
if (N.getOpcode() != ISD::SELECT_CC ||
!TLI.isConstTrueVal(N.getOperand(2).getNode()) ||
!TLI.isConstFalseVal(N.getOperand(3).getNode()))
return false;
if (TLI.getBooleanContents(N.getValueType()) ==
TargetLowering::UndefinedBooleanContent)
return false;
LHS = N.getOperand(0);
RHS = N.getOperand(1);
CC = N.getOperand(4);
return true;
}
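// Example (editor's note): on a target whose boolean contents are known,
// (select_cc lhs, rhs, TrueVal, FalseVal, cc) with the target's canonical
// true/false constants is treated exactly like (setcc lhs, rhs, cc), with
// the condition taken from operand 4 instead of operand 2.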
/// Return true if this is a SetCC-equivalent operation with only one use.
/// If this is true, it allows the users to invert the operation for free when
/// it is profitable to do so.
bool DAGCombiner::isOneUseSetCC(SDValue N) const {
SDValue N0, N1, N2;
if (isSetCCEquivalent(N, N0, N1, N2) && N.getNode()->hasOneUse())
return true;
return false;
}
// Returns the SDNode if it is a constant float BuildVector
// or constant float.
static SDNode *isConstantFPBuildVectorOrConstantFP(SDValue N) {
if (isa<ConstantFPSDNode>(N))
return N.getNode();
if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode()))
return N.getNode();
return nullptr;
}
// Determines if it is a constant integer or a build vector of constant
// integers (and undefs).
// Do not permit build vector implicit truncation.
static bool isConstantOrConstantVector(SDValue N, bool NoOpaques = false) {
if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N))
return !(Const->isOpaque() && NoOpaques);
if (N.getOpcode() != ISD::BUILD_VECTOR)
return false;
unsigned BitWidth = N.getScalarValueSizeInBits();
for (const SDValue &Op : N->op_values()) {
if (Op.isUndef())
continue;
ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Op);
if (!Const || Const->getAPIntValue().getBitWidth() != BitWidth ||
(Const->isOpaque() && NoOpaques))
return false;
}
return true;
}
// Determines if a BUILD_VECTOR is composed of all constants, possibly mixed
// with undefs.
static bool isAnyConstantBuildVector(SDValue V, bool NoOpaques = false) {
if (V.getOpcode() != ISD::BUILD_VECTOR)
return false;
return isConstantOrConstantVector(V, NoOpaques) ||
ISD::isBuildVectorOfConstantFPSDNodes(V.getNode());
}
bool DAGCombiner::reassociationCanBreakAddressingModePattern(unsigned Opc,
const SDLoc &DL,
SDValue N0,
SDValue N1) {
// Currently this only tries to ensure we don't undo the GEP splits done by
// CodeGenPrepare when shouldConsiderGEPOffsetSplit is true. To ensure this,
// we check if the following transformation would be problematic:
// (load/store (add, (add, x, offset1), offset2)) ->
// (load/store (add, x, offset1+offset2)).
if (Opc != ISD::ADD || N0.getOpcode() != ISD::ADD)
return false;
if (N0.hasOneUse())
return false;
auto *C1 = dyn_cast<ConstantSDNode>(N0.getOperand(1));
auto *C2 = dyn_cast<ConstantSDNode>(N1);
if (!C1 || !C2)
return false;
const APInt &C1APIntVal = C1->getAPIntValue();
const APInt &C2APIntVal = C2->getAPIntValue();
if (C1APIntVal.getBitWidth() > 64 || C2APIntVal.getBitWidth() > 64)
return false;
const APInt CombinedValueIntVal = C1APIntVal + C2APIntVal;
if (CombinedValueIntVal.getBitWidth() > 64)
return false;
const int64_t CombinedValue = CombinedValueIntVal.getSExtValue();
for (SDNode *Node : N0->uses()) {
auto LoadStore = dyn_cast<MemSDNode>(Node);
if (LoadStore) {
// Is x[offset2] already not a legal addressing mode? If so then
// reassociating the constants breaks nothing (we test offset2 because
// that's the one we hope to fold into the load or store).
TargetLoweringBase::AddrMode AM;
AM.HasBaseReg = true;
AM.BaseOffs = C2APIntVal.getSExtValue();
EVT VT = LoadStore->getMemoryVT();
unsigned AS = LoadStore->getAddressSpace();
Type *AccessTy = VT.getTypeForEVT(*DAG.getContext());
if (!TLI.isLegalAddressingMode(DAG.getDataLayout(), AM, AccessTy, AS))
continue;
// Would x[offset1+offset2] still be a legal addressing mode?
AM.BaseOffs = CombinedValue;
if (!TLI.isLegalAddressingMode(DAG.getDataLayout(), AM, AccessTy, AS))
return true;
}
}
return false;
}
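// Concrete scenario (editor's illustration): if CodeGenPrepare split a GEP
// so that (add x, offset1) is shared by several memory operations, folding
//   (load (add (add x, offset1), offset2))
// to (load (add x, offset1+offset2)) may produce an offset that no longer
// fits the addressing mode; the loop above returns true exactly when
// offset2 is legal for some user but offset1+offset2 is not.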
// Helper for DAGCombiner::reassociateOps. Try to reassociate an expression
// such as (Opc N0, N1), if \p N0 is the same kind of operation as \p Opc.
SDValue DAGCombiner::reassociateOpsCommutative(unsigned Opc, const SDLoc &DL,
SDValue N0, SDValue N1) {
EVT VT = N0.getValueType();
if (N0.getOpcode() != Opc)
return SDValue();
// Don't reassociate reductions.
if (N0->getFlags().hasVectorReduction())
return SDValue();
if (SDNode *C1 = DAG.isConstantIntBuildVectorOrConstantInt(N0.getOperand(1))) {
if (SDNode *C2 = DAG.isConstantIntBuildVectorOrConstantInt(N1)) {
// Reassociate: (op (op x, c1), c2) -> (op x, (op c1, c2))
if (SDValue OpNode = DAG.FoldConstantArithmetic(Opc, DL, VT, C1, C2))
return DAG.getNode(Opc, DL, VT, N0.getOperand(0), OpNode);
return SDValue();
}
if (N0.hasOneUse()) {
// Reassociate: (op (op x, c1), y) -> (op (op x, y), c1)
// iff (op x, c1) has one use
SDValue OpNode = DAG.getNode(Opc, SDLoc(N0), VT, N0.getOperand(0), N1);
if (!OpNode.getNode())
return SDValue();
AddToWorklist(OpNode.getNode());
return DAG.getNode(Opc, DL, VT, OpNode, N0.getOperand(1));
}
}
return SDValue();
}
// Try to reassociate commutative binops.
SDValue DAGCombiner::reassociateOps(unsigned Opc, const SDLoc &DL, SDValue N0,
SDValue N1, SDNodeFlags Flags) {
assert(TLI.isCommutativeBinOp(Opc) && "Operation not commutative.");
// Don't reassociate reductions.
if (Flags.hasVectorReduction())
return SDValue();
// Floating-point reassociation is not allowed without loose FP math.
if (N0.getValueType().isFloatingPoint() ||
N1.getValueType().isFloatingPoint())
if (!Flags.hasAllowReassociation() || !Flags.hasNoSignedZeros())
return SDValue();
if (SDValue Combined = reassociateOpsCommutative(Opc, DL, N0, N1))
return Combined;
if (SDValue Combined = reassociateOpsCommutative(Opc, DL, N1, N0))
return Combined;
return SDValue();
}
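// Example folds performed through the helpers above (editor's note):
//   (add (add x, c1), c2) -> (add x, c1+c2)
//   (add (add x, c1), y)  -> (add (add x, y), c1)  iff (add x, c1) has one use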
SDValue DAGCombiner::CombineTo(SDNode *N, const SDValue *To, unsigned NumTo,
bool AddTo) {
assert(N->getNumValues() == NumTo && "Broken CombineTo call!");
++NodesCombined;
LLVM_DEBUG(dbgs() << "\nReplacing.1 "; N->dump(&DAG); dbgs() << "\nWith: ";
To[0].getNode()->dump(&DAG);
dbgs() << " and " << NumTo - 1 << " other values\n");
for (unsigned i = 0, e = NumTo; i != e; ++i)
assert((!To[i].getNode() ||
N->getValueType(i) == To[i].getValueType()) &&
"Cannot combine value to value of different type!");
WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesWith(N, To);
if (AddTo) {
// Push the new nodes and any users onto the worklist
for (unsigned i = 0, e = NumTo; i != e; ++i) {
if (To[i].getNode()) {
AddToWorklist(To[i].getNode());
AddUsersToWorklist(To[i].getNode());
}
}
}
// Finally, if the node is now dead, remove it from the graph. The node
// may not be dead if the replacement process recursively simplified to
// something else needing this node.
if (N->use_empty())
deleteAndRecombine(N);
return SDValue(N, 0);
}
void DAGCombiner::
CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) {
// Replace all uses. If any nodes become isomorphic to other nodes and
// are deleted, make sure to remove them from our worklist.
WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesOfValueWith(TLO.Old, TLO.New);
// Push the new node and any (possibly new) users onto the worklist.
AddToWorklist(TLO.New.getNode());
AddUsersToWorklist(TLO.New.getNode());
// Finally, if the node is now dead, remove it from the graph. The node
// may not be dead if the replacement process recursively simplified to
// something else needing this node.
if (TLO.Old.getNode()->use_empty())
deleteAndRecombine(TLO.Old.getNode());
}
/// Check the specified integer node value to see if it can be simplified or if
/// things it uses can be simplified by bit propagation. If so, return true.
bool DAGCombiner::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
const APInt &DemandedElts) {
TargetLowering::TargetLoweringOpt TLO(DAG, LegalTypes, LegalOperations);
KnownBits Known;
if (!TLI.SimplifyDemandedBits(Op, DemandedBits, DemandedElts, Known, TLO))
return false;
// Revisit the node.
AddToWorklist(Op.getNode());
// Replace the old value with the new one.
++NodesCombined;
LLVM_DEBUG(dbgs() << "\nReplacing.2 "; TLO.Old.getNode()->dump(&DAG);
dbgs() << "\nWith: "; TLO.New.getNode()->dump(&DAG);
dbgs() << '\n');
CommitTargetLoweringOpt(TLO);
return true;
}
/// Check the specified vector node value to see if it can be simplified or
/// if things it uses can be simplified as it only uses some of the elements.
/// If so, return true.
bool DAGCombiner::SimplifyDemandedVectorElts(SDValue Op,
const APInt &DemandedElts,
bool AssumeSingleUse) {
TargetLowering::TargetLoweringOpt TLO(DAG, LegalTypes, LegalOperations);
APInt KnownUndef, KnownZero;
if (!TLI.SimplifyDemandedVectorElts(Op, DemandedElts, KnownUndef, KnownZero,
TLO, 0, AssumeSingleUse))
return false;
// Revisit the node.
AddToWorklist(Op.getNode());
// Replace the old value with the new one.
++NodesCombined;
LLVM_DEBUG(dbgs() << "\nReplacing.2 "; TLO.Old.getNode()->dump(&DAG);
dbgs() << "\nWith: "; TLO.New.getNode()->dump(&DAG);
dbgs() << '\n');
CommitTargetLoweringOpt(TLO);
return true;
}
void DAGCombiner::ReplaceLoadWithPromotedLoad(SDNode *Load, SDNode *ExtLoad) {
SDLoc DL(Load);
EVT VT = Load->getValueType(0);
SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, VT, SDValue(ExtLoad, 0));
LLVM_DEBUG(dbgs() << "\nReplacing.9 "; Load->dump(&DAG); dbgs() << "\nWith: ";
Trunc.getNode()->dump(&DAG); dbgs() << '\n');
WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 0), Trunc);
DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), SDValue(ExtLoad, 1));
deleteAndRecombine(Load);
AddToWorklist(Trunc.getNode());
}
SDValue DAGCombiner::PromoteOperand(SDValue Op, EVT PVT, bool &Replace) {
Replace = false;
SDLoc DL(Op);
if (ISD::isUNINDEXEDLoad(Op.getNode())) {
LoadSDNode *LD = cast<LoadSDNode>(Op);
EVT MemVT = LD->getMemoryVT();
ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(LD) ? ISD::EXTLOAD
: LD->getExtensionType();
Replace = true;
return DAG.getExtLoad(ExtType, DL, PVT,
LD->getChain(), LD->getBasePtr(),
MemVT, LD->getMemOperand());
}
unsigned Opc = Op.getOpcode();
switch (Opc) {
default: break;
case ISD::AssertSext:
if (SDValue Op0 = SExtPromoteOperand(Op.getOperand(0), PVT))
return DAG.getNode(ISD::AssertSext, DL, PVT, Op0, Op.getOperand(1));
break;
case ISD::AssertZext:
if (SDValue Op0 = ZExtPromoteOperand(Op.getOperand(0), PVT))
return DAG.getNode(ISD::AssertZext, DL, PVT, Op0, Op.getOperand(1));
break;
case ISD::Constant: {
unsigned ExtOpc =
Op.getValueType().isByteSized() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
return DAG.getNode(ExtOpc, DL, PVT, Op);
}
}
if (!TLI.isOperationLegal(ISD::ANY_EXTEND, PVT))
return SDValue();
return DAG.getNode(ISD::ANY_EXTEND, DL, PVT, Op);
}
SDValue DAGCombiner::SExtPromoteOperand(SDValue Op, EVT PVT) {
if (!TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, PVT))
return SDValue();
EVT OldVT = Op.getValueType();
SDLoc DL(Op);
bool Replace = false;
SDValue NewOp = PromoteOperand(Op, PVT, Replace);
if (!NewOp.getNode())
return SDValue();
AddToWorklist(NewOp.getNode());
if (Replace)
ReplaceLoadWithPromotedLoad(Op.getNode(), NewOp.getNode());
return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, NewOp.getValueType(), NewOp,
DAG.getValueType(OldVT));
}
SDValue DAGCombiner::ZExtPromoteOperand(SDValue Op, EVT PVT) {
EVT OldVT = Op.getValueType();
SDLoc DL(Op);
bool Replace = false;
SDValue NewOp = PromoteOperand(Op, PVT, Replace);
if (!NewOp.getNode())
return SDValue();
AddToWorklist(NewOp.getNode());
if (Replace)
ReplaceLoadWithPromotedLoad(Op.getNode(), NewOp.getNode());
return DAG.getZeroExtendInReg(NewOp, DL, OldVT);
}
/// Promote the specified integer binary operation if the target indicates it is
/// beneficial. e.g. On x86, it's usually better to promote i16 operations to
/// i32 since i16 instructions are longer.
SDValue DAGCombiner::PromoteIntBinOp(SDValue Op) {
if (!LegalOperations)
return SDValue();
EVT VT = Op.getValueType();
if (VT.isVector() || !VT.isInteger())
return SDValue();
// If operation type is 'undesirable', e.g. i16 on x86, consider
// promoting it.
unsigned Opc = Op.getOpcode();
if (TLI.isTypeDesirableForOp(Opc, VT))
return SDValue();
EVT PVT = VT;
// Consult target whether it is a good idea to promote this operation and
// what's the right type to promote it to.
if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
assert(PVT != VT && "Don't know what type to promote to!");
LLVM_DEBUG(dbgs() << "\nPromoting "; Op.getNode()->dump(&DAG));
bool Replace0 = false;
SDValue N0 = Op.getOperand(0);
SDValue NN0 = PromoteOperand(N0, PVT, Replace0);
bool Replace1 = false;
SDValue N1 = Op.getOperand(1);
SDValue NN1 = PromoteOperand(N1, PVT, Replace1);
SDLoc DL(Op);
SDValue RV =
DAG.getNode(ISD::TRUNCATE, DL, VT, DAG.getNode(Opc, DL, PVT, NN0, NN1));
// We are always replacing N0/N1's use in N and only need
// additional replacements if there are additional uses.
Replace0 &= !N0->hasOneUse();
Replace1 &= (N0 != N1) && !N1->hasOneUse();
// Combine Op here so it is preserved past replacements.
CombineTo(Op.getNode(), RV);
// If operands have a use ordering, make sure we deal with
// predecessor first.
if (Replace0 && Replace1 && N0.getNode()->isPredecessorOf(N1.getNode())) {
std::swap(N0, N1);
std::swap(NN0, NN1);
}
if (Replace0) {
AddToWorklist(NN0.getNode());
ReplaceLoadWithPromotedLoad(N0.getNode(), NN0.getNode());
}
if (Replace1) {
AddToWorklist(NN1.getNode());
ReplaceLoadWithPromotedLoad(N1.getNode(), NN1.getNode());
}
return Op;
}
return SDValue();
}
/// Promote the specified integer shift operation if the target indicates it is
/// beneficial. e.g. On x86, it's usually better to promote i16 operations to
/// i32 since i16 instructions are longer.
SDValue DAGCombiner::PromoteIntShiftOp(SDValue Op) {
if (!LegalOperations)
return SDValue();
EVT VT = Op.getValueType();
if (VT.isVector() || !VT.isInteger())
return SDValue();
// If operation type is 'undesirable', e.g. i16 on x86, consider
// promoting it.
unsigned Opc = Op.getOpcode();
if (TLI.isTypeDesirableForOp(Opc, VT))
return SDValue();
EVT PVT = VT;
// Consult target whether it is a good idea to promote this operation and
// what's the right type to promote it to.
if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
assert(PVT != VT && "Don't know what type to promote to!");
LLVM_DEBUG(dbgs() << "\nPromoting "; Op.getNode()->dump(&DAG));
bool Replace = false;
SDValue N0 = Op.getOperand(0);
SDValue N1 = Op.getOperand(1);
if (Opc == ISD::SRA)
N0 = SExtPromoteOperand(N0, PVT);
else if (Opc == ISD::SRL)
N0 = ZExtPromoteOperand(N0, PVT);
else
N0 = PromoteOperand(N0, PVT, Replace);
if (!N0.getNode())
return SDValue();
SDLoc DL(Op);
SDValue RV =
DAG.getNode(ISD::TRUNCATE, DL, VT, DAG.getNode(Opc, DL, PVT, N0, N1));
AddToWorklist(N0.getNode());
if (Replace)
ReplaceLoadWithPromotedLoad(Op.getOperand(0).getNode(), N0.getNode());
// Deal with Op being deleted.
if (Op && Op.getOpcode() != ISD::DELETED_NODE)
return RV;
}
return SDValue();
}
SDValue DAGCombiner::PromoteExtend(SDValue Op) {
if (!LegalOperations)
return SDValue();
EVT VT = Op.getValueType();
if (VT.isVector() || !VT.isInteger())
return SDValue();
// If operation type is 'undesirable', e.g. i16 on x86, consider
// promoting it.
unsigned Opc = Op.getOpcode();
if (TLI.isTypeDesirableForOp(Opc, VT))
return SDValue();
EVT PVT = VT;
// Consult target whether it is a good idea to promote this operation and
// what's the right type to promote it to.
if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
assert(PVT != VT && "Don't know what type to promote to!");
// fold (aext (aext x)) -> (aext x)
// fold (aext (zext x)) -> (zext x)
// fold (aext (sext x)) -> (sext x)
LLVM_DEBUG(dbgs() << "\nPromoting "; Op.getNode()->dump(&DAG));
return DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, Op.getOperand(0));
}
return SDValue();
}
bool DAGCombiner::PromoteLoad(SDValue Op) {
if (!LegalOperations)
return false;
if (!ISD::isUNINDEXEDLoad(Op.getNode()))
return false;
EVT VT = Op.getValueType();
if (VT.isVector() || !VT.isInteger())
return false;
// If operation type is 'undesirable', e.g. i16 on x86, consider
// promoting it.
unsigned Opc = Op.getOpcode();
if (TLI.isTypeDesirableForOp(Opc, VT))
return false;
EVT PVT = VT;
// Consult target whether it is a good idea to promote this operation and
// what's the right type to promote it to.
if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
assert(PVT != VT && "Don't know what type to promote to!");
SDLoc DL(Op);
SDNode *N = Op.getNode();
LoadSDNode *LD = cast<LoadSDNode>(N);
EVT MemVT = LD->getMemoryVT();
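// A plain load is widened with an any-extending load; an existing extload
// keeps its extension type.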
ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(LD) ? ISD::EXTLOAD
: LD->getExtensionType();
SDValue NewLD = DAG.getExtLoad(ExtType, DL, PVT,
LD->getChain(), LD->getBasePtr(),
MemVT, LD->getMemOperand());
SDValue Result = DAG.getNode(ISD::TRUNCATE, DL, VT, NewLD);
LLVM_DEBUG(dbgs() << "\nPromoting "; N->dump(&DAG); dbgs() << "\nTo: ";
Result.getNode()->dump(&DAG); dbgs() << '\n');
WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), NewLD.getValue(1));
deleteAndRecombine(N);
AddToWorklist(Result.getNode());
return true;
}
return false;
}
/// Recursively delete a node which has no uses and any operands for
/// which it is the only use.
///
/// Note that this both deletes the nodes and removes them from the worklist.
/// It also adds any nodes that have had a user deleted to the worklist as they
/// may now have only one use and be subject to other combines.
bool DAGCombiner::recursivelyDeleteUnusedNodes(SDNode *N) {
if (!N->use_empty())
return false;
SmallSetVector<SDNode *, 16> Nodes;
Nodes.insert(N);
do {
N = Nodes.pop_back_val();
if (!N)
continue;
if (N->use_empty()) {
for (const SDValue &ChildN : N->op_values())
Nodes.insert(ChildN.getNode());
removeFromWorklist(N);
DAG.DeleteNode(N);
} else {
AddToWorklist(N);
}
} while (!Nodes.empty());
return true;
}
//===----------------------------------------------------------------------===//
// Main DAG Combiner implementation
//===----------------------------------------------------------------------===//
void DAGCombiner::Run(CombineLevel AtLevel) {
// Set the instance variables, so that the various visit routines may use them.
Level = AtLevel;
LegalOperations = Level >= AfterLegalizeVectorOps;
LegalTypes = Level >= AfterLegalizeTypes;
WorklistInserter AddNodes(*this);
// Add all the dag nodes to the worklist.
for (SDNode &Node : DAG.allnodes())
AddToWorklist(&Node);
// Create a dummy node (which is not added to allnodes) that adds a reference
// to the root node, preventing it from being deleted, and tracking any
// changes of the root.
HandleSDNode Dummy(DAG.getRoot());
// While we have a valid worklist entry node, try to combine it.
while (SDNode *N = getNextWorklistEntry()) {
// If N has no uses, it is dead. Make sure to revisit all N's operands once
// N is deleted from the DAG, since they too may now be dead or may have a
// reduced number of uses, allowing other xforms.
if (recursivelyDeleteUnusedNodes(N))
continue;
WorklistRemover DeadNodes(*this);
// If this combine is running after legalizing the DAG, re-legalize any
// nodes pulled off the worklist.
if (Level == AfterLegalizeDAG) {
SmallSetVector<SDNode *, 16> UpdatedNodes;
bool NIsValid = DAG.LegalizeOp(N, UpdatedNodes);
for (SDNode *LN : UpdatedNodes) {
AddToWorklist(LN);
AddUsersToWorklist(LN);
}
if (!NIsValid)
continue;
}
LLVM_DEBUG(dbgs() << "\nCombining: "; N->dump(&DAG));
// Add any operands of the new node which have not yet been combined to the
// worklist as well. Because the worklist uniques things already, this
// won't repeatedly process the same operand.
CombinedNodes.insert(N);
for (const SDValue &ChildN : N->op_values())
if (!CombinedNodes.count(ChildN.getNode()))
AddToWorklist(ChildN.getNode());
SDValue RV = combine(N);
if (!RV.getNode())
continue;
++NodesCombined;
// If we get back the same node we passed in, rather than a new node or
// zero, we know that the node must have defined multiple values and
// CombineTo was used. Since CombineTo takes care of the worklist
// mechanics for us, we have no work to do in this case.
if (RV.getNode() == N)
continue;
assert(N->getOpcode() != ISD::DELETED_NODE &&
RV.getOpcode() != ISD::DELETED_NODE &&
"Node was deleted but visit returned new node!");
LLVM_DEBUG(dbgs() << " ... into: "; RV.getNode()->dump(&DAG));
if (N->getNumValues() == RV.getNode()->getNumValues())
DAG.ReplaceAllUsesWith(N, RV.getNode());
else {
assert(N->getValueType(0) == RV.getValueType() &&
N->getNumValues() == 1 && "Type mismatch");
DAG.ReplaceAllUsesWith(N, &RV);
}
// Push the new node and any users onto the worklist
AddToWorklist(RV.getNode());
AddUsersToWorklist(RV.getNode());
// Finally, if the node is now dead, remove it from the graph. The node
// may not be dead if the replacement process recursively simplified to
// something else needing this node. This will also take care of adding any
// operands which have lost a user to the worklist.
recursivelyDeleteUnusedNodes(N);
}
// If the root changed (e.g. it was a dead load), update the root.
DAG.setRoot(Dummy.getValue());
DAG.RemoveDeadNodes();
}
SDValue DAGCombiner::visit(SDNode *N) {
switch (N->getOpcode()) {
default: break;
case ISD::TokenFactor: return visitTokenFactor(N);
case ISD::MERGE_VALUES: return visitMERGE_VALUES(N);
case ISD::ADD: return visitADD(N);
case ISD::SUB: return visitSUB(N);
case ISD::SADDSAT:
case ISD::UADDSAT: return visitADDSAT(N);
case ISD::SSUBSAT:
case ISD::USUBSAT: return visitSUBSAT(N);
case ISD::ADDC: return visitADDC(N);
case ISD::SADDO:
case ISD::UADDO: return visitADDO(N);
case ISD::SUBC: return visitSUBC(N);
case ISD::SSUBO:
case ISD::USUBO: return visitSUBO(N);
case ISD::ADDE: return visitADDE(N);
case ISD::ADDCARRY: return visitADDCARRY(N);
case ISD::SUBE: return visitSUBE(N);
case ISD::SUBCARRY: return visitSUBCARRY(N);
case ISD::MUL: return visitMUL(N);
case ISD::SDIV: return visitSDIV(N);
case ISD::UDIV: return visitUDIV(N);
case ISD::SREM:
case ISD::UREM: return visitREM(N);
case ISD::MULHU: return visitMULHU(N);
case ISD::MULHS: return visitMULHS(N);
case ISD::SMUL_LOHI: return visitSMUL_LOHI(N);
case ISD::UMUL_LOHI: return visitUMUL_LOHI(N);
case ISD::SMULO:
case ISD::UMULO: return visitMULO(N);
case ISD::SMIN:
case ISD::SMAX:
case ISD::UMIN:
case ISD::UMAX: return visitIMINMAX(N);
case ISD::AND: return visitAND(N);
case ISD::OR: return visitOR(N);
case ISD::XOR: return visitXOR(N);
case ISD::SHL: return visitSHL(N);
case ISD::SRA: return visitSRA(N);
case ISD::SRL: return visitSRL(N);
case ISD::ROTR:
case ISD::ROTL: return visitRotate(N);
case ISD::FSHL:
case ISD::FSHR: return visitFunnelShift(N);
case ISD::ABS: return visitABS(N);
case ISD::BSWAP: return visitBSWAP(N);
case ISD::BITREVERSE: return visitBITREVERSE(N);
case ISD::CTLZ: return visitCTLZ(N);
case ISD::CTLZ_ZERO_UNDEF: return visitCTLZ_ZERO_UNDEF(N);
case ISD::CTTZ: return visitCTTZ(N);
case ISD::CTTZ_ZERO_UNDEF: return visitCTTZ_ZERO_UNDEF(N);
case ISD::CTPOP: return visitCTPOP(N);
case ISD::SELECT: return visitSELECT(N);
case ISD::VSELECT: return visitVSELECT(N);
case ISD::SELECT_CC: return visitSELECT_CC(N);
case ISD::SETCC: return visitSETCC(N);
case ISD::SETCCCARRY: return visitSETCCCARRY(N);
case ISD::SIGN_EXTEND: return visitSIGN_EXTEND(N);
case ISD::ZERO_EXTEND: return visitZERO_EXTEND(N);
case ISD::ANY_EXTEND: return visitANY_EXTEND(N);
case ISD::AssertSext:
case ISD::AssertZext: return visitAssertExt(N);
case ISD::SIGN_EXTEND_INREG: return visitSIGN_EXTEND_INREG(N);
case ISD::SIGN_EXTEND_VECTOR_INREG: return visitSIGN_EXTEND_VECTOR_INREG(N);
case ISD::ZERO_EXTEND_VECTOR_INREG: return visitZERO_EXTEND_VECTOR_INREG(N);
case ISD::TRUNCATE: return visitTRUNCATE(N);
case ISD::BITCAST: return visitBITCAST(N);
case ISD::BUILD_PAIR: return visitBUILD_PAIR(N);
case ISD::FADD: return visitFADD(N);
case ISD::FSUB: return visitFSUB(N);
case ISD::FMUL: return visitFMUL(N);
case ISD::FMA: return visitFMA(N);
case ISD::FDIV: return visitFDIV(N);
case ISD::FREM: return visitFREM(N);
case ISD::FSQRT: return visitFSQRT(N);
case ISD::FCOPYSIGN: return visitFCOPYSIGN(N);
case ISD::FPOW: return visitFPOW(N);
case ISD::SINT_TO_FP: return visitSINT_TO_FP(N);
case ISD::UINT_TO_FP: return visitUINT_TO_FP(N);
case ISD::FP_TO_SINT: return visitFP_TO_SINT(N);
case ISD::FP_TO_UINT: return visitFP_TO_UINT(N);
case ISD::FP_ROUND: return visitFP_ROUND(N);
case ISD::FP_ROUND_INREG: return visitFP_ROUND_INREG(N);
case ISD::FP_EXTEND: return visitFP_EXTEND(N);
case ISD::FNEG: return visitFNEG(N);
case ISD::FABS: return visitFABS(N);
case ISD::FFLOOR: return visitFFLOOR(N);
case ISD::FMINNUM: return visitFMINNUM(N);
case ISD::FMAXNUM: return visitFMAXNUM(N);
case ISD::FMINIMUM: return visitFMINIMUM(N);
case ISD::FMAXIMUM: return visitFMAXIMUM(N);
case ISD::FCEIL: return visitFCEIL(N);
case ISD::FTRUNC: return visitFTRUNC(N);
case ISD::BRCOND: return visitBRCOND(N);
case ISD::BR_CC: return visitBR_CC(N);
case ISD::LOAD: return visitLOAD(N);
case ISD::STORE: return visitSTORE(N);
case ISD::INSERT_VECTOR_ELT: return visitINSERT_VECTOR_ELT(N);
case ISD::EXTRACT_VECTOR_ELT: return visitEXTRACT_VECTOR_ELT(N);
case ISD::BUILD_VECTOR: return visitBUILD_VECTOR(N);
case ISD::CONCAT_VECTORS: return visitCONCAT_VECTORS(N);
case ISD::EXTRACT_SUBVECTOR: return visitEXTRACT_SUBVECTOR(N);
case ISD::VECTOR_SHUFFLE: return visitVECTOR_SHUFFLE(N);
case ISD::SCALAR_TO_VECTOR: return visitSCALAR_TO_VECTOR(N);
case ISD::INSERT_SUBVECTOR: return visitINSERT_SUBVECTOR(N);
case ISD::MGATHER: return visitMGATHER(N);
case ISD::MLOAD: return visitMLOAD(N);
case ISD::MSCATTER: return visitMSCATTER(N);
case ISD::MSTORE: return visitMSTORE(N);
case ISD::LIFETIME_END: return visitLIFETIME_END(N);
case ISD::FP_TO_FP16: return visitFP_TO_FP16(N);
case ISD::FP16_TO_FP: return visitFP16_TO_FP(N);
case ISD::VECREDUCE_FADD:
case ISD::VECREDUCE_FMUL:
case ISD::VECREDUCE_ADD:
case ISD::VECREDUCE_MUL:
case ISD::VECREDUCE_AND:
case ISD::VECREDUCE_OR:
case ISD::VECREDUCE_XOR:
case ISD::VECREDUCE_SMAX:
case ISD::VECREDUCE_SMIN:
case ISD::VECREDUCE_UMAX:
case ISD::VECREDUCE_UMIN:
case ISD::VECREDUCE_FMAX:
case ISD::VECREDUCE_FMIN: return visitVECREDUCE(N);
}
return SDValue();
}
SDValue DAGCombiner::combine(SDNode *N) {
SDValue RV = visit(N);
// If nothing happened, try a target-specific DAG combine.
if (!RV.getNode()) {
assert(N->getOpcode() != ISD::DELETED_NODE &&
"Node was deleted but visit returned NULL!");
if (N->getOpcode() >= ISD::BUILTIN_OP_END ||
TLI.hasTargetDAGCombine((ISD::NodeType)N->getOpcode())) {
// Expose the DAG combiner to the target combiner impls.
TargetLowering::DAGCombinerInfo
DagCombineInfo(DAG, Level, false, this);
RV = TLI.PerformDAGCombine(N, DagCombineInfo);
}
}
// If nothing happened still, try promoting the operation.
if (!RV.getNode()) {
switch (N->getOpcode()) {
default: break;
case ISD::ADD:
case ISD::SUB:
case ISD::MUL:
case ISD::AND:
case ISD::OR:
case ISD::XOR:
RV = PromoteIntBinOp(SDValue(N, 0));
break;
case ISD::SHL:
case ISD::SRA:
case ISD::SRL:
RV = PromoteIntShiftOp(SDValue(N, 0));
break;
case ISD::SIGN_EXTEND:
case ISD::ZERO_EXTEND:
case ISD::ANY_EXTEND:
RV = PromoteExtend(SDValue(N, 0));
break;
case ISD::LOAD:
if (PromoteLoad(SDValue(N, 0)))
RV = SDValue(N, 0);
break;
}
}
// If N is a commutative binary node, try to eliminate it if the commuted
// version is already present in the DAG.
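// For example, if (add y, x) already exists when we visit (add x, y), we can
// return the existing node instead of keeping both forms alive.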
if (!RV.getNode() && TLI.isCommutativeBinOp(N->getOpcode()) &&
N->getNumValues() == 1) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
// Constant operands are canonicalized to RHS.
if (N0 != N1 && (isa<ConstantSDNode>(N0) || !isa<ConstantSDNode>(N1))) {
SDValue Ops[] = {N1, N0};
SDNode *CSENode = DAG.getNodeIfExists(N->getOpcode(), N->getVTList(), Ops,
N->getFlags());
if (CSENode)
return SDValue(CSENode, 0);
}
}
return RV;
}
/// Given a node, return its input chain if it has one, otherwise return a null
/// sd operand.
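/// Chain operands have type MVT::Other; they are usually operand 0 or the
/// last operand, so those positions are checked first.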
static SDValue getInputChainForNode(SDNode *N) {
if (unsigned NumOps = N->getNumOperands()) {
if (N->getOperand(0).getValueType() == MVT::Other)
return N->getOperand(0);
if (N->getOperand(NumOps-1).getValueType() == MVT::Other)
return N->getOperand(NumOps-1);
for (unsigned i = 1; i < NumOps-1; ++i)
if (N->getOperand(i).getValueType() == MVT::Other)
return N->getOperand(i);
}
return SDValue();
}
SDValue DAGCombiner::visitTokenFactor(SDNode *N) {
// If N has two operands, where one has an input chain equal to the other,
// the 'other' chain is redundant.
if (N->getNumOperands() == 2) {
if (getInputChainForNode(N->getOperand(0).getNode()) == N->getOperand(1))
return N->getOperand(0);
if (getInputChainForNode(N->getOperand(1).getNode()) == N->getOperand(0))
return N->getOperand(1);
}
// Don't simplify token factors if optnone.
if (OptLevel == CodeGenOpt::None)
return SDValue();
// If the sole user is a token factor, we should make sure we have a
// chance to merge them together. This prevents TF chains from inhibiting
// optimizations.
if (N->hasOneUse() && N->use_begin()->getOpcode() == ISD::TokenFactor)
AddToWorklist(*(N->use_begin()));
SmallVector<SDNode *, 8> TFs; // List of token factors to visit.
SmallVector<SDValue, 8> Ops; // Ops for replacing token factor.
SmallPtrSet<SDNode*, 16> SeenOps;
bool Changed = false; // If we should replace this token factor.
// Start out with this token factor.
TFs.push_back(N);
// Iterate through token factors. The TFs list grows when new token factors
// are encountered.
for (unsigned i = 0; i < TFs.size(); ++i) {
// Limit number of nodes to inline, to avoid quadratic compile times.
// We have to add the outstanding Token Factors to Ops, otherwise we might
// drop Ops from the resulting Token Factors.
if (Ops.size() > TokenFactorInlineLimit) {
for (unsigned j = i; j < TFs.size(); j++)
Ops.emplace_back(TFs[j], 0);
// Drop unprocessed Token Factors from TFs, so we do not add them to the
// combiner worklist later.
TFs.resize(i);
break;
}
SDNode *TF = TFs[i];
// Check each of the operands.
for (const SDValue &Op : TF->op_values()) {
switch (Op.getOpcode()) {
case ISD::EntryToken:
// Entry tokens don't need to be added to the list. They are
// redundant.
Changed = true;
break;
case ISD::TokenFactor:
if (Op.hasOneUse() && !is_contained(TFs, Op.getNode())) {
// Queue up for processing.
TFs.push_back(Op.getNode());
Changed = true;
break;
}
LLVM_FALLTHROUGH;
default:
// Only add if it isn't already in the list.
if (SeenOps.insert(Op.getNode()).second)
Ops.push_back(Op);
else
Changed = true;
break;
}
}
}
// Re-visit inlined Token Factors, to clean them up in case they have been
// removed. Skip the first Token Factor, as this is the current node.
for (unsigned i = 1, e = TFs.size(); i < e; i++)
AddToWorklist(TFs[i]);
// Remove Nodes that are chained to another node in the list. Do so
// by walking up chains breadth-first, stopping when we've seen
// another operand. In general we must climb to the EntryNode, but we can exit
// early if we find all remaining work is associated with just one operand as
// no further pruning is possible.
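// Roughly: in TokenFactor(A, B), if A is reachable by walking up B's chain,
// then B already orders after A and A can be pruned from the operand list.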
// List of nodes to search through and original Ops from which they originate.
SmallVector<std::pair<SDNode *, unsigned>, 8> Worklist;
SmallVector<unsigned, 8> OpWorkCount; // Count of work for each Op.
SmallPtrSet<SDNode *, 16> SeenChains;
bool DidPruneOps = false;
unsigned NumLeftToConsider = 0;
for (const SDValue &Op : Ops) {
Worklist.push_back(std::make_pair(Op.getNode(), NumLeftToConsider++));
OpWorkCount.push_back(1);
}
auto AddToWorklist = [&](unsigned CurIdx, SDNode *Op, unsigned OpNumber) {
// If this is an Op, we can remove the op from the list. Re-mark any
// search associated with it as coming from the current OpNumber.
if (SeenOps.count(Op) != 0) {
Changed = true;
DidPruneOps = true;
unsigned OrigOpNumber = 0;
while (OrigOpNumber < Ops.size() && Ops[OrigOpNumber].getNode() != Op)
OrigOpNumber++;
assert((OrigOpNumber != Ops.size()) &&
"expected to find TokenFactor Operand");
// Re-mark worklist from OrigOpNumber to OpNumber
for (unsigned i = CurIdx + 1; i < Worklist.size(); ++i) {
if (Worklist[i].second == OrigOpNumber) {
Worklist[i].second = OpNumber;
}
}
OpWorkCount[OpNumber] += OpWorkCount[OrigOpNumber];
OpWorkCount[OrigOpNumber] = 0;
NumLeftToConsider--;
}
// Add if it's a new chain
if (SeenChains.insert(Op).second) {
OpWorkCount[OpNumber]++;
Worklist.push_back(std::make_pair(Op, OpNumber));
}
};
for (unsigned i = 0; i < Worklist.size() && i < 1024; ++i) {
// We need to consider at least 2 Ops to prune.
if (NumLeftToConsider <= 1)
break;
auto CurNode = Worklist[i].first;
auto CurOpNumber = Worklist[i].second;
assert((OpWorkCount[CurOpNumber] > 0) &&
"Node should not appear in worklist");
switch (CurNode->getOpcode()) {
case ISD::EntryToken:
// Hitting EntryToken is the only way for the search to terminate without
// hitting another operand's search. Prevent us from marking this operand
// considered.
NumLeftToConsider++;
break;
case ISD::TokenFactor:
for (const SDValue &Op : CurNode->op_values())
AddToWorklist(i, Op.getNode(), CurOpNumber);
break;
case ISD::LIFETIME_START:
case ISD::LIFETIME_END:
case ISD::CopyFromReg:
case ISD::CopyToReg:
AddToWorklist(i, CurNode->getOperand(0).getNode(), CurOpNumber);
break;
default:
if (auto *MemNode = dyn_cast<MemSDNode>(CurNode))
AddToWorklist(i, MemNode->getChain().getNode(), CurOpNumber);
break;
}
OpWorkCount[CurOpNumber]--;
if (OpWorkCount[CurOpNumber] == 0)
NumLeftToConsider--;
}
// If we've changed things around then replace token factor.
if (Changed) {
SDValue Result;
if (Ops.empty()) {
// The entry token is the only possible outcome.
Result = DAG.getEntryNode();
} else {
if (DidPruneOps) {
SmallVector<SDValue, 8> PrunedOps;
for (const SDValue &Op : Ops) {
if (SeenChains.count(Op.getNode()) == 0)
PrunedOps.push_back(Op);
}
Result = DAG.getTokenFactor(SDLoc(N), PrunedOps);
} else {
Result = DAG.getTokenFactor(SDLoc(N), Ops);
}
}
return Result;
}
return SDValue();
}
/// MERGE_VALUES can always be eliminated.
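/// Each of its results is simply replaced by the corresponding operand.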
SDValue DAGCombiner::visitMERGE_VALUES(SDNode *N) {
WorklistRemover DeadNodes(*this);
// Replacing results may cause a different MERGE_VALUES to suddenly
// be CSE'd with N, and carry its uses with it. Iterate until no
// uses remain, to ensure that the node can be safely deleted.
// First add the users of this node to the work list so that they
// can be tried again once they have new operands.
AddUsersToWorklist(N);
do {
// Do as a single replacement to avoid rewalking use lists.
SmallVector<SDValue, 8> Ops;
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
Ops.push_back(N->getOperand(i));
DAG.ReplaceAllUsesWith(N, Ops.data());
} while (!N->use_empty());
deleteAndRecombine(N);
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
/// If \p N is a ConstantSDNode with isOpaque() == false, return it cast to a
/// ConstantSDNode pointer; otherwise return nullptr.
static ConstantSDNode *getAsNonOpaqueConstant(SDValue N) {
ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N);
return Const != nullptr && !Const->isOpaque() ? Const : nullptr;
}
SDValue DAGCombiner::foldBinOpIntoSelect(SDNode *BO) {
assert(TLI.isBinOp(BO->getOpcode()) && BO->getNumValues() == 1 &&
"Unexpected binary operator");
// Don't do this unless the old select is going away. We want to eliminate the
// binary operator, not replace a binop with a select.
// TODO: Handle ISD::SELECT_CC.
unsigned SelOpNo = 0;
SDValue Sel = BO->getOperand(0);
if (Sel.getOpcode() != ISD::SELECT || !Sel.hasOneUse()) {
SelOpNo = 1;
Sel = BO->getOperand(1);
}
if (Sel.getOpcode() != ISD::SELECT || !Sel.hasOneUse())
return SDValue();
SDValue CT = Sel.getOperand(1);
if (!isConstantOrConstantVector(CT, true) &&
!isConstantFPBuildVectorOrConstantFP(CT))
return SDValue();
SDValue CF = Sel.getOperand(2);
if (!isConstantOrConstantVector(CF, true) &&
!isConstantFPBuildVectorOrConstantFP(CF))
return SDValue();
// Bail out if any constants are opaque because we can't constant fold those.
// The exception is "and" and "or" with either 0 or -1 in which case we can
// propagate non constant operands into select. I.e.:
// and (select Cond, 0, -1), X --> select Cond, 0, X
// or X, (select Cond, -1, 0) --> select Cond, -1, X
auto BinOpcode = BO->getOpcode();
bool CanFoldNonConst =
(BinOpcode == ISD::AND || BinOpcode == ISD::OR) &&
(isNullOrNullSplat(CT) || isAllOnesOrAllOnesSplat(CT)) &&
(isNullOrNullSplat(CF) || isAllOnesOrAllOnesSplat(CF));
SDValue CBO = BO->getOperand(SelOpNo ^ 1);
if (!CanFoldNonConst &&
!isConstantOrConstantVector(CBO, true) &&
!isConstantFPBuildVectorOrConstantFP(CBO))
return SDValue();
EVT VT = Sel.getValueType();
// In case of a shift, the value and the shift amount may have different VTs.
// For instance, on x86 the shift amount is i8 regardless of the LHS type.
// Bail out if we have swapped operands and the value types do not match.
// NB: x86 is fine if the operands are not swapped and the shift amount VT is
// no bigger than that of the shifted value.
// TODO: it is possible to check for a shift operation, correct the VTs and
// still perform the optimization on x86 if needed.
if (SelOpNo && VT != CBO.getValueType())
return SDValue();
// We have a select-of-constants followed by a binary operator with a
// constant. Eliminate the binop by pulling the constant math into the select.
// Example: add (select Cond, CT, CF), CBO --> select Cond, CT + CBO, CF + CBO
SDLoc DL(Sel);
SDValue NewCT = SelOpNo ? DAG.getNode(BinOpcode, DL, VT, CBO, CT)
: DAG.getNode(BinOpcode, DL, VT, CT, CBO);
if (!CanFoldNonConst && !NewCT.isUndef() &&
!isConstantOrConstantVector(NewCT, true) &&
!isConstantFPBuildVectorOrConstantFP(NewCT))
return SDValue();
SDValue NewCF = SelOpNo ? DAG.getNode(BinOpcode, DL, VT, CBO, CF)
: DAG.getNode(BinOpcode, DL, VT, CF, CBO);
if (!CanFoldNonConst && !NewCF.isUndef() &&
!isConstantOrConstantVector(NewCF, true) &&
!isConstantFPBuildVectorOrConstantFP(NewCF))
return SDValue();
SDValue SelectOp = DAG.getSelect(DL, VT, Sel.getOperand(0), NewCT, NewCF);
SelectOp->setFlags(BO->getFlags());
return SelectOp;
}
static SDValue foldAddSubBoolOfMaskedVal(SDNode *N, SelectionDAG &DAG) {
assert((N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
"Expecting add or sub");
// Match a constant operand and a zext operand for the math instruction:
// add Z, C
// sub C, Z
bool IsAdd = N->getOpcode() == ISD::ADD;
SDValue C = IsAdd ? N->getOperand(1) : N->getOperand(0);
SDValue Z = IsAdd ? N->getOperand(0) : N->getOperand(1);
auto *CN = dyn_cast<ConstantSDNode>(C);
if (!CN || Z.getOpcode() != ISD::ZERO_EXTEND)
return SDValue();
// Match the zext operand as a setcc of a boolean.
if (Z.getOperand(0).getOpcode() != ISD::SETCC ||
Z.getOperand(0).getValueType() != MVT::i1)
return SDValue();
// Match the compare as: setcc (X & 1), 0, eq.
SDValue SetCC = Z.getOperand(0);
ISD::CondCode CC = cast<CondCodeSDNode>(SetCC->getOperand(2))->get();
if (CC != ISD::SETEQ || !isNullConstant(SetCC.getOperand(1)) ||
SetCC.getOperand(0).getOpcode() != ISD::AND ||
!isOneConstant(SetCC.getOperand(0).getOperand(1)))
return SDValue();
// We are adding/subtracting a constant and an inverted low bit. Turn that
// into a subtract/add of the low bit with incremented/decremented constant:
// add (zext i1 (seteq (X & 1), 0)), C --> sub C+1, (zext (X & 1))
// sub C, (zext i1 (seteq (X & 1), 0)) --> add C-1, (zext (X & 1))
EVT VT = C.getValueType();
SDLoc DL(N);
SDValue LowBit = DAG.getZExtOrTrunc(SetCC.getOperand(0), DL, VT);
SDValue C1 = IsAdd ? DAG.getConstant(CN->getAPIntValue() + 1, DL, VT) :
DAG.getConstant(CN->getAPIntValue() - 1, DL, VT);
return DAG.getNode(IsAdd ? ISD::SUB : ISD::ADD, DL, VT, C1, LowBit);
}
/// Try to fold a 'not' shifted sign-bit with add/sub with constant operand into
/// a shift and add with a different constant.
static SDValue foldAddSubOfSignBit(SDNode *N, SelectionDAG &DAG) {
assert((N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
"Expecting add or sub");
// We need a constant operand for the add/sub, and the other operand is a
// logical shift right: add (srl), C or sub C, (srl).
// TODO - support non-uniform vector amounts.
bool IsAdd = N->getOpcode() == ISD::ADD;
SDValue ConstantOp = IsAdd ? N->getOperand(1) : N->getOperand(0);
SDValue ShiftOp = IsAdd ? N->getOperand(0) : N->getOperand(1);
ConstantSDNode *C = isConstOrConstSplat(ConstantOp);
if (!C || ShiftOp.getOpcode() != ISD::SRL)
return SDValue();
// The shift must be of a 'not' value.
SDValue Not = ShiftOp.getOperand(0);
if (!Not.hasOneUse() || !isBitwiseNot(Not))
return SDValue();
// The shift must be moving the sign bit to the least-significant-bit.
EVT VT = ShiftOp.getValueType();
SDValue ShAmt = ShiftOp.getOperand(1);
ConstantSDNode *ShAmtC = isConstOrConstSplat(ShAmt);
if (!ShAmtC || ShAmtC->getAPIntValue() != (VT.getScalarSizeInBits() - 1))
return SDValue();
// Eliminate the 'not' by adjusting the shift and add/sub constant:
// add (srl (not X), 31), C --> add (sra X, 31), (C + 1)
// sub C, (srl (not X), 31) --> add (srl X, 31), (C - 1)
SDLoc DL(N);
auto ShOpcode = IsAdd ? ISD::SRA : ISD::SRL;
SDValue NewShift = DAG.getNode(ShOpcode, DL, VT, Not.getOperand(0), ShAmt);
APInt NewC = IsAdd ? C->getAPIntValue() + 1 : C->getAPIntValue() - 1;
return DAG.getNode(ISD::ADD, DL, VT, NewShift, DAG.getConstant(NewC, DL, VT));
}
/// Try to fold a node that behaves like an ADD (note that N isn't necessarily
/// an ISD::ADD here, it could for example be an ISD::OR if we know that there
/// are no common bits set in the operands).
SDValue DAGCombiner::visitADDLike(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N0.getValueType();
SDLoc DL(N);
// fold vector ops
if (VT.isVector()) {
if (SDValue FoldedVOp = SimplifyVBinOp(N))
return FoldedVOp;
// fold (add x, 0) -> x, vector edition
if (ISD::isBuildVectorAllZeros(N1.getNode()))
return N0;
if (ISD::isBuildVectorAllZeros(N0.getNode()))
return N1;
}
// fold (add x, undef) -> undef
if (N0.isUndef())
return N0;
if (N1.isUndef())
return N1;
if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) {
// canonicalize constant to RHS
if (!DAG.isConstantIntBuildVectorOrConstantInt(N1))
return DAG.getNode(ISD::ADD, DL, VT, N1, N0);
// fold (add c1, c2) -> c1+c2
return DAG.FoldConstantArithmetic(ISD::ADD, DL, VT, N0.getNode(),
N1.getNode());
}
// fold (add x, 0) -> x
if (isNullConstant(N1))
return N0;
if (isConstantOrConstantVector(N1, /* NoOpaque */ true)) {
// fold ((A-c1)+c2) -> (A+(c2-c1))
if (N0.getOpcode() == ISD::SUB &&
isConstantOrConstantVector(N0.getOperand(1), /* NoOpaque */ true)) {
SDValue Sub = DAG.FoldConstantArithmetic(ISD::SUB, DL, VT, N1.getNode(),
N0.getOperand(1).getNode());
assert(Sub && "Constant folding failed");
return DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(0), Sub);
}
// fold ((c1-A)+c2) -> (c1+c2)-A
if (N0.getOpcode() == ISD::SUB &&
isConstantOrConstantVector(N0.getOperand(0), /* NoOpaque */ true)) {
SDValue Add = DAG.FoldConstantArithmetic(ISD::ADD, DL, VT, N1.getNode(),
N0.getOperand(0).getNode());
assert(Add && "Constant folding failed");
return DAG.getNode(ISD::SUB, DL, VT, Add, N0.getOperand(1));
}
// add (sext i1 X), 1 -> zext (not i1 X)
// We don't transform this pattern:
// add (zext i1 X), -1 -> sext (not i1 X)
// because most (?) targets generate better code for the zext form.
if (N0.getOpcode() == ISD::SIGN_EXTEND && N0.hasOneUse() &&
isOneOrOneSplat(N1)) {
SDValue X = N0.getOperand(0);
if ((!LegalOperations ||
(TLI.isOperationLegal(ISD::XOR, X.getValueType()) &&
TLI.isOperationLegal(ISD::ZERO_EXTEND, VT))) &&
X.getScalarValueSizeInBits() == 1) {
SDValue Not = DAG.getNOT(DL, X, X.getValueType());
return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Not);
}
}
// Undo the add -> or combine to merge constant offsets from a frame index.
if (N0.getOpcode() == ISD::OR &&
isa<FrameIndexSDNode>(N0.getOperand(0)) &&
isa<ConstantSDNode>(N0.getOperand(1)) &&
DAG.haveNoCommonBitsSet(N0.getOperand(0), N0.getOperand(1))) {
SDValue Add0 = DAG.getNode(ISD::ADD, DL, VT, N1, N0.getOperand(1));
return DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(0), Add0);
}
}
if (SDValue NewSel = foldBinOpIntoSelect(N))
return NewSel;
// reassociate add
if (!reassociationCanBreakAddressingModePattern(ISD::ADD, DL, N0, N1)) {
if (SDValue RADD = reassociateOps(ISD::ADD, DL, N0, N1, N->getFlags()))
return RADD;
}
// fold ((0-A) + B) -> B-A
if (N0.getOpcode() == ISD::SUB && isNullOrNullSplat(N0.getOperand(0)))
return DAG.getNode(ISD::SUB, DL, VT, N1, N0.getOperand(1));
// fold (A + (0-B)) -> A-B
if (N1.getOpcode() == ISD::SUB && isNullOrNullSplat(N1.getOperand(0)))
return DAG.getNode(ISD::SUB, DL, VT, N0, N1.getOperand(1));
// fold (A+(B-A)) -> B
if (N1.getOpcode() == ISD::SUB && N0 == N1.getOperand(1))
return N1.getOperand(0);
// fold ((B-A)+A) -> B
if (N0.getOpcode() == ISD::SUB && N1 == N0.getOperand(1))
return N0.getOperand(0);
// fold ((A-B)+(C-A)) -> (C-B)
if (N0.getOpcode() == ISD::SUB && N1.getOpcode() == ISD::SUB &&
N0.getOperand(0) == N1.getOperand(1))
return DAG.getNode(ISD::SUB, DL, VT, N1.getOperand(0),
N0.getOperand(1));
// fold ((A-B)+(B-C)) -> (A-C)
if (N0.getOpcode() == ISD::SUB && N1.getOpcode() == ISD::SUB &&
N0.getOperand(1) == N1.getOperand(0))
return DAG.getNode(ISD::SUB, DL, VT, N0.getOperand(0),
N1.getOperand(1));
// fold (A+(B-(A+C))) to (B-C)
if (N1.getOpcode() == ISD::SUB && N1.getOperand(1).getOpcode() == ISD::ADD &&
N0 == N1.getOperand(1).getOperand(0))
return DAG.getNode(ISD::SUB, DL, VT, N1.getOperand(0),
N1.getOperand(1).getOperand(1));
// fold (A+(B-(C+A))) to (B-C)
if (N1.getOpcode() == ISD::SUB && N1.getOperand(1).getOpcode() == ISD::ADD &&
N0 == N1.getOperand(1).getOperand(1))
return DAG.getNode(ISD::SUB, DL, VT, N1.getOperand(0),
N1.getOperand(1).getOperand(0));
// fold (A+((B-A)+or-C)) to (B+or-C)
if ((N1.getOpcode() == ISD::SUB || N1.getOpcode() == ISD::ADD) &&
N1.getOperand(0).getOpcode() == ISD::SUB &&
N0 == N1.getOperand(0).getOperand(1))
return DAG.getNode(N1.getOpcode(), DL, VT, N1.getOperand(0).getOperand(0),
N1.getOperand(1));
// fold (A-B)+(C-D) to (A+C)-(B+D) when A or C is constant
if (N0.getOpcode() == ISD::SUB && N1.getOpcode() == ISD::SUB) {
SDValue N00 = N0.getOperand(0);
SDValue N01 = N0.getOperand(1);
SDValue N10 = N1.getOperand(0);
SDValue N11 = N1.getOperand(1);
if (isConstantOrConstantVector(N00) || isConstantOrConstantVector(N10))
return DAG.getNode(ISD::SUB, DL, VT,
DAG.getNode(ISD::ADD, SDLoc(N0), VT, N00, N10),
DAG.getNode(ISD::ADD, SDLoc(N1), VT, N01, N11));
}
// fold (add (umax X, C), -C) --> (usubsat X, C)
if (N0.getOpcode() == ISD::UMAX && hasOperation(ISD::USUBSAT, VT)) {
auto MatchUSUBSAT = [](ConstantSDNode *Max, ConstantSDNode *Op) {
return (!Max && !Op) ||
(Max && Op && Max->getAPIntValue() == (-Op->getAPIntValue()));
};
if (ISD::matchBinaryPredicate(N0.getOperand(1), N1, MatchUSUBSAT,
/*AllowUndefs*/ true))
return DAG.getNode(ISD::USUBSAT, DL, VT, N0.getOperand(0),
N0.getOperand(1));
}
if (SimplifyDemandedBits(SDValue(N, 0)))
return SDValue(N, 0);
if (isOneOrOneSplat(N1)) {
// fold (add (xor a, -1), 1) -> (sub 0, a)
if (isBitwiseNot(N0))
return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
N0.getOperand(0));
// fold (add (add (xor a, -1), b), 1) -> (sub b, a)
if (N0.getOpcode() == ISD::ADD ||
N0.getOpcode() == ISD::UADDO ||
N0.getOpcode() == ISD::SADDO) {
SDValue A, Xor;
if (isBitwiseNot(N0.getOperand(0))) {
A = N0.getOperand(1);
Xor = N0.getOperand(0);
} else if (isBitwiseNot(N0.getOperand(1))) {
A = N0.getOperand(0);
Xor = N0.getOperand(1);
}
if (Xor)
return DAG.getNode(ISD::SUB, DL, VT, A, Xor.getOperand(0));
}
// Look for:
// add (add x, y), 1
// And if the target does not like this form then turn into:
// sub y, (xor x, -1)
if (!TLI.preferIncOfAddToSubOfNot(VT) && N0.hasOneUse() &&
N0.getOpcode() == ISD::ADD) {
SDValue Not = DAG.getNode(ISD::XOR, DL, VT, N0.getOperand(0),
DAG.getAllOnesConstant(DL, VT));
return DAG.getNode(ISD::SUB, DL, VT, N0.getOperand(1), Not);
}
}
// (x - y) + -1 -> add (xor y, -1), x
if (N0.hasOneUse() && N0.getOpcode() == ISD::SUB &&
isAllOnesOrAllOnesSplat(N1)) {
SDValue Xor = DAG.getNode(ISD::XOR, DL, VT, N0.getOperand(1), N1);
return DAG.getNode(ISD::ADD, DL, VT, Xor, N0.getOperand(0));
}
if (SDValue Combined = visitADDLikeCommutative(N0, N1, N))
return Combined;
if (SDValue Combined = visitADDLikeCommutative(N1, N0, N))
return Combined;
return SDValue();
}
SDValue DAGCombiner::visitADD(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N0.getValueType();
SDLoc DL(N);
if (SDValue Combined = visitADDLike(N))
return Combined;
if (SDValue V = foldAddSubBoolOfMaskedVal(N, DAG))
return V;
if (SDValue V = foldAddSubOfSignBit(N, DAG))
return V;
// fold (a+b) -> (a|b) iff a and b share no bits.
if ((!LegalOperations || TLI.isOperationLegal(ISD::OR, VT)) &&
DAG.haveNoCommonBitsSet(N0, N1))
return DAG.getNode(ISD::OR, DL, VT, N0, N1);
return SDValue();
}
SDValue DAGCombiner::visitADDSAT(SDNode *N) {
unsigned Opcode = N->getOpcode();
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N0.getValueType();
SDLoc DL(N);
// fold vector ops
if (VT.isVector()) {
// TODO SimplifyVBinOp
// fold (add_sat x, 0) -> x, vector edition
if (ISD::isBuildVectorAllZeros(N1.getNode()))
return N0;
if (ISD::isBuildVectorAllZeros(N0.getNode()))
return N1;
}
// fold (add_sat x, undef) -> -1
if (N0.isUndef() || N1.isUndef())
return DAG.getAllOnesConstant(DL, VT);
if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) {
// canonicalize constant to RHS
if (!DAG.isConstantIntBuildVectorOrConstantInt(N1))
return DAG.getNode(Opcode, DL, VT, N1, N0);
// fold (add_sat c1, c2) -> c3
return DAG.FoldConstantArithmetic(Opcode, DL, VT, N0.getNode(),
N1.getNode());
}
// fold (add_sat x, 0) -> x
if (isNullConstant(N1))
return N0;
// If it cannot overflow, transform into an add.
if (Opcode == ISD::UADDSAT)
if (DAG.computeOverflowKind(N0, N1) == SelectionDAG::OFK_Never)
return DAG.getNode(ISD::ADD, DL, VT, N0, N1);
return SDValue();
}
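/// Peel off TRUNCATE/ZERO_EXTEND/AND-with-1 wrappers and return the
/// underlying value if it is the carry result (result 1) of a legal
/// UADDO/USUBO/ADDCARRY/SUBCARRY node and the boolean is either masked to one
/// bit or known to use 0/1 values; otherwise return an empty SDValue.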
static SDValue getAsCarry(const TargetLowering &TLI, SDValue V) {
bool Masked = false;
// First, peel away TRUNCATE/ZERO_EXTEND/AND nodes due to legalization.
while (true) {
if (V.getOpcode() == ISD::TRUNCATE || V.getOpcode() == ISD::ZERO_EXTEND) {
V = V.getOperand(0);
continue;
}
if (V.getOpcode() == ISD::AND && isOneConstant(V.getOperand(1))) {
Masked = true;
V = V.getOperand(0);
continue;
}
break;
}
// If this is not a carry, return.
if (V.getResNo() != 1)
return SDValue();
if (V.getOpcode() != ISD::ADDCARRY && V.getOpcode() != ISD::SUBCARRY &&
V.getOpcode() != ISD::UADDO && V.getOpcode() != ISD::USUBO)
return SDValue();
EVT VT = V.getNode()->getValueType(0);
if (!TLI.isOperationLegalOrCustom(V.getOpcode(), VT))
return SDValue();
// If the result is masked, then no matter what kind of bool it is we can
// return. If it isn't, then we need to make sure the bool type is either 0 or
// 1 and not other values.
if (Masked ||
TLI.getBooleanContents(V.getValueType()) ==
TargetLoweringBase::ZeroOrOneBooleanContent)
return V;
return SDValue();
}
/// Given the operands of an add/sub operation, see if the 2nd operand is a
/// masked 0/1 whose source operand is actually known to be 0/-1. If so, invert
/// the opcode and bypass the mask operation.
static SDValue foldAddSubMasked1(bool IsAdd, SDValue N0, SDValue N1,
SelectionDAG &DAG, const SDLoc &DL) {
if (N1.getOpcode() != ISD::AND || !isOneOrOneSplat(N1->getOperand(1)))
return SDValue();
EVT VT = N0.getValueType();
if (DAG.ComputeNumSignBits(N1.getOperand(0)) != VT.getScalarSizeInBits())
return SDValue();
// add N0, (and (AssertSext X, i1), 1) --> sub N0, X
// sub N0, (and (AssertSext X, i1), 1) --> add N0, X
return DAG.getNode(IsAdd ? ISD::SUB : ISD::ADD, DL, VT, N0, N1.getOperand(0));
}
/// Helper for doing combines based on N0 and N1 being added to each other.
SDValue DAGCombiner::visitADDLikeCommutative(SDValue N0, SDValue N1,
SDNode *LocReference) {
EVT VT = N0.getValueType();
SDLoc DL(LocReference);
// fold (add x, shl(0 - y, n)) -> sub(x, shl(y, n))
if (N1.getOpcode() == ISD::SHL && N1.getOperand(0).getOpcode() == ISD::SUB &&
isNullOrNullSplat(N1.getOperand(0).getOperand(0)))
return DAG.getNode(ISD::SUB, DL, VT, N0,
DAG.getNode(ISD::SHL, DL, VT,
N1.getOperand(0).getOperand(1),
N1.getOperand(1)));
if (SDValue V = foldAddSubMasked1(true, N0, N1, DAG, DL))
return V;
// Look for:
// add (add x, 1), y
// And if the target does not like this form then turn into:
// sub y, (xor x, -1)
if (!TLI.preferIncOfAddToSubOfNot(VT) && N0.hasOneUse() &&
N0.getOpcode() == ISD::ADD && isOneOrOneSplat(N0.getOperand(1))) {
SDValue Not = DAG.getNode(ISD::XOR, DL, VT, N0.getOperand(0),
DAG.getAllOnesConstant(DL, VT));
return DAG.getNode(ISD::SUB, DL, VT, N1, Not);
}
// Hoist one-use subtraction by non-opaque constant:
// (x - C) + y -> (x + y) - C
// This is necessary because SUB(X,C) -> ADD(X,-C) doesn't work for vectors.
if (N0.hasOneUse() && N0.getOpcode() == ISD::SUB &&
isConstantOrConstantVector(N0.getOperand(1), /*NoOpaques=*/true)) {
SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(0), N1);
return DAG.getNode(ISD::SUB, DL, VT, Add, N0.getOperand(1));
}
// Hoist one-use subtraction from non-opaque constant:
// (C - x) + y -> (y - x) + C
if (N0.hasOneUse() && N0.getOpcode() == ISD::SUB &&
isConstantOrConstantVector(N0.getOperand(0), /*NoOpaques=*/true)) {
SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N1, N0.getOperand(1));
return DAG.getNode(ISD::ADD, DL, VT, Sub, N0.getOperand(0));
}
// If the target's bool is represented as 0/1, prefer to make this 'sub 0/1'
// rather than 'add 0/-1' (the zext should get folded).
// add (sext i1 Y), X --> sub X, (zext i1 Y)
if (N0.getOpcode() == ISD::SIGN_EXTEND &&
N0.getOperand(0).getScalarValueSizeInBits() == 1 &&
TLI.getBooleanContents(VT) == TargetLowering::ZeroOrOneBooleanContent) {
SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0));
return DAG.getNode(ISD::SUB, DL, VT, N1, ZExt);
}
// add X, (sextinreg Y i1) -> sub X, (and Y 1)
if (N1.getOpcode() == ISD::SIGN_EXTEND_INREG) {
VTSDNode *TN = cast<VTSDNode>(N1.getOperand(1));
if (TN->getVT() == MVT::i1) {
SDValue ZExt = DAG.getNode(ISD::AND, DL, VT, N1.getOperand(0),
DAG.getConstant(1, DL, VT));
return DAG.getNode(ISD::SUB, DL, VT, N0, ZExt);
}
}
// (add X, (addcarry Y, 0, Carry)) -> (addcarry X, Y, Carry)
if (N1.getOpcode() == ISD::ADDCARRY && isNullConstant(N1.getOperand(1)) &&
N1.getResNo() == 0)
return DAG.getNode(ISD::ADDCARRY, DL, N1->getVTList(),
N0, N1.getOperand(0), N1.getOperand(2));
// (add X, Carry) -> (addcarry X, 0, Carry)
if (TLI.isOperationLegalOrCustom(ISD::ADDCARRY, VT))
if (SDValue Carry = getAsCarry(TLI, N1))
return DAG.getNode(ISD::ADDCARRY, DL,
DAG.getVTList(VT, Carry.getValueType()), N0,
DAG.getConstant(0, DL, VT), Carry);
return SDValue();
}
SDValue DAGCombiner::visitADDC(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N0.getValueType();
SDLoc DL(N);
// If the flag result is dead, turn this into an ADD.
if (!N->hasAnyUseOfValue(1))
return CombineTo(N, DAG.getNode(ISD::ADD, DL, VT, N0, N1),
DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue));
// canonicalize constant to RHS.
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
if (N0C && !N1C)
return DAG.getNode(ISD::ADDC, DL, N->getVTList(), N1, N0);
// fold (addc x, 0) -> x + no carry out
if (isNullConstant(N1))
return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE,
DL, MVT::Glue));
// If it cannot overflow, transform into an add.
if (DAG.computeOverflowKind(N0, N1) == SelectionDAG::OFK_Never)
return CombineTo(N, DAG.getNode(ISD::ADD, DL, VT, N0, N1),
DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue));
return SDValue();
}
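/// Invert a boolean according to the target's boolean contents: XOR with 1
/// for 0/1 (or undefined) booleans, XOR with all-ones for 0/-1 booleans.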
static SDValue flipBoolean(SDValue V, const SDLoc &DL,
SelectionDAG &DAG, const TargetLowering &TLI) {
EVT VT = V.getValueType();
SDValue Cst;
switch (TLI.getBooleanContents(VT)) {
case TargetLowering::ZeroOrOneBooleanContent:
case TargetLowering::UndefinedBooleanContent:
Cst = DAG.getConstant(1, DL, VT);
break;
case TargetLowering::ZeroOrNegativeOneBooleanContent:
Cst = DAG.getAllOnesConstant(DL, VT);
break;
}
return DAG.getNode(ISD::XOR, DL, VT, V, Cst);
}
/**
* Flips a boolean if it is cheaper to compute. If the Force parameter is set,
* then the flip also occurs if computing the inverse is the same cost.
* This function returns an empty SDValue in case it cannot flip the boolean
* without increasing the cost of the computation. If you want to flip a boolean
* no matter what, use flipBoolean.
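*
* For example, under ZeroOrOneBooleanContent, (xor X, 1) computes !X, so
* extractBooleanFlip returns X without emitting any new node (illustrative).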
*/
static SDValue extractBooleanFlip(SDValue V, SelectionDAG &DAG,
const TargetLowering &TLI,
bool Force) {
if (Force && isa<ConstantSDNode>(V))
return flipBoolean(V, SDLoc(V), DAG, TLI);
if (V.getOpcode() != ISD::XOR)
return SDValue();
ConstantSDNode *Const = isConstOrConstSplat(V.getOperand(1), false);
if (!Const)
return SDValue();
EVT VT = V.getValueType();
bool IsFlip = false;
switch(TLI.getBooleanContents(VT)) {
case TargetLowering::ZeroOrOneBooleanContent:
IsFlip = Const->isOne();
break;
case TargetLowering::ZeroOrNegativeOneBooleanContent:
IsFlip = Const->isAllOnesValue();
break;
case TargetLowering::UndefinedBooleanContent:
IsFlip = (Const->getAPIntValue() & 0x01) == 1;
break;
}
if (IsFlip)
return V.getOperand(0);
if (Force)
return flipBoolean(V, SDLoc(V), DAG, TLI);
return SDValue();
}
SDValue DAGCombiner::visitADDO(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N0.getValueType();
bool IsSigned = (ISD::SADDO == N->getOpcode());
EVT CarryVT = N->getValueType(1);
SDLoc DL(N);
// If the flag result is dead, turn this into an ADD.
if (!N->hasAnyUseOfValue(1))
return CombineTo(N, DAG.getNode(ISD::ADD, DL, VT, N0, N1),
DAG.getUNDEF(CarryVT));
// canonicalize constant to RHS.
if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
!DAG.isConstantIntBuildVectorOrConstantInt(N1))
return DAG.getNode(N->getOpcode(), DL, N->getVTList(), N1, N0);
// fold (addo x, 0) -> x + no carry out
if (isNullOrNullSplat(N1))
return CombineTo(N, N0, DAG.getConstant(0, DL, CarryVT));
if (!IsSigned) {
// If it cannot overflow, transform into an add.
if (DAG.computeOverflowKind(N0, N1) == SelectionDAG::OFK_Never)
return CombineTo(N, DAG.getNode(ISD::ADD, DL, VT, N0, N1),
DAG.getConstant(0, DL, CarryVT));
// fold (uaddo (xor a, -1), 1) -> (usub 0, a) and flip carry.
if (isBitwiseNot(N0) && isOneOrOneSplat(N1)) {
SDValue Sub = DAG.getNode(ISD::USUBO, DL, N->getVTList(),
DAG.getConstant(0, DL, VT), N0.getOperand(0));
return CombineTo(N, Sub,
flipBoolean(Sub.getValue(1), DL, DAG, TLI));
}
if (SDValue Combined = visitUADDOLike(N0, N1, N))
return Combined;
if (SDValue Combined = visitUADDOLike(N1, N0, N))
return Combined;
}
return SDValue();
}
SDValue DAGCombiner::visitUADDOLike(SDValue N0, SDValue N1, SDNode *N) {
EVT VT = N0.getValueType();
if (VT.isVector())
return SDValue();
// (uaddo X, (addcarry Y, 0, Carry)) -> (addcarry X, Y, Carry)
// If Y + 1 cannot overflow.
if (N1.getOpcode() == ISD::ADDCARRY && isNullConstant(N1.getOperand(1))) {
SDValue Y = N1.getOperand(0);
SDValue One = DAG.getConstant(1, SDLoc(N), Y.getValueType());
if (DAG.computeOverflowKind(Y, One) == SelectionDAG::OFK_Never)
return DAG.getNode(ISD::ADDCARRY, SDLoc(N), N->getVTList(), N0, Y,
N1.getOperand(2));
}
// (uaddo X, Carry) -> (addcarry X, 0, Carry)
if (TLI.isOperationLegalOrCustom(ISD::ADDCARRY, VT))
if (SDValue Carry = getAsCarry(TLI, N1))
return DAG.getNode(ISD::ADDCARRY, SDLoc(N), N->getVTList(), N0,
DAG.getConstant(0, SDLoc(N), VT), Carry);
return SDValue();
}
SDValue DAGCombiner::visitADDE(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
SDValue CarryIn = N->getOperand(2);
// canonicalize constant to RHS
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
if (N0C && !N1C)
return DAG.getNode(ISD::ADDE, SDLoc(N), N->getVTList(),
N1, N0, CarryIn);
// fold (adde x, y, false) -> (addc x, y)
if (CarryIn.getOpcode() == ISD::CARRY_FALSE)
return DAG.getNode(ISD::ADDC, SDLoc(N), N->getVTList(), N0, N1);
return SDValue();
}
SDValue DAGCombiner::visitADDCARRY(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
SDValue CarryIn = N->getOperand(2);
SDLoc DL(N);
// canonicalize constant to RHS
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
if (N0C && !N1C)
return DAG.getNode(ISD::ADDCARRY, DL, N->getVTList(), N1, N0, CarryIn);
// fold (addcarry x, y, false) -> (uaddo x, y)
if (isNullConstant(CarryIn)) {
if (!LegalOperations ||
TLI.isOperationLegalOrCustom(ISD::UADDO, N->getValueType(0)))
return DAG.getNode(ISD::UADDO, DL, N->getVTList(), N0, N1);
}
// fold (addcarry 0, 0, X) -> (and (ext/trunc X), 1) and no carry.
if (isNullConstant(N0) && isNullConstant(N1)) {
EVT VT = N0.getValueType();
EVT CarryVT = CarryIn.getValueType();
SDValue CarryExt = DAG.getBoolExtOrTrunc(CarryIn, DL, VT, CarryVT);
AddToWorklist(CarryExt.getNode());
return CombineTo(N, DAG.getNode(ISD::AND, DL, VT, CarryExt,
DAG.getConstant(1, DL, VT)),
DAG.getConstant(0, DL, CarryVT));
}
if (SDValue Combined = visitADDCARRYLike(N0, N1, CarryIn, N))
return Combined;
if (SDValue Combined = visitADDCARRYLike(N1, N0, CarryIn, N))
return Combined;
return SDValue();
}
/**
* If we are facing some sort of diamond carry propagation pattern, try to
* break it up to generate something like:
* (addcarry X, 0, (addcarry A, B, Z):Carry)
*
* The end result is usually an increase in the number of operations required,
* but because the carry is now linearized, other transforms can kick in and
* optimize the DAG.
*
* Patterns typically look something like
* (uaddo A, B)
* / \
* Carry Sum
* | \
* | (addcarry *, 0, Z)
* | /
* \ Carry
* | /
* (addcarry X, *, *)
*
* But numerous variations exist. Our goal is to identify A, B, X and Z and
* produce a combine with a single path for carry propagation.
*/
static SDValue combineADDCARRYDiamond(DAGCombiner &Combiner, SelectionDAG &DAG,
SDValue X, SDValue Carry0, SDValue Carry1,
SDNode *N) {
if (Carry1.getResNo() != 1 || Carry0.getResNo() != 1)
return SDValue();
if (Carry1.getOpcode() != ISD::UADDO)
return SDValue();
SDValue Z;
/**
* First look for a suitable Z. It will present itself in the form of
* (addcarry Y, 0, Z) or its equivalent (uaddo Y, 1) for Z=true
*/
if (Carry0.getOpcode() == ISD::ADDCARRY &&
isNullConstant(Carry0.getOperand(1))) {
Z = Carry0.getOperand(2);
} else if (Carry0.getOpcode() == ISD::UADDO &&
isOneConstant(Carry0.getOperand(1))) {
EVT VT = Combiner.getSetCCResultType(Carry0.getValueType());
Z = DAG.getConstant(1, SDLoc(Carry0.getOperand(1)), VT);
} else {
// We couldn't find a suitable Z.
return SDValue();
}
auto cancelDiamond = [&](SDValue A,SDValue B) {
SDLoc DL(N);
SDValue NewY = DAG.getNode(ISD::ADDCARRY, DL, Carry0->getVTList(), A, B, Z);
Combiner.AddToWorklist(NewY.getNode());
return DAG.getNode(ISD::ADDCARRY, DL, N->getVTList(), X,
DAG.getConstant(0, DL, X.getValueType()),
NewY.getValue(1));
};
/**
* (uaddo A, B)
* |
* Sum
* |
* (addcarry *, 0, Z)
*/
if (Carry0.getOperand(0) == Carry1.getValue(0)) {
return cancelDiamond(Carry1.getOperand(0), Carry1.getOperand(1));
}
/**
* (addcarry A, 0, Z)
* |
* Sum
* |
* (uaddo *, B)
*/
if (Carry1.getOperand(0) == Carry0.getValue(0)) {
return cancelDiamond(Carry0.getOperand(0), Carry1.getOperand(1));
}
if (Carry1.getOperand(1) == Carry0.getValue(0)) {
return cancelDiamond(Carry1.getOperand(0), Carry0.getOperand(0));
}
return SDValue();
}
SDValue DAGCombiner::visitADDCARRYLike(SDValue N0, SDValue N1, SDValue CarryIn,
SDNode *N) {
// fold (addcarry (xor a, -1), b, c) -> (subcarry b, a, !c) and flip carry.
if (isBitwiseNot(N0))
if (SDValue NotC = extractBooleanFlip(CarryIn, DAG, TLI, true)) {
SDLoc DL(N);
SDValue Sub = DAG.getNode(ISD::SUBCARRY, DL, N->getVTList(), N1,
N0.getOperand(0), NotC);
return CombineTo(N, Sub,
flipBoolean(Sub.getValue(1), DL, DAG, TLI));
}
// Iff the flag result is dead:
// (addcarry (add|uaddo X, Y), 0, Carry) -> (addcarry X, Y, Carry)
// Don't do this if the Carry comes from the uaddo. It won't remove the uaddo
// or the dependency between the instructions.
if ((N0.getOpcode() == ISD::ADD ||
(N0.getOpcode() == ISD::UADDO && N0.getResNo() == 0 &&
N0.getValue(1) != CarryIn)) &&
isNullConstant(N1) && !N->hasAnyUseOfValue(1))
return DAG.getNode(ISD::ADDCARRY, SDLoc(N), N->getVTList(),
N0.getOperand(0), N0.getOperand(1), CarryIn);
/**
* When one of the addcarry arguments is itself a carry, we may be facing
* a diamond carry propagation pattern, in which case we try to transform
* the DAG to ensure linear carry propagation if that is possible.
*/
if (auto Y = getAsCarry(TLI, N1)) {
// Because both are carries, Y and Z can be swapped.
if (auto R = combineADDCARRYDiamond(*this, DAG, N0, Y, CarryIn, N))
return R;
if (auto R = combineADDCARRYDiamond(*this, DAG, N0, CarryIn, Y, N))
return R;
}
return SDValue();
}
// Since it may not be valid to emit a fold to zero for vector initializers,
// check whether we can before folding.
static SDValue tryFoldToZero(const SDLoc &DL, const TargetLowering &TLI, EVT VT,
SelectionDAG &DAG, bool LegalOperations) {
if (!VT.isVector())
return DAG.getConstant(0, DL, VT);
if (!LegalOperations || TLI.isOperationLegal(ISD::BUILD_VECTOR, VT))
return DAG.getConstant(0, DL, VT);
return SDValue();
}
SDValue DAGCombiner::visitSUB(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N0.getValueType();
SDLoc DL(N);
// fold vector ops
if (VT.isVector()) {
if (SDValue FoldedVOp = SimplifyVBinOp(N))
return FoldedVOp;
// fold (sub x, 0) -> x, vector edition
if (ISD::isBuildVectorAllZeros(N1.getNode()))
return N0;
}
// fold (sub x, x) -> 0
// FIXME: Refactor this and xor and other similar operations together.
if (N0 == N1)
return tryFoldToZero(DL, TLI, VT, DAG, LegalOperations);
if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
DAG.isConstantIntBuildVectorOrConstantInt(N1)) {
// fold (sub c1, c2) -> c1-c2
return DAG.FoldConstantArithmetic(ISD::SUB, DL, VT, N0.getNode(),
N1.getNode());
}
if (SDValue NewSel = foldBinOpIntoSelect(N))
return NewSel;
ConstantSDNode *N1C = getAsNonOpaqueConstant(N1);
// fold (sub x, c) -> (add x, -c)
if (N1C) {
return DAG.getNode(ISD::ADD, DL, VT, N0,
DAG.getConstant(-N1C->getAPIntValue(), DL, VT));
}
if (isNullOrNullSplat(N0)) {
unsigned BitWidth = VT.getScalarSizeInBits();
// Right-shifting everything out but the sign bit followed by negation is
// the same as flipping arithmetic/logical shift type without the negation:
// -(X >>u 31) -> (X >>s 31)
// -(X >>s 31) -> (X >>u 31)
if (N1->getOpcode() == ISD::SRA || N1->getOpcode() == ISD::SRL) {
ConstantSDNode *ShiftAmt = isConstOrConstSplat(N1.getOperand(1));
if (ShiftAmt && ShiftAmt->getAPIntValue() == (BitWidth - 1)) {
auto NewSh = N1->getOpcode() == ISD::SRA ? ISD::SRL : ISD::SRA;
if (!LegalOperations || TLI.isOperationLegal(NewSh, VT))
return DAG.getNode(NewSh, DL, VT, N1.getOperand(0), N1.getOperand(1));
}
}
// 0 - X --> 0 if the sub is NUW.
if (N->getFlags().hasNoUnsignedWrap())
return N0;
if (DAG.MaskedValueIsZero(N1, ~APInt::getSignMask(BitWidth))) {
// N1 is either 0 or the minimum signed value. If the sub is NSW, then
// N1 must be 0 because negating the minimum signed value is undefined.
if (N->getFlags().hasNoSignedWrap())
return N0;
// 0 - X --> X if X is 0 or the minimum signed value.
return N1;
}
}
// Canonicalize (sub -1, x) -> ~x, i.e. (xor x, -1)
if (isAllOnesOrAllOnesSplat(N0))
return DAG.getNode(ISD::XOR, DL, VT, N1, N0);
// fold (A - (0-B)) -> A+B
if (N1.getOpcode() == ISD::SUB && isNullOrNullSplat(N1.getOperand(0)))
return DAG.getNode(ISD::ADD, DL, VT, N0, N1.getOperand(1));
// fold A-(A-B) -> B
if (N1.getOpcode() == ISD::SUB && N0 == N1.getOperand(0))
return N1.getOperand(1);
// fold (A+B)-A -> B
if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1)
return N0.getOperand(1);
// fold (A+B)-B -> A
if (N0.getOpcode() == ISD::ADD && N0.getOperand(1) == N1)
return N0.getOperand(0);
// fold (A+C1)-C2 -> A+(C1-C2)
if (N0.getOpcode() == ISD::ADD &&
isConstantOrConstantVector(N1, /* NoOpaques */ true) &&
isConstantOrConstantVector(N0.getOperand(1), /* NoOpaques */ true)) {
SDValue NewC = DAG.FoldConstantArithmetic(
ISD::SUB, DL, VT, N0.getOperand(1).getNode(), N1.getNode());
assert(NewC && "Constant folding failed");
return DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(0), NewC);
}
// fold C2-(A+C1) -> (C2-C1)-A
if (N1.getOpcode() == ISD::ADD) {
SDValue N11 = N1.getOperand(1);
if (isConstantOrConstantVector(N0, /* NoOpaques */ true) &&
isConstantOrConstantVector(N11, /* NoOpaques */ true)) {
SDValue NewC = DAG.FoldConstantArithmetic(ISD::SUB, DL, VT, N0.getNode(),
N11.getNode());
assert(NewC && "Constant folding failed");
return DAG.getNode(ISD::SUB, DL, VT, NewC, N1.getOperand(0));
}
}
// fold (A-C1)-C2 -> A-(C1+C2)
if (N0.getOpcode() == ISD::SUB &&
isConstantOrConstantVector(N1, /* NoOpaques */ true) &&
isConstantOrConstantVector(N0.getOperand(1), /* NoOpaques */ true)) {
SDValue NewC = DAG.FoldConstantArithmetic(
ISD::ADD, DL, VT, N0.getOperand(1).getNode(), N1.getNode());
assert(NewC && "Constant folding failed");
return DAG.getNode(ISD::SUB, DL, VT, N0.getOperand(0), NewC);
}
// fold (c1-A)-c2 -> (c1-c2)-A
if (N0.getOpcode() == ISD::SUB &&
isConstantOrConstantVector(N1, /* NoOpaques */ true) &&
isConstantOrConstantVector(N0.getOperand(0), /* NoOpaques */ true)) {
SDValue NewC = DAG.FoldConstantArithmetic(
ISD::SUB, DL, VT, N0.getOperand(0).getNode(), N1.getNode());
assert(NewC && "Constant folding failed");
return DAG.getNode(ISD::SUB, DL, VT, NewC, N0.getOperand(1));
}
// fold ((A+(B+or-C))-B) -> A+or-C
if (N0.getOpcode() == ISD::ADD &&
(N0.getOperand(1).getOpcode() == ISD::SUB ||
N0.getOperand(1).getOpcode() == ISD::ADD) &&
N0.getOperand(1).getOperand(0) == N1)
return DAG.getNode(N0.getOperand(1).getOpcode(), DL, VT, N0.getOperand(0),
N0.getOperand(1).getOperand(1));
// fold ((A+(C+B))-B) -> A+C
if (N0.getOpcode() == ISD::ADD && N0.getOperand(1).getOpcode() == ISD::ADD &&
N0.getOperand(1).getOperand(1) == N1)
return DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(0),
N0.getOperand(1).getOperand(0));
// fold ((A-(B-C))-C) -> A-B
if (N0.getOpcode() == ISD::SUB && N0.getOperand(1).getOpcode() == ISD::SUB &&
N0.getOperand(1).getOperand(1) == N1)
return DAG.getNode(ISD::SUB, DL, VT, N0.getOperand(0),
N0.getOperand(1).getOperand(0));
// fold (A-(B-C)) -> A+(C-B)
if (N1.getOpcode() == ISD::SUB && N1.hasOneUse())
return DAG.getNode(ISD::ADD, DL, VT, N0,
DAG.getNode(ISD::SUB, DL, VT, N1.getOperand(1),
N1.getOperand(0)));
// fold (X - (-Y * Z)) -> (X + (Y * Z))
if (N1.getOpcode() == ISD::MUL && N1.hasOneUse()) {
if (N1.getOperand(0).getOpcode() == ISD::SUB &&
isNullOrNullSplat(N1.getOperand(0).getOperand(0))) {
SDValue Mul = DAG.getNode(ISD::MUL, DL, VT,
N1.getOperand(0).getOperand(1),
N1.getOperand(1));
return DAG.getNode(ISD::ADD, DL, VT, N0, Mul);
}
if (N1.getOperand(1).getOpcode() == ISD::SUB &&
isNullOrNullSplat(N1.getOperand(1).getOperand(0))) {
SDValue Mul = DAG.getNode(ISD::MUL, DL, VT,
N1.getOperand(0),
N1.getOperand(1).getOperand(1));
return DAG.getNode(ISD::ADD, DL, VT, N0, Mul);
}
}
// If either operand of a sub is undef, the result is undef
if (N0.isUndef())
return N0;
if (N1.isUndef())
return N1;
if (SDValue V = foldAddSubBoolOfMaskedVal(N, DAG))
return V;
if (SDValue V = foldAddSubOfSignBit(N, DAG))
return V;
if (SDValue V = foldAddSubMasked1(false, N0, N1, DAG, SDLoc(N)))
return V;
// (x - y) - 1 -> add (xor y, -1), x
if (N0.hasOneUse() && N0.getOpcode() == ISD::SUB && isOneOrOneSplat(N1)) {
SDValue Xor = DAG.getNode(ISD::XOR, DL, VT, N0.getOperand(1),
DAG.getAllOnesConstant(DL, VT));
return DAG.getNode(ISD::ADD, DL, VT, Xor, N0.getOperand(0));
}
// Look for:
// sub y, (xor x, -1)
// And if the target does not like this form then turn into:
// add (add x, y), 1
if (TLI.preferIncOfAddToSubOfNot(VT) && N1.hasOneUse() && isBitwiseNot(N1)) {
SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, N1.getOperand(0));
return DAG.getNode(ISD::ADD, DL, VT, Add, DAG.getConstant(1, DL, VT));
}
// Hoist one-use addition by non-opaque constant:
// (x + C) - y -> (x - y) + C
if (N0.hasOneUse() && N0.getOpcode() == ISD::ADD &&
isConstantOrConstantVector(N0.getOperand(1), /*NoOpaques=*/true)) {
SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N0.getOperand(0), N1);
return DAG.getNode(ISD::ADD, DL, VT, Sub, N0.getOperand(1));
}
// y - (x + C) -> (y - x) - C
if (N1.hasOneUse() && N1.getOpcode() == ISD::ADD &&
isConstantOrConstantVector(N1.getOperand(1), /*NoOpaques=*/true)) {
SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N0, N1.getOperand(0));
return DAG.getNode(ISD::SUB, DL, VT, Sub, N1.getOperand(1));
}
// (x - C) - y -> (x - y) - C
// This is necessary because SUB(X,C) -> ADD(X,-C) doesn't work for vectors.
if (N0.hasOneUse() && N0.getOpcode() == ISD::SUB &&
isConstantOrConstantVector(N0.getOperand(1), /*NoOpaques=*/true)) {
SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N0.getOperand(0), N1);
return DAG.getNode(ISD::SUB, DL, VT, Sub, N0.getOperand(1));
}
// (C - x) - y -> C - (x + y)
if (N0.hasOneUse() && N0.getOpcode() == ISD::SUB &&
isConstantOrConstantVector(N0.getOperand(0), /*NoOpaques=*/true)) {
SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(1), N1);
return DAG.getNode(ISD::SUB, DL, VT, N0.getOperand(0), Add);
}
// If the target's bool is represented as 0/-1, prefer to make this 'add 0/-1'
// rather than 'sub 0/1' (the sext should get folded).
// sub X, (zext i1 Y) --> add X, (sext i1 Y)
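// (zext i1 Y) is 0 or 1 while (sext i1 Y) is 0 or -1, so subtracting the
// former equals adding the latter: X - 1 == X + (-1).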
if (N1.getOpcode() == ISD::ZERO_EXTEND &&
N1.getOperand(0).getScalarValueSizeInBits() == 1 &&
TLI.getBooleanContents(VT) ==
TargetLowering::ZeroOrNegativeOneBooleanContent) {
SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, N1.getOperand(0));
return DAG.getNode(ISD::ADD, DL, VT, N0, SExt);
}
// fold Y = sra (X, size(X)-1); sub (xor (X, Y), Y) -> (abs X)
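// Y is 0 when X >= 0 and -1 when X < 0, so (xor X, Y) conditionally flips
// all bits and the sub then adds back 1. e.g. X = -5: (~(-5)) - (-1) ==
// 4 + 1 == 5 == abs(-5).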
if (TLI.isOperationLegalOrCustom(ISD::ABS, VT)) {
if (N0.getOpcode() == ISD::XOR && N1.getOpcode() == ISD::SRA) {
SDValue X0 = N0.getOperand(0), X1 = N0.getOperand(1);
SDValue S0 = N1.getOperand(0);
if ((X0 == S0 && X1 == N1) || (X0 == N1 && X1 == S0)) {
unsigned OpSizeInBits = VT.getScalarSizeInBits();
if (ConstantSDNode *C = isConstOrConstSplat(N1.getOperand(1)))
if (C->getAPIntValue() == (OpSizeInBits - 1))
return DAG.getNode(ISD::ABS, SDLoc(N), VT, S0);
}
}
}
// If the relocation model supports it, consider symbol offsets.
if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N0))
if (!LegalOperations && TLI.isOffsetFoldingLegal(GA)) {
// fold (sub Sym, c) -> Sym-c
if (N1C && GA->getOpcode() == ISD::GlobalAddress)
return DAG.getGlobalAddress(GA->getGlobal(), SDLoc(N1C), VT,
GA->getOffset() -
(uint64_t)N1C->getSExtValue());
// fold (sub Sym+c1, Sym+c2) -> c1-c2
if (GlobalAddressSDNode *GB = dyn_cast<GlobalAddressSDNode>(N1))
if (GA->getGlobal() == GB->getGlobal())
return DAG.getConstant((uint64_t)GA->getOffset() - GB->getOffset(),
DL, VT);
}
// sub X, (sextinreg Y i1) -> add X, (and Y 1)
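// (sext_inreg Y, i1) is 0 or -1 depending on bit 0 of Y, so subtracting it
// is the same as adding (and Y, 1): X - (-1) == X + 1.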
if (N1.getOpcode() == ISD::SIGN_EXTEND_INREG) {
VTSDNode *TN = cast<VTSDNode>(N1.getOperand(1));
if (TN->getVT() == MVT::i1) {
SDValue ZExt = DAG.getNode(ISD::AND, DL, VT, N1.getOperand(0),
DAG.getConstant(1, DL, VT));
return DAG.getNode(ISD::ADD, DL, VT, N0, ZExt);
}
}
// Prefer an add for more folding potential and possibly better codegen:
// sub N0, (lshr N10, width-1) --> add N0, (ashr N10, width-1)
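// (lshr N10, width-1) extracts the sign bit as 0/1, while
// (ashr N10, width-1) replicates it as 0/-1; subtracting the former
// is equivalent to adding the latter.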
if (!LegalOperations && N1.getOpcode() == ISD::SRL && N1.hasOneUse()) {
SDValue ShAmt = N1.getOperand(1);
ConstantSDNode *ShAmtC = isConstOrConstSplat(ShAmt);
if (ShAmtC &&
ShAmtC->getAPIntValue() == (N1.getScalarValueSizeInBits() - 1)) {
SDValue SRA = DAG.getNode(ISD::SRA, DL, VT, N1.getOperand(0), ShAmt);
return DAG.getNode(ISD::ADD, DL, VT, N0, SRA);
}
}
return SDValue();
}
SDValue DAGCombiner::visitSUBSAT(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N0.getValueType();
SDLoc DL(N);
// fold vector ops
if (VT.isVector()) {
// TODO SimplifyVBinOp
// fold (sub_sat x, 0) -> x, vector edition
if (ISD::isBuildVectorAllZeros(N1.getNode()))
return N0;
}
// fold (sub_sat x, undef) -> 0
if (N0.isUndef() || N1.isUndef())
return DAG.getConstant(0, DL, VT);
// fold (sub_sat x, x) -> 0
if (N0 == N1)
return DAG.getConstant(0, DL, VT);
if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
DAG.isConstantIntBuildVectorOrConstantInt(N1)) {
// fold (sub_sat c1, c2) -> c3
return DAG.FoldConstantArithmetic(N->getOpcode(), DL, VT, N0.getNode(),
N1.getNode());
}
// fold (sub_sat x, 0) -> x
if (isNullConstant(N1))
return N0;
return SDValue();
}
SDValue DAGCombiner::visitSUBC(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N0.getValueType();
SDLoc DL(N);
// If the flag result is dead, turn this into an SUB.
if (!N->hasAnyUseOfValue(1))
return CombineTo(N, DAG.getNode(ISD::SUB, DL, VT, N0, N1),
DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue));
// fold (subc x, x) -> 0 + no borrow
if (N0 == N1)
return CombineTo(N, DAG.getConstant(0, DL, VT),
DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue));
// fold (subc x, 0) -> x + no borrow
if (isNullConstant(N1))
return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue));
// Canonicalize (sub -1, x) -> ~x, i.e. (xor x, -1) + no borrow
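// In two's complement -1 - x == ~x and can never borrow.
// e.g. for i8: -1 - 5 == -6 == 0xFA == ~5.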
if (isAllOnesConstant(N0))
return CombineTo(N, DAG.getNode(ISD::XOR, DL, VT, N1, N0),
DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue));
return SDValue();
}
SDValue DAGCombiner::visitSUBO(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N0.getValueType();
bool IsSigned = (ISD::SSUBO == N->getOpcode());
EVT CarryVT = N->getValueType(1);
SDLoc DL(N);
// If the flag result is dead, turn this into an SUB.
if (!N->hasAnyUseOfValue(1))
return CombineTo(N, DAG.getNode(ISD::SUB, DL, VT, N0, N1),
DAG.getUNDEF(CarryVT));
// fold (subo x, x) -> 0 + no borrow
if (N0 == N1)
return CombineTo(N, DAG.getConstant(0, DL, VT),
DAG.getConstant(0, DL, CarryVT));
ConstantSDNode *N1C = getAsNonOpaqueConstant(N1);
// fold (subo x, c) -> (addo x, -c)
if (IsSigned && N1C && !N1C->getAPIntValue().isMinSignedValue()) {
return DAG.getNode(ISD::SADDO, DL, N->getVTList(), N0,
DAG.getConstant(-N1C->getAPIntValue(), DL, VT));
}
// fold (subo x, 0) -> x + no borrow
if (isNullOrNullSplat(N1))
return CombineTo(N, N0, DAG.getConstant(0, DL, CarryVT));
// Canonicalize (usubo -1, x) -> ~x, i.e. (xor x, -1) + no borrow
if (!IsSigned && isAllOnesOrAllOnesSplat(N0))
return CombineTo(N, DAG.getNode(ISD::XOR, DL, VT, N1, N0),
DAG.getConstant(0, DL, CarryVT));
return SDValue();
}
SDValue DAGCombiner::visitSUBE(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
SDValue CarryIn = N->getOperand(2);
// fold (sube x, y, false) -> (subc x, y)
if (CarryIn.getOpcode() == ISD::CARRY_FALSE)
return DAG.getNode(ISD::SUBC, SDLoc(N), N->getVTList(), N0, N1);
return SDValue();
}
SDValue DAGCombiner::visitSUBCARRY(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
SDValue CarryIn = N->getOperand(2);
// fold (subcarry x, y, false) -> (usubo x, y)
if (isNullConstant(CarryIn)) {
if (!LegalOperations ||
TLI.isOperationLegalOrCustom(ISD::USUBO, N->getValueType(0)))
return DAG.getNode(ISD::USUBO, SDLoc(N), N->getVTList(), N0, N1);
}
return SDValue();
}
SDValue DAGCombiner::visitMUL(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N0.getValueType();
// fold (mul x, undef) -> 0
if (N0.isUndef() || N1.isUndef())
return DAG.getConstant(0, SDLoc(N), VT);
bool N0IsConst = false;
bool N1IsConst = false;
bool N1IsOpaqueConst = false;
bool N0IsOpaqueConst = false;
APInt ConstValue0, ConstValue1;
// fold vector ops
if (VT.isVector()) {
if (SDValue FoldedVOp = SimplifyVBinOp(N))
return FoldedVOp;
N0IsConst = ISD::isConstantSplatVector(N0.getNode(), ConstValue0);
N1IsConst = ISD::isConstantSplatVector(N1.getNode(), ConstValue1);
assert((!N0IsConst ||
ConstValue0.getBitWidth() == VT.getScalarSizeInBits()) &&
"Splat APInt should be element width");
assert((!N1IsConst ||
ConstValue1.getBitWidth() == VT.getScalarSizeInBits()) &&
"Splat APInt should be element width");
} else {
N0IsConst = isa<ConstantSDNode>(N0);
if (N0IsConst) {
ConstValue0 = cast<ConstantSDNode>(N0)->getAPIntValue();
N0IsOpaqueConst = cast<ConstantSDNode>(N0)->isOpaque();
}
N1IsConst = isa<ConstantSDNode>(N1);
if (N1IsConst) {
ConstValue1 = cast<ConstantSDNode>(N1)->getAPIntValue();
N1IsOpaqueConst = cast<ConstantSDNode>(N1)->isOpaque();
}
}
// fold (mul c1, c2) -> c1*c2
if (N0IsConst && N1IsConst && !N0IsOpaqueConst && !N1IsOpaqueConst)
return DAG.FoldConstantArithmetic(ISD::MUL, SDLoc(N), VT,
N0.getNode(), N1.getNode());
// canonicalize constant to RHS (vector doesn't have to splat)
if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
!DAG.isConstantIntBuildVectorOrConstantInt(N1))
return DAG.getNode(ISD::MUL, SDLoc(N), VT, N1, N0);
// fold (mul x, 0) -> 0
if (N1IsConst && ConstValue1.isNullValue())
return N1;
// fold (mul x, 1) -> x
if (N1IsConst && ConstValue1.isOneValue())
return N0;
if (SDValue NewSel = foldBinOpIntoSelect(N))
return NewSel;
// fold (mul x, -1) -> 0-x
if (N1IsConst && ConstValue1.isAllOnesValue()) {
SDLoc DL(N);
return DAG.getNode(ISD::SUB, DL, VT,
DAG.getConstant(0, DL, VT), N0);
}
// fold (mul x, (1 << c)) -> x << c
if (isConstantOrConstantVector(N1, /*NoOpaques*/ true) &&
DAG.isKnownToBeAPowerOfTwo(N1) &&
(!VT.isVector() || Level <= AfterLegalizeVectorOps)) {
SDLoc DL(N);
SDValue LogBase2 = BuildLogBase2(N1, DL);
EVT ShiftVT = getShiftAmountTy(N0.getValueType());
SDValue Trunc = DAG.getZExtOrTrunc(LogBase2, DL, ShiftVT);
return DAG.getNode(ISD::SHL, DL, VT, N0, Trunc);
}
// fold (mul x, -(1 << c)) -> -(x << c) or (-x) << c
if (N1IsConst && !N1IsOpaqueConst && (-ConstValue1).isPowerOf2()) {
unsigned Log2Val = (-ConstValue1).logBase2();
SDLoc DL(N);
// FIXME: If the input is something that is easily negated (e.g. a
// single-use add), we should put the negate there.
return DAG.getNode(ISD::SUB, DL, VT,
DAG.getConstant(0, DL, VT),
DAG.getNode(ISD::SHL, DL, VT, N0,
DAG.getConstant(Log2Val, DL,
getShiftAmountTy(N0.getValueType()))));
}
// Try to transform multiply-by-(power-of-2 +/- 1) into shift and add/sub.
// mul x, (2^N + 1) --> add (shl x, N), x
// mul x, (2^N - 1) --> sub (shl x, N), x
// Examples: x * 33 --> (x << 5) + x
// x * 15 --> (x << 4) - x
// x * -33 --> -((x << 5) + x)
// x * -15 --> -((x << 4) - x) ; this reduces --> x - (x << 4)
if (N1IsConst && TLI.decomposeMulByConstant(VT, N1)) {
// TODO: We could handle more general decomposition of any constant by
// having the target set a limit on number of ops and making a
// callback to determine that sequence (similar to sqrt expansion).
unsigned MathOp = ISD::DELETED_NODE;
APInt MulC = ConstValue1.abs();
if ((MulC - 1).isPowerOf2())
MathOp = ISD::ADD;
else if ((MulC + 1).isPowerOf2())
MathOp = ISD::SUB;
if (MathOp != ISD::DELETED_NODE) {
unsigned ShAmt =
MathOp == ISD::ADD ? (MulC - 1).logBase2() : (MulC + 1).logBase2();
assert(ShAmt < VT.getScalarSizeInBits() &&
"multiply-by-constant generated out of bounds shift");
SDLoc DL(N);
SDValue Shl =
DAG.getNode(ISD::SHL, DL, VT, N0, DAG.getConstant(ShAmt, DL, VT));
SDValue R = DAG.getNode(MathOp, DL, VT, Shl, N0);
if (ConstValue1.isNegative())
R = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), R);
return R;
}
}
// (mul (shl X, c1), c2) -> (mul X, c2 << c1)
if (N0.getOpcode() == ISD::SHL &&
isConstantOrConstantVector(N1, /* NoOpaques */ true) &&
isConstantOrConstantVector(N0.getOperand(1), /* NoOpaques */ true)) {
SDValue C3 = DAG.getNode(ISD::SHL, SDLoc(N), VT, N1, N0.getOperand(1));
if (isConstantOrConstantVector(C3))
return DAG.getNode(ISD::MUL, SDLoc(N), VT, N0.getOperand(0), C3);
}
// Change (mul (shl X, C), Y) -> (shl (mul X, Y), C) when the shift has one
// use.
{
SDValue Sh(nullptr, 0), Y(nullptr, 0);
// Check for both (mul (shl X, C), Y) and (mul Y, (shl X, C)).
if (N0.getOpcode() == ISD::SHL &&
isConstantOrConstantVector(N0.getOperand(1)) &&
N0.getNode()->hasOneUse()) {
Sh = N0; Y = N1;
} else if (N1.getOpcode() == ISD::SHL &&
isConstantOrConstantVector(N1.getOperand(1)) &&
N1.getNode()->hasOneUse()) {
Sh = N1; Y = N0;
}
if (Sh.getNode()) {
SDValue Mul = DAG.getNode(ISD::MUL, SDLoc(N), VT, Sh.getOperand(0), Y);
return DAG.getNode(ISD::SHL, SDLoc(N), VT, Mul, Sh.getOperand(1));
}
}
// fold (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2)
if (DAG.isConstantIntBuildVectorOrConstantInt(N1) &&
N0.getOpcode() == ISD::ADD &&
DAG.isConstantIntBuildVectorOrConstantInt(N0.getOperand(1)) &&
isMulAddWithConstProfitable(N, N0, N1))
return DAG.getNode(ISD::ADD, SDLoc(N), VT,
DAG.getNode(ISD::MUL, SDLoc(N0), VT,
N0.getOperand(0), N1),
DAG.getNode(ISD::MUL, SDLoc(N1), VT,
N0.getOperand(1), N1));
// reassociate mul
if (SDValue RMUL = reassociateOps(ISD::MUL, SDLoc(N), N0, N1, N->getFlags()))
return RMUL;
return SDValue();
}
/// Return true if divmod libcall is available.
static bool isDivRemLibcallAvailable(SDNode *Node, bool isSigned,
const TargetLowering &TLI) {
RTLIB::Libcall LC;
EVT NodeType = Node->getValueType(0);
if (!NodeType.isSimple())
return false;
switch (NodeType.getSimpleVT().SimpleTy) {
default: return false; // No libcall for vector types.
case MVT::i8: LC= isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break;
case MVT::i16: LC= isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break;
case MVT::i32: LC= isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break;
case MVT::i64: LC= isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break;
case MVT::i128: LC= isSigned ? RTLIB::SDIVREM_I128:RTLIB::UDIVREM_I128; break;
}
return TLI.getLibcallName(LC) != nullptr;
}
/// Issue divrem if both quotient and remainder are needed.
SDValue DAGCombiner::useDivRem(SDNode *Node) {
if (Node->use_empty())
return SDValue(); // This is a dead node, leave it alone.
unsigned Opcode = Node->getOpcode();
bool isSigned = (Opcode == ISD::SDIV) || (Opcode == ISD::SREM);
unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM;
// Divmod lib calls can still work on non-legal types, since lib-calls do
// not require type legality.
EVT VT = Node->getValueType(0);
if (VT.isVector() || !VT.isInteger())
return SDValue();
if (!TLI.isTypeLegal(VT) && !TLI.isOperationCustom(DivRemOpc, VT))
return SDValue();
// If DIVREM is going to get expanded into a libcall,
// but there is no libcall available, then don't combine.
if (!TLI.isOperationLegalOrCustom(DivRemOpc, VT) &&
!isDivRemLibcallAvailable(Node, isSigned, TLI))
return SDValue();
// If div is legal, it's better to do the normal expansion
unsigned OtherOpcode = 0;
if ((Opcode == ISD::SDIV) || (Opcode == ISD::UDIV)) {
OtherOpcode = isSigned ? ISD::SREM : ISD::UREM;
if (TLI.isOperationLegalOrCustom(Opcode, VT))
return SDValue();
} else {
OtherOpcode = isSigned ? ISD::SDIV : ISD::UDIV;
if (TLI.isOperationLegalOrCustom(OtherOpcode, VT))
return SDValue();
}
SDValue Op0 = Node->getOperand(0);
SDValue Op1 = Node->getOperand(1);
SDValue combined;
for (SDNode::use_iterator UI = Op0.getNode()->use_begin(),
UE = Op0.getNode()->use_end(); UI != UE; ++UI) {
SDNode *User = *UI;
if (User == Node || User->getOpcode() == ISD::DELETED_NODE ||
User->use_empty())
continue;
// Convert the other matching node(s), too;
// otherwise, the DIVREM may get target-legalized into something
// target-specific that we won't be able to recognize.
unsigned UserOpc = User->getOpcode();
if ((UserOpc == Opcode || UserOpc == OtherOpcode || UserOpc == DivRemOpc) &&
User->getOperand(0) == Op0 &&
User->getOperand(1) == Op1) {
if (!combined) {
if (UserOpc == OtherOpcode) {
SDVTList VTs = DAG.getVTList(VT, VT);
combined = DAG.getNode(DivRemOpc, SDLoc(Node), VTs, Op0, Op1);
} else if (UserOpc == DivRemOpc) {
combined = SDValue(User, 0);
} else {
assert(UserOpc == Opcode);
continue;
}
}
if (UserOpc == ISD::SDIV || UserOpc == ISD::UDIV)
CombineTo(User, combined);
else if (UserOpc == ISD::SREM || UserOpc == ISD::UREM)
CombineTo(User, combined.getValue(1));
}
}
return combined;
}
static SDValue simplifyDivRem(SDNode *N, SelectionDAG &DAG) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N->getValueType(0);
SDLoc DL(N);
unsigned Opc = N->getOpcode();
bool IsDiv = (ISD::SDIV == Opc) || (ISD::UDIV == Opc);
ConstantSDNode *N1C = isConstOrConstSplat(N1);
// X / undef -> undef
// X % undef -> undef
// X / 0 -> undef
// X % 0 -> undef
// NOTE: This includes vectors where any divisor element is zero/undef.
if (DAG.isUndef(Opc, {N0, N1}))
return DAG.getUNDEF(VT);
// undef / X -> 0
// undef % X -> 0
if (N0.isUndef())
return DAG.getConstant(0, DL, VT);
// 0 / X -> 0
// 0 % X -> 0
ConstantSDNode *N0C = isConstOrConstSplat(N0);
if (N0C && N0C->isNullValue())
return N0;
// X / X -> 1
// X % X -> 0
if (N0 == N1)
return DAG.getConstant(IsDiv ? 1 : 0, DL, VT);
// X / 1 -> X
// X % 1 -> 0
// If this is a boolean op (single-bit element type), we can't have
// division-by-zero or remainder-by-zero, so assume the divisor is 1.
// TODO: Similarly, if we're zero-extending a boolean divisor, then assume
// it's a 1.
if ((N1C && N1C->isOne()) || (VT.getScalarType() == MVT::i1))
return IsDiv ? N0 : DAG.getConstant(0, DL, VT);
return SDValue();
}
SDValue DAGCombiner::visitSDIV(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N->getValueType(0);
EVT CCVT = getSetCCResultType(VT);
// fold vector ops
if (VT.isVector())
if (SDValue FoldedVOp = SimplifyVBinOp(N))
return FoldedVOp;
SDLoc DL(N);
// fold (sdiv c1, c2) -> c1/c2
ConstantSDNode *N0C = isConstOrConstSplat(N0);
ConstantSDNode *N1C = isConstOrConstSplat(N1);
if (N0C && N1C && !N0C->isOpaque() && !N1C->isOpaque())
return DAG.FoldConstantArithmetic(ISD::SDIV, DL, VT, N0C, N1C);
// fold (sdiv X, -1) -> 0-X
if (N1C && N1C->isAllOnesValue())
return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), N0);
// fold (sdiv X, MIN_SIGNED) -> select(X == MIN_SIGNED, 1, 0)
if (N1C && N1C->getAPIntValue().isMinSignedValue())
return DAG.getSelect(DL, VT, DAG.getSetCC(DL, CCVT, N0, N1, ISD::SETEQ),
DAG.getConstant(1, DL, VT),
DAG.getConstant(0, DL, VT));
if (SDValue V = simplifyDivRem(N, DAG))
return V;
if (SDValue NewSel = foldBinOpIntoSelect(N))
return NewSel;
// If we know the sign bits of both operands are zero, strength reduce to a
// udiv instead. Handles (X&15) /s 4 -> X&15 >> 2
if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0))
return DAG.getNode(ISD::UDIV, DL, N1.getValueType(), N0, N1);
if (SDValue V = visitSDIVLike(N0, N1, N)) {
// If the corresponding remainder node exists, update its users with
// (Dividend - (Quotient * Divisor)).
if (SDNode *RemNode = DAG.getNodeIfExists(ISD::SREM, N->getVTList(),
{ N0, N1 })) {
SDValue Mul = DAG.getNode(ISD::MUL, DL, VT, V, N1);
SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N0, Mul);
AddToWorklist(Mul.getNode());
AddToWorklist(Sub.getNode());
CombineTo(RemNode, Sub);
}
return V;
}
// sdiv, srem -> sdivrem
// If the divisor is constant, then return DIVREM only if isIntDivCheap() is
// true. Otherwise, we break the simplification logic in visitREM().
AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
if (!N1C || TLI.isIntDivCheap(N->getValueType(0), Attr))
if (SDValue DivRem = useDivRem(N))
return DivRem;
return SDValue();
}
SDValue DAGCombiner::visitSDIVLike(SDValue N0, SDValue N1, SDNode *N) {
SDLoc DL(N);
EVT VT = N->getValueType(0);
EVT CCVT = getSetCCResultType(VT);
unsigned BitWidth = VT.getScalarSizeInBits();
// Helper for determining whether a value is a power-2 constant scalar or a
// vector of such elements.
auto IsPowerOfTwo = [](ConstantSDNode *C) {
if (C->isNullValue() || C->isOpaque())
return false;
if (C->getAPIntValue().isPowerOf2())
return true;
if ((-C->getAPIntValue()).isPowerOf2())
return true;
return false;
};
// fold (sdiv X, pow2) -> simple ops after legalize
// FIXME: We check for the exact bit here because the generic lowering gives
// better results in that case. The target-specific lowering should learn how
// to handle exact sdivs efficiently.
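// The generic sequence below rounds a negative dividend toward zero by
// adding pow2-1 before the arithmetic shift. Worked i32 example, sdiv -7, 4:
// Sign = -7 >> 31 == -1; Srl = -1 >>u 30 == 3; Add = -7 + 3 == -4;
// Sra = -4 >> 2 == -1, matching truncating division.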
if (!N->getFlags().hasExact() && ISD::matchUnaryPredicate(N1, IsPowerOfTwo)) {
// Target-specific implementation of sdiv x, pow2.
if (SDValue Res = BuildSDIVPow2(N))
return Res;
// Create constants that are functions of the shift amount value.
EVT ShiftAmtTy = getShiftAmountTy(N0.getValueType());
SDValue Bits = DAG.getConstant(BitWidth, DL, ShiftAmtTy);
SDValue C1 = DAG.getNode(ISD::CTTZ, DL, VT, N1);
C1 = DAG.getZExtOrTrunc(C1, DL, ShiftAmtTy);
SDValue Inexact = DAG.getNode(ISD::SUB, DL, ShiftAmtTy, Bits, C1);
if (!isConstantOrConstantVector(Inexact))
return SDValue();
// Splat the sign bit into the register
SDValue Sign = DAG.getNode(ISD::SRA, DL, VT, N0,
DAG.getConstant(BitWidth - 1, DL, ShiftAmtTy));
AddToWorklist(Sign.getNode());
// Add (N0 < 0) ? abs2 - 1 : 0;
SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, Sign, Inexact);
AddToWorklist(Srl.getNode());
SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Srl);
AddToWorklist(Add.getNode());
SDValue Sra = DAG.getNode(ISD::SRA, DL, VT, Add, C1);
AddToWorklist(Sra.getNode());
// Special case: (sdiv X, 1) -> X
// Special case: (sdiv X, -1) -> 0-X
SDValue One = DAG.getConstant(1, DL, VT);
SDValue AllOnes = DAG.getAllOnesConstant(DL, VT);
SDValue IsOne = DAG.getSetCC(DL, CCVT, N1, One, ISD::SETEQ);
SDValue IsAllOnes = DAG.getSetCC(DL, CCVT, N1, AllOnes, ISD::SETEQ);
SDValue IsOneOrAllOnes = DAG.getNode(ISD::OR, DL, CCVT, IsOne, IsAllOnes);
Sra = DAG.getSelect(DL, VT, IsOneOrAllOnes, N0, Sra);
// If dividing by a positive value, we're done. Otherwise, the result must
// be negated.
SDValue Zero = DAG.getConstant(0, DL, VT);
SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, Zero, Sra);
// FIXME: Use SELECT_CC once we improve SELECT_CC constant-folding.
SDValue IsNeg = DAG.getSetCC(DL, CCVT, N1, Zero, ISD::SETLT);
SDValue Res = DAG.getSelect(DL, VT, IsNeg, Sub, Sra);
return Res;
}
// If integer divide is expensive and we satisfy the requirements, emit an
// alternate sequence. Targets may check function attributes for size/speed
// trade-offs.
AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
if (isConstantOrConstantVector(N1) &&
!TLI.isIntDivCheap(N->getValueType(0), Attr))
if (SDValue Op = BuildSDIV(N))
return Op;
return SDValue();
}
SDValue DAGCombiner::visitUDIV(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N->getValueType(0);
EVT CCVT = getSetCCResultType(VT);
// fold vector ops
if (VT.isVector())
if (SDValue FoldedVOp = SimplifyVBinOp(N))
return FoldedVOp;
SDLoc DL(N);
// fold (udiv c1, c2) -> c1/c2
ConstantSDNode *N0C = isConstOrConstSplat(N0);
ConstantSDNode *N1C = isConstOrConstSplat(N1);
if (N0C && N1C)
if (SDValue Folded = DAG.FoldConstantArithmetic(ISD::UDIV, DL, VT,
N0C, N1C))
return Folded;
// fold (udiv X, -1) -> select(X == -1, 1, 0)
if (N1C && N1C->getAPIntValue().isAllOnesValue())
return DAG.getSelect(DL, VT, DAG.getSetCC(DL, CCVT, N0, N1, ISD::SETEQ),
DAG.getConstant(1, DL, VT),
DAG.getConstant(0, DL, VT));
if (SDValue V = simplifyDivRem(N, DAG))
return V;
if (SDValue NewSel = foldBinOpIntoSelect(N))
return NewSel;
if (SDValue V = visitUDIVLike(N0, N1, N)) {
// If the corresponding remainder node exists, update its users with
// (Dividend - (Quotient * Divisor)).
if (SDNode *RemNode = DAG.getNodeIfExists(ISD::UREM, N->getVTList(),
{ N0, N1 })) {
SDValue Mul = DAG.getNode(ISD::MUL, DL, VT, V, N1);
SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N0, Mul);
AddToWorklist(Mul.getNode());
AddToWorklist(Sub.getNode());
CombineTo(RemNode, Sub);
}
return V;
}
// udiv, urem -> udivrem
// If the divisor is constant, then return DIVREM only if isIntDivCheap() is
// true. Otherwise, we break the simplification logic in visitREM().
AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
if (!N1C || TLI.isIntDivCheap(N->getValueType(0), Attr))
if (SDValue DivRem = useDivRem(N))
return DivRem;
return SDValue();
}
SDValue DAGCombiner::visitUDIVLike(SDValue N0, SDValue N1, SDNode *N) {
SDLoc DL(N);
EVT VT = N->getValueType(0);
// fold (udiv x, (1 << c)) -> x >>u c
if (isConstantOrConstantVector(N1, /*NoOpaques*/ true) &&
DAG.isKnownToBeAPowerOfTwo(N1)) {
SDValue LogBase2 = BuildLogBase2(N1, DL);
AddToWorklist(LogBase2.getNode());
EVT ShiftVT = getShiftAmountTy(N0.getValueType());
SDValue Trunc = DAG.getZExtOrTrunc(LogBase2, DL, ShiftVT);
AddToWorklist(Trunc.getNode());
return DAG.getNode(ISD::SRL, DL, VT, N0, Trunc);
}
// fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2
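// e.g. x = 100, c = 4, y = 1: the divisor is (4 << 1) == 8, and
// 100 >>u (log2(4) + 1) == 100 >>u 3 == 12 == 100 / 8.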
if (N1.getOpcode() == ISD::SHL) {
SDValue N10 = N1.getOperand(0);
if (isConstantOrConstantVector(N10, /*NoOpaques*/ true) &&
DAG.isKnownToBeAPowerOfTwo(N10)) {
SDValue LogBase2 = BuildLogBase2(N10, DL);
AddToWorklist(LogBase2.getNode());
EVT ADDVT = N1.getOperand(1).getValueType();
SDValue Trunc = DAG.getZExtOrTrunc(LogBase2, DL, ADDVT);
AddToWorklist(Trunc.getNode());
SDValue Add = DAG.getNode(ISD::ADD, DL, ADDVT, N1.getOperand(1), Trunc);
AddToWorklist(Add.getNode());
return DAG.getNode(ISD::SRL, DL, VT, N0, Add);
}
}
// fold (udiv x, c) -> alternate
AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
if (isConstantOrConstantVector(N1) &&
!TLI.isIntDivCheap(N->getValueType(0), Attr))
if (SDValue Op = BuildUDIV(N))
return Op;
return SDValue();
}
// handles ISD::SREM and ISD::UREM
SDValue DAGCombiner::visitREM(SDNode *N) {
unsigned Opcode = N->getOpcode();
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N->getValueType(0);
EVT CCVT = getSetCCResultType(VT);
bool isSigned = (Opcode == ISD::SREM);
SDLoc DL(N);
// fold (rem c1, c2) -> c1%c2
ConstantSDNode *N0C = isConstOrConstSplat(N0);
ConstantSDNode *N1C = isConstOrConstSplat(N1);
if (N0C && N1C)
if (SDValue Folded = DAG.FoldConstantArithmetic(Opcode, DL, VT, N0C, N1C))
return Folded;
// fold (urem X, -1) -> select(X == -1, 0, X)
if (!isSigned && N1C && N1C->getAPIntValue().isAllOnesValue())
return DAG.getSelect(DL, VT, DAG.getSetCC(DL, CCVT, N0, N1, ISD::SETEQ),
DAG.getConstant(0, DL, VT), N0);
if (SDValue V = simplifyDivRem(N, DAG))
return V;
if (SDValue NewSel = foldBinOpIntoSelect(N))
return NewSel;
if (isSigned) {
// If we know the sign bits of both operands are zero, strength reduce to a
// urem instead. Handles (X & 0x0FFFFFFF) %s 16 -> X&15
if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0))
return DAG.getNode(ISD::UREM, DL, VT, N0, N1);
} else {
SDValue NegOne = DAG.getAllOnesConstant(DL, VT);
if (DAG.isKnownToBeAPowerOfTwo(N1)) {
// fold (urem x, pow2) -> (and x, pow2-1)
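// e.g. (urem 13, 8) == 13 & 7 == 5.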
SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N1, NegOne);
AddToWorklist(Add.getNode());
return DAG.getNode(ISD::AND, DL, VT, N0, Add);
}
if (N1.getOpcode() == ISD::SHL &&
DAG.isKnownToBeAPowerOfTwo(N1.getOperand(0))) {
// fold (urem x, (shl pow2, y)) -> (and x, (add (shl pow2, y), -1))
SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N1, NegOne);
AddToWorklist(Add.getNode());
return DAG.getNode(ISD::AND, DL, VT, N0, Add);
}
}
AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
// If X/C can be simplified by the division-by-constant logic, lower
// X%C to the equivalent of X-X/C*C.
// Reuse the SDIVLike/UDIVLike combines - to avoid mangling nodes, the
// speculative DIV must not cause a DIVREM conversion. We guard against this
// by skipping the simplification if isIntDivCheap(). When div is not cheap,
// combine will not return a DIVREM. Regardless, checking cheapness here
// makes sense since the simplification results in fatter code.
if (DAG.isKnownNeverZero(N1) && !TLI.isIntDivCheap(VT, Attr)) {
SDValue OptimizedDiv =
isSigned ? visitSDIVLike(N0, N1, N) : visitUDIVLike(N0, N1, N);
if (OptimizedDiv.getNode()) {
// If the equivalent Div node also exists, update its users.
unsigned DivOpcode = isSigned ? ISD::SDIV : ISD::UDIV;
if (SDNode *DivNode = DAG.getNodeIfExists(DivOpcode, N->getVTList(),
{ N0, N1 }))
CombineTo(DivNode, OptimizedDiv);
SDValue Mul = DAG.getNode(ISD::MUL, DL, VT, OptimizedDiv, N1);
SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N0, Mul);
AddToWorklist(OptimizedDiv.getNode());
AddToWorklist(Mul.getNode());
return Sub;
}
}
// sdiv, srem -> sdivrem
if (SDValue DivRem = useDivRem(N))
return DivRem.getValue(1);
return SDValue();
}
SDValue DAGCombiner::visitMULHS(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N->getValueType(0);
SDLoc DL(N);
if (VT.isVector()) {
// fold (mulhs x, 0) -> 0
if (ISD::isBuildVectorAllZeros(N1.getNode()))
return N1;
if (ISD::isBuildVectorAllZeros(N0.getNode()))
return N0;
}
// fold (mulhs x, 0) -> 0
if (isNullConstant(N1))
return N1;
// fold (mulhs x, 1) -> (sra x, size(x)-1)
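// mulhs yields the high half of the sign-extended product, and x * 1
// sign-extends to 2*N bits, so the high half is the sign bit replicated.
// e.g. i8 mulhs(-2, 1): the i16 product is 0xFFFE, whose high byte
// 0xFF equals -2 >> 7.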
if (isOneConstant(N1))
return DAG.getNode(ISD::SRA, DL, N0.getValueType(), N0,
DAG.getConstant(N0.getValueSizeInBits() - 1, DL,
getShiftAmountTy(N0.getValueType())));
// fold (mulhs x, undef) -> 0
if (N0.isUndef() || N1.isUndef())
return DAG.getConstant(0, DL, VT);
// If the type twice as wide is legal, transform the mulhs to a wider multiply
// plus a shift.
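// e.g. i32 mulhs(a, b) becomes: sext a and b to i64, multiply, shift the
// product right by 32, truncate. For a = b = -1 the i64 product is 1, so
// the returned high half is 0, as expected.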
if (VT.isSimple() && !VT.isVector()) {
MVT Simple = VT.getSimpleVT();
unsigned SimpleSize = Simple.getSizeInBits();
EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
N0 = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N0);
N1 = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N1);
N1 = DAG.getNode(ISD::MUL, DL, NewVT, N0, N1);
N1 = DAG.getNode(ISD::SRL, DL, NewVT, N1,
DAG.getConstant(SimpleSize, DL,
getShiftAmountTy(N1.getValueType())));
return DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
}
}
return SDValue();
}
SDValue DAGCombiner::visitMULHU(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N->getValueType(0);
SDLoc DL(N);
if (VT.isVector()) {
// fold (mulhu x, 0) -> 0
if (ISD::isBuildVectorAllZeros(N1.getNode()))
return N1;
if (ISD::isBuildVectorAllZeros(N0.getNode()))
return N0;
}
// fold (mulhu x, 0) -> 0
if (isNullConstant(N1))
return N1;
// fold (mulhu x, 1) -> 0
if (isOneConstant(N1))
return DAG.getConstant(0, DL, N0.getValueType());
// fold (mulhu x, undef) -> 0
if (N0.isUndef() || N1.isUndef())
return DAG.getConstant(0, DL, VT);
// fold (mulhu x, (1 << c)) -> x >> (bitwidth - c)
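// The high N bits of x * (1 << c) are exactly x >>u (N - c).
// e.g. i32 mulhu(x, 16) == x >>u 28.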
if (isConstantOrConstantVector(N1, /*NoOpaques*/ true) &&
DAG.isKnownToBeAPowerOfTwo(N1) && hasOperation(ISD::SRL, VT)) {
unsigned NumEltBits = VT.getScalarSizeInBits();
SDValue LogBase2 = BuildLogBase2(N1, DL);
SDValue SRLAmt = DAG.getNode(
ISD::SUB, DL, VT, DAG.getConstant(NumEltBits, DL, VT), LogBase2);
EVT ShiftVT = getShiftAmountTy(N0.getValueType());
SDValue Trunc = DAG.getZExtOrTrunc(SRLAmt, DL, ShiftVT);
return DAG.getNode(ISD::SRL, DL, VT, N0, Trunc);
}
// If the type twice as wide is legal, transform the mulhu to a wider multiply
// plus a shift.
if (VT.isSimple() && !VT.isVector()) {
MVT Simple = VT.getSimpleVT();
unsigned SimpleSize = Simple.getSizeInBits();
EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
N0 = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N0);
N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N1);
N1 = DAG.getNode(ISD::MUL, DL, NewVT, N0, N1);
N1 = DAG.getNode(ISD::SRL, DL, NewVT, N1,
DAG.getConstant(SimpleSize, DL,
getShiftAmountTy(N1.getValueType())));
return DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
}
}
return SDValue();
}
/// Perform optimizations common to nodes that compute two values. LoOp and HiOp
/// give the opcodes for the two computations that are being performed. Return
/// the combined result if a simplification was made.
SDValue DAGCombiner::SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
unsigned HiOp) {
// If the high half is not needed, just compute the low half.
bool HiExists = N->hasAnyUseOfValue(1);
if (!HiExists && (!LegalOperations ||
TLI.isOperationLegalOrCustom(LoOp, N->getValueType(0)))) {
SDValue Res = DAG.getNode(LoOp, SDLoc(N), N->getValueType(0), N->ops());
return CombineTo(N, Res, Res);
}
// If the low half is not needed, just compute the high half.
bool LoExists = N->hasAnyUseOfValue(0);
if (!LoExists && (!LegalOperations ||
TLI.isOperationLegalOrCustom(HiOp, N->getValueType(1)))) {
SDValue Res = DAG.getNode(HiOp, SDLoc(N), N->getValueType(1), N->ops());
return CombineTo(N, Res, Res);
}
// If both halves are used, return as is.
if (LoExists && HiExists)
return SDValue();
// If the two computed results can be simplified separately, separate them.
if (LoExists) {
SDValue Lo = DAG.getNode(LoOp, SDLoc(N), N->getValueType(0), N->ops());
AddToWorklist(Lo.getNode());
SDValue LoOpt = combine(Lo.getNode());
if (LoOpt.getNode() && LoOpt.getNode() != Lo.getNode() &&
(!LegalOperations ||
TLI.isOperationLegalOrCustom(LoOpt.getOpcode(), LoOpt.getValueType())))
return CombineTo(N, LoOpt, LoOpt);
}
if (HiExists) {
SDValue Hi = DAG.getNode(HiOp, SDLoc(N), N->getValueType(1), N->ops());
AddToWorklist(Hi.getNode());
SDValue HiOpt = combine(Hi.getNode());
if (HiOpt.getNode() && HiOpt != Hi &&
(!LegalOperations ||
TLI.isOperationLegalOrCustom(HiOpt.getOpcode(), HiOpt.getValueType())))
return CombineTo(N, HiOpt, HiOpt);
}
return SDValue();
}
SDValue DAGCombiner::visitSMUL_LOHI(SDNode *N) {
if (SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHS))
return Res;
EVT VT = N->getValueType(0);
SDLoc DL(N);
// If the type twice as wide is legal, transform the smul_lohi to a wider
// multiply plus a shift.
if (VT.isSimple() && !VT.isVector()) {
MVT Simple = VT.getSimpleVT();
unsigned SimpleSize = Simple.getSizeInBits();
EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
SDValue Lo = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N->getOperand(0));
SDValue Hi = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N->getOperand(1));
Lo = DAG.getNode(ISD::MUL, DL, NewVT, Lo, Hi);
// Compute the high part (result value 1).
Hi = DAG.getNode(ISD::SRL, DL, NewVT, Lo,
DAG.getConstant(SimpleSize, DL,
getShiftAmountTy(Lo.getValueType())));
Hi = DAG.getNode(ISD::TRUNCATE, DL, VT, Hi);
// Compute the low part (result value 0).
Lo = DAG.getNode(ISD::TRUNCATE, DL, VT, Lo);
return CombineTo(N, Lo, Hi);
}
}
return SDValue();
}
SDValue DAGCombiner::visitUMUL_LOHI(SDNode *N) {
if (SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHU))
return Res;
EVT VT = N->getValueType(0);
SDLoc DL(N);
// If the type twice as wide is legal, transform the umul_lohi to a wider
// multiply plus a shift.
if (VT.isSimple() && !VT.isVector()) {
MVT Simple = VT.getSimpleVT();
unsigned SimpleSize = Simple.getSizeInBits();
EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
SDValue Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N->getOperand(0));
SDValue Hi = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N->getOperand(1));
Lo = DAG.getNode(ISD::MUL, DL, NewVT, Lo, Hi);
// Compute the high part (result value 1).
Hi = DAG.getNode(ISD::SRL, DL, NewVT, Lo,
DAG.getConstant(SimpleSize, DL,
getShiftAmountTy(Lo.getValueType())));
Hi = DAG.getNode(ISD::TRUNCATE, DL, VT, Hi);
// Compute the low part (result value 0).
Lo = DAG.getNode(ISD::TRUNCATE, DL, VT, Lo);
return CombineTo(N, Lo, Hi);
}
}
return SDValue();
}
SDValue DAGCombiner::visitMULO(SDNode *N) {
bool IsSigned = (ISD::SMULO == N->getOpcode());
// (mulo x, 2) -> (addo x, x)
if (ConstantSDNode *C2 = isConstOrConstSplat(N->getOperand(1)))
if (C2->getAPIntValue() == 2)
return DAG.getNode(IsSigned ? ISD::SADDO : ISD::UADDO, SDLoc(N),
N->getVTList(), N->getOperand(0), N->getOperand(0));
return SDValue();
}
SDValue DAGCombiner::visitIMINMAX(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N0.getValueType();
// fold vector ops
if (VT.isVector())
if (SDValue FoldedVOp = SimplifyVBinOp(N))
return FoldedVOp;
// fold operation with constant operands.
ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
ConstantSDNode *N1C = getAsNonOpaqueConstant(N1);
if (N0C && N1C)
return DAG.FoldConstantArithmetic(N->getOpcode(), SDLoc(N), VT, N0C, N1C);
// canonicalize constant to RHS
if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
!DAG.isConstantIntBuildVectorOrConstantInt(N1))
return DAG.getNode(N->getOpcode(), SDLoc(N), VT, N1, N0);
// If the sign bits are zero, flip between UMIN/UMAX and SMIN/SMAX.
// Only do this if the current op isn't legal and the flipped is.
unsigned Opcode = N->getOpcode();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (!TLI.isOperationLegal(Opcode, VT) &&
(N0.isUndef() || DAG.SignBitIsZero(N0)) &&
(N1.isUndef() || DAG.SignBitIsZero(N1))) {
unsigned AltOpcode;
switch (Opcode) {
case ISD::SMIN: AltOpcode = ISD::UMIN; break;
case ISD::SMAX: AltOpcode = ISD::UMAX; break;
case ISD::UMIN: AltOpcode = ISD::SMIN; break;
case ISD::UMAX: AltOpcode = ISD::SMAX; break;
default: llvm_unreachable("Unknown MINMAX opcode");
}
if (TLI.isOperationLegal(AltOpcode, VT))
return DAG.getNode(AltOpcode, SDLoc(N), VT, N0, N1);
}
return SDValue();
}
/// If this is a bitwise logic instruction and both operands have the same
/// opcode, try to sink the other opcode after the logic instruction.
SDValue DAGCombiner::hoistLogicOpWithSameOpcodeHands(SDNode *N) {
SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
EVT VT = N0.getValueType();
unsigned LogicOpcode = N->getOpcode();
unsigned HandOpcode = N0.getOpcode();
assert((LogicOpcode == ISD::AND || LogicOpcode == ISD::OR ||
LogicOpcode == ISD::XOR) && "Expected logic opcode");
assert(HandOpcode == N1.getOpcode() && "Bad input!");
// Bail early if none of these transforms apply.
if (N0.getNumOperands() == 0)
return SDValue();
// FIXME: We should check number of uses of the operands to not increase
// the instruction count for all transforms.
// Handle size-changing casts.
SDValue X = N0.getOperand(0);
SDValue Y = N1.getOperand(0);
EVT XVT = X.getValueType();
SDLoc DL(N);
if (HandOpcode == ISD::ANY_EXTEND || HandOpcode == ISD::ZERO_EXTEND ||
HandOpcode == ISD::SIGN_EXTEND) {
// If both operands have other uses, this transform would create extra
// instructions without eliminating anything.
if (!N0.hasOneUse() && !N1.hasOneUse())
return SDValue();
// We need matching integer source types.
if (XVT != Y.getValueType())
return SDValue();
// Don't create an illegal op during or after legalization. Don't ever
// create an unsupported vector op.
if ((VT.isVector() || LegalOperations) &&
!TLI.isOperationLegalOrCustom(LogicOpcode, XVT))
return SDValue();
// Avoid infinite looping with PromoteIntBinOp.
// TODO: Should we apply desirable/legal constraints to all opcodes?
if (HandOpcode == ISD::ANY_EXTEND && LegalTypes &&
!TLI.isTypeDesirableForOp(LogicOpcode, XVT))
return SDValue();
// logic_op (hand_op X), (hand_op Y) --> hand_op (logic_op X, Y)
SDValue Logic = DAG.getNode(LogicOpcode, DL, XVT, X, Y);
return DAG.getNode(HandOpcode, DL, VT, Logic);
}
// logic_op (truncate x), (truncate y) --> truncate (logic_op x, y)
if (HandOpcode == ISD::TRUNCATE) {
// If both operands have other uses, this transform would create extra
// instructions without eliminating anything.
if (!N0.hasOneUse() && !N1.hasOneUse())
return SDValue();
// We need matching source types.
if (XVT != Y.getValueType())
return SDValue();
// Don't create an illegal op during or after legalization.
if (LegalOperations && !TLI.isOperationLegal(LogicOpcode, XVT))
return SDValue();
// Be extra careful sinking truncate. If it's free, there's no benefit in
// widening a binop. Also, don't create a logic op on an illegal type.
if (TLI.isZExtFree(VT, XVT) && TLI.isTruncateFree(XVT, VT))
return SDValue();
if (!TLI.isTypeLegal(XVT))
return SDValue();
SDValue Logic = DAG.getNode(LogicOpcode, DL, XVT, X, Y);
return DAG.getNode(HandOpcode, DL, VT, Logic);
}
// For binops SHL/SRL/SRA/AND:
// logic_op (OP x, z), (OP y, z) --> OP (logic_op x, y), z
if ((HandOpcode == ISD::SHL || HandOpcode == ISD::SRL ||
HandOpcode == ISD::SRA || HandOpcode == ISD::AND) &&
N0.getOperand(1) == N1.getOperand(1)) {
// If either operand has other uses, this transform is not an improvement.
if (!N0.hasOneUse() || !N1.hasOneUse())
return SDValue();
SDValue Logic = DAG.getNode(LogicOpcode, DL, XVT, X, Y);
return DAG.getNode(HandOpcode, DL, VT, Logic, N0.getOperand(1));
}
// Unary ops: logic_op (bswap x), (bswap y) --> bswap (logic_op x, y)
if (HandOpcode == ISD::BSWAP) {
// If either operand has other uses, this transform is not an improvement.
if (!N0.hasOneUse() || !N1.hasOneUse())
return SDValue();
SDValue Logic = DAG.getNode(LogicOpcode, DL, XVT, X, Y);
return DAG.getNode(HandOpcode, DL, VT, Logic);
}
// Simplify xor/and/or (bitcast(A), bitcast(B)) -> bitcast(op (A,B))
// Only perform this optimization up until type legalization, before
// LegalizeVectorOps. LegalizeVectorOps promotes vector operations by
// adding bitcasts. For example (xor v4i32) is promoted to (v2i64), and
// we don't want to undo this promotion.
// We also handle SCALAR_TO_VECTOR because xor/or/and operations are cheaper
// on scalars.
if ((HandOpcode == ISD::BITCAST || HandOpcode == ISD::SCALAR_TO_VECTOR) &&
Level <= AfterLegalizeTypes) {
// Input types must be integer and the same.
if (XVT.isInteger() && XVT == Y.getValueType()) {
SDValue Logic = DAG.getNode(LogicOpcode, DL, XVT, X, Y);
return DAG.getNode(HandOpcode, DL, VT, Logic);
}
}
// Xor/and/or are indifferent to the swizzle operation (shuffle of one value).
// Simplify xor/and/or (shuff(A), shuff(B)) -> shuff(op (A,B))
// If both shuffles use the same mask, and both shuffle within a single
// vector, then it is worthwhile to move the swizzle after the operation.
// The type-legalizer generates this pattern when loading illegal
// vector types from memory. In many cases this allows additional shuffle
// optimizations.
// There are other cases where moving the shuffle after the xor/and/or
// is profitable even if shuffles don't perform a swizzle.
// If both shuffles use the same mask, and both shuffles have the same first
// or second operand, then it might still be profitable to move the shuffle
// after the xor/and/or operation.
if (HandOpcode == ISD::VECTOR_SHUFFLE && Level < AfterLegalizeDAG) {
auto *SVN0 = cast<ShuffleVectorSDNode>(N0);
auto *SVN1 = cast<ShuffleVectorSDNode>(N1);
assert(X.getValueType() == Y.getValueType() &&
"Inputs to shuffles are not the same type");
// Check that both shuffles use the same mask. The masks are known to be of
// the same length because the result vector type is the same.
// Check also that shuffles have only one use to avoid introducing extra
// instructions.
if (!SVN0->hasOneUse() || !SVN1->hasOneUse() ||
!SVN0->getMask().equals(SVN1->getMask()))
return SDValue();
// Don't try to fold this node if it requires introducing a
// build vector of all zeros that might be illegal at this stage.
SDValue ShOp = N0.getOperand(1);
if (LogicOpcode == ISD::XOR && !ShOp.isUndef())
ShOp = tryFoldToZero(DL, TLI, VT, DAG, LegalOperations);
// (logic_op (shuf (A, C), shuf (B, C))) --> shuf (logic_op (A, B), C)
if (N0.getOperand(1) == N1.getOperand(1) && ShOp.getNode()) {
SDValue Logic = DAG.getNode(LogicOpcode, DL, VT,
N0.getOperand(0), N1.getOperand(0));
return DAG.getVectorShuffle(VT, DL, Logic, ShOp, SVN0->getMask());
}
// Don't try to fold this node if it requires introducing a
// build vector of all zeros that might be illegal at this stage.
ShOp = N0.getOperand(0);
if (LogicOpcode == ISD::XOR && !ShOp.isUndef())
ShOp = tryFoldToZero(DL, TLI, VT, DAG, LegalOperations);
// (logic_op (shuf (C, A), shuf (C, B))) --> shuf (C, logic_op (A, B))
if (N0.getOperand(0) == N1.getOperand(0) && ShOp.getNode()) {
SDValue Logic = DAG.getNode(LogicOpcode, DL, VT, N0.getOperand(1),
N1.getOperand(1));
return DAG.getVectorShuffle(VT, DL, ShOp, Logic, SVN0->getMask());
}
}
return SDValue();
}
/// Try to make (and/or setcc (LL, LR), setcc (RL, RR)) more efficient.
SDValue DAGCombiner::foldLogicOfSetCCs(bool IsAnd, SDValue N0, SDValue N1,
const SDLoc &DL) {
SDValue LL, LR, RL, RR, N0CC, N1CC;
if (!isSetCCEquivalent(N0, LL, LR, N0CC) ||
!isSetCCEquivalent(N1, RL, RR, N1CC))
return SDValue();
assert(N0.getValueType() == N1.getValueType() &&
"Unexpected operand types for bitwise logic op");
assert(LL.getValueType() == LR.getValueType() &&
RL.getValueType() == RR.getValueType() &&
"Unexpected operand types for setcc");
// If we're here post-legalization or the logic op type is not i1, the logic
// op type must match a setcc result type. Also, all folds require new
// operations on the left and right operands, so those types must match.
EVT VT = N0.getValueType();
EVT OpVT = LL.getValueType();
if (LegalOperations || VT.getScalarType() != MVT::i1)
if (VT != getSetCCResultType(OpVT))
return SDValue();
if (OpVT != RL.getValueType())
return SDValue();
ISD::CondCode CC0 = cast<CondCodeSDNode>(N0CC)->get();
ISD::CondCode CC1 = cast<CondCodeSDNode>(N1CC)->get();
bool IsInteger = OpVT.isInteger();
if (LR == RR && CC0 == CC1 && IsInteger) {
bool IsZero = isNullOrNullSplat(LR);
bool IsNeg1 = isAllOnesOrAllOnesSplat(LR);
// All bits clear?
bool AndEqZero = IsAnd && CC1 == ISD::SETEQ && IsZero;
// All sign bits clear?
bool AndGtNeg1 = IsAnd && CC1 == ISD::SETGT && IsNeg1;
// Any bits set?
bool OrNeZero = !IsAnd && CC1 == ISD::SETNE && IsZero;
// Any sign bits set?
bool OrLtZero = !IsAnd && CC1 == ISD::SETLT && IsZero;
// (and (seteq X, 0), (seteq Y, 0)) --> (seteq (or X, Y), 0)
// (and (setgt X, -1), (setgt Y, -1)) --> (setgt (or X, Y), -1)
// (or (setne X, 0), (setne Y, 0)) --> (setne (or X, Y), 0)
// (or (setlt X, 0), (setlt Y, 0)) --> (setlt (or X, Y), 0)
if (AndEqZero || AndGtNeg1 || OrNeZero || OrLtZero) {
SDValue Or = DAG.getNode(ISD::OR, SDLoc(N0), OpVT, LL, RL);
AddToWorklist(Or.getNode());
return DAG.getSetCC(DL, VT, Or, LR, CC1);
}
// All bits set?
bool AndEqNeg1 = IsAnd && CC1 == ISD::SETEQ && IsNeg1;
// All sign bits set?
bool AndLtZero = IsAnd && CC1 == ISD::SETLT && IsZero;
// Any bits clear?
bool OrNeNeg1 = !IsAnd && CC1 == ISD::SETNE && IsNeg1;
// Any sign bits clear?
bool OrGtNeg1 = !IsAnd && CC1 == ISD::SETGT && IsNeg1;
// (and (seteq X, -1), (seteq Y, -1)) --> (seteq (and X, Y), -1)
// (and (setlt X, 0), (setlt Y, 0)) --> (setlt (and X, Y), 0)
// (or (setne X, -1), (setne Y, -1)) --> (setne (and X, Y), -1)
// (or (setgt X, -1), (setgt Y, -1)) --> (setgt (and X, Y), -1)
if (AndEqNeg1 || AndLtZero || OrNeNeg1 || OrGtNeg1) {
SDValue And = DAG.getNode(ISD::AND, SDLoc(N0), OpVT, LL, RL);
AddToWorklist(And.getNode());
return DAG.getSetCC(DL, VT, And, LR, CC1);
}
}
// TODO: What is the 'or' equivalent of this fold?
// (and (setne X, 0), (setne X, -1)) --> (setuge (add X, 1), 2)
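// (add X, 1) maps 0 to 1 and -1 to 0, the only two unsigned values below
// 2, so X != 0 && X != -1 is equivalent to (X + 1) >=u 2.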
if (IsAnd && LL == RL && CC0 == CC1 && OpVT.getScalarSizeInBits() > 1 &&
IsInteger && CC0 == ISD::SETNE &&
((isNullConstant(LR) && isAllOnesConstant(RR)) ||
(isAllOnesConstant(LR) && isNullConstant(RR)))) {
SDValue One = DAG.getConstant(1, DL, OpVT);
SDValue Two = DAG.getConstant(2, DL, OpVT);
SDValue Add = DAG.getNode(ISD::ADD, SDLoc(N0), OpVT, LL, One);
AddToWorklist(Add.getNode());
return DAG.getSetCC(DL, VT, Add, Two, ISD::SETUGE);
}
// Try more general transforms if the predicates match and the only user of
// the compares is the 'and' or 'or'.
if (IsInteger && TLI.convertSetCCLogicToBitwiseLogic(OpVT) && CC0 == CC1 &&
N0.hasOneUse() && N1.hasOneUse()) {
// and (seteq A, B), (seteq C, D) --> seteq (or (xor A, B), (xor C, D)), 0
// or (setne A, B), (setne C, D) --> setne (or (xor A, B), (xor C, D)), 0
if ((IsAnd && CC1 == ISD::SETEQ) || (!IsAnd && CC1 == ISD::SETNE)) {
SDValue XorL = DAG.getNode(ISD::XOR, SDLoc(N0), OpVT, LL, LR);
SDValue XorR = DAG.getNode(ISD::XOR, SDLoc(N1), OpVT, RL, RR);
SDValue Or = DAG.getNode(ISD::OR, DL, OpVT, XorL, XorR);
SDValue Zero = DAG.getConstant(0, DL, OpVT);
return DAG.getSetCC(DL, VT, Or, Zero, CC1);
}
// Turn compare of constants whose difference is 1 bit into add+and+setcc.
// TODO - support non-uniform vector amounts.
if ((IsAnd && CC1 == ISD::SETNE) || (!IsAnd && CC1 == ISD::SETEQ)) {
// Match a shared variable operand and 2 non-opaque constant operands.
ConstantSDNode *C0 = isConstOrConstSplat(LR);
ConstantSDNode *C1 = isConstOrConstSplat(RR);
if (LL == RL && C0 && C1 && !C0->isOpaque() && !C1->isOpaque()) {
// Canonicalize larger constant as C0.
if (C1->getAPIntValue().ugt(C0->getAPIntValue()))
std::swap(C0, C1);
// The difference of the constants must be a single bit.
const APInt &C0Val = C0->getAPIntValue();
const APInt &C1Val = C1->getAPIntValue();
if ((C0Val - C1Val).isPowerOf2()) {
// and/or (setcc X, C0, ne), (setcc X, C1, ne/eq) -->
// setcc ((add X, -C1), ~(C0 - C1)), 0, ne/eq
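// e.g. (X != 5) && (X != 7): C0 = 7, C1 = 5, C0 - C1 = 2 (a single bit),
// so this becomes setcc ((X - 5) & ~2), 0, ne, which is 0 exactly for
// X == 5 (0 & ~2) and X == 7 (2 & ~2).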
SDValue OffsetC = DAG.getConstant(-C1Val, DL, OpVT);
SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LL, OffsetC);
SDValue MaskC = DAG.getConstant(~(C0Val - C1Val), DL, OpVT);
SDValue And = DAG.getNode(ISD::AND, DL, OpVT, Add, MaskC);
SDValue Zero = DAG.getConstant(0, DL, OpVT);
return DAG.getSetCC(DL, VT, And, Zero, CC0);
}
}
}
}
// Canonicalize equivalent operands to LL == RL.
if (LL == RR && LR == RL) {
CC1 = ISD::getSetCCSwappedOperands(CC1);
std::swap(RL, RR);
}
// (and (setcc X, Y, CC0), (setcc X, Y, CC1)) --> (setcc X, Y, NewCC)
// (or (setcc X, Y, CC0), (setcc X, Y, CC1)) --> (setcc X, Y, NewCC)
if (LL == RL && LR == RR) {
ISD::CondCode NewCC = IsAnd ? ISD::getSetCCAndOperation(CC0, CC1, IsInteger)
: ISD::getSetCCOrOperation(CC0, CC1, IsInteger);
if (NewCC != ISD::SETCC_INVALID &&
(!LegalOperations ||
(TLI.isCondCodeLegal(NewCC, LL.getSimpleValueType()) &&
TLI.isOperationLegal(ISD::SETCC, OpVT))))
return DAG.getSetCC(DL, VT, LL, LR, NewCC);
}
return SDValue();
}
/// This contains all DAGCombine rules which reduce two values combined by
/// an And operation to a single value. This makes them reusable in the context
/// of visitSELECT(). Rules involving constants are not included as
/// visitSELECT() already handles those cases.
SDValue DAGCombiner::visitANDLike(SDValue N0, SDValue N1, SDNode *N) {
EVT VT = N1.getValueType();
SDLoc DL(N);
// fold (and x, undef) -> 0
if (N0.isUndef() || N1.isUndef())
return DAG.getConstant(0, DL, VT);
if (SDValue V = foldLogicOfSetCCs(true, N0, N1, DL))
return V;
if (N0.getOpcode() == ISD::ADD && N1.getOpcode() == ISD::SRL &&
VT.getSizeInBits() <= 64) {
if (ConstantSDNode *ADDI = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
if (ConstantSDNode *SRLI = dyn_cast<ConstantSDNode>(N1.getOperand(1))) {
// Look for (and (add x, c1), (lshr y, c2)). If c1 isn't a legal
// immediate for an add but becomes legal once its top c2 bits are set,
// transform the add so the immediate doesn't need to be materialized
// in a register.
APInt ADDC = ADDI->getAPIntValue();
APInt SRLC = SRLI->getAPIntValue();
if (ADDC.getMinSignedBits() <= 64 &&
SRLC.ult(VT.getSizeInBits()) &&
!TLI.isLegalAddImmediate(ADDC.getSExtValue())) {
APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
SRLC.getZExtValue());
if (DAG.MaskedValueIsZero(N0.getOperand(1), Mask)) {
ADDC |= Mask;
if (TLI.isLegalAddImmediate(ADDC.getSExtValue())) {
SDLoc DL0(N0);
SDValue NewAdd =
DAG.getNode(ISD::ADD, DL0, VT,
N0.getOperand(0), DAG.getConstant(ADDC, DL, VT));
CombineTo(N0.getNode(), NewAdd);
// Return N so it doesn't get rechecked!
return SDValue(N, 0);
}
}
}
}
}
}
// Reduce bit extract of low half of an integer to the narrower type.
// (and (srl i64:x, K), KMask) ->
// (i64 zero_extend (and (srl (i32 (trunc i64:x)), K), KMask))
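// e.g. for i64 (and (srl x, 8), 0xFF) the shifted mask spans only the low
// 32 bits (8 + 8 <= 32), so the extract can be done entirely in i32 and
// zero-extended back to i64.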
if (N0.getOpcode() == ISD::SRL && N0.hasOneUse()) {
if (ConstantSDNode *CAnd = dyn_cast<ConstantSDNode>(N1)) {
if (ConstantSDNode *CShift = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
unsigned Size = VT.getSizeInBits();
const APInt &AndMask = CAnd->getAPIntValue();
unsigned ShiftBits = CShift->getZExtValue();
// Bail out, this node will probably disappear anyway.
if (ShiftBits == 0)
return SDValue();
unsigned MaskBits = AndMask.countTrailingOnes();
EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), Size / 2);
if (AndMask.isMask() &&
// Required bits must not span the two halves of the integer and
// must fit in the half size type.
(ShiftBits + MaskBits <= Size / 2) &&
TLI.isNarrowingProfitable(VT, HalfVT) &&
TLI.isTypeDesirableForOp(ISD::AND, HalfVT) &&
TLI.isTypeDesirableForOp(ISD::SRL, HalfVT) &&
TLI.isTruncateFree(VT, HalfVT) &&
TLI.isZExtFree(HalfVT, VT)) {
// The isNarrowingProfitable check is to avoid regressions on PPC and
// AArch64 which match a few 64-bit bit insert / bit extract patterns
// on downstream users of this. Those patterns could probably be
// extended to handle extensions mixed in.
SDValue SL(N0);
assert(MaskBits <= Size);
// Extracting the highest bit of the low half.
EVT ShiftVT = TLI.getShiftAmountTy(HalfVT, DAG.getDataLayout());
SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, HalfVT,
N0.getOperand(0));
SDValue NewMask = DAG.getConstant(AndMask.trunc(Size / 2), SL, HalfVT);
SDValue ShiftK = DAG.getConstant(ShiftBits, SL, ShiftVT);
SDValue Shift = DAG.getNode(ISD::SRL, SL, HalfVT, Trunc, ShiftK);
SDValue And = DAG.getNode(ISD::AND, SL, HalfVT, Shift, NewMask);
return DAG.getNode(ISD::ZERO_EXTEND, SL, VT, And);
}
}
}
}
return SDValue();
}
bool DAGCombiner::isAndLoadExtLoad(ConstantSDNode *AndC, LoadSDNode *LoadN,
EVT LoadResultTy, EVT &ExtVT) {
if (!AndC->getAPIntValue().isMask())
return false;
unsigned ActiveBits = AndC->getAPIntValue().countTrailingOnes();
ExtVT = EVT::getIntegerVT(*DAG.getContext(), ActiveBits);
EVT LoadedVT = LoadN->getMemoryVT();
if (ExtVT == LoadedVT &&
(!LegalOperations ||
TLI.isLoadExtLegal(ISD::ZEXTLOAD, LoadResultTy, ExtVT))) {
// ZEXTLOAD will match without needing to change the size of the value being
// loaded.
return true;
}
// Do not change the width of a volatile load.
if (LoadN->isVolatile())
return false;
// Do not generate loads of non-round integer types since these can
// be expensive (and would be wrong if the type is not byte sized).
if (!LoadedVT.bitsGT(ExtVT) || !ExtVT.isRound())
return false;
if (LegalOperations &&
!TLI.isLoadExtLegal(ISD::ZEXTLOAD, LoadResultTy, ExtVT))
return false;
if (!TLI.shouldReduceLoadWidth(LoadN, ISD::ZEXTLOAD, ExtVT))
return false;
return true;
}
bool DAGCombiner::isLegalNarrowLdSt(LSBaseSDNode *LDST,
ISD::LoadExtType ExtType, EVT &MemVT,
unsigned ShAmt) {
if (!LDST)
return false;
// Only allow byte offsets.
if (ShAmt % 8)
return false;
// Do not generate loads of non-round integer types since these can
// be expensive (and would be wrong if the type is not byte sized).
if (!MemVT.isRound())
return false;
// Don't change the width of a volatile load.
if (LDST->isVolatile())
return false;
// Verify that we are actually reducing a load width here.
if (LDST->getMemoryVT().getSizeInBits() < MemVT.getSizeInBits())
return false;
// Ensure that this isn't going to produce an unsupported unaligned access.
if (ShAmt &&
!TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
LDST->getAddressSpace(), ShAmt / 8,
LDST->getMemOperand()->getFlags()))
return false;
// It's not possible to generate a constant of extended or untyped type.
EVT PtrType = LDST->getBasePtr().getValueType();
if (PtrType == MVT::Untyped || PtrType.isExtended())
return false;
if (isa<LoadSDNode>(LDST)) {
LoadSDNode *Load = cast<LoadSDNode>(LDST);
// Don't transform one with multiple uses, this would require adding a new
// load.
if (!SDValue(Load, 0).hasOneUse())
return false;
if (LegalOperations &&
!TLI.isLoadExtLegal(ExtType, Load->getValueType(0), MemVT))
return false;
// For the transform to be legal, the load must produce only two values
// (the value loaded and the chain). Don't transform a pre-increment
// load, for example, which produces an extra value. Otherwise the
// transformation is not equivalent, and the downstream logic to replace
// uses gets things wrong.
if (Load->getNumValues() > 2)
return false;
// If the load that we're shrinking is an extload and we're not just
// discarding the extension we can't simply shrink the load. Bail.
// TODO: It would be possible to merge the extensions in some cases.
if (Load->getExtensionType() != ISD::NON_EXTLOAD &&
Load->getMemoryVT().getSizeInBits() < MemVT.getSizeInBits() + ShAmt)
return false;
if (!TLI.shouldReduceLoadWidth(Load, ExtType, MemVT))
return false;
} else {
assert(isa<StoreSDNode>(LDST) && "It is not a Load nor a Store SDNode");
StoreSDNode *Store = cast<StoreSDNode>(LDST);
// Can't write outside the original store
if (Store->getMemoryVT().getSizeInBits() < MemVT.getSizeInBits() + ShAmt)
return false;
if (LegalOperations &&
!TLI.isTruncStoreLegal(Store->getValue().getValueType(), MemVT))
return false;
}
return true;
}
bool DAGCombiner::SearchForAndLoads(SDNode *N,
SmallVectorImpl<LoadSDNode*> &Loads,
SmallPtrSetImpl<SDNode*> &NodesWithConsts,
ConstantSDNode *Mask,
SDNode *&NodeToMask) {
// Recursively search for the operands, looking for loads which can be
// narrowed.
for (SDValue Op : N->op_values()) {
if (Op.getValueType().isVector())
return false;
// Some constants may need fixing up later if they are too large.
if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
if ((N->getOpcode() == ISD::OR || N->getOpcode() == ISD::XOR) &&
(Mask->getAPIntValue() & C->getAPIntValue()) != C->getAPIntValue())
NodesWithConsts.insert(N);
continue;
}
if (!Op.hasOneUse())
return false;
switch(Op.getOpcode()) {
case ISD::LOAD: {
auto *Load = cast<LoadSDNode>(Op);
EVT ExtVT;
if (isAndLoadExtLoad(Mask, Load, Load->getValueType(0), ExtVT) &&
isLegalNarrowLdSt(Load, ISD::ZEXTLOAD, ExtVT)) {
// ZEXTLOAD is already small enough.
if (Load->getExtensionType() == ISD::ZEXTLOAD &&
ExtVT.bitsGE(Load->getMemoryVT()))
continue;
// Use LE (not LT) so that equal-sized loads are also converted to zext.
if (ExtVT.bitsLE(Load->getMemoryVT()))
Loads.push_back(Load);
continue;
}
return false;
}
case ISD::ZERO_EXTEND:
case ISD::AssertZext: {
unsigned ActiveBits = Mask->getAPIntValue().countTrailingOnes();
EVT ExtVT = EVT::getIntegerVT(*DAG.getContext(), ActiveBits);
EVT VT = Op.getOpcode() == ISD::AssertZext ?
cast<VTSDNode>(Op.getOperand(1))->getVT() :
Op.getOperand(0).getValueType();
// We can accept extending nodes if the mask is wider than, or equal in
// width to, the original type.
if (ExtVT.bitsGE(VT))
continue;
break;
}
case ISD::OR:
case ISD::XOR:
case ISD::AND:
if (!SearchForAndLoads(Op.getNode(), Loads, NodesWithConsts, Mask,
NodeToMask))
return false;
continue;
}
// Allow one node which will be masked along with any loads found.
if (NodeToMask)
return false;
// Also ensure that the node to be masked only produces one data result.
NodeToMask = Op.getNode();
if (NodeToMask->getNumValues() > 1) {
bool HasValue = false;
for (unsigned i = 0, e = NodeToMask->getNumValues(); i < e; ++i) {
MVT VT = SDValue(NodeToMask, i).getSimpleValueType();
if (VT != MVT::Glue && VT != MVT::Other) {
if (HasValue) {
NodeToMask = nullptr;
return false;
}
HasValue = true;
}
}
assert(HasValue && "Node to be masked has no data result?");
}
}
return true;
}
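// Try to push a low-bit AND mask back through its operand tree to the loads
// found by SearchForAndLoads, so that the loads can be narrowed and the AND
// removed. Returns true if the DAG was changed.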
bool DAGCombiner::BackwardsPropagateMask(SDNode *N, SelectionDAG &DAG) {
auto *Mask = dyn_cast<ConstantSDNode>(N->getOperand(1));
if (!Mask)
return false;
if (!Mask->getAPIntValue().isMask())
return false;
// No need to do anything if the and directly uses a load.
if (isa<LoadSDNode>(N->getOperand(0)))
return false;
SmallVector<LoadSDNode*, 8> Loads;
SmallPtrSet<SDNode*, 2> NodesWithConsts;
SDNode *FixupNode = nullptr;
if (SearchForAndLoads(N, Loads, NodesWithConsts, Mask, FixupNode)) {
if (Loads.size() == 0)
return false;
LLVM_DEBUG(dbgs() << "Backwards propagate AND: "; N->dump());
SDValue MaskOp = N->getOperand(1);
// If it exists, fixup the single node we allow in the tree that needs
// masking.
if (FixupNode) {
LLVM_DEBUG(dbgs() << "First, need to fix up: "; FixupNode->dump());
SDValue And = DAG.getNode(ISD::AND, SDLoc(FixupNode),
FixupNode->getValueType(0),
SDValue(FixupNode, 0), MaskOp);
DAG.ReplaceAllUsesOfValueWith(SDValue(FixupNode, 0), And);
if (And.getOpcode() == ISD::AND)
DAG.UpdateNodeOperands(And.getNode(), SDValue(FixupNode, 0), MaskOp);
}
// Narrow any constants that need it.
for (auto *LogicN : NodesWithConsts) {
SDValue Op0 = LogicN->getOperand(0);
SDValue Op1 = LogicN->getOperand(1);
if (isa<ConstantSDNode>(Op0))
std::swap(Op0, Op1);
SDValue And = DAG.getNode(ISD::AND, SDLoc(Op1), Op1.getValueType(),
Op1, MaskOp);
DAG.UpdateNodeOperands(LogicN, Op0, And);
}
// Create narrow loads.
for (auto *Load : Loads) {
LLVM_DEBUG(dbgs() << "Propagate AND back to: "; Load->dump());
SDValue And = DAG.getNode(ISD::AND, SDLoc(Load), Load->getValueType(0),
SDValue(Load, 0), MaskOp);
DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 0), And);
if (And.getOpcode() == ISD::AND)
And = SDValue(
DAG.UpdateNodeOperands(And.getNode(), SDValue(Load, 0), MaskOp), 0);
SDValue NewLoad = ReduceLoadWidth(And.getNode());
assert(NewLoad &&
"Shouldn't be masking the load if it can't be narrowed");
CombineTo(Load, NewLoad, NewLoad.getValue(1));
}
DAG.ReplaceAllUsesWith(N, N->getOperand(0).getNode());
return true;
}
return false;
}
// Unfold
// x & (-1 'logical shift' y)
// To
// (x 'opposite logical shift' y) 'logical shift' y
// if it is better for performance.
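//
// For example (illustrative only, i32 x with variable y):
// x & (-1 << y) -> (x >> y) << y
// Both forms clear the low y bits of x, but the shift pair avoids
// materializing the variable all-ones mask.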
SDValue DAGCombiner::unfoldExtremeBitClearingToShifts(SDNode *N) {
assert(N->getOpcode() == ISD::AND);
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
// Do we actually prefer shifts over a mask?
if (!TLI.shouldFoldMaskToVariableShiftPair(N0))
return SDValue();
// Try to match (-1 '[outer] logical shift' y)
unsigned OuterShift;
unsigned InnerShift; // The opposite direction to the OuterShift.
SDValue Y; // Shift amount.
auto matchMask = [&OuterShift, &InnerShift, &Y](SDValue M) -> bool {
if (!M.hasOneUse())
return false;
OuterShift = M->getOpcode();
if (OuterShift == ISD::SHL)
InnerShift = ISD::SRL;
else if (OuterShift == ISD::SRL)
InnerShift = ISD::SHL;
else
return false;
if (!isAllOnesConstant(M->getOperand(0)))
return false;
Y = M->getOperand(1);
return true;
};
SDValue X;
if (matchMask(N1))
X = N0;
else if (matchMask(N0))
X = N1;
else
return SDValue();
SDLoc DL(N);
EVT VT = N->getValueType(0);
// tmp = x 'opposite logical shift' y
SDValue T0 = DAG.getNode(InnerShift, DL, VT, X, Y);
// ret = tmp 'logical shift' y
SDValue T1 = DAG.getNode(OuterShift, DL, VT, T0, Y);
return T1;
}
SDValue DAGCombiner::visitAND(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N1.getValueType();
// x & x --> x
if (N0 == N1)
return N0;
// fold vector ops
if (VT.isVector()) {
if (SDValue FoldedVOp = SimplifyVBinOp(N))
return FoldedVOp;
// fold (and x, 0) -> 0, vector edition
if (ISD::isBuildVectorAllZeros(N0.getNode()))
// do not return N0, because undef node may exist in N0
return DAG.getConstant(APInt::getNullValue(N0.getScalarValueSizeInBits()),
SDLoc(N), N0.getValueType());
if (ISD::isBuildVectorAllZeros(N1.getNode()))
// do not return N1, because undef node may exist in N1
return DAG.getConstant(APInt::getNullValue(N1.getScalarValueSizeInBits()),
SDLoc(N), N1.getValueType());
// fold (and x, -1) -> x, vector edition
if (ISD::isBuildVectorAllOnes(N0.getNode()))
return N1;
if (ISD::isBuildVectorAllOnes(N1.getNode()))
return N0;
}
// fold (and c1, c2) -> c1&c2
ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
ConstantSDNode *N1C = isConstOrConstSplat(N1);
if (N0C && N1C && !N1C->isOpaque())
return DAG.FoldConstantArithmetic(ISD::AND, SDLoc(N), VT, N0C, N1C);
// canonicalize constant to RHS
if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
!DAG.isConstantIntBuildVectorOrConstantInt(N1))
return DAG.getNode(ISD::AND, SDLoc(N), VT, N1, N0);
// fold (and x, -1) -> x
if (isAllOnesConstant(N1))
return N0;
// if (and x, c) is known to be zero, return 0
unsigned BitWidth = VT.getScalarSizeInBits();
if (N1C && DAG.MaskedValueIsZero(SDValue(N, 0),
APInt::getAllOnesValue(BitWidth)))
return DAG.getConstant(0, SDLoc(N), VT);
if (SDValue NewSel = foldBinOpIntoSelect(N))
return NewSel;
// reassociate and
if (SDValue RAND = reassociateOps(ISD::AND, SDLoc(N), N0, N1, N->getFlags()))
return RAND;
// Try to convert a constant mask AND into a shuffle clear mask.
if (VT.isVector())
if (SDValue Shuffle = XformToShuffleWithZero(N))
return Shuffle;
// fold (and (or x, C), D) -> D if (C & D) == D
auto MatchSubset = [](ConstantSDNode *LHS, ConstantSDNode *RHS) {
return RHS->getAPIntValue().isSubsetOf(LHS->getAPIntValue());
};
if (N0.getOpcode() == ISD::OR &&
ISD::matchBinaryPredicate(N0.getOperand(1), N1, MatchSubset))
return N1;
// fold (and (any_ext V), c) -> (zero_ext V) if 'and' only clears top bits.
if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
SDValue N0Op0 = N0.getOperand(0);
APInt Mask = ~N1C->getAPIntValue();
Mask = Mask.trunc(N0Op0.getScalarValueSizeInBits());
if (DAG.MaskedValueIsZero(N0Op0, Mask)) {
SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N),
N0.getValueType(), N0Op0);
// Replace uses of the AND with uses of the Zero extend node.
CombineTo(N, Zext);
// We actually want to replace all uses of the any_extend with the
// zero_extend, to avoid duplicating things. This will later cause this
// AND to be folded.
CombineTo(N0.getNode(), Zext);
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
}
// similarly fold (and (X (load ([non_ext|any_ext|zero_ext] V))), c) ->
// (X (load ([non_ext|zero_ext] V))) if 'and' only clears top bits which must
// already be zero by virtue of the width of the base type of the load.
//
// the 'X' node here can either be nothing or an extract_vector_elt to catch
// more cases.
if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
N0.getValueSizeInBits() == N0.getOperand(0).getScalarValueSizeInBits() &&
N0.getOperand(0).getOpcode() == ISD::LOAD &&
N0.getOperand(0).getResNo() == 0) ||
(N0.getOpcode() == ISD::LOAD && N0.getResNo() == 0)) {
LoadSDNode *Load = cast<LoadSDNode>( (N0.getOpcode() == ISD::LOAD) ?
N0 : N0.getOperand(0) );
// Get the constant (if applicable) the zero'th operand is being ANDed with.
// This can be a pure constant or a vector splat, in which case we treat the
// vector as a scalar and use the splat value.
APInt Constant = APInt::getNullValue(1);
if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
Constant = C->getAPIntValue();
} else if (BuildVectorSDNode *Vector = dyn_cast<BuildVectorSDNode>(N1)) {
APInt SplatValue, SplatUndef;
unsigned SplatBitSize;
bool HasAnyUndefs;
bool IsSplat = Vector->isConstantSplat(SplatValue, SplatUndef,
SplatBitSize, HasAnyUndefs);
if (IsSplat) {
// Undef bits can contribute to a possible optimisation if set, so
// set them.
SplatValue |= SplatUndef;
// The splat value may be something like "0x00FFFFFF", which means 0 for
// the first vector value and FF for the rest, repeating. We need a mask
// that will apply equally to all members of the vector, so AND all the
// lanes of the constant together.
unsigned EltBitWidth = Vector->getValueType(0).getScalarSizeInBits();
// If the splat value has been compressed to a bitlength lower
// than the size of the vector lane, we need to re-expand it to
// the lane size.
if (EltBitWidth > SplatBitSize)
for (SplatValue = SplatValue.zextOrTrunc(EltBitWidth);
SplatBitSize < EltBitWidth; SplatBitSize = SplatBitSize * 2)
SplatValue |= SplatValue.shl(SplatBitSize);
// Make sure that variable 'Constant' is only set if 'SplatBitSize' is a
// multiple of 'EltBitWidth'. Otherwise, we could propagate a wrong value.
if ((SplatBitSize % EltBitWidth) == 0) {
Constant = APInt::getAllOnesValue(EltBitWidth);
for (unsigned i = 0, n = (SplatBitSize / EltBitWidth); i < n; ++i)
Constant &= SplatValue.extractBits(EltBitWidth, i * EltBitWidth);
}
}
}
// If we want to change an EXTLOAD to a ZEXTLOAD, ensure a ZEXTLOAD is
// actually legal and isn't going to get expanded, else this is a false
// optimisation.
bool CanZextLoadProfitably = TLI.isLoadExtLegal(ISD::ZEXTLOAD,
Load->getValueType(0),
Load->getMemoryVT());
// Resize the constant to the same size as the original memory access before
// extension. If it is still the AllOnesValue then this AND is completely
// unneeded.
Constant = Constant.zextOrTrunc(Load->getMemoryVT().getScalarSizeInBits());
bool B;
switch (Load->getExtensionType()) {
default: B = false; break;
case ISD::EXTLOAD: B = CanZextLoadProfitably; break;
case ISD::ZEXTLOAD:
case ISD::NON_EXTLOAD: B = true; break;
}
if (B && Constant.isAllOnesValue()) {
// If the load type was an EXTLOAD, convert to ZEXTLOAD in order to
// preserve semantics once we get rid of the AND.
SDValue NewLoad(Load, 0);
// Fold the AND away. NewLoad may get replaced immediately.
CombineTo(N, (N0.getNode() == Load) ? NewLoad : N0);
if (Load->getExtensionType() == ISD::EXTLOAD) {
NewLoad = DAG.getLoad(Load->getAddressingMode(), ISD::ZEXTLOAD,
Load->getValueType(0), SDLoc(Load),
Load->getChain(), Load->getBasePtr(),
Load->getOffset(), Load->getMemoryVT(),
Load->getMemOperand());
// Replace uses of the EXTLOAD with the new ZEXTLOAD.
if (Load->getNumValues() == 3) {
// PRE/POST_INC loads have 3 values.
SDValue To[] = { NewLoad.getValue(0), NewLoad.getValue(1),
NewLoad.getValue(2) };
CombineTo(Load, To, 3, true);
} else {
CombineTo(Load, NewLoad.getValue(0), NewLoad.getValue(1));
}
}
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
}
// fold (and (load x), 255) -> (zextload x, i8)
// fold (and (extload x, i16), 255) -> (zextload x, i8)
// fold (and (any_ext (extload x, i16)), 255) -> (zextload x, i8)
if (!VT.isVector() && N1C && (N0.getOpcode() == ISD::LOAD ||
(N0.getOpcode() == ISD::ANY_EXTEND &&
N0.getOperand(0).getOpcode() == ISD::LOAD))) {
if (SDValue Res = ReduceLoadWidth(N)) {
LoadSDNode *LN0 = N0->getOpcode() == ISD::ANY_EXTEND
? cast<LoadSDNode>(N0.getOperand(0)) : cast<LoadSDNode>(N0);
AddToWorklist(N);
DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 0), Res);
return SDValue(N, 0);
}
}
if (Level >= AfterLegalizeTypes) {
// Attempt to propagate the AND back up to the leaves which, if they're
// loads, can be combined to narrow loads and the AND node can be removed.
// Perform after legalization so that extend nodes will already be
// combined into the loads.
if (BackwardsPropagateMask(N, DAG)) {
return SDValue(N, 0);
}
}
if (SDValue Combined = visitANDLike(N0, N1, N))
return Combined;
// Simplify: (and (op x...), (op y...)) -> (op (and x, y))
if (N0.getOpcode() == N1.getOpcode())
if (SDValue V = hoistLogicOpWithSameOpcodeHands(N))
return V;
// Masking the negated extension of a boolean is just the zero-extended
// boolean:
// and (sub 0, zext(bool X)), 1 --> zext(bool X)
// and (sub 0, sext(bool X)), 1 --> zext(bool X)
//
// Note: the SimplifyDemandedBits fold below can make an information-losing
// transform, and then we have no way to find this better fold.
if (N1C && N1C->isOne() && N0.getOpcode() == ISD::SUB) {
if (isNullOrNullSplat(N0.getOperand(0))) {
SDValue SubRHS = N0.getOperand(1);
if (SubRHS.getOpcode() == ISD::ZERO_EXTEND &&
SubRHS.getOperand(0).getScalarValueSizeInBits() == 1)
return SubRHS;
if (SubRHS.getOpcode() == ISD::SIGN_EXTEND &&
SubRHS.getOperand(0).getScalarValueSizeInBits() == 1)
return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, SubRHS.getOperand(0));
}
}
// fold (and (sign_extend_inreg x, i16 to i32), 1) -> (and x, 1)
// fold (and (sra)) -> (and (srl)) when possible.
if (SimplifyDemandedBits(SDValue(N, 0)))
return SDValue(N, 0);
// fold (zext_inreg (extload x)) -> (zextload x)
// fold (zext_inreg (sextload x)) -> (zextload x) iff load has one use
if (ISD::isUNINDEXEDLoad(N0.getNode()) &&
(ISD::isEXTLoad(N0.getNode()) ||
(ISD::isSEXTLoad(N0.getNode()) && N0.hasOneUse()))) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
EVT MemVT = LN0->getMemoryVT();
// If we zero all the possible extended bits, then we can turn this into
// a zextload if we are running before legalize or the operation is legal.
unsigned ExtBitSize = N1.getScalarValueSizeInBits();
unsigned MemBitSize = MemVT.getScalarSizeInBits();
APInt ExtBits = APInt::getHighBitsSet(ExtBitSize, ExtBitSize - MemBitSize);
if (DAG.MaskedValueIsZero(N1, ExtBits) &&
((!LegalOperations && !LN0->isVolatile()) ||
TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, MemVT))) {
SDValue ExtLoad =
DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N0), VT, LN0->getChain(),
LN0->getBasePtr(), MemVT, LN0->getMemOperand());
AddToWorklist(N);
CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
}
// fold (and (or (srl N, 8), (shl N, 8)), 0xffff) -> (srl (bswap N), const)
if (N1C && N1C->getAPIntValue() == 0xffff && N0.getOpcode() == ISD::OR) {
if (SDValue BSwap = MatchBSwapHWordLow(N0.getNode(), N0.getOperand(0),
N0.getOperand(1), false))
return BSwap;
}
if (SDValue Shifts = unfoldExtremeBitClearingToShifts(N))
return Shifts;
return SDValue();
}
/// Match (a >> 8) | (a << 8) as (bswap a) >> 16.
SDValue DAGCombiner::MatchBSwapHWordLow(SDNode *N, SDValue N0, SDValue N1,
bool DemandHighBits) {
if (!LegalOperations)
return SDValue();
EVT VT = N->getValueType(0);
if (VT != MVT::i64 && VT != MVT::i32 && VT != MVT::i16)
return SDValue();
if (!TLI.isOperationLegalOrCustom(ISD::BSWAP, VT))
return SDValue();
// Recognize (and (shl a, 8), 0xff00), (and (srl a, 8), 0xff)
bool LookPassAnd0 = false;
bool LookPassAnd1 = false;
if (N0.getOpcode() == ISD::AND && N0.getOperand(0).getOpcode() == ISD::SRL)
std::swap(N0, N1);
if (N1.getOpcode() == ISD::AND && N1.getOperand(0).getOpcode() == ISD::SHL)
std::swap(N0, N1);
if (N0.getOpcode() == ISD::AND) {
if (!N0.getNode()->hasOneUse())
return SDValue();
ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
// Also handle 0xffff since the LHS is guaranteed to have zeros there.
// This is needed for X86.
if (!N01C || (N01C->getZExtValue() != 0xFF00 &&
N01C->getZExtValue() != 0xFFFF))
return SDValue();
N0 = N0.getOperand(0);
LookPassAnd0 = true;
}
if (N1.getOpcode() == ISD::AND) {
if (!N1.getNode()->hasOneUse())
return SDValue();
ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
if (!N11C || N11C->getZExtValue() != 0xFF)
return SDValue();
N1 = N1.getOperand(0);
LookPassAnd1 = true;
}
if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
std::swap(N0, N1);
if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
return SDValue();
if (!N0.getNode()->hasOneUse() || !N1.getNode()->hasOneUse())
return SDValue();
ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
if (!N01C || !N11C)
return SDValue();
if (N01C->getZExtValue() != 8 || N11C->getZExtValue() != 8)
return SDValue();
// Look for (shl (and a, 0xff), 8), (srl (and a, 0xff00), 8)
SDValue N00 = N0->getOperand(0);
if (!LookPassAnd0 && N00.getOpcode() == ISD::AND) {
if (!N00.getNode()->hasOneUse())
return SDValue();
ConstantSDNode *N001C = dyn_cast<ConstantSDNode>(N00.getOperand(1));
if (!N001C || N001C->getZExtValue() != 0xFF)
return SDValue();
N00 = N00.getOperand(0);
LookPassAnd0 = true;
}
SDValue N10 = N1->getOperand(0);
if (!LookPassAnd1 && N10.getOpcode() == ISD::AND) {
if (!N10.getNode()->hasOneUse())
return SDValue();
ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N10.getOperand(1));
// Also allow 0xFFFF since the bits will be shifted out. This is needed
// for X86.
if (!N101C || (N101C->getZExtValue() != 0xFF00 &&
N101C->getZExtValue() != 0xFFFF))
return SDValue();
N10 = N10.getOperand(0);
LookPassAnd1 = true;
}
if (N00 != N10)
return SDValue();
// Make sure everything beyond the low halfword gets set to zero since the SRL
// 16 will clear the top bits.
unsigned OpSizeInBits = VT.getSizeInBits();
if (DemandHighBits && OpSizeInBits > 16) {
// If the left-shift isn't masked out then the only way this is a bswap is
// if all bits beyond the low 8 are 0. In that case the entire pattern
// reduces to a left shift anyway: leave it for other parts of the combiner.
if (!LookPassAnd0)
return SDValue();
// However, if the right shift isn't masked out then it might be because
// it's not needed. See if we can spot that too.
if (!LookPassAnd1 &&
!DAG.MaskedValueIsZero(
N10, APInt::getHighBitsSet(OpSizeInBits, OpSizeInBits - 16)))
return SDValue();
}
SDValue Res = DAG.getNode(ISD::BSWAP, SDLoc(N), VT, N00);
if (OpSizeInBits > 16) {
SDLoc DL(N);
Res = DAG.getNode(ISD::SRL, DL, VT, Res,
DAG.getConstant(OpSizeInBits - 16, DL,
getShiftAmountTy(VT)));
}
return Res;
}
/// Return true if the specified node is an element that makes up a 32-bit
/// packed halfword byteswap.
/// ((x & 0x000000ff) << 8) |
/// ((x & 0x0000ff00) >> 8) |
/// ((x & 0x00ff0000) << 8) |
/// ((x & 0xff000000) >> 8)
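/// For example (illustrative), both (and (srl x, 8), 0xff) and
/// (shl (and x, 0xff), 8) are accepted as elements, and each records x in
/// Parts[0].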
static bool isBSwapHWordElement(SDValue N, MutableArrayRef<SDNode *> Parts) {
if (!N.getNode()->hasOneUse())
return false;
unsigned Opc = N.getOpcode();
if (Opc != ISD::AND && Opc != ISD::SHL && Opc != ISD::SRL)
return false;
SDValue N0 = N.getOperand(0);
unsigned Opc0 = N0.getOpcode();
if (Opc0 != ISD::AND && Opc0 != ISD::SHL && Opc0 != ISD::SRL)
return false;
ConstantSDNode *N1C = nullptr;
// SHL or SRL: look upstream for AND mask operand
if (Opc == ISD::AND)
N1C = dyn_cast<ConstantSDNode>(N.getOperand(1));
else if (Opc0 == ISD::AND)
N1C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
if (!N1C)
return false;
unsigned MaskByteOffset;
switch (N1C->getZExtValue()) {
default:
return false;
case 0xFF: MaskByteOffset = 0; break;
case 0xFF00: MaskByteOffset = 1; break;
case 0xFFFF:
// In case demanded bits didn't clear the bits that will be shifted out.
// This is needed for X86.
if (Opc == ISD::SRL || (Opc == ISD::AND && Opc0 == ISD::SHL)) {
MaskByteOffset = 1;
break;
}
return false;
case 0xFF0000: MaskByteOffset = 2; break;
case 0xFF000000: MaskByteOffset = 3; break;
}
// Look for (x & 0xff) << 8 as well as ((x << 8) & 0xff00).
if (Opc == ISD::AND) {
if (MaskByteOffset == 0 || MaskByteOffset == 2) {
// (x >> 8) & 0xff
// (x >> 8) & 0xff0000
if (Opc0 != ISD::SRL)
return false;
ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
if (!C || C->getZExtValue() != 8)
return false;
} else {
// (x << 8) & 0xff00
// (x << 8) & 0xff000000
if (Opc0 != ISD::SHL)
return false;
ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
if (!C || C->getZExtValue() != 8)
return false;
}
} else if (Opc == ISD::SHL) {
// (x & 0xff) << 8
// (x & 0xff0000) << 8
if (MaskByteOffset != 0 && MaskByteOffset != 2)
return false;
ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
if (!C || C->getZExtValue() != 8)
return false;
} else { // Opc == ISD::SRL
// (x & 0xff00) >> 8
// (x & 0xff000000) >> 8
if (MaskByteOffset != 1 && MaskByteOffset != 3)
return false;
ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
if (!C || C->getZExtValue() != 8)
return false;
}
if (Parts[MaskByteOffset])
return false;
Parts[MaskByteOffset] = N0.getOperand(0).getNode();
return true;
}
/// Match a 32-bit packed halfword bswap. That is
/// ((x & 0x000000ff) << 8) |
/// ((x & 0x0000ff00) >> 8) |
/// ((x & 0x00ff0000) << 8) |
/// ((x & 0xff000000) >> 8)
/// => (rotl (bswap x), 16)
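///
/// For example (illustrative), if x has bytes B3.B2.B1.B0 (most significant
/// first), the pattern above produces B2.B3.B0.B1; (bswap x) gives
/// B0.B1.B2.B3, and rotating that left by 16 also yields B2.B3.B0.B1.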
SDValue DAGCombiner::MatchBSwapHWord(SDNode *N, SDValue N0, SDValue N1) {
if (!LegalOperations)
return SDValue();
EVT VT = N->getValueType(0);
if (VT != MVT::i32)
return SDValue();
if (!TLI.isOperationLegalOrCustom(ISD::BSWAP, VT))
return SDValue();
// Look for either
// (or (or (and), (and)), (or (and), (and)))
// (or (or (or (and), (and)), (and)), (and))
if (N0.getOpcode() != ISD::OR)
return SDValue();
SDValue N00 = N0.getOperand(0);
SDValue N01 = N0.getOperand(1);
SDNode *Parts[4] = {};
if (N1.getOpcode() == ISD::OR &&
N00.getNumOperands() == 2 && N01.getNumOperands() == 2) {
// (or (or (and), (and)), (or (and), (and)))
if (!isBSwapHWordElement(N00, Parts))
return SDValue();
if (!isBSwapHWordElement(N01, Parts))
return SDValue();
SDValue N10 = N1.getOperand(0);
if (!isBSwapHWordElement(N10, Parts))
return SDValue();
SDValue N11 = N1.getOperand(1);
if (!isBSwapHWordElement(N11, Parts))
return SDValue();
} else {
// (or (or (or (and), (and)), (and)), (and))
if (!isBSwapHWordElement(N1, Parts))
return SDValue();
if (!isBSwapHWordElement(N01, Parts))
return SDValue();
if (N00.getOpcode() != ISD::OR)
return SDValue();
SDValue N000 = N00.getOperand(0);
if (!isBSwapHWordElement(N000, Parts))
return SDValue();
SDValue N001 = N00.getOperand(1);
if (!isBSwapHWordElement(N001, Parts))
return SDValue();
}
// Make sure the parts are all coming from the same node.
if (Parts[0] != Parts[1] || Parts[0] != Parts[2] || Parts[0] != Parts[3])
return SDValue();
SDLoc DL(N);
SDValue BSwap = DAG.getNode(ISD::BSWAP, DL, VT,
SDValue(Parts[0], 0));
// Result of the bswap should be rotated by 16. If it's not legal, then
// do (x << 16) | (x >> 16).
SDValue ShAmt = DAG.getConstant(16, DL, getShiftAmountTy(VT));
if (TLI.isOperationLegalOrCustom(ISD::ROTL, VT))
return DAG.getNode(ISD::ROTL, DL, VT, BSwap, ShAmt);
if (TLI.isOperationLegalOrCustom(ISD::ROTR, VT))
return DAG.getNode(ISD::ROTR, DL, VT, BSwap, ShAmt);
return DAG.getNode(ISD::OR, DL, VT,
DAG.getNode(ISD::SHL, DL, VT, BSwap, ShAmt),
DAG.getNode(ISD::SRL, DL, VT, BSwap, ShAmt));
}
/// This contains all DAGCombine rules which reduce two values combined by
/// an Or operation to a single value \see visitANDLike().
SDValue DAGCombiner::visitORLike(SDValue N0, SDValue N1, SDNode *N) {
EVT VT = N1.getValueType();
SDLoc DL(N);
// fold (or x, undef) -> -1
if (!LegalOperations && (N0.isUndef() || N1.isUndef()))
return DAG.getAllOnesConstant(DL, VT);
if (SDValue V = foldLogicOfSetCCs(false, N0, N1, DL))
return V;
// (or (and X, C1), (and Y, C2)) -> (and (or X, Y), C3) if possible.
if (N0.getOpcode() == ISD::AND && N1.getOpcode() == ISD::AND &&
// Don't increase # computations.
(N0.getNode()->hasOneUse() || N1.getNode()->hasOneUse())) {
// We can only do this xform if we know that bits from X that are set in C2
// but not in C1 are already zero. Likewise for Y.
if (const ConstantSDNode *N0O1C =
getAsNonOpaqueConstant(N0.getOperand(1))) {
if (const ConstantSDNode *N1O1C =
getAsNonOpaqueConstant(N1.getOperand(1))) {
const APInt &LHSMask = N0O1C->getAPIntValue();
const APInt &RHSMask = N1O1C->getAPIntValue();
if (DAG.MaskedValueIsZero(N0.getOperand(0), RHSMask&~LHSMask) &&
DAG.MaskedValueIsZero(N1.getOperand(0), LHSMask&~RHSMask)) {
SDValue X = DAG.getNode(ISD::OR, SDLoc(N0), VT,
N0.getOperand(0), N1.getOperand(0));
return DAG.getNode(ISD::AND, DL, VT, X,
DAG.getConstant(LHSMask | RHSMask, DL, VT));
}
}
}
}
// (or (and X, M), (and X, N)) -> (and X, (or M, N))
if (N0.getOpcode() == ISD::AND &&
N1.getOpcode() == ISD::AND &&
N0.getOperand(0) == N1.getOperand(0) &&
// Don't increase # computations.
(N0.getNode()->hasOneUse() || N1.getNode()->hasOneUse())) {
SDValue X = DAG.getNode(ISD::OR, SDLoc(N0), VT,
N0.getOperand(1), N1.getOperand(1));
return DAG.getNode(ISD::AND, DL, VT, N0.getOperand(0), X);
}
return SDValue();
}
/// OR combines for which the commuted variant will be tried as well.
static SDValue visitORCommutative(
SelectionDAG &DAG, SDValue N0, SDValue N1, SDNode *N) {
EVT VT = N0.getValueType();
if (N0.getOpcode() == ISD::AND) {
// fold (or (and X, (xor Y, -1)), Y) -> (or X, Y)
if (isBitwiseNot(N0.getOperand(1)) && N0.getOperand(1).getOperand(0) == N1)
return DAG.getNode(ISD::OR, SDLoc(N), VT, N0.getOperand(0), N1);
// fold (or (and (xor Y, -1), X), Y) -> (or X, Y)
if (isBitwiseNot(N0.getOperand(0)) && N0.getOperand(0).getOperand(0) == N1)
return DAG.getNode(ISD::OR, SDLoc(N), VT, N0.getOperand(1), N1);
}
return SDValue();
}
SDValue DAGCombiner::visitOR(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N1.getValueType();
// x | x --> x
if (N0 == N1)
return N0;
// fold vector ops
if (VT.isVector()) {
if (SDValue FoldedVOp = SimplifyVBinOp(N))
return FoldedVOp;
// fold (or x, 0) -> x, vector edition
if (ISD::isBuildVectorAllZeros(N0.getNode()))
return N1;
if (ISD::isBuildVectorAllZeros(N1.getNode()))
return N0;
// fold (or x, -1) -> -1, vector edition
if (ISD::isBuildVectorAllOnes(N0.getNode()))
// do not return N0, because undef node may exist in N0
return DAG.getAllOnesConstant(SDLoc(N), N0.getValueType());
if (ISD::isBuildVectorAllOnes(N1.getNode()))
// do not return N1, because undef node may exist in N1
return DAG.getAllOnesConstant(SDLoc(N), N1.getValueType());
// fold (or (shuf A, V_0, MA), (shuf B, V_0, MB)) -> (shuf A, B, Mask)
// Do this only if the resulting shuffle is legal.
if (isa<ShuffleVectorSDNode>(N0) &&
isa<ShuffleVectorSDNode>(N1) &&
// Avoid folding a node with illegal type.
TLI.isTypeLegal(VT)) {
bool ZeroN00 = ISD::isBuildVectorAllZeros(N0.getOperand(0).getNode());
bool ZeroN01 = ISD::isBuildVectorAllZeros(N0.getOperand(1).getNode());
bool ZeroN10 = ISD::isBuildVectorAllZeros(N1.getOperand(0).getNode());
bool ZeroN11 = ISD::isBuildVectorAllZeros(N1.getOperand(1).getNode());
// Ensure both shuffles have a zero input.
if ((ZeroN00 != ZeroN01) && (ZeroN10 != ZeroN11)) {
assert((!ZeroN00 || !ZeroN01) && "Both inputs zero!");
assert((!ZeroN10 || !ZeroN11) && "Both inputs zero!");
const ShuffleVectorSDNode *SV0 = cast<ShuffleVectorSDNode>(N0);
const ShuffleVectorSDNode *SV1 = cast<ShuffleVectorSDNode>(N1);
bool CanFold = true;
int NumElts = VT.getVectorNumElements();
SmallVector<int, 4> Mask(NumElts);
for (int i = 0; i != NumElts; ++i) {
int M0 = SV0->getMaskElt(i);
int M1 = SV1->getMaskElt(i);
// Determine if either index is pointing to a zero vector.
bool M0Zero = M0 < 0 || (ZeroN00 == (M0 < NumElts));
bool M1Zero = M1 < 0 || (ZeroN10 == (M1 < NumElts));
// If one element is zero and the other side is undef, keep undef.
// This also handles the case that both are undef.
if ((M0Zero && M1 < 0) || (M1Zero && M0 < 0)) {
Mask[i] = -1;
continue;
}
// Make sure only one of the elements is zero.
if (M0Zero == M1Zero) {
CanFold = false;
break;
}
assert((M0 >= 0 || M1 >= 0) && "Undef index!");
// We have a zero and non-zero element. If the non-zero came from
// SV0, make the index an LHS index. If it came from SV1, make it
// a RHS index. We need to mod by NumElts because we don't care
// which operand it came from in the original shuffles.
Mask[i] = M1Zero ? M0 % NumElts : (M1 % NumElts) + NumElts;
}
if (CanFold) {
SDValue NewLHS = ZeroN00 ? N0.getOperand(1) : N0.getOperand(0);
SDValue NewRHS = ZeroN10 ? N1.getOperand(1) : N1.getOperand(0);
bool LegalMask = TLI.isShuffleMaskLegal(Mask, VT);
if (!LegalMask) {
std::swap(NewLHS, NewRHS);
ShuffleVectorSDNode::commuteMask(Mask);
LegalMask = TLI.isShuffleMaskLegal(Mask, VT);
}
if (LegalMask)
return DAG.getVectorShuffle(VT, SDLoc(N), NewLHS, NewRHS, Mask);
}
}
}
}
// fold (or c1, c2) -> c1|c2
ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
if (N0C && N1C && !N1C->isOpaque())
return DAG.FoldConstantArithmetic(ISD::OR, SDLoc(N), VT, N0C, N1C);
// canonicalize constant to RHS
if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
!DAG.isConstantIntBuildVectorOrConstantInt(N1))
return DAG.getNode(ISD::OR, SDLoc(N), VT, N1, N0);
// fold (or x, 0) -> x
if (isNullConstant(N1))
return N0;
// fold (or x, -1) -> -1
if (isAllOnesConstant(N1))
return N1;
if (SDValue NewSel = foldBinOpIntoSelect(N))
return NewSel;
// fold (or x, c) -> c iff (x & ~c) == 0
if (N1C && DAG.MaskedValueIsZero(N0, ~N1C->getAPIntValue()))
return N1;
if (SDValue Combined = visitORLike(N0, N1, N))
return Combined;
// Recognize halfword bswaps as (bswap + rotl 16) or (bswap + shl 16)
if (SDValue BSwap = MatchBSwapHWord(N, N0, N1))
return BSwap;
if (SDValue BSwap = MatchBSwapHWordLow(N, N0, N1))
return BSwap;
// reassociate or
if (SDValue ROR = reassociateOps(ISD::OR, SDLoc(N), N0, N1, N->getFlags()))
return ROR;
// Canonicalize (or (and X, c1), c2) -> (and (or X, c2), c1|c2)
// iff (c1 & c2) != 0 or c1/c2 are undef.
auto MatchIntersect = [](ConstantSDNode *C1, ConstantSDNode *C2) {
return !C1 || !C2 || C1->getAPIntValue().intersects(C2->getAPIntValue());
};
if (N0.getOpcode() == ISD::AND && N0.getNode()->hasOneUse() &&
ISD::matchBinaryPredicate(N0.getOperand(1), N1, MatchIntersect, true)) {
if (SDValue COR = DAG.FoldConstantArithmetic(
ISD::OR, SDLoc(N1), VT, N1.getNode(), N0.getOperand(1).getNode())) {
SDValue IOR = DAG.getNode(ISD::OR, SDLoc(N0), VT, N0.getOperand(0), N1);
AddToWorklist(IOR.getNode());
return DAG.getNode(ISD::AND, SDLoc(N), VT, COR, IOR);
}
}
if (SDValue Combined = visitORCommutative(DAG, N0, N1, N))
return Combined;
if (SDValue Combined = visitORCommutative(DAG, N1, N0, N))
return Combined;
// Simplify: (or (op x...), (op y...)) -> (op (or x, y))
if (N0.getOpcode() == N1.getOpcode())
if (SDValue V = hoistLogicOpWithSameOpcodeHands(N))
return V;
// See if this is some rotate idiom.
if (SDNode *Rot = MatchRotate(N0, N1, SDLoc(N)))
return SDValue(Rot, 0);
if (SDValue Load = MatchLoadCombine(N))
return Load;
// Simplify the operands using demanded-bits information.
if (SimplifyDemandedBits(SDValue(N, 0)))
return SDValue(N, 0);
// If OR can be rewritten into ADD, try combines based on ADD.
if ((!LegalOperations || TLI.isOperationLegal(ISD::ADD, VT)) &&
DAG.haveNoCommonBitsSet(N0, N1))
if (SDValue Combined = visitADDLike(N))
return Combined;
return SDValue();
}
static SDValue stripConstantMask(SelectionDAG &DAG, SDValue Op, SDValue &Mask) {
if (Op.getOpcode() == ISD::AND &&
DAG.isConstantIntBuildVectorOrConstantInt(Op.getOperand(1))) {
Mask = Op.getOperand(1);
return Op.getOperand(0);
}
return Op;
}
/// Match "(X shl/srl V1) & V2" where V2 may not be present.
static bool matchRotateHalf(SelectionDAG &DAG, SDValue Op, SDValue &Shift,
SDValue &Mask) {
Op = stripConstantMask(DAG, Op, Mask);
if (Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) {
Shift = Op;
return true;
}
return false;
}
/// Helper function for visitOR to extract the needed side of a rotate idiom
/// from a shl/srl/mul/udiv. This is meant to handle cases where
/// InstCombine merged some outside op with one of the shifts from
/// the rotate pattern.
/// \returns An empty \c SDValue if the needed shift couldn't be extracted.
/// Otherwise, returns an expansion of \p ExtractFrom based on the following
/// patterns:
///
/// (or (mul v c0) (shrl (mul v c1) c2)):
/// expands (mul v c0) -> (shl (mul v c1) c3)
///
/// (or (udiv v c0) (shl (udiv v c1) c2)):
/// expands (udiv v c0) -> (shrl (udiv v c1) c3)
///
/// (or (shl v c0) (shrl (shl v c1) c2)):
/// expands (shl v c0) -> (shl (shl v c1) c3)
///
/// (or (shrl v c0) (shl (shrl v c1) c2)):
/// expands (shrl v c0) -> (shrl (shrl v c1) c3)
///
/// Such that in all cases, c3+c2==bitwidth(op v c1).
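///
/// For example (illustrative, i32):
/// (or (mul v 16) (shrl (mul v 2) 29))
/// expands (mul v 16) -> (shl (mul v 2) 3), since 2 * (1 << 3) == 16 and
/// 3 + 29 == 32, which allows a rotate to be formed.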
static SDValue extractShiftForRotate(SelectionDAG &DAG, SDValue OppShift,
SDValue ExtractFrom, SDValue &Mask,
const SDLoc &DL) {
assert(OppShift && ExtractFrom && "Empty SDValue");
assert(
(OppShift.getOpcode() == ISD::SHL || OppShift.getOpcode() == ISD::SRL) &&
"Existing shift must be valid as a rotate half");
ExtractFrom = stripConstantMask(DAG, ExtractFrom, Mask);
// Preconditions:
// (or (op0 v c0) (shiftl/r (op0 v c1) c2))
//
// Find opcode of the needed shift to be extracted from (op0 v c0).
unsigned Opcode = ISD::DELETED_NODE;
bool IsMulOrDiv = false;
// Set Opcode and IsMulOrDiv if the extract opcode matches the needed shift
// opcode or its arithmetic (mul or udiv) variant.
auto SelectOpcode = [&](unsigned NeededShift, unsigned MulOrDivVariant) {
IsMulOrDiv = ExtractFrom.getOpcode() == MulOrDivVariant;
if (!IsMulOrDiv && ExtractFrom.getOpcode() != NeededShift)
return false;
Opcode = NeededShift;
return true;
};
// op0 must be either the needed shift opcode or the mul/udiv equivalent
// that the needed shift can be extracted from.
if ((OppShift.getOpcode() != ISD::SRL || !SelectOpcode(ISD::SHL, ISD::MUL)) &&
(OppShift.getOpcode() != ISD::SHL || !SelectOpcode(ISD::SRL, ISD::UDIV)))
return SDValue();
// op0 must be the same opcode on both sides, have the same LHS argument,
// and produce the same value type.
SDValue OppShiftLHS = OppShift.getOperand(0);
EVT ShiftedVT = OppShiftLHS.getValueType();
if (OppShiftLHS.getOpcode() != ExtractFrom.getOpcode() ||
OppShiftLHS.getOperand(0) != ExtractFrom.getOperand(0) ||
ShiftedVT != ExtractFrom.getValueType())
return SDValue();
// Amount of the existing shift.
ConstantSDNode *OppShiftCst = isConstOrConstSplat(OppShift.getOperand(1));
// Constant mul/udiv/shift amount from the RHS of the shift's LHS op.
ConstantSDNode *OppLHSCst = isConstOrConstSplat(OppShiftLHS.getOperand(1));
// Constant mul/udiv/shift amount from the RHS of the ExtractFrom op.
ConstantSDNode *ExtractFromCst =
isConstOrConstSplat(ExtractFrom.getOperand(1));
// TODO: We should be able to handle non-uniform constant vectors for these values.
// Check that we have constant values.
if (!OppShiftCst || !OppShiftCst->getAPIntValue() ||
!OppLHSCst || !OppLHSCst->getAPIntValue() ||
!ExtractFromCst || !ExtractFromCst->getAPIntValue())
return SDValue();
// Compute the shift amount we need to extract to complete the rotate.
const unsigned VTWidth = ShiftedVT.getScalarSizeInBits();
if (OppShiftCst->getAPIntValue().ugt(VTWidth))
return SDValue();
APInt NeededShiftAmt = VTWidth - OppShiftCst->getAPIntValue();
// Normalize the bitwidth of the two mul/udiv/shift constant operands.
APInt ExtractFromAmt = ExtractFromCst->getAPIntValue();
APInt OppLHSAmt = OppLHSCst->getAPIntValue();
zeroExtendToMatch(ExtractFromAmt, OppLHSAmt);
// Now try to extract the needed shift from the ExtractFrom op and see if the
// result matches up with the existing shift's LHS op.
if (IsMulOrDiv) {
// Op to extract from is a mul or udiv by a constant.
// Check:
// c2 / (1 << (bitwidth(op0 v c0) - c1)) == c0
// c2 % (1 << (bitwidth(op0 v c0) - c1)) == 0
const APInt ExtractDiv = APInt::getOneBitSet(ExtractFromAmt.getBitWidth(),
NeededShiftAmt.getZExtValue());
APInt ResultAmt;
APInt Rem;
APInt::udivrem(ExtractFromAmt, ExtractDiv, ResultAmt, Rem);
if (Rem != 0 || ResultAmt != OppLHSAmt)
return SDValue();
} else {
// Op to extract from is a shift by a constant.
// Check:
// c2 - (bitwidth(op0 v c0) - c1) == c0
if (OppLHSAmt != ExtractFromAmt - NeededShiftAmt.zextOrTrunc(
ExtractFromAmt.getBitWidth()))
return SDValue();
}
// Return the expanded shift op that should allow a rotate to be formed.
EVT ShiftVT = OppShift.getOperand(1).getValueType();
EVT ResVT = ExtractFrom.getValueType();
SDValue NewShiftNode = DAG.getConstant(NeededShiftAmt, DL, ShiftVT);
return DAG.getNode(Opcode, DL, ResVT, OppShiftLHS, NewShiftNode);
}
// Return true if we can prove that, whenever Neg and Pos are both in the
// range [0, EltSize), Neg == (Pos == 0 ? 0 : EltSize - Pos). This means that
// for two opposing shifts shift1 and shift2 and a value X with OpBits bits:
//
// (or (shift1 X, Neg), (shift2 X, Pos))
//
// reduces to a rotate in direction shift2 by Pos or (equivalently) a rotate
// in direction shift1 by Neg. The range [0, EltSize) means that we only need
// to consider shift amounts with defined behavior.
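//
// For example (illustrative, EltSize == 32): Neg == (sub 32, Pos) satisfies
// condition [B] below, and Neg == (and (sub 32, Pos), 31) satisfies
// condition [A] with Mask == 31.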
static bool matchRotateSub(SDValue Pos, SDValue Neg, unsigned EltSize,
SelectionDAG &DAG) {
// If EltSize is a power of 2 then:
//
// (a) (Pos == 0 ? 0 : EltSize - Pos) == (EltSize - Pos) & (EltSize - 1)
// (b) Neg == Neg & (EltSize - 1) whenever Neg is in [0, EltSize).
//
// So if EltSize is a power of 2 and Neg is (and Neg', EltSize-1), we check
// for the stronger condition:
//
// Neg & (EltSize - 1) == (EltSize - Pos) & (EltSize - 1) [A]
//
// for all Neg and Pos. Since Neg & (EltSize - 1) == Neg' & (EltSize - 1)
// we can just replace Neg with Neg' for the rest of the function.
//
// In other cases we check for the even stronger condition:
//
// Neg == EltSize - Pos [B]
//
// for all Neg and Pos. Note that the (or ...) then invokes undefined
// behavior if Pos == 0 (and consequently Neg == EltSize).
//
// We could actually use [A] whenever EltSize is a power of 2, but the
// only extra cases that it would match are those uninteresting ones
// where Neg and Pos are never in range at the same time. E.g. for
// EltSize == 32, using [A] would allow a Neg of the form (sub 64, Pos)
// as well as (sub 32, Pos), but:
//
// (or (shift1 X, (sub 64, Pos)), (shift2 X, Pos))
//
// always invokes undefined behavior for 32-bit X.
//
// Below, Mask == EltSize - 1 when using [A] and is all-ones otherwise.
unsigned MaskLoBits = 0;
if (Neg.getOpcode() == ISD::AND && isPowerOf2_64(EltSize)) {
if (ConstantSDNode *NegC = isConstOrConstSplat(Neg.getOperand(1))) {
KnownBits Known = DAG.computeKnownBits(Neg.getOperand(0));
unsigned Bits = Log2_64(EltSize);
if (NegC->getAPIntValue().getActiveBits() <= Bits &&
((NegC->getAPIntValue() | Known.Zero).countTrailingOnes() >= Bits)) {
Neg = Neg.getOperand(0);
MaskLoBits = Bits;
}
}
}
// Check whether Neg has the form (sub NegC, NegOp1) for some NegC and NegOp1.
if (Neg.getOpcode() != ISD::SUB)
return false;
ConstantSDNode *NegC = isConstOrConstSplat(Neg.getOperand(0));
if (!NegC)
return false;
SDValue NegOp1 = Neg.getOperand(1);
// On the RHS of [A], if Pos is Pos' & (EltSize - 1), just replace Pos with
// Pos'. The truncation is redundant for the purpose of the equality.
if (MaskLoBits && Pos.getOpcode() == ISD::AND) {
if (ConstantSDNode *PosC = isConstOrConstSplat(Pos.getOperand(1))) {
KnownBits Known = DAG.computeKnownBits(Pos.getOperand(0));
if (PosC->getAPIntValue().getActiveBits() <= MaskLoBits &&
((PosC->getAPIntValue() | Known.Zero).countTrailingOnes() >=
MaskLoBits))
Pos = Pos.getOperand(0);
}
}
// The condition we need is now:
//
// (NegC - NegOp1) & Mask == (EltSize - Pos) & Mask
//
// If NegOp1 == Pos then we need:
//
// EltSize & Mask == NegC & Mask
//
// (because "x & Mask" is a truncation and distributes through subtraction).
APInt Width;
if (Pos == NegOp1)
Width = NegC->getAPIntValue();
// Check for cases where Pos has the form (add NegOp1, PosC) for some PosC.
// Then the condition we want to prove becomes:
//
// (NegC - NegOp1) & Mask == (EltSize - (NegOp1 + PosC)) & Mask
//
// which, again because "x & Mask" is a truncation, becomes:
//
// NegC & Mask == (EltSize - PosC) & Mask
// EltSize & Mask == (NegC + PosC) & Mask
else if (Pos.getOpcode() == ISD::ADD && Pos.getOperand(0) == NegOp1) {
if (ConstantSDNode *PosC = isConstOrConstSplat(Pos.getOperand(1)))
Width = PosC->getAPIntValue() + NegC->getAPIntValue();
else
return false;
} else
return false;
// Now we just need to check that EltSize & Mask == Width & Mask.
if (MaskLoBits)
// EltSize & Mask is 0 since Mask is EltSize - 1.
return Width.getLoBits(MaskLoBits) == 0;
return Width == EltSize;
}
// A subroutine of MatchRotate used once we have found an OR of two opposite
// shifts of Shifted. If Neg == <operand size> - Pos then the OR reduces
// to both (PosOpcode Shifted, Pos) and (NegOpcode Shifted, Neg), with the
// former being preferred if supported. InnerPos and InnerNeg are Pos and
// Neg with outer conversions stripped away.
SDNode *DAGCombiner::MatchRotatePosNeg(SDValue Shifted, SDValue Pos,
SDValue Neg, SDValue InnerPos,
SDValue InnerNeg, unsigned PosOpcode,
unsigned NegOpcode, const SDLoc &DL) {
// fold (or (shl x, (*ext y)),
// (srl x, (*ext (sub 32, y)))) ->
// (rotl x, y) or (rotr x, (sub 32, y))
//
// fold (or (shl x, (*ext (sub 32, y))),
// (srl x, (*ext y))) ->
// (rotr x, y) or (rotl x, (sub 32, y))
EVT VT = Shifted.getValueType();
if (matchRotateSub(InnerPos, InnerNeg, VT.getScalarSizeInBits(), DAG)) {
bool HasPos = TLI.isOperationLegalOrCustom(PosOpcode, VT);
return DAG.getNode(HasPos ? PosOpcode : NegOpcode, DL, VT, Shifted,
HasPos ? Pos : Neg).getNode();
}
return nullptr;
}
// MatchRotate - Handle an 'or' of two operands. If this is one of the many
// idioms for rotate, and if the target supports rotation instructions, generate
// a rot[lr].
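//
// For example (illustrative, i32):
// (or (shl x, 8), (srl x, 24)) -> (rotl x, 8)
// when the target has ROTL for the type (or (rotr x, 24) with ROTR).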
SDNode *DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, const SDLoc &DL) {
// Must be a legal type. Expanded and promoted types won't work with rotates.
EVT VT = LHS.getValueType();
if (!TLI.isTypeLegal(VT)) return nullptr;
// The target must have at least one rotate flavor.
bool HasROTL = hasOperation(ISD::ROTL, VT);
bool HasROTR = hasOperation(ISD::ROTR, VT);
if (!HasROTL && !HasROTR) return nullptr;
// Check for truncated rotate.
if (LHS.getOpcode() == ISD::TRUNCATE && RHS.getOpcode() == ISD::TRUNCATE &&
LHS.getOperand(0).getValueType() == RHS.getOperand(0).getValueType()) {
assert(LHS.getValueType() == RHS.getValueType());
if (SDNode *Rot = MatchRotate(LHS.getOperand(0), RHS.getOperand(0), DL)) {
return DAG.getNode(ISD::TRUNCATE, SDLoc(LHS), LHS.getValueType(),
SDValue(Rot, 0)).getNode();
}
}
// Match "(X shl/srl V1) & V2" where V2 may not be present.
SDValue LHSShift; // The shift.
SDValue LHSMask; // AND value if any.
matchRotateHalf(DAG, LHS, LHSShift, LHSMask);
SDValue RHSShift; // The shift.
SDValue RHSMask; // AND value if any.
matchRotateHalf(DAG, RHS, RHSShift, RHSMask);
// If neither side matched a rotate half, bail
if (!LHSShift && !RHSShift)
return nullptr;
// InstCombine may have combined a constant shl, srl, mul, or udiv with one
// side of the rotate, so try to handle that here. In all cases we need to
// pass the matched shift from the opposite side to compute the opcode and
// needed shift amount to extract. We still want to do this if both sides
// matched a rotate half because one half may be a potential overshift that
// can be broken down (i.e., if InstCombine merged two shl or srl ops into a
// single one).
// Have LHS side of the rotate, try to extract the needed shift from the RHS.
if (LHSShift)
if (SDValue NewRHSShift =
extractShiftForRotate(DAG, LHSShift, RHS, RHSMask, DL))
RHSShift = NewRHSShift;
// Have RHS side of the rotate, try to extract the needed shift from the LHS.
if (RHSShift)
if (SDValue NewLHSShift =
extractShiftForRotate(DAG, RHSShift, LHS, LHSMask, DL))
LHSShift = NewLHSShift;
// If a side is still missing, nothing else we can do.
if (!RHSShift || !LHSShift)
return nullptr;
// At this point we've matched or extracted a shift op on each side.
if (LHSShift.getOperand(0) != RHSShift.getOperand(0))
return nullptr; // Not shifting the same value.
if (LHSShift.getOpcode() == RHSShift.getOpcode())
return nullptr; // Shifts must disagree.
// Canonicalize shl to left side in a shl/srl pair.
if (RHSShift.getOpcode() == ISD::SHL) {
std::swap(LHS, RHS);
std::swap(LHSShift, RHSShift);
std::swap(LHSMask, RHSMask);
}
unsigned EltSizeInBits = VT.getScalarSizeInBits();
SDValue LHSShiftArg = LHSShift.getOperand(0);
SDValue LHSShiftAmt = LHSShift.getOperand(1);
SDValue RHSShiftArg = RHSShift.getOperand(0);
SDValue RHSShiftAmt = RHSShift.getOperand(1);
// fold (or (shl x, C1), (srl x, C2)) -> (rotl x, C1)
// fold (or (shl x, C1), (srl x, C2)) -> (rotr x, C2)
auto MatchRotateSum = [EltSizeInBits](ConstantSDNode *LHS,
ConstantSDNode *RHS) {
return (LHS->getAPIntValue() + RHS->getAPIntValue()) == EltSizeInBits;
};
if (ISD::matchBinaryPredicate(LHSShiftAmt, RHSShiftAmt, MatchRotateSum)) {
SDValue Rot = DAG.getNode(HasROTL ? ISD::ROTL : ISD::ROTR, DL, VT,
LHSShiftArg, HasROTL ? LHSShiftAmt : RHSShiftAmt);
// If there is an AND of either shifted operand, apply it to the result.
if (LHSMask.getNode() || RHSMask.getNode()) {
SDValue AllOnes = DAG.getAllOnesConstant(DL, VT);
SDValue Mask = AllOnes;
if (LHSMask.getNode()) {
SDValue RHSBits = DAG.getNode(ISD::SRL, DL, VT, AllOnes, RHSShiftAmt);
Mask = DAG.getNode(ISD::AND, DL, VT, Mask,
DAG.getNode(ISD::OR, DL, VT, LHSMask, RHSBits));
}
if (RHSMask.getNode()) {
SDValue LHSBits = DAG.getNode(ISD::SHL, DL, VT, AllOnes, LHSShiftAmt);
Mask = DAG.getNode(ISD::AND, DL, VT, Mask,
DAG.getNode(ISD::OR, DL, VT, RHSMask, LHSBits));
}
Rot = DAG.getNode(ISD::AND, DL, VT, Rot, Mask);
}
return Rot.getNode();
}
// If there is a mask here, and we have a variable shift, we can't be sure
// that we're masking out the right stuff.
if (LHSMask.getNode() || RHSMask.getNode())
return nullptr;
// If the shift amount is sign/zext/any-extended just peel it off.
SDValue LExtOp0 = LHSShiftAmt;
SDValue RExtOp0 = RHSShiftAmt;
if ((LHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND ||
LHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND ||
LHSShiftAmt.getOpcode() == ISD::ANY_EXTEND ||
LHSShiftAmt.getOpcode() == ISD::TRUNCATE) &&
(RHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND ||
RHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND ||
RHSShiftAmt.getOpcode() == ISD::ANY_EXTEND ||
RHSShiftAmt.getOpcode() == ISD::TRUNCATE)) {
LExtOp0 = LHSShiftAmt.getOperand(0);
RExtOp0 = RHSShiftAmt.getOperand(0);
}
SDNode *TryL = MatchRotatePosNeg(LHSShiftArg, LHSShiftAmt, RHSShiftAmt,
LExtOp0, RExtOp0, ISD::ROTL, ISD::ROTR, DL);
if (TryL)
return TryL;
SDNode *TryR = MatchRotatePosNeg(RHSShiftArg, RHSShiftAmt, LHSShiftAmt,
RExtOp0, LExtOp0, ISD::ROTR, ISD::ROTL, DL);
if (TryR)
return TryR;
return nullptr;
}
namespace {
/// Represents the known origin of an individual byte in a load combine pattern. The
/// value of the byte is either constant zero or comes from memory.
struct ByteProvider {
// For constant zero providers Load is set to nullptr. For memory providers
// Load represents the node which loads the byte from memory.
// ByteOffset is the offset of the byte in the value produced by the load.
LoadSDNode *Load = nullptr;
unsigned ByteOffset = 0;
ByteProvider() = default;
static ByteProvider getMemory(LoadSDNode *Load, unsigned ByteOffset) {
return ByteProvider(Load, ByteOffset);
}
static ByteProvider getConstantZero() { return ByteProvider(nullptr, 0); }
bool isConstantZero() const { return !Load; }
bool isMemory() const { return Load; }
bool operator==(const ByteProvider &Other) const {
return Other.Load == Load && Other.ByteOffset == ByteOffset;
}
private:
ByteProvider(LoadSDNode *Load, unsigned ByteOffset)
: Load(Load), ByteOffset(ByteOffset) {}
};
} // end anonymous namespace
/// Recursively traverses the expression calculating the origin of the requested
/// byte of the given value. Returns None if the provider can't be calculated.
///
/// For all values except the root of the expression, verifies that the value
/// has exactly one use; if not, returns None. This way, if the origin of the
/// byte is returned, it is guaranteed that the values which contribute to
/// the byte are not used outside of this expression.
///
/// Because the parts of the expression are not allowed to have more than one
/// use, this function iterates over trees, not DAGs, so it never visits the same
/// node more than once.
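///
/// For example (illustrative, with hypothetical i8 pointers p0 and p1 where
/// p1 points one byte past p0), in
/// (or (zero_extend (load p0)) (shl (zero_extend (load p1)) 8))
/// byte 0 is provided by the load of p0 and byte 1 by the load of p1.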
static const Optional<ByteProvider>
calculateByteProvider(SDValue Op, unsigned Index, unsigned Depth,
bool Root = false) {
// A typical i64-by-i8 pattern requires recursion up to a depth of 8 calls.
if (Depth == 10)
return None;
if (!Root && !Op.hasOneUse())
return None;
assert(Op.getValueType().isScalarInteger() && "can't handle other types");
unsigned BitWidth = Op.getValueSizeInBits();
if (BitWidth % 8 != 0)
return None;
unsigned ByteWidth = BitWidth / 8;
assert(Index < ByteWidth && "invalid index requested");
(void) ByteWidth;
switch (Op.getOpcode()) {
case ISD::OR: {
auto LHS = calculateByteProvider(Op->getOperand(0), Index, Depth + 1);
if (!LHS)
return None;
auto RHS = calculateByteProvider(Op->getOperand(1), Index, Depth + 1);
if (!RHS)
return None;
if (LHS->isConstantZero())
return RHS;
if (RHS->isConstantZero())
return LHS;
return None;
}
case ISD::SHL: {
auto ShiftOp = dyn_cast<ConstantSDNode>(Op->getOperand(1));
if (!ShiftOp)
return None;
uint64_t BitShift = ShiftOp->getZExtValue();
if (BitShift % 8 != 0)
return None;
uint64_t ByteShift = BitShift / 8;
return Index < ByteShift
? ByteProvider::getConstantZero()
: calculateByteProvider(Op->getOperand(0), Index - ByteShift,
Depth + 1);
}
case ISD::ANY_EXTEND:
case ISD::SIGN_EXTEND:
case ISD::ZERO_EXTEND: {
SDValue NarrowOp = Op->getOperand(0);
unsigned NarrowBitWidth = NarrowOp.getScalarValueSizeInBits();
if (NarrowBitWidth % 8 != 0)
return None;
uint64_t NarrowByteWidth = NarrowBitWidth / 8;
if (Index >= NarrowByteWidth)
return Op.getOpcode() == ISD::ZERO_EXTEND
? Optional<ByteProvider>(ByteProvider::getConstantZero())
: None;
return calculateByteProvider(NarrowOp, Index, Depth + 1);
}
case ISD::BSWAP:
return calculateByteProvider(Op->getOperand(0), ByteWidth - Index - 1,
Depth + 1);
case ISD::LOAD: {
auto L = cast<LoadSDNode>(Op.getNode());
if (L->isVolatile() || L->isIndexed())
return None;
unsigned NarrowBitWidth = L->getMemoryVT().getSizeInBits();
if (NarrowBitWidth % 8 != 0)
return None;
uint64_t NarrowByteWidth = NarrowBitWidth / 8;
if (Index >= NarrowByteWidth)
return L->getExtensionType() == ISD::ZEXTLOAD
? Optional<ByteProvider>(ByteProvider::getConstantZero())
: None;
return ByteProvider::getMemory(L, Index);
}
}
return None;
}
static unsigned LittleEndianByteAt(unsigned BW, unsigned i) {
return i;
}
static unsigned BigEndianByteAt(unsigned BW, unsigned i) {
return BW - i - 1;
}
// Check if the byte offsets we are looking at match either a big- or
// little-endian value load. Return true for big endian, false for little
// endian, and None if the match failed.
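// For example (illustrative, Width == 4): offsets {0, 1, 2, 3} relative to
// FirstOffset indicate little endian, {3, 2, 1, 0} big endian, and any other
// arrangement fails to match.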
static Optional<bool> isBigEndian(const SmallVector<int64_t, 4> &ByteOffsets,
int64_t FirstOffset) {
// The endianness can be decided only when there are at least 2 bytes.
unsigned Width = ByteOffsets.size();
if (Width < 2)
return None;
bool BigEndian = true, LittleEndian = true;
for (unsigned i = 0; i < Width; i++) {
int64_t CurrentByteOffset = ByteOffsets[i] - FirstOffset;
LittleEndian &= CurrentByteOffset == LittleEndianByteAt(Width, i);
BigEndian &= CurrentByteOffset == BigEndianByteAt(Width, i);
if (!BigEndian && !LittleEndian)
return None;
}
assert((BigEndian != LittleEndian) && "It should be either big endian or "
"little endian");
return BigEndian;
}
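/// Strip truncates and sign/zero/any extensions to reach the underlying value.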
static SDValue stripTruncAndExt(SDValue Value) {
switch (Value.getOpcode()) {
case ISD::TRUNCATE:
case ISD::ZERO_EXTEND:
case ISD::SIGN_EXTEND:
case ISD::ANY_EXTEND:
return stripTruncAndExt(Value.getOperand(0));
}
return Value;
}
/// Match a pattern where a wide type scalar value is stored by several narrow
/// stores. Fold it into a single store or a BSWAP and a store if the target
/// supports it.
///
/// Assuming little endian target:
/// i8 *p = ...
/// i32 val = ...
/// p[0] = (val >> 0) & 0xFF;
/// p[1] = (val >> 8) & 0xFF;
/// p[2] = (val >> 16) & 0xFF;
/// p[3] = (val >> 24) & 0xFF;
/// =>
/// *((i32)p) = val;
///
/// i8 *p = ...
/// i32 val = ...
/// p[0] = (val >> 24) & 0xFF;
/// p[1] = (val >> 16) & 0xFF;
/// p[2] = (val >> 8) & 0xFF;
/// p[3] = (val >> 0) & 0xFF;
/// =>
/// *((i32)p) = BSWAP(val);
SDValue DAGCombiner::MatchStoreCombine(StoreSDNode *N) {
// Collect all the stores in the chain.
SDValue Chain;
SmallVector<StoreSDNode *, 8> Stores;
for (StoreSDNode *Store = N; Store; Store = dyn_cast<StoreSDNode>(Chain)) {
if (Store->getMemoryVT() != MVT::i8 ||
Store->isVolatile() || Store->isIndexed())
return SDValue();
Stores.push_back(Store);
Chain = Store->getChain();
}
// Handle simple types only.
unsigned Width = Stores.size();
EVT VT = EVT::getIntegerVT(
*DAG.getContext(), Width * N->getMemoryVT().getSizeInBits());
if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
return SDValue();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (LegalOperations && !TLI.isOperationLegal(ISD::STORE, VT))
return SDValue();
// Check if all the bytes of the combined value we are looking at are stored
// to the same base address. Collect byte offsets from the Base address into
// ByteOffsets.
SDValue CombinedValue;
SmallVector<int64_t, 4> ByteOffsets(Width, INT64_MAX);
int64_t FirstOffset = INT64_MAX;
StoreSDNode *FirstStore = nullptr;
Optional<BaseIndexOffset> Base;
for (auto Store : Stores) {
// All the stores store a different byte of the CombinedValue. A truncate is
// required to get that byte value.
SDValue Trunc = Store->getValue();
if (Trunc.getOpcode() != ISD::TRUNCATE)
return SDValue();
// A shift operation is required to get the right byte offset, except for the
// first byte.
int64_t Offset = 0;
SDValue Value = Trunc.getOperand(0);
if (Value.getOpcode() == ISD::SRL ||
Value.getOpcode() == ISD::SRA) {
ConstantSDNode *ShiftOffset =
dyn_cast<ConstantSDNode>(Value.getOperand(1));
// Trying to match the following pattern. The shift offset must be
// a constant and a multiple of 8. It is the byte offset in "y".
//
// x = srl y, offset
// i8 z = trunc x
// store z, ...
if (!ShiftOffset || (ShiftOffset->getSExtValue() % 8))
return SDValue();
Offset = ShiftOffset->getSExtValue()/8;
Value = Value.getOperand(0);
}
// Stores must share the same combined value with different offsets.
if (!CombinedValue)
CombinedValue = Value;
else if (stripTruncAndExt(CombinedValue) != stripTruncAndExt(Value))
return SDValue();
// The trunc and all the extend operations should be stripped to get the
// real value being stored.
else if (CombinedValue.getValueType() != VT) {
if (Value.getValueType() == VT ||
Value.getValueSizeInBits() > CombinedValue.getValueSizeInBits())
CombinedValue = Value;
// Give up if the combined value type is smaller than the store size.
if (CombinedValue.getValueSizeInBits() < VT.getSizeInBits())
return SDValue();
}
// Stores must share the same base address
BaseIndexOffset Ptr = BaseIndexOffset::match(Store, DAG);
int64_t ByteOffsetFromBase = 0;
if (!Base)
Base = Ptr;
else if (!Base->equalBaseIndex(Ptr, DAG, ByteOffsetFromBase))
return SDValue();
// Remember the first byte store
if (ByteOffsetFromBase < FirstOffset) {
FirstStore = Store;
FirstOffset = ByteOffsetFromBase;
}
// Map the offset in the store and the offset in the combined value, and
// early return if it has been set before.
if (Offset < 0 || Offset >= Width || ByteOffsets[Offset] != INT64_MAX)
return SDValue();
ByteOffsets[Offset] = ByteOffsetFromBase;
}
assert(FirstOffset != INT64_MAX && "First byte offset must be set");
assert(FirstStore && "First store must be set");
// Check if the bytes of the combined value we are looking at match either a
// big- or little-endian value store.
Optional<bool> IsBigEndian = isBigEndian(ByteOffsets, FirstOffset);
if (!IsBigEndian.hasValue())
return SDValue();
// The node we are looking at matches the pattern; check if we can replace it
// with a single store, plus a bswap if needed.
// If the store needs a byte swap, check if the target supports it.
bool NeedsBswap = DAG.getDataLayout().isBigEndian() != *IsBigEndian;
// Before legalize we can introduce illegal bswaps which will later be
// converted to an explicit bswap sequence. This way we end up with a single
// store and byte shuffling instead of several stores and byte shuffling.
if (NeedsBswap && LegalOperations && !TLI.isOperationLegal(ISD::BSWAP, VT))
return SDValue();
// Check that a store of the wide type is both allowed and fast on the target
bool Fast = false;
bool Allowed =
TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
*FirstStore->getMemOperand(), &Fast);
if (!Allowed || !Fast)
return SDValue();
if (VT != CombinedValue.getValueType()) {
assert(CombinedValue.getValueType().getSizeInBits() > VT.getSizeInBits() &&
"Get unexpected store value to combine");
CombinedValue = DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT,
CombinedValue);
}
if (NeedsBswap)
CombinedValue = DAG.getNode(ISD::BSWAP, SDLoc(N), VT, CombinedValue);
SDValue NewStore =
DAG.getStore(Chain, SDLoc(N), CombinedValue, FirstStore->getBasePtr(),
FirstStore->getPointerInfo(), FirstStore->getAlignment());
// Rely on other DAG combine rules to remove the other individual stores.
DAG.ReplaceAllUsesWith(N, NewStore.getNode());
return NewStore;
}
/// Match a pattern where a wide type scalar value is loaded by several narrow
/// loads and combined by shifts and ors. Fold it into a single load or a load
/// and a BSWAP if the target supports it.
///
/// Assuming little endian target:
/// i8 *a = ...
/// i32 val = a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24)
/// =>
/// i32 val = *((i32)a)
///
/// i8 *a = ...
/// i32 val = (a[0] << 24) | (a[1] << 16) | (a[2] << 8) | a[3]
/// =>
/// i32 val = BSWAP(*((i32)a))
///
/// TODO: This rule matches complex patterns with OR node roots and doesn't
/// interact well with the worklist mechanism. When a part of the pattern is
/// updated (e.g. one of the loads) its direct users are put into the worklist,
/// but the root node of the pattern which triggers the load combine is not
/// necessarily a direct user of the changed node. For example, once the address
/// of t28 load is reassociated load combine won't be triggered:
/// t25: i32 = add t4, Constant:i32<2>
/// t26: i64 = sign_extend t25
/// t27: i64 = add t2, t26
/// t28: i8,ch = load<LD1[%tmp9]> t0, t27, undef:i64
/// t29: i32 = zero_extend t28
/// t32: i32 = shl t29, Constant:i8<8>
/// t33: i32 = or t23, t32
/// As a possible fix visitLoad can check if the load can be a part of a load
/// combine pattern and add corresponding OR roots to the worklist.
SDValue DAGCombiner::MatchLoadCombine(SDNode *N) {
assert(N->getOpcode() == ISD::OR &&
"Can only match load combining against OR nodes");
// Handles simple types only
EVT VT = N->getValueType(0);
if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
return SDValue();
unsigned ByteWidth = VT.getSizeInBits() / 8;
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
// Before legalize we can introduce too wide illegal loads which will be later
// split into legal sized loads. This enables us to combine i64 load by i8
// patterns to a couple of i32 loads on 32 bit targets.
if (LegalOperations && !TLI.isOperationLegal(ISD::LOAD, VT))
return SDValue();
bool IsBigEndianTarget = DAG.getDataLayout().isBigEndian();
auto MemoryByteOffset = [&] (ByteProvider P) {
assert(P.isMemory() && "Must be a memory byte provider");
unsigned LoadBitWidth = P.Load->getMemoryVT().getSizeInBits();
assert(LoadBitWidth % 8 == 0 &&
"can only analyze providers for individual bytes, not bits");
unsigned LoadByteWidth = LoadBitWidth / 8;
return IsBigEndianTarget
? BigEndianByteAt(LoadByteWidth, P.ByteOffset)
: LittleEndianByteAt(LoadByteWidth, P.ByteOffset);
};
Optional<BaseIndexOffset> Base;
SDValue Chain;
SmallPtrSet<LoadSDNode *, 8> Loads;
Optional<ByteProvider> FirstByteProvider;
int64_t FirstOffset = INT64_MAX;
// Check if all the bytes of the OR we are looking at are loaded from the same
// base address. Collect bytes offsets from Base address in ByteOffsets.
SmallVector<int64_t, 4> ByteOffsets(ByteWidth);
for (unsigned i = 0; i < ByteWidth; i++) {
auto P = calculateByteProvider(SDValue(N, 0), i, 0, /*Root=*/true);
if (!P || !P->isMemory()) // All the bytes must be loaded from memory
return SDValue();
LoadSDNode *L = P->Load;
assert(L->hasNUsesOfValue(1, 0) && !L->isVolatile() && !L->isIndexed() &&
"Must be enforced by calculateByteProvider");
assert(L->getOffset().isUndef() && "Unindexed load must have undef offset");
// All loads must share the same chain
SDValue LChain = L->getChain();
if (!Chain)
Chain = LChain;
else if (Chain != LChain)
return SDValue();
// Loads must share the same base address
BaseIndexOffset Ptr = BaseIndexOffset::match(L, DAG);
int64_t ByteOffsetFromBase = 0;
if (!Base)
Base = Ptr;
else if (!Base->equalBaseIndex(Ptr, DAG, ByteOffsetFromBase))
return SDValue();
// Calculate the offset of the current byte from the base address
ByteOffsetFromBase += MemoryByteOffset(*P);
ByteOffsets[i] = ByteOffsetFromBase;
// Remember the first byte load
if (ByteOffsetFromBase < FirstOffset) {
FirstByteProvider = P;
FirstOffset = ByteOffsetFromBase;
}
Loads.insert(L);
}
assert(!Loads.empty() && "All the bytes of the value must be loaded from "
"memory, so there must be at least one load which produces the value");
assert(Base && "Base address of the accessed memory location must be set");
assert(FirstOffset != INT64_MAX && "First byte offset must be set");
// Check if the bytes of the OR we are looking at match with either big or
// little endian value load
Optional<bool> IsBigEndian = isBigEndian(ByteOffsets, FirstOffset);
if (!IsBigEndian.hasValue())
return SDValue();
assert(FirstByteProvider && "must be set");
// Ensure that the first byte is loaded from zero offset of the first load.
// So the combined value can be loaded from the first load address.
if (MemoryByteOffset(*FirstByteProvider) != 0)
return SDValue();
LoadSDNode *FirstLoad = FirstByteProvider->Load;
// The node we are looking at matches with the pattern, check if we can
// replace it with a single load and bswap if needed.
// If the load needs byte swap check if the target supports it
bool NeedsBswap = IsBigEndianTarget != *IsBigEndian;
// Before legalize we can introduce illegal bswaps which will be later
// converted to an explicit bswap sequence. This way we end up with a single
// load and byte shuffling instead of several loads and byte shuffling.
if (NeedsBswap && LegalOperations && !TLI.isOperationLegal(ISD::BSWAP, VT))
return SDValue();
// Check that a load of the wide type is both allowed and fast on the target
bool Fast = false;
bool Allowed = TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(),
VT, *FirstLoad->getMemOperand(), &Fast);
if (!Allowed || !Fast)
return SDValue();
SDValue NewLoad =
DAG.getLoad(VT, SDLoc(N), Chain, FirstLoad->getBasePtr(),
FirstLoad->getPointerInfo(), FirstLoad->getAlignment());
// Transfer chain users from old loads to the new load.
for (LoadSDNode *L : Loads)
DAG.ReplaceAllUsesOfValueWith(SDValue(L, 1), SDValue(NewLoad.getNode(), 1));
return NeedsBswap ? DAG.getNode(ISD::BSWAP, SDLoc(N), VT, NewLoad) : NewLoad;
}
// If the target has andn, bsl, or a similar bit-select instruction,
// we want to unfold masked merge, with canonical pattern of:
// |     A     |  |B|
// ((x ^ y) & m) ^ y
//  |  D  |
// Into:
// (x & m) | (y & ~m)
// If y is a constant, and the 'andn' does not work with immediates,
// we unfold into a different pattern:
// ~(~x & m) & (m | y)
// NOTE: we don't unfold the pattern if 'xor' is actually a 'not', because at
// the very least that breaks andnpd / andnps patterns, and because those
// patterns are simplified in IR and shouldn't be created in the DAG
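// A concrete example of the unfold, with 4-bit values:
// x = 0b1100, y = 0b1010, m = 0b0110
// ((x ^ y) & m) ^ y  = (0b0110 & 0b0110) ^ 0b1010 = 0b1100
// (x & m) | (y & ~m) = 0b0100 | 0b1000           = 0b1100
// i.e. the result takes its bits from x where m is set, and from y elsewhere.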
SDValue DAGCombiner::unfoldMaskedMerge(SDNode *N) {
assert(N->getOpcode() == ISD::XOR);
// Don't touch 'not' (i.e. where y = -1).
if (isAllOnesOrAllOnesSplat(N->getOperand(1)))
return SDValue();
EVT VT = N->getValueType(0);
// There are 3 commutable operators in the pattern,
// so we have to deal with 8 possible variants of the basic pattern.
SDValue X, Y, M;
auto matchAndXor = [&X, &Y, &M](SDValue And, unsigned XorIdx, SDValue Other) {
if (And.getOpcode() != ISD::AND || !And.hasOneUse())
return false;
SDValue Xor = And.getOperand(XorIdx);
if (Xor.getOpcode() != ISD::XOR || !Xor.hasOneUse())
return false;
SDValue Xor0 = Xor.getOperand(0);
SDValue Xor1 = Xor.getOperand(1);
// Don't touch 'not' (i.e. where y = -1).
if (isAllOnesOrAllOnesSplat(Xor1))
return false;
if (Other == Xor0)
std::swap(Xor0, Xor1);
if (Other != Xor1)
return false;
X = Xor0;
Y = Xor1;
M = And.getOperand(XorIdx ? 0 : 1);
return true;
};
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
if (!matchAndXor(N0, 0, N1) && !matchAndXor(N0, 1, N1) &&
!matchAndXor(N1, 0, N0) && !matchAndXor(N1, 1, N0))
return SDValue();
// Don't do anything if the mask is constant. This should not be reachable.
// InstCombine should have already unfolded this pattern, and DAGCombiner
// probably shouldn't produce it, either.
if (isa<ConstantSDNode>(M.getNode()))
return SDValue();
// We can transform if the target has AndNot
if (!TLI.hasAndNot(M))
return SDValue();
SDLoc DL(N);
// If Y is a constant, check that 'andn' works with immediates.
if (!TLI.hasAndNot(Y)) {
assert(TLI.hasAndNot(X) && "Only mask is a variable? Unreachable.");
// If not, we need to do a bit more work to make sure andn is still used.
SDValue NotX = DAG.getNOT(DL, X, VT);
SDValue LHS = DAG.getNode(ISD::AND, DL, VT, NotX, M);
SDValue NotLHS = DAG.getNOT(DL, LHS, VT);
SDValue RHS = DAG.getNode(ISD::OR, DL, VT, M, Y);
return DAG.getNode(ISD::AND, DL, VT, NotLHS, RHS);
}
SDValue LHS = DAG.getNode(ISD::AND, DL, VT, X, M);
SDValue NotM = DAG.getNOT(DL, M, VT);
SDValue RHS = DAG.getNode(ISD::AND, DL, VT, Y, NotM);
return DAG.getNode(ISD::OR, DL, VT, LHS, RHS);
}
SDValue DAGCombiner::visitXOR(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N0.getValueType();
// fold vector ops
if (VT.isVector()) {
if (SDValue FoldedVOp = SimplifyVBinOp(N))
return FoldedVOp;
// fold (xor x, 0) -> x, vector edition
if (ISD::isBuildVectorAllZeros(N0.getNode()))
return N1;
if (ISD::isBuildVectorAllZeros(N1.getNode()))
return N0;
}
// fold (xor undef, undef) -> 0. This is a common idiom (misuse).
SDLoc DL(N);
if (N0.isUndef() && N1.isUndef())
return DAG.getConstant(0, DL, VT);
// fold (xor x, undef) -> undef
if (N0.isUndef())
return N0;
if (N1.isUndef())
return N1;
// fold (xor c1, c2) -> c1^c2
ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
ConstantSDNode *N1C = getAsNonOpaqueConstant(N1);
if (N0C && N1C)
return DAG.FoldConstantArithmetic(ISD::XOR, DL, VT, N0C, N1C);
// canonicalize constant to RHS
if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
!DAG.isConstantIntBuildVectorOrConstantInt(N1))
return DAG.getNode(ISD::XOR, DL, VT, N1, N0);
// fold (xor x, 0) -> x
if (isNullConstant(N1))
return N0;
if (SDValue NewSel = foldBinOpIntoSelect(N))
return NewSel;
// reassociate xor
if (SDValue RXOR = reassociateOps(ISD::XOR, DL, N0, N1, N->getFlags()))
return RXOR;
// fold !(x cc y) -> (x !cc y)
unsigned N0Opcode = N0.getOpcode();
SDValue LHS, RHS, CC;
if (TLI.isConstTrueVal(N1.getNode()) && isSetCCEquivalent(N0, LHS, RHS, CC)) {
ISD::CondCode NotCC = ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
LHS.getValueType().isInteger());
if (!LegalOperations ||
TLI.isCondCodeLegal(NotCC, LHS.getSimpleValueType())) {
switch (N0Opcode) {
default:
llvm_unreachable("Unhandled SetCC Equivalent!");
case ISD::SETCC:
return DAG.getSetCC(SDLoc(N0), VT, LHS, RHS, NotCC);
case ISD::SELECT_CC:
return DAG.getSelectCC(SDLoc(N0), LHS, RHS, N0.getOperand(2),
N0.getOperand(3), NotCC);
}
}
}
// fold (not (zext (setcc x, y))) -> (zext (not (setcc x, y)))
if (isOneConstant(N1) && N0Opcode == ISD::ZERO_EXTEND && N0.hasOneUse() &&
isSetCCEquivalent(N0.getOperand(0), LHS, RHS, CC)){
SDValue V = N0.getOperand(0);
SDLoc DL0(N0);
V = DAG.getNode(ISD::XOR, DL0, V.getValueType(), V,
DAG.getConstant(1, DL0, V.getValueType()));
AddToWorklist(V.getNode());
return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, V);
}
// fold (not (or x, y)) -> (and (not x), (not y)) iff x or y are setcc
if (isOneConstant(N1) && VT == MVT::i1 && N0.hasOneUse() &&
(N0Opcode == ISD::OR || N0Opcode == ISD::AND)) {
SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1);
if (isOneUseSetCC(RHS) || isOneUseSetCC(LHS)) {
unsigned NewOpcode = N0Opcode == ISD::AND ? ISD::OR : ISD::AND;
LHS = DAG.getNode(ISD::XOR, SDLoc(LHS), VT, LHS, N1); // LHS = ~LHS
RHS = DAG.getNode(ISD::XOR, SDLoc(RHS), VT, RHS, N1); // RHS = ~RHS
AddToWorklist(LHS.getNode()); AddToWorklist(RHS.getNode());
return DAG.getNode(NewOpcode, DL, VT, LHS, RHS);
}
}
// fold (not (or x, y)) -> (and (not x), (not y)) iff x or y are constants
if (isAllOnesConstant(N1) && N0.hasOneUse() &&
(N0Opcode == ISD::OR || N0Opcode == ISD::AND)) {
SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1);
if (isa<ConstantSDNode>(RHS) || isa<ConstantSDNode>(LHS)) {
unsigned NewOpcode = N0Opcode == ISD::AND ? ISD::OR : ISD::AND;
LHS = DAG.getNode(ISD::XOR, SDLoc(LHS), VT, LHS, N1); // LHS = ~LHS
RHS = DAG.getNode(ISD::XOR, SDLoc(RHS), VT, RHS, N1); // RHS = ~RHS
AddToWorklist(LHS.getNode()); AddToWorklist(RHS.getNode());
return DAG.getNode(NewOpcode, DL, VT, LHS, RHS);
}
}
// fold (not (neg x)) -> (add X, -1)
// FIXME: This can be generalized to (not (sub Y, X)) -> (add X, ~Y) if
// Y is a constant or the subtract has a single use.
if (isAllOnesConstant(N1) && N0.getOpcode() == ISD::SUB &&
isNullConstant(N0.getOperand(0))) {
return DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(1),
DAG.getAllOnesConstant(DL, VT));
}
// fold (xor (and x, y), y) -> (and (not x), y)
if (N0Opcode == ISD::AND && N0.hasOneUse() && N0->getOperand(1) == N1) {
SDValue X = N0.getOperand(0);
SDValue NotX = DAG.getNOT(SDLoc(X), X, VT);
AddToWorklist(NotX.getNode());
return DAG.getNode(ISD::AND, DL, VT, NotX, N1);
}
if ((N0Opcode == ISD::SRL || N0Opcode == ISD::SHL) && N0.hasOneUse()) {
ConstantSDNode *XorC = isConstOrConstSplat(N1);
ConstantSDNode *ShiftC = isConstOrConstSplat(N0.getOperand(1));
unsigned BitWidth = VT.getScalarSizeInBits();
if (XorC && ShiftC) {
// Don't crash on an oversized shift. We cannot guarantee that a bogus
// shift has been simplified to undef.
uint64_t ShiftAmt = ShiftC->getLimitedValue();
if (ShiftAmt < BitWidth) {
APInt Ones = APInt::getAllOnesValue(BitWidth);
Ones = N0Opcode == ISD::SHL ? Ones.shl(ShiftAmt) : Ones.lshr(ShiftAmt);
if (XorC->getAPIntValue() == Ones) {
// If the xor constant is a shifted -1, do a 'not' before the shift:
// xor (X << ShiftC), XorC --> (not X) << ShiftC
// xor (X >> ShiftC), XorC --> (not X) >> ShiftC
SDValue Not = DAG.getNOT(DL, N0.getOperand(0), VT);
return DAG.getNode(N0Opcode, DL, VT, Not, N0.getOperand(1));
}
}
}
}
// fold Y = sra (X, size(X)-1); xor (add (X, Y), Y) -> (abs X)
if (TLI.isOperationLegalOrCustom(ISD::ABS, VT)) {
SDValue A = N0Opcode == ISD::ADD ? N0 : N1;
SDValue S = N0Opcode == ISD::SRA ? N0 : N1;
if (A.getOpcode() == ISD::ADD && S.getOpcode() == ISD::SRA) {
SDValue A0 = A.getOperand(0), A1 = A.getOperand(1);
SDValue S0 = S.getOperand(0);
if ((A0 == S && A1 == S0) || (A1 == S && A0 == S0)) {
unsigned OpSizeInBits = VT.getScalarSizeInBits();
if (ConstantSDNode *C = isConstOrConstSplat(S.getOperand(1)))
if (C->getAPIntValue() == (OpSizeInBits - 1))
return DAG.getNode(ISD::ABS, DL, VT, S0);
}
}
}
// fold (xor x, x) -> 0
if (N0 == N1)
return tryFoldToZero(DL, TLI, VT, DAG, LegalOperations);
// fold (xor (shl 1, x), -1) -> (rotl ~1, x)
// Here is a concrete example of this equivalence:
// i16 x == 14
// i16 shl == 1 << 14 == 16384 == 0b0100000000000000
// i16 xor == ~(1 << 14) == 49151 == 0b1011111111111111
//
// =>
//
// i16 ~1 == 0b1111111111111110
// i16 rol(~1, 14) == 0b1011111111111111
//
// Some additional tips to help conceptualize this transform:
// - Try to see the operation as placing a single zero in a value of all ones.
// - There exists no value for x which would allow the result to contain zero.
// - Values of x larger than the bitwidth are undefined and do not require a
// consistent result.
// - Pushing the zero left requires shifting one-bits in from the right.
// A rotate left of ~1 is a nice way of achieving the desired result.
if (TLI.isOperationLegalOrCustom(ISD::ROTL, VT) && N0Opcode == ISD::SHL &&
isAllOnesConstant(N1) && isOneConstant(N0.getOperand(0))) {
return DAG.getNode(ISD::ROTL, DL, VT, DAG.getConstant(~1, DL, VT),
N0.getOperand(1));
}
// Simplify: xor (op x...), (op y...) -> (op (xor x, y))
if (N0Opcode == N1.getOpcode())
if (SDValue V = hoistLogicOpWithSameOpcodeHands(N))
return V;
// Unfold ((x ^ y) & m) ^ y into (x & m) | (y & ~m) if profitable
if (SDValue MM = unfoldMaskedMerge(N))
return MM;
// Simplify the expression using non-local knowledge.
if (SimplifyDemandedBits(SDValue(N, 0)))
return SDValue(N, 0);
return SDValue();
}
/// Handle transforms common to the three shifts, when the shift amount is a
/// constant.
/// We are looking for: (shift being one of shl/sra/srl)
/// shift (binop X, C0), C1
/// And want to transform into:
/// binop (shift X, C1), (shift C0, C1)
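/// For example, (shl (add X, 3), 2) becomes (add (shl X, 2), 12), exposing
/// the shifted constant 3 << 2 == 12 to further folding.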
SDValue DAGCombiner::visitShiftByConstant(SDNode *N, ConstantSDNode *Amt) {
// Do not turn a 'not' into a regular xor.
if (isBitwiseNot(N->getOperand(0)))
return SDValue();
// The inner binop must be one-use, since we want to replace it.
SDNode *LHS = N->getOperand(0).getNode();
if (!LHS->hasOneUse()) return SDValue();
// We want to pull some binops through shifts, so that we have (and (shift))
// instead of (shift (and)), likewise for add, or, xor, etc. This sort of
// thing happens with address calculations, so it's important to canonicalize
// it.
switch (LHS->getOpcode()) {
default:
return SDValue();
case ISD::OR:
case ISD::XOR:
case ISD::AND:
break;
case ISD::ADD:
if (N->getOpcode() != ISD::SHL)
return SDValue(); // only shl(add) not sr[al](add).
break;
}
// We require the RHS of the binop to be a constant and not opaque as well.
ConstantSDNode *BinOpCst = getAsNonOpaqueConstant(LHS->getOperand(1));
if (!BinOpCst)
return SDValue();
// FIXME: disable this unless the input to the binop is a shift by a constant
// or is a copy/select. Enable this in other cases once we figure out when it
// is exactly profitable.
SDValue BinOpLHSVal = LHS->getOperand(0);
bool IsShiftByConstant = (BinOpLHSVal.getOpcode() == ISD::SHL ||
BinOpLHSVal.getOpcode() == ISD::SRA ||
BinOpLHSVal.getOpcode() == ISD::SRL) &&
isa<ConstantSDNode>(BinOpLHSVal.getOperand(1));
bool IsCopyOrSelect = BinOpLHSVal.getOpcode() == ISD::CopyFromReg ||
BinOpLHSVal.getOpcode() == ISD::SELECT;
if (!IsShiftByConstant && !IsCopyOrSelect)
return SDValue();
if (IsCopyOrSelect && N->hasOneUse())
return SDValue();
EVT VT = N->getValueType(0);
if (!TLI.isDesirableToCommuteWithShift(N, Level))
return SDValue();
// Fold the constants, shifting the binop RHS by the shift amount.
SDValue NewRHS = DAG.getNode(N->getOpcode(), SDLoc(LHS->getOperand(1)),
N->getValueType(0),
LHS->getOperand(1), N->getOperand(1));
assert(isa<ConstantSDNode>(NewRHS) && "Folding was not successful!");
// Create the new shift.
SDValue NewShift = DAG.getNode(N->getOpcode(),
SDLoc(LHS->getOperand(0)),
VT, LHS->getOperand(0), N->getOperand(1));
// Create the new binop.
return DAG.getNode(LHS->getOpcode(), SDLoc(N), VT, NewShift, NewRHS);
}
SDValue DAGCombiner::distributeTruncateThroughAnd(SDNode *N) {
assert(N->getOpcode() == ISD::TRUNCATE);
assert(N->getOperand(0).getOpcode() == ISD::AND);
// (truncate:TruncVT (and N00, N01C)) -> (and (truncate:TruncVT N00), TruncC)
EVT TruncVT = N->getValueType(0);
if (N->hasOneUse() && N->getOperand(0).hasOneUse() &&
TLI.isTypeDesirableForOp(ISD::AND, TruncVT)) {
SDValue N01 = N->getOperand(0).getOperand(1);
if (isConstantOrConstantVector(N01, /* NoOpaques */ true)) {
SDLoc DL(N);
SDValue N00 = N->getOperand(0).getOperand(0);
SDValue Trunc00 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, N00);
SDValue Trunc01 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, N01);
AddToWorklist(Trunc00.getNode());
AddToWorklist(Trunc01.getNode());
return DAG.getNode(ISD::AND, DL, TruncVT, Trunc00, Trunc01);
}
}
return SDValue();
}
SDValue DAGCombiner::visitRotate(SDNode *N) {
SDLoc dl(N);
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N->getValueType(0);
unsigned Bitsize = VT.getScalarSizeInBits();
// fold (rot x, 0) -> x
if (isNullOrNullSplat(N1))
return N0;
// fold (rot x, c) -> x iff (c % BitSize) == 0
if (isPowerOf2_32(Bitsize) && Bitsize > 1) {
APInt ModuloMask(N1.getScalarValueSizeInBits(), Bitsize - 1);
if (DAG.MaskedValueIsZero(N1, ModuloMask))
return N0;
}
// fold (rot x, c) -> (rot x, c % BitSize)
// TODO - support non-uniform vector amounts.
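// e.g. (rotl i8:x, 11) -> (rotl x, 3), since 11 % 8 == 3.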
if (ConstantSDNode *Cst = isConstOrConstSplat(N1)) {
if (Cst->getAPIntValue().uge(Bitsize)) {
uint64_t RotAmt = Cst->getAPIntValue().urem(Bitsize);
return DAG.getNode(N->getOpcode(), dl, VT, N0,
DAG.getConstant(RotAmt, dl, N1.getValueType()));
}
}
// fold (rot* x, (trunc (and y, c))) -> (rot* x, (and (trunc y), (trunc c))).
if (N1.getOpcode() == ISD::TRUNCATE &&
N1.getOperand(0).getOpcode() == ISD::AND) {
if (SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode()))
return DAG.getNode(N->getOpcode(), dl, VT, N0, NewOp1);
}
unsigned NextOp = N0.getOpcode();
// fold (rot* (rot* x, c2), c1) -> (rot* x, c1 +- c2 % bitsize)
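// e.g. for i8, (rotl (rotl x, 5), 7) -> (rotl x, (7 + 5) % 8) = (rotl x, 4),
// and (rotl (rotr x, 5), 7) -> (rotl x, (7 - 5) % 8) = (rotl x, 2).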
if (NextOp == ISD::ROTL || NextOp == ISD::ROTR) {
SDNode *C1 = DAG.isConstantIntBuildVectorOrConstantInt(N1);
SDNode *C2 = DAG.isConstantIntBuildVectorOrConstantInt(N0.getOperand(1));
if (C1 && C2 && C1->getValueType(0) == C2->getValueType(0)) {
EVT ShiftVT = C1->getValueType(0);
bool SameSide = (N->getOpcode() == NextOp);
unsigned CombineOp = SameSide ? ISD::ADD : ISD::SUB;
if (SDValue CombinedShift =
DAG.FoldConstantArithmetic(CombineOp, dl, ShiftVT, C1, C2)) {
SDValue BitsizeC = DAG.getConstant(Bitsize, dl, ShiftVT);
SDValue CombinedShiftNorm = DAG.FoldConstantArithmetic(
ISD::SREM, dl, ShiftVT, CombinedShift.getNode(),
BitsizeC.getNode());
return DAG.getNode(N->getOpcode(), dl, VT, N0->getOperand(0),
CombinedShiftNorm);
}
}
}
return SDValue();
}
SDValue DAGCombiner::visitSHL(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
if (SDValue V = DAG.simplifyShift(N0, N1))
return V;
EVT VT = N0.getValueType();
EVT ShiftVT = N1.getValueType();
unsigned OpSizeInBits = VT.getScalarSizeInBits();
// fold vector ops
if (VT.isVector()) {
if (SDValue FoldedVOp = SimplifyVBinOp(N))
return FoldedVOp;
BuildVectorSDNode *N1CV = dyn_cast<BuildVectorSDNode>(N1);
// If setcc produces all-one true value then:
// (shl (and (setcc) N01CV) N1CV) -> (and (setcc) N01CV<<N1CV)
if (N1CV && N1CV->isConstant()) {
if (N0.getOpcode() == ISD::AND) {
SDValue N00 = N0->getOperand(0);
SDValue N01 = N0->getOperand(1);
BuildVectorSDNode *N01CV = dyn_cast<BuildVectorSDNode>(N01);
if (N01CV && N01CV->isConstant() && N00.getOpcode() == ISD::SETCC &&
TLI.getBooleanContents(N00.getOperand(0).getValueType()) ==
TargetLowering::ZeroOrNegativeOneBooleanContent) {
if (SDValue C = DAG.FoldConstantArithmetic(ISD::SHL, SDLoc(N), VT,
N01CV, N1CV))
return DAG.getNode(ISD::AND, SDLoc(N), VT, N00, C);
}
}
}
}
ConstantSDNode *N1C = isConstOrConstSplat(N1);
// fold (shl c1, c2) -> c1<<c2
// TODO - support non-uniform vector shift amounts.
ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
if (N0C && N1C && !N1C->isOpaque())
return DAG.FoldConstantArithmetic(ISD::SHL, SDLoc(N), VT, N0C, N1C);
if (SDValue NewSel = foldBinOpIntoSelect(N))
return NewSel;
// if (shl x, c) is known to be zero, return 0
if (DAG.MaskedValueIsZero(SDValue(N, 0),
APInt::getAllOnesValue(OpSizeInBits)))
return DAG.getConstant(0, SDLoc(N), VT);
// fold (shl x, (trunc (and y, c))) -> (shl x, (and (trunc y), (trunc c))).
if (N1.getOpcode() == ISD::TRUNCATE &&
N1.getOperand(0).getOpcode() == ISD::AND) {
if (SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode()))
return DAG.getNode(ISD::SHL, SDLoc(N), VT, N0, NewOp1);
}
// TODO - support non-uniform vector shift amounts.
if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
return SDValue(N, 0);
// fold (shl (shl x, c1), c2) -> 0 or (shl x, (add c1, c2))
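// e.g. for i32, (shl (shl x, 10), 25) -> 0 since 10 + 25 >= 32, while
// (shl (shl x, 3), 4) -> (shl x, 7).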
if (N0.getOpcode() == ISD::SHL) {
auto MatchOutOfRange = [OpSizeInBits](ConstantSDNode *LHS,
ConstantSDNode *RHS) {
APInt c1 = LHS->getAPIntValue();
APInt c2 = RHS->getAPIntValue();
zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
return (c1 + c2).uge(OpSizeInBits);
};
if (ISD::matchBinaryPredicate(N1, N0.getOperand(1), MatchOutOfRange))
return DAG.getConstant(0, SDLoc(N), VT);
auto MatchInRange = [OpSizeInBits](ConstantSDNode *LHS,
ConstantSDNode *RHS) {
APInt c1 = LHS->getAPIntValue();
APInt c2 = RHS->getAPIntValue();
zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
return (c1 + c2).ult(OpSizeInBits);
};
if (ISD::matchBinaryPredicate(N1, N0.getOperand(1), MatchInRange)) {
SDLoc DL(N);
SDValue Sum = DAG.getNode(ISD::ADD, DL, ShiftVT, N1, N0.getOperand(1));
return DAG.getNode(ISD::SHL, DL, VT, N0.getOperand(0), Sum);
}
}
// fold (shl (ext (shl x, c1)), c2) -> (shl (ext x), (add c1, c2))
// For this to be valid, the second form must not preserve any of the bits
// that are shifted out by the inner shift in the first form. This means
// the outer shift size must be >= the number of bits added by the ext.
// As a corollary, we don't care what kind of ext it is.
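// e.g. zero-extending i8 to i32 adds 24 bits, so for an outer shift of 26:
//   (shl (zext i8 (shl x, 3)), 26) -> (shl (zext i8 x), 29)
// since any bits shifted out of the inner i8 shift are also shifted out of
// the combined shift by 29.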
if ((N0.getOpcode() == ISD::ZERO_EXTEND ||
N0.getOpcode() == ISD::ANY_EXTEND ||
N0.getOpcode() == ISD::SIGN_EXTEND) &&
N0.getOperand(0).getOpcode() == ISD::SHL) {
SDValue N0Op0 = N0.getOperand(0);
SDValue InnerShiftAmt = N0Op0.getOperand(1);
EVT InnerVT = N0Op0.getValueType();
uint64_t InnerBitwidth = InnerVT.getScalarSizeInBits();
auto MatchOutOfRange = [OpSizeInBits, InnerBitwidth](ConstantSDNode *LHS,
ConstantSDNode *RHS) {
APInt c1 = LHS->getAPIntValue();
APInt c2 = RHS->getAPIntValue();
zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
return c2.uge(OpSizeInBits - InnerBitwidth) &&
(c1 + c2).uge(OpSizeInBits);
};
if (ISD::matchBinaryPredicate(InnerShiftAmt, N1, MatchOutOfRange,
/*AllowUndefs*/ false,
/*AllowTypeMismatch*/ true))
return DAG.getConstant(0, SDLoc(N), VT);
auto MatchInRange = [OpSizeInBits, InnerBitwidth](ConstantSDNode *LHS,
ConstantSDNode *RHS) {
APInt c1 = LHS->getAPIntValue();
APInt c2 = RHS->getAPIntValue();
zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
return c2.uge(OpSizeInBits - InnerBitwidth) &&
(c1 + c2).ult(OpSizeInBits);
};
if (ISD::matchBinaryPredicate(InnerShiftAmt, N1, MatchInRange,
/*AllowUndefs*/ false,
/*AllowTypeMismatch*/ true)) {
SDLoc DL(N);
SDValue Ext = DAG.getNode(N0.getOpcode(), DL, VT, N0Op0.getOperand(0));
SDValue Sum = DAG.getZExtOrTrunc(InnerShiftAmt, DL, ShiftVT);
Sum = DAG.getNode(ISD::ADD, DL, ShiftVT, Sum, N1);
return DAG.getNode(ISD::SHL, DL, VT, Ext, Sum);
}
}
// fold (shl (zext (srl x, C)), C) -> (zext (shl (srl x, C), C))
// Only fold this if the inner zext has no other uses to avoid increasing
// the total number of instructions.
if (N0.getOpcode() == ISD::ZERO_EXTEND && N0.hasOneUse() &&
N0.getOperand(0).getOpcode() == ISD::SRL) {
SDValue N0Op0 = N0.getOperand(0);
SDValue InnerShiftAmt = N0Op0.getOperand(1);
auto MatchEqual = [VT](ConstantSDNode *LHS, ConstantSDNode *RHS) {
APInt c1 = LHS->getAPIntValue();
APInt c2 = RHS->getAPIntValue();
zeroExtendToMatch(c1, c2);
return c1.ult(VT.getScalarSizeInBits()) && (c1 == c2);
};
if (ISD::matchBinaryPredicate(InnerShiftAmt, N1, MatchEqual,
/*AllowUndefs*/ false,
/*AllowTypeMismatch*/ true)) {
SDLoc DL(N);
EVT InnerShiftAmtVT = N0Op0.getOperand(1).getValueType();
SDValue NewSHL = DAG.getZExtOrTrunc(N1, DL, InnerShiftAmtVT);
NewSHL = DAG.getNode(ISD::SHL, DL, N0Op0.getValueType(), N0Op0, NewSHL);
AddToWorklist(NewSHL.getNode());
return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N0), VT, NewSHL);
}
}
// fold (shl (sr[la] exact X, C1), C2) -> (shl X, (C2-C1)) if C1 <= C2
// fold (shl (sr[la] exact X, C1), C2) -> (sr[la] X, (C2-C1)) if C1 > C2
// TODO - support non-uniform vector shift amounts.
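// e.g. (shl (srl exact x, 3), 5) -> (shl x, 2), and
// (shl (srl exact x, 5), 3) -> (srl x, 2); 'exact' guarantees the inner
// shift discards only zero bits, so the two shifts can simply be netted out.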
if (N1C && (N0.getOpcode() == ISD::SRL || N0.getOpcode() == ISD::SRA) &&
N0->getFlags().hasExact()) {
if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) {
uint64_t C1 = N0C1->getZExtValue();
uint64_t C2 = N1C->getZExtValue();
SDLoc DL(N);
if (C1 <= C2)
return DAG.getNode(ISD::SHL, DL, VT, N0.getOperand(0),
DAG.getConstant(C2 - C1, DL, ShiftVT));
return DAG.getNode(N0.getOpcode(), DL, VT, N0.getOperand(0),
DAG.getConstant(C1 - C2, DL, ShiftVT));
}
}
// fold (shl (srl x, c1), c2) -> (and (shl x, (sub c2, c1)), MASK) or
// (and (srl x, (sub c1, c2)), MASK)
// Only fold this if the inner shift has no other uses -- if it does, folding
// this will increase the total number of instructions.
// TODO - drop hasOneUse requirement if c1 == c2?
// TODO - support non-uniform vector shift amounts.
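// e.g. for i32, (shl (srl x, 4), 6) -> (and (shl x, 2), 0xFFFFFFC0): the
// srl clears the low 4 bits, so the net shl-by-2 just needs a mask to
// re-clear the low 6 bits of the result.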
if (N1C && N0.getOpcode() == ISD::SRL && N0.hasOneUse() &&
TLI.shouldFoldConstantShiftPairToMask(N, Level)) {
if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) {
if (N0C1->getAPIntValue().ult(OpSizeInBits)) {
uint64_t c1 = N0C1->getZExtValue();
uint64_t c2 = N1C->getZExtValue();
APInt Mask = APInt::getHighBitsSet(OpSizeInBits, OpSizeInBits - c1);
SDValue Shift;
if (c2 > c1) {
Mask <<= c2 - c1;
SDLoc DL(N);
Shift = DAG.getNode(ISD::SHL, DL, VT, N0.getOperand(0),
DAG.getConstant(c2 - c1, DL, ShiftVT));
} else {
Mask.lshrInPlace(c1 - c2);
SDLoc DL(N);
Shift = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0),
DAG.getConstant(c1 - c2, DL, ShiftVT));
}
SDLoc DL(N0);
return DAG.getNode(ISD::AND, DL, VT, Shift,
DAG.getConstant(Mask, DL, VT));
}
}
}
// fold (shl (sra x, c1), c1) -> (and x, (shl -1, c1))
if (N0.getOpcode() == ISD::SRA && N1 == N0.getOperand(1) &&
isConstantOrConstantVector(N1, /* No Opaques */ true)) {
SDLoc DL(N);
SDValue AllBits = DAG.getAllOnesConstant(DL, VT);
SDValue HiBitsMask = DAG.getNode(ISD::SHL, DL, VT, AllBits, N1);
return DAG.getNode(ISD::AND, DL, VT, N0.getOperand(0), HiBitsMask);
}
// fold (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
// fold (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
// Variant of version done on multiply, except mul by a power of 2 is turned
// into a shift.
if ((N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR) &&
N0.getNode()->hasOneUse() &&
isConstantOrConstantVector(N1, /* No Opaques */ true) &&
isConstantOrConstantVector(N0.getOperand(1), /* No Opaques */ true) &&
TLI.isDesirableToCommuteWithShift(N, Level)) {
SDValue Shl0 = DAG.getNode(ISD::SHL, SDLoc(N0), VT, N0.getOperand(0), N1);
SDValue Shl1 = DAG.getNode(ISD::SHL, SDLoc(N1), VT, N0.getOperand(1), N1);
AddToWorklist(Shl0.getNode());
AddToWorklist(Shl1.getNode());
return DAG.getNode(N0.getOpcode(), SDLoc(N), VT, Shl0, Shl1);
}
// fold (shl (mul x, c1), c2) -> (mul x, c1 << c2)
if (N0.getOpcode() == ISD::MUL && N0.getNode()->hasOneUse() &&
isConstantOrConstantVector(N1, /* No Opaques */ true) &&
isConstantOrConstantVector(N0.getOperand(1), /* No Opaques */ true)) {
SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(N1), VT, N0.getOperand(1), N1);
if (isConstantOrConstantVector(Shl))
return DAG.getNode(ISD::MUL, SDLoc(N), VT, N0.getOperand(0), Shl);
}
if (N1C && !N1C->isOpaque())
if (SDValue NewSHL = visitShiftByConstant(N, N1C))
return NewSHL;
return SDValue();
}
SDValue DAGCombiner::visitSRA(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
if (SDValue V = DAG.simplifyShift(N0, N1))
return V;
EVT VT = N0.getValueType();
unsigned OpSizeInBits = VT.getScalarSizeInBits();
// Arithmetic shifting an all-sign-bit value is a no-op.
// fold (sra 0, x) -> 0
// fold (sra -1, x) -> -1
if (DAG.ComputeNumSignBits(N0) == OpSizeInBits)
return N0;
// fold vector ops
if (VT.isVector())
if (SDValue FoldedVOp = SimplifyVBinOp(N))
return FoldedVOp;
ConstantSDNode *N1C = isConstOrConstSplat(N1);
// fold (sra c1, c2) -> c1>>c2
// TODO - support non-uniform vector shift amounts.
ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
if (N0C && N1C && !N1C->isOpaque())
return DAG.FoldConstantArithmetic(ISD::SRA, SDLoc(N), VT, N0C, N1C);
if (SDValue NewSel = foldBinOpIntoSelect(N))
return NewSel;
// fold (sra (shl x, c1), c1) -> sext_inreg for some c1 and target supports
// sext_inreg.
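// e.g. for i32, (sra (shl x, 24), 24) -> (sext_inreg x, i8): both
// sign-extend the low 8 bits of x across the full 32 bits.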
if (N1C && N0.getOpcode() == ISD::SHL && N1 == N0.getOperand(1)) {
unsigned LowBits = OpSizeInBits - (unsigned)N1C->getZExtValue();
EVT ExtVT = EVT::getIntegerVT(*DAG.getContext(), LowBits);
if (VT.isVector())
ExtVT = EVT::getVectorVT(*DAG.getContext(),
ExtVT, VT.getVectorNumElements());
if ((!LegalOperations ||
TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, ExtVT)))
return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT,
N0.getOperand(0), DAG.getValueType(ExtVT));
}
// fold (sra (sra x, c1), c2) -> (sra x, (add c1, c2))
// clamp (add c1, c2) to max shift.
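// e.g. for i32, (sra (sra x, 10), 5) -> (sra x, 15), and
// (sra (sra x, 20), 20) -> (sra x, 31) since the sum saturates at the
// sign bit.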
if (N0.getOpcode() == ISD::SRA) {
SDLoc DL(N);
EVT ShiftVT = N1.getValueType();
EVT ShiftSVT = ShiftVT.getScalarType();
SmallVector<SDValue, 16> ShiftValues;
auto SumOfShifts = [&](ConstantSDNode *LHS, ConstantSDNode *RHS) {
APInt c1 = LHS->getAPIntValue();
APInt c2 = RHS->getAPIntValue();
zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
APInt Sum = c1 + c2;
unsigned ShiftSum =
Sum.uge(OpSizeInBits) ? (OpSizeInBits - 1) : Sum.getZExtValue();
ShiftValues.push_back(DAG.getConstant(ShiftSum, DL, ShiftSVT));
return true;
};
if (ISD::matchBinaryPredicate(N1, N0.getOperand(1), SumOfShifts)) {
SDValue ShiftValue;
if (VT.isVector())
ShiftValue = DAG.getBuildVector(ShiftVT, DL, ShiftValues);
else
ShiftValue = ShiftValues[0];
return DAG.getNode(ISD::SRA, DL, VT, N0.getOperand(0), ShiftValue);
}
}
// fold (sra (shl X, m), (sub result_size, n))
// -> (sign_extend (trunc (srl X, (sub (sub result_size, n), m)))) for
// result_size - n != m.
// If truncate is free for the target, this transform is likely to result in
// better code.
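// e.g. for i32 with m == 4 and n == 8:
//   (sra (shl X, 4), 24) -> (sign_extend (trunc:i8 (srl X, 20)))
// both select bits 20..27 of X and sign-extend from bit 27.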
if (N0.getOpcode() == ISD::SHL && N1C) {
// Get the two constants of the shifts, CN0 = m, CN = n.
const ConstantSDNode *N01C = isConstOrConstSplat(N0.getOperand(1));
if (N01C) {
LLVMContext &Ctx = *DAG.getContext();
// Determine what the truncate's result bitsize and type would be.
EVT TruncVT = EVT::getIntegerVT(Ctx, OpSizeInBits - N1C->getZExtValue());
if (VT.isVector())
TruncVT = EVT::getVectorVT(Ctx, TruncVT, VT.getVectorNumElements());
// Determine the residual right-shift amount.
int ShiftAmt = N1C->getZExtValue() - N01C->getZExtValue();
// If the shift is not a no-op (in which case this should be just a sign
// extend already), the truncated-to type is legal, sign_extend is legal
// on that type, and the truncate to that type is both legal and free,
// perform the transform.
if ((ShiftAmt > 0) &&
TLI.isOperationLegalOrCustom(ISD::SIGN_EXTEND, TruncVT) &&
TLI.isOperationLegalOrCustom(ISD::TRUNCATE, VT) &&
TLI.isTruncateFree(VT, TruncVT)) {
SDLoc DL(N);
SDValue Amt = DAG.getConstant(ShiftAmt, DL,
getShiftAmountTy(N0.getOperand(0).getValueType()));
SDValue Shift = DAG.getNode(ISD::SRL, DL, VT,
N0.getOperand(0), Amt);
SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, TruncVT,
Shift);
return DAG.getNode(ISD::SIGN_EXTEND, DL,
N->getValueType(0), Trunc);
}
}
}
// fold (sra x, (trunc (and y, c))) -> (sra x, (and (trunc y), (trunc c))).
if (N1.getOpcode() == ISD::TRUNCATE &&
N1.getOperand(0).getOpcode() == ISD::AND) {
if (SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode()))
return DAG.getNode(ISD::SRA, SDLoc(N), VT, N0, NewOp1);
}
// fold (sra (trunc (sra x, c1)), c2) -> (trunc (sra x, c1 + c2))
// fold (sra (trunc (srl x, c1)), c2) -> (trunc (sra x, c1 + c2))
// if c1 is equal to the number of bits the trunc removes
// TODO - support non-uniform vector shift amounts.
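// e.g. (sra (trunc:i32 (sra x:i64, 32)), 5) -> (trunc:i32 (sra x, 37)),
// since the inner shift amount (32) equals the 32 bits the trunc removes.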
if (N0.getOpcode() == ISD::TRUNCATE &&
(N0.getOperand(0).getOpcode() == ISD::SRL ||
N0.getOperand(0).getOpcode() == ISD::SRA) &&
N0.getOperand(0).hasOneUse() &&
N0.getOperand(0).getOperand(1).hasOneUse() && N1C) {
SDValue N0Op0 = N0.getOperand(0);
if (ConstantSDNode *LargeShift = isConstOrConstSplat(N0Op0.getOperand(1))) {
EVT LargeVT = N0Op0.getValueType();
unsigned TruncBits = LargeVT.getScalarSizeInBits() - OpSizeInBits;
if (LargeShift->getAPIntValue() == TruncBits) {
SDLoc DL(N);
SDValue Amt = DAG.getConstant(N1C->getZExtValue() + TruncBits, DL,
getShiftAmountTy(LargeVT));
SDValue SRA =
DAG.getNode(ISD::SRA, DL, LargeVT, N0Op0.getOperand(0), Amt);
return DAG.getNode(ISD::TRUNCATE, DL, VT, SRA);
}
}
}
// Simplify, based on bits shifted out of the LHS.
// TODO - support non-uniform vector shift amounts.
if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
return SDValue(N, 0);
// If the sign bit is known to be zero, switch this to a SRL.
if (DAG.SignBitIsZero(N0))
return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0, N1);
if (N1C && !N1C->isOpaque())
if (SDValue NewSRA = visitShiftByConstant(N, N1C))
return NewSRA;
return SDValue();
}
SDValue DAGCombiner::visitSRL(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
if (SDValue V = DAG.simplifyShift(N0, N1))
return V;
EVT VT = N0.getValueType();
unsigned OpSizeInBits = VT.getScalarSizeInBits();
// fold vector ops
if (VT.isVector())
if (SDValue FoldedVOp = SimplifyVBinOp(N))
return FoldedVOp;
ConstantSDNode *N1C = isConstOrConstSplat(N1);
// fold (srl c1, c2) -> c1 >>u c2
// TODO - support non-uniform vector shift amounts.
ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
if (N0C && N1C && !N1C->isOpaque())
return DAG.FoldConstantArithmetic(ISD::SRL, SDLoc(N), VT, N0C, N1C);
if (SDValue NewSel = foldBinOpIntoSelect(N))
return NewSel;
// if (srl x, c) is known to be zero, return 0
if (N1C && DAG.MaskedValueIsZero(SDValue(N, 0),
APInt::getAllOnesValue(OpSizeInBits)))
return DAG.getConstant(0, SDLoc(N), VT);
// fold (srl (srl x, c1), c2) -> 0 or (srl x, (add c1, c2))
if (N0.getOpcode() == ISD::SRL) {
auto MatchOutOfRange = [OpSizeInBits](ConstantSDNode *LHS,
ConstantSDNode *RHS) {
APInt c1 = LHS->getAPIntValue();
APInt c2 = RHS->getAPIntValue();
zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
return (c1 + c2).uge(OpSizeInBits);
};
if (ISD::matchBinaryPredicate(N1, N0.getOperand(1), MatchOutOfRange))
return DAG.getConstant(0, SDLoc(N), VT);
auto MatchInRange = [OpSizeInBits](ConstantSDNode *LHS,
ConstantSDNode *RHS) {
APInt c1 = LHS->getAPIntValue();
APInt c2 = RHS->getAPIntValue();
zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
return (c1 + c2).ult(OpSizeInBits);
};
if (ISD::matchBinaryPredicate(N1, N0.getOperand(1), MatchInRange)) {
SDLoc DL(N);
EVT ShiftVT = N1.getValueType();
SDValue Sum = DAG.getNode(ISD::ADD, DL, ShiftVT, N1, N0.getOperand(1));
return DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0), Sum);
}
}
// fold (srl (trunc (srl x, c1)), c2) -> 0 or (trunc (srl x, (add c1, c2)))
// TODO - support non-uniform vector shift amounts.
if (N1C && N0.getOpcode() == ISD::TRUNCATE &&
N0.getOperand(0).getOpcode() == ISD::SRL) {
if (auto N001C = isConstOrConstSplat(N0.getOperand(0).getOperand(1))) {
uint64_t c1 = N001C->getZExtValue();
uint64_t c2 = N1C->getZExtValue();
EVT InnerShiftVT = N0.getOperand(0).getValueType();
EVT ShiftCountVT = N0.getOperand(0).getOperand(1).getValueType();
uint64_t InnerShiftSize = InnerShiftVT.getScalarSizeInBits();
// This is only valid if OpSizeInBits + c1 equals the size of the inner shift.
if (c1 + OpSizeInBits == InnerShiftSize) {
SDLoc DL(N0);
if (c1 + c2 >= InnerShiftSize)
return DAG.getConstant(0, DL, VT);
return DAG.getNode(ISD::TRUNCATE, DL, VT,
DAG.getNode(ISD::SRL, DL, InnerShiftVT,
N0.getOperand(0).getOperand(0),
DAG.getConstant(c1 + c2, DL,
ShiftCountVT)));
}
}
}
// fold (srl (shl x, c), c) -> (and x, cst2)
// TODO - (srl (shl x, c1), c2).
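// e.g. for i32, (srl (shl x, 24), 24) -> (and x, 255): the round trip
// just clears everything above the low 8 bits.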
if (N0.getOpcode() == ISD::SHL && N0.getOperand(1) == N1 &&
isConstantOrConstantVector(N1, /* NoOpaques */ true)) {
SDLoc DL(N);
SDValue Mask =
DAG.getNode(ISD::SRL, DL, VT, DAG.getAllOnesConstant(DL, VT), N1);
AddToWorklist(Mask.getNode());
return DAG.getNode(ISD::AND, DL, VT, N0.getOperand(0), Mask);
}
// fold (srl (anyextend x), c) -> (and (anyextend (srl x, c)), mask)
// TODO - support non-uniform vector shift amounts.
if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
// Shifting in all undef bits?
EVT SmallVT = N0.getOperand(0).getValueType();
unsigned BitSize = SmallVT.getScalarSizeInBits();
if (N1C->getAPIntValue().uge(BitSize))
return DAG.getUNDEF(VT);
if (!LegalTypes || TLI.isTypeDesirableForOp(ISD::SRL, SmallVT)) {
uint64_t ShiftAmt = N1C->getZExtValue();
SDLoc DL0(N0);
SDValue SmallShift = DAG.getNode(ISD::SRL, DL0, SmallVT,
N0.getOperand(0),
DAG.getConstant(ShiftAmt, DL0,
getShiftAmountTy(SmallVT)));
AddToWorklist(SmallShift.getNode());
APInt Mask = APInt::getLowBitsSet(OpSizeInBits, OpSizeInBits - ShiftAmt);
SDLoc DL(N);
return DAG.getNode(ISD::AND, DL, VT,
DAG.getNode(ISD::ANY_EXTEND, DL, VT, SmallShift),
DAG.getConstant(Mask, DL, VT));
}
}
// fold (srl (sra X, Y), 31) -> (srl X, 31). This srl only looks at the sign
// bit, which is unmodified by sra.
if (N1C && N1C->getAPIntValue() == (OpSizeInBits - 1)) {
if (N0.getOpcode() == ISD::SRA)
return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0.getOperand(0), N1);
}
// fold (srl (ctlz x), log2(bitwidth)), which tests whether x is zero: if at
// most one bit of x can be set, the ctlz/srl pair simplifies to a cheaper
// srl/xor pair.
if (N1C && N0.getOpcode() == ISD::CTLZ &&
N1C->getAPIntValue() == Log2_32(OpSizeInBits)) {
KnownBits Known = DAG.computeKnownBits(N0.getOperand(0));
// If any of the input bits are KnownOne, then the input couldn't be all
// zeros, thus the result of the srl will always be zero.
if (Known.One.getBoolValue()) return DAG.getConstant(0, SDLoc(N0), VT);
// If all of the bits input to the ctlz node are known to be zero, then
// the result of the ctlz is "32" and the result of the shift is one.
APInt UnknownBits = ~Known.Zero;
if (UnknownBits == 0) return DAG.getConstant(1, SDLoc(N0), VT);
// Otherwise, check to see if there is exactly one bit input to the ctlz.
if (UnknownBits.isPowerOf2()) {
// Okay, we know that only the single bit specified by UnknownBits
// could be set on input to the CTLZ node. If this bit is set, the SRL
// will return 0, if it is clear, it returns 1. Change the CTLZ/SRL pair
// to an SRL/XOR pair, which is likely to simplify more.
unsigned ShAmt = UnknownBits.countTrailingZeros();
SDValue Op = N0.getOperand(0);
if (ShAmt) {
SDLoc DL(N0);
Op = DAG.getNode(ISD::SRL, DL, VT, Op,
DAG.getConstant(ShAmt, DL,
getShiftAmountTy(Op.getValueType())));
AddToWorklist(Op.getNode());
}
SDLoc DL(N);
return DAG.getNode(ISD::XOR, DL, VT,
Op, DAG.getConstant(1, DL, VT));
}
}
// fold (srl x, (trunc (and y, c))) -> (srl x, (and (trunc y), (trunc c))).
if (N1.getOpcode() == ISD::TRUNCATE &&
N1.getOperand(0).getOpcode() == ISD::AND) {
if (SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode()))
return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0, NewOp1);
}
// fold operands of srl based on knowledge that the low bits are not
// demanded.
// TODO - support non-uniform vector shift amounts.
if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
return SDValue(N, 0);
if (N1C && !N1C->isOpaque())
if (SDValue NewSRL = visitShiftByConstant(N, N1C))
return NewSRL;
// Attempt to convert a srl of a load into a narrower zero-extending load.
if (SDValue NarrowLoad = ReduceLoadWidth(N))
return NarrowLoad;
// Here is a common situation. We want to optimize:
//
// %a = ...
// %b = and i32 %a, 2
// %c = srl i32 %b, 1
// brcond i32 %c ...
//
// into
//
// %a = ...
// %b = and %a, 2
// %c = setcc eq %b, 0
// brcond %c ...
//
// However, after the source operand of the SRL is optimized into an AND, the
// SRL itself may not be optimized further. Look for it and add the BRCOND
// into the worklist.
if (N->hasOneUse()) {
SDNode *Use = *N->use_begin();
if (Use->getOpcode() == ISD::BRCOND)
AddToWorklist(Use);
else if (Use->getOpcode() == ISD::TRUNCATE && Use->hasOneUse()) {
// Also look past the truncate.
Use = *Use->use_begin();
if (Use->getOpcode() == ISD::BRCOND)
AddToWorklist(Use);
}
}
return SDValue();
}
SDValue DAGCombiner::visitFunnelShift(SDNode *N) {
EVT VT = N->getValueType(0);
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
SDValue N2 = N->getOperand(2);
bool IsFSHL = N->getOpcode() == ISD::FSHL;
unsigned BitWidth = VT.getScalarSizeInBits();
// fold (fshl N0, N1, 0) -> N0
// fold (fshr N0, N1, 0) -> N1
if (isPowerOf2_32(BitWidth))
if (DAG.MaskedValueIsZero(
N2, APInt(N2.getScalarValueSizeInBits(), BitWidth - 1)))
return IsFSHL ? N0 : N1;
auto IsUndefOrZero = [](SDValue V) {
return V.isUndef() || isNullOrNullSplat(V, /*AllowUndefs*/ true);
};
// TODO - support non-uniform vector shift amounts.
if (ConstantSDNode *Cst = isConstOrConstSplat(N2)) {
EVT ShAmtTy = N2.getValueType();
// fold (fsh* N0, N1, c) -> (fsh* N0, N1, c % BitWidth)
if (Cst->getAPIntValue().uge(BitWidth)) {
uint64_t RotAmt = Cst->getAPIntValue().urem(BitWidth);
return DAG.getNode(N->getOpcode(), SDLoc(N), VT, N0, N1,
DAG.getConstant(RotAmt, SDLoc(N), ShAmtTy));
}
unsigned ShAmt = Cst->getZExtValue();
if (ShAmt == 0)
return IsFSHL ? N0 : N1;
// fold fshl(undef_or_zero, N1, C) -> lshr(N1, BW-C)
// fold fshr(undef_or_zero, N1, C) -> lshr(N1, C)
// fold fshl(N0, undef_or_zero, C) -> shl(N0, C)
// fold fshr(N0, undef_or_zero, C) -> shl(N0, BW-C)
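// e.g. for i8, fshl(0, x, 3) == ((0 << 3) | (x >> 5)) -> (srl x, 5),
// and fshr(x, 0, 3) == ((x << 5) | (0 >> 3)) -> (shl x, 5).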
if (IsUndefOrZero(N0))
return DAG.getNode(ISD::SRL, SDLoc(N), VT, N1,
DAG.getConstant(IsFSHL ? BitWidth - ShAmt : ShAmt,
SDLoc(N), ShAmtTy));
if (IsUndefOrZero(N1))
return DAG.getNode(ISD::SHL, SDLoc(N), VT, N0,
DAG.getConstant(IsFSHL ? ShAmt : BitWidth - ShAmt,
SDLoc(N), ShAmtTy));
}
// fold fshr(undef_or_zero, N1, N2) -> lshr(N1, N2)
// fold fshl(N0, undef_or_zero, N2) -> shl(N0, N2)
// iff we know the shift amount is in range.
// TODO: when is it worth doing SUB(BW, N2) as well?
if (isPowerOf2_32(BitWidth)) {
APInt ModuloBits(N2.getScalarValueSizeInBits(), BitWidth - 1);
if (IsUndefOrZero(N0) && !IsFSHL && DAG.MaskedValueIsZero(N2, ~ModuloBits))
return DAG.getNode(ISD::SRL, SDLoc(N), VT, N1, N2);
if (IsUndefOrZero(N1) && IsFSHL && DAG.MaskedValueIsZero(N2, ~ModuloBits))
return DAG.getNode(ISD::SHL, SDLoc(N), VT, N0, N2);
}
// fold (fshl N0, N0, N2) -> (rotl N0, N2)
// fold (fshr N0, N0, N2) -> (rotr N0, N2)
// TODO: Investigate flipping this rotate if only one is legal; if the funnel
// shift is legal as well, we might be better off avoiding the non-constant
// (BW - N2).
unsigned RotOpc = IsFSHL ? ISD::ROTL : ISD::ROTR;
if (N0 == N1 && hasOperation(RotOpc, VT))
return DAG.getNode(RotOpc, SDLoc(N), VT, N0, N2);
// Simplify, based on bits shifted out of N0/N1.
if (SimplifyDemandedBits(SDValue(N, 0)))
return SDValue(N, 0);
return SDValue();
}
SDValue DAGCombiner::visitABS(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
// fold (abs c1) -> c2
if (DAG.isConstantIntBuildVectorOrConstantInt(N0))
return DAG.getNode(ISD::ABS, SDLoc(N), VT, N0);
// fold (abs (abs x)) -> (abs x)
if (N0.getOpcode() == ISD::ABS)
return N0;
// fold (abs x) -> x iff not-negative
if (DAG.SignBitIsZero(N0))
return N0;
return SDValue();
}
SDValue DAGCombiner::visitBSWAP(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
// fold (bswap c1) -> c2
if (DAG.isConstantIntBuildVectorOrConstantInt(N0))
return DAG.getNode(ISD::BSWAP, SDLoc(N), VT, N0);
// fold (bswap (bswap x)) -> x
if (N0.getOpcode() == ISD::BSWAP)
return N0->getOperand(0);
return SDValue();
}
SDValue DAGCombiner::visitBITREVERSE(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
// fold (bitreverse c1) -> c2
if (DAG.isConstantIntBuildVectorOrConstantInt(N0))
return DAG.getNode(ISD::BITREVERSE, SDLoc(N), VT, N0);
// fold (bitreverse (bitreverse x)) -> x
if (N0.getOpcode() == ISD::BITREVERSE)
return N0.getOperand(0);
return SDValue();
}
SDValue DAGCombiner::visitCTLZ(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
// fold (ctlz c1) -> c2
if (DAG.isConstantIntBuildVectorOrConstantInt(N0))
return DAG.getNode(ISD::CTLZ, SDLoc(N), VT, N0);
// If the value is known never to be zero, switch to the undef version.
if (!LegalOperations || TLI.isOperationLegal(ISD::CTLZ_ZERO_UNDEF, VT)) {
if (DAG.isKnownNeverZero(N0))
return DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SDLoc(N), VT, N0);
}
return SDValue();
}
SDValue DAGCombiner::visitCTLZ_ZERO_UNDEF(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
// fold (ctlz_zero_undef c1) -> c2
if (DAG.isConstantIntBuildVectorOrConstantInt(N0))
return DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SDLoc(N), VT, N0);
return SDValue();
}
SDValue DAGCombiner::visitCTTZ(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
// fold (cttz c1) -> c2
if (DAG.isConstantIntBuildVectorOrConstantInt(N0))
return DAG.getNode(ISD::CTTZ, SDLoc(N), VT, N0);
// If the value is known never to be zero, switch to the undef version.
if (!LegalOperations || TLI.isOperationLegal(ISD::CTTZ_ZERO_UNDEF, VT)) {
if (DAG.isKnownNeverZero(N0))
return DAG.getNode(ISD::CTTZ_ZERO_UNDEF, SDLoc(N), VT, N0);
}
return SDValue();
}
SDValue DAGCombiner::visitCTTZ_ZERO_UNDEF(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
// fold (cttz_zero_undef c1) -> c2
if (DAG.isConstantIntBuildVectorOrConstantInt(N0))
return DAG.getNode(ISD::CTTZ_ZERO_UNDEF, SDLoc(N), VT, N0);
return SDValue();
}
SDValue DAGCombiner::visitCTPOP(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
// fold (ctpop c1) -> c2
if (DAG.isConstantIntBuildVectorOrConstantInt(N0))
return DAG.getNode(ISD::CTPOP, SDLoc(N), VT, N0);
return SDValue();
}
// FIXME: This should be checking for no signed zeros on individual operands, as
// well as no nans.
static bool isLegalToCombineMinNumMaxNum(SelectionDAG &DAG, SDValue LHS,
SDValue RHS,
const TargetLowering &TLI) {
const TargetOptions &Options = DAG.getTarget().Options;
EVT VT = LHS.getValueType();
return Options.NoSignedZerosFPMath && VT.isFloatingPoint() &&
TLI.isProfitableToCombineMinNumMaxNum(VT) &&
DAG.isKnownNeverNaN(LHS) && DAG.isKnownNeverNaN(RHS);
}
/// Generate Min/Max node
static SDValue combineMinNumMaxNum(const SDLoc &DL, EVT VT, SDValue LHS,
SDValue RHS, SDValue True, SDValue False,
ISD::CondCode CC, const TargetLowering &TLI,
SelectionDAG &DAG) {
if (!(LHS == True && RHS == False) && !(LHS == False && RHS == True))
return SDValue();
EVT TransformVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
switch (CC) {
case ISD::SETOLT:
case ISD::SETOLE:
case ISD::SETLT:
case ISD::SETLE:
case ISD::SETULT:
case ISD::SETULE: {
// Since the operands are known never to be NaN by the time we get here,
// either fminnum or fminnum_ieee is OK. Try the IEEE version first, since
// fminnum is expanded in terms of it.
unsigned IEEEOpcode = (LHS == True) ? ISD::FMINNUM_IEEE : ISD::FMAXNUM_IEEE;
if (TLI.isOperationLegalOrCustom(IEEEOpcode, VT))
return DAG.getNode(IEEEOpcode, DL, VT, LHS, RHS);
unsigned Opcode = (LHS == True) ? ISD::FMINNUM : ISD::FMAXNUM;
if (TLI.isOperationLegalOrCustom(Opcode, TransformVT))
return DAG.getNode(Opcode, DL, VT, LHS, RHS);
return SDValue();
}
case ISD::SETOGT:
case ISD::SETOGE:
case ISD::SETGT:
case ISD::SETGE:
case ISD::SETUGT:
case ISD::SETUGE: {
unsigned IEEEOpcode = (LHS == True) ? ISD::FMAXNUM_IEEE : ISD::FMINNUM_IEEE;
if (TLI.isOperationLegalOrCustom(IEEEOpcode, VT))
return DAG.getNode(IEEEOpcode, DL, VT, LHS, RHS);
unsigned Opcode = (LHS == True) ? ISD::FMAXNUM : ISD::FMINNUM;
if (TLI.isOperationLegalOrCustom(Opcode, TransformVT))
return DAG.getNode(Opcode, DL, VT, LHS, RHS);
return SDValue();
}
default:
return SDValue();
}
}
SDValue DAGCombiner::foldSelectOfConstants(SDNode *N) {
SDValue Cond = N->getOperand(0);
SDValue N1 = N->getOperand(1);
SDValue N2 = N->getOperand(2);
EVT VT = N->getValueType(0);
EVT CondVT = Cond.getValueType();
SDLoc DL(N);
if (!VT.isInteger())
return SDValue();
auto *C1 = dyn_cast<ConstantSDNode>(N1);
auto *C2 = dyn_cast<ConstantSDNode>(N2);
if (!C1 || !C2)
return SDValue();
// Only do this before legalization to avoid conflicting with target-specific
// transforms in the other direction (create a select from a zext/sext). There
// is also a target-independent combine here in DAGCombiner in the other
// direction for (select Cond, -1, 0) when the condition is not i1.
if (CondVT == MVT::i1 && !LegalOperations) {
if (C1->isNullValue() && C2->isOne()) {
// select Cond, 0, 1 --> zext (!Cond)
SDValue NotCond = DAG.getNOT(DL, Cond, MVT::i1);
if (VT != MVT::i1)
NotCond = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, NotCond);
return NotCond;
}
if (C1->isNullValue() && C2->isAllOnesValue()) {
// select Cond, 0, -1 --> sext (!Cond)
SDValue NotCond = DAG.getNOT(DL, Cond, MVT::i1);
if (VT != MVT::i1)
NotCond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, NotCond);
return NotCond;
}
if (C1->isOne() && C2->isNullValue()) {
// select Cond, 1, 0 --> zext (Cond)
if (VT != MVT::i1)
Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Cond);
return Cond;
}
if (C1->isAllOnesValue() && C2->isNullValue()) {
// select Cond, -1, 0 --> sext (Cond)
if (VT != MVT::i1)
Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Cond);
return Cond;
}
// For any constants that differ by 1, we can transform the select into an
// extend and add. Use a target hook because some targets may prefer to
// transform in the other direction.
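// e.g. (select Cond, 5, 4) -> (add (zext Cond), 4), and
// (select Cond, 4, 5) -> (add (sext Cond), 5).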
if (TLI.convertSelectOfConstantsToMath(VT)) {
if (C1->getAPIntValue() - 1 == C2->getAPIntValue()) {
// select Cond, C1, C1-1 --> add (zext Cond), C1-1
if (VT != MVT::i1)
Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Cond);
return DAG.getNode(ISD::ADD, DL, VT, Cond, N2);
}
if (C1->getAPIntValue() + 1 == C2->getAPIntValue()) {
// select Cond, C1, C1+1 --> add (sext Cond), C1+1
if (VT != MVT::i1)
Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Cond);
return DAG.getNode(ISD::ADD, DL, VT, Cond, N2);
}
}
return SDValue();
}
// fold (select Cond, 0, 1) -> (xor Cond, 1)
// We can't do this reliably if integer based booleans have different contents
// to floating point based booleans. This is because we can't tell whether we
// have an integer-based boolean or a floating-point-based boolean unless we
// can find the SETCC that produced it and inspect its operands. This is
// fairly easy if C is the SETCC node, but it can potentially be
// undiscoverable (or not reasonably discoverable). For example, it could be
// in another basic block or it could require searching a complicated
// expression.
if (CondVT.isInteger() &&
TLI.getBooleanContents(/*isVec*/false, /*isFloat*/true) ==
TargetLowering::ZeroOrOneBooleanContent &&
TLI.getBooleanContents(/*isVec*/false, /*isFloat*/false) ==
TargetLowering::ZeroOrOneBooleanContent &&
C1->isNullValue() && C2->isOne()) {
SDValue NotCond =
DAG.getNode(ISD::XOR, DL, CondVT, Cond, DAG.getConstant(1, DL, CondVT));
if (VT.bitsEq(CondVT))
return NotCond;
return DAG.getZExtOrTrunc(NotCond, DL, VT);
}
return SDValue();
}
SDValue DAGCombiner::visitSELECT(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
SDValue N2 = N->getOperand(2);
EVT VT = N->getValueType(0);
EVT VT0 = N0.getValueType();
SDLoc DL(N);
SDNodeFlags Flags = N->getFlags();
if (SDValue V = DAG.simplifySelect(N0, N1, N2))
return V;
// fold (select X, X, Y) -> (or X, Y)
// fold (select X, 1, Y) -> (or X, Y)
if (VT == VT0 && VT == MVT::i1 && (N0 == N1 || isOneConstant(N1)))
return DAG.getNode(ISD::OR, DL, VT, N0, N2);
if (SDValue V = foldSelectOfConstants(N))
return V;
// fold (select C, 0, X) -> (and (not C), X)
if (VT == VT0 && VT == MVT::i1 && isNullConstant(N1)) {
SDValue NOTNode = DAG.getNOT(SDLoc(N0), N0, VT);
AddToWorklist(NOTNode.getNode());
return DAG.getNode(ISD::AND, DL, VT, NOTNode, N2);
}
// fold (select C, X, 1) -> (or (not C), X)
if (VT == VT0 && VT == MVT::i1 && isOneConstant(N2)) {
SDValue NOTNode = DAG.getNOT(SDLoc(N0), N0, VT);
AddToWorklist(NOTNode.getNode());
return DAG.getNode(ISD::OR, DL, VT, NOTNode, N1);
}
// fold (select X, Y, X) -> (and X, Y)
// fold (select X, Y, 0) -> (and X, Y)
if (VT == VT0 && VT == MVT::i1 && (N0 == N2 || isNullConstant(N2)))
return DAG.getNode(ISD::AND, DL, VT, N0, N1);
// If we can fold this based on the true/false value, do so.
if (SimplifySelectOps(N, N1, N2))
return SDValue(N, 0); // Don't revisit N.
if (VT0 == MVT::i1) {
// The code in this block deals with the following 2 equivalences:
// select(C0|C1, x, y) <=> select(C0, x, select(C1, x, y))
// select(C0&C1, x, y) <=> select(C0, select(C1, x, y), y)
// The target can specify its preferred form with the
// shouldNormalizeToSelectSequence() callback. However, we always transform
// to the right-hand form if the inner select already exists in the DAG, and
// we always transform to the left-hand form if we know that we can further
// optimize the combination of the conditions.
bool normalizeToSequence =
TLI.shouldNormalizeToSelectSequence(*DAG.getContext(), VT);
// select (and Cond0, Cond1), X, Y
// -> select Cond0, (select Cond1, X, Y), Y
if (N0->getOpcode() == ISD::AND && N0->hasOneUse()) {
SDValue Cond0 = N0->getOperand(0);
SDValue Cond1 = N0->getOperand(1);
SDValue InnerSelect =
DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Cond1, N1, N2, Flags);
if (normalizeToSequence || !InnerSelect.use_empty())
return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Cond0,
InnerSelect, N2, Flags);
// Cleanup on failure.
if (InnerSelect.use_empty())
recursivelyDeleteUnusedNodes(InnerSelect.getNode());
}
// select (or Cond0, Cond1), X, Y -> select Cond0, X, (select Cond1, X, Y)
if (N0->getOpcode() == ISD::OR && N0->hasOneUse()) {
SDValue Cond0 = N0->getOperand(0);
SDValue Cond1 = N0->getOperand(1);
SDValue InnerSelect = DAG.getNode(ISD::SELECT, DL, N1.getValueType(),
Cond1, N1, N2, Flags);
if (normalizeToSequence || !InnerSelect.use_empty())
return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Cond0, N1,
InnerSelect, Flags);
// Cleanup on failure.
if (InnerSelect.use_empty())
recursivelyDeleteUnusedNodes(InnerSelect.getNode());
}
// select Cond0, (select Cond1, X, Y), Y -> select (and Cond0, Cond1), X, Y
if (N1->getOpcode() == ISD::SELECT && N1->hasOneUse()) {
SDValue N1_0 = N1->getOperand(0);
SDValue N1_1 = N1->getOperand(1);
SDValue N1_2 = N1->getOperand(2);
if (N1_2 == N2 && N0.getValueType() == N1_0.getValueType()) {
// Create the actual and node if we can generate good code for it.
if (!normalizeToSequence) {
SDValue And = DAG.getNode(ISD::AND, DL, N0.getValueType(), N0, N1_0);
return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), And, N1_1,
N2, Flags);
}
// Otherwise see if we can optimize the "and" to a better pattern.
if (SDValue Combined = visitANDLike(N0, N1_0, N)) {
return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Combined, N1_1,
N2, Flags);
}
}
}
// select Cond0, X, (select Cond1, X, Y) -> select (or Cond0, Cond1), X, Y
if (N2->getOpcode() == ISD::SELECT && N2->hasOneUse()) {
SDValue N2_0 = N2->getOperand(0);
SDValue N2_1 = N2->getOperand(1);
SDValue N2_2 = N2->getOperand(2);
if (N2_1 == N1 && N0.getValueType() == N2_0.getValueType()) {
// Create the actual or node if we can generate good code for it.
if (!normalizeToSequence) {
SDValue Or = DAG.getNode(ISD::OR, DL, N0.getValueType(), N0, N2_0);
return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Or, N1,
N2_2, Flags);
}
// Otherwise see if we can optimize to a better pattern.
if (SDValue Combined = visitORLike(N0, N2_0, N))
return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Combined, N1,
N2_2, Flags);
}
}
}
// select (not Cond), N1, N2 -> select Cond, N2, N1
if (SDValue F = extractBooleanFlip(N0, DAG, TLI, false)) {
SDValue SelectOp = DAG.getSelect(DL, VT, F, N2, N1);
SelectOp->setFlags(Flags);
return SelectOp;
}
// Fold selects based on a setcc into other things, such as min/max/abs.
if (N0.getOpcode() == ISD::SETCC) {
SDValue Cond0 = N0.getOperand(0), Cond1 = N0.getOperand(1);
ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
// select (fcmp lt x, y), x, y -> fminnum x, y
// select (fcmp gt x, y), x, y -> fmaxnum x, y
//
// This is OK if we don't care what happens if either operand is a NaN.
if (N0.hasOneUse() && isLegalToCombineMinNumMaxNum(DAG, N1, N2, TLI))
if (SDValue FMinMax = combineMinNumMaxNum(DL, VT, Cond0, Cond1, N1, N2,
CC, TLI, DAG))
return FMinMax;
// Use 'unsigned add with overflow' to optimize an unsigned saturating add.
// This is conservatively limited to pre-legal-operations to give targets
// a chance to reverse the transform if they want to do that. Also, it is
// unlikely that the pattern would be formed late, so it's probably not
// worth going through the other checks.
if (!LegalOperations && TLI.isOperationLegalOrCustom(ISD::UADDO, VT) &&
CC == ISD::SETUGT && N0.hasOneUse() && isAllOnesConstant(N1) &&
N2.getOpcode() == ISD::ADD && Cond0 == N2.getOperand(0)) {
auto *C = dyn_cast<ConstantSDNode>(N2.getOperand(1));
auto *NotC = dyn_cast<ConstantSDNode>(Cond1);
if (C && NotC && C->getAPIntValue() == ~NotC->getAPIntValue()) {
// select (setcc Cond0, ~C, ugt), -1, (add Cond0, C) -->
// uaddo Cond0, C; select uaddo.1, -1, uaddo.0
//
// The IR equivalent of this transform would have this form:
// %a = add %x, C
// %c = icmp ugt %x, ~C
// %r = select %c, -1, %a
// =>
// %u = call {iN,i1} llvm.uadd.with.overflow(%x, C)
// %u0 = extractvalue %u, 0
// %u1 = extractvalue %u, 1
// %r = select %u1, -1, %u0
SDVTList VTs = DAG.getVTList(VT, VT0);
SDValue UAO = DAG.getNode(ISD::UADDO, DL, VTs, Cond0, N2.getOperand(1));
return DAG.getSelect(DL, VT, UAO.getValue(1), N1, UAO.getValue(0));
}
}
if (TLI.isOperationLegal(ISD::SELECT_CC, VT) ||
(!LegalOperations &&
TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT))) {
// Any flags available in a select/setcc fold will be on the setcc as they
// migrated from fcmp.
Flags = N0.getNode()->getFlags();
SDValue SelectNode = DAG.getNode(ISD::SELECT_CC, DL, VT, Cond0, Cond1, N1,
N2, N0.getOperand(2));
SelectNode->setFlags(Flags);
return SelectNode;
}
return SimplifySelect(DL, N0, N1, N2);
}
return SDValue();
}
static
std::pair<SDValue, SDValue> SplitVSETCC(const SDNode *N, SelectionDAG &DAG) {
SDLoc DL(N);
EVT LoVT, HiVT;
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
// Split the inputs.
SDValue Lo, Hi, LL, LH, RL, RH;
std::tie(LL, LH) = DAG.SplitVectorOperand(N, 0);
std::tie(RL, RH) = DAG.SplitVectorOperand(N, 1);
Lo = DAG.getNode(N->getOpcode(), DL, LoVT, LL, RL, N->getOperand(2));
Hi = DAG.getNode(N->getOpcode(), DL, HiVT, LH, RH, N->getOperand(2));
return std::make_pair(Lo, Hi);
}
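// A sketch with assumed types (illustrative): splitting
//   v8i1 = setcc v8i32 a, v8i32 b, cc
// produces v4i1 = setcc v4i32 a.lo, b.lo, cc for the low half and
// v4i1 = setcc v4i32 a.hi, b.hi, cc for the high half of the operands.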
// This function assumes all the vselect's arguments are CONCAT_VECTOR
// nodes and that the condition is a BV of ConstantSDNodes (or undefs).
static SDValue ConvertSelectToConcatVector(SDNode *N, SelectionDAG &DAG) {
SDLoc DL(N);
SDValue Cond = N->getOperand(0);
SDValue LHS = N->getOperand(1);
SDValue RHS = N->getOperand(2);
EVT VT = N->getValueType(0);
int NumElems = VT.getVectorNumElements();
assert(LHS.getOpcode() == ISD::CONCAT_VECTORS &&
RHS.getOpcode() == ISD::CONCAT_VECTORS &&
Cond.getOpcode() == ISD::BUILD_VECTOR);
// CONCAT_VECTOR can take an arbitrary number of arguments. We only care about
// binary ones here.
if (LHS->getNumOperands() != 2 || RHS->getNumOperands() != 2)
return SDValue();
// We're sure we have an even number of elements due to the
// concat_vectors we have as arguments to vselect.
// Skip BV elements until we find one that's not UNDEF; after that, keep
// looping until we get to half the length of the BV and check that all
// the non-undef elements are the same.
ConstantSDNode *BottomHalf = nullptr;
for (int i = 0; i < NumElems / 2; ++i) {
if (Cond->getOperand(i)->isUndef())
continue;
if (BottomHalf == nullptr)
BottomHalf = cast<ConstantSDNode>(Cond.getOperand(i));
else if (Cond->getOperand(i).getNode() != BottomHalf)
return SDValue();
}
// Do the same for the second half of the BuildVector
ConstantSDNode *TopHalf = nullptr;
for (int i = NumElems / 2; i < NumElems; ++i) {
if (Cond->getOperand(i)->isUndef())
continue;
if (TopHalf == nullptr)
TopHalf = cast<ConstantSDNode>(Cond.getOperand(i));
else if (Cond->getOperand(i).getNode() != TopHalf)
return SDValue();
}
assert(TopHalf && BottomHalf &&
"One half of the selector was all UNDEFs and the other was all the "
"same value. This should have been addressed before this function.");
return DAG.getNode(
ISD::CONCAT_VECTORS, DL, VT,
BottomHalf->isNullValue() ? RHS->getOperand(0) : LHS->getOperand(0),
TopHalf->isNullValue() ? RHS->getOperand(1) : LHS->getOperand(1));
}
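// Illustrative example (not from the source): for a constant condition,
//   vselect <0,0,-1,-1>, (concat_vectors A, B), (concat_vectors C, D)
// has a bottom half that uniformly picks the false operand and a top half
// that uniformly picks the true operand, so it folds to
//   (concat_vectors C, B)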
SDValue DAGCombiner::visitMSCATTER(SDNode *N) {
MaskedScatterSDNode *MSC = cast<MaskedScatterSDNode>(N);
SDValue Mask = MSC->getMask();
SDValue Data = MSC->getValue();
SDValue Chain = MSC->getChain();
SDLoc DL(N);
// Zap scatters with a zero mask.
if (ISD::isBuildVectorAllZeros(Mask.getNode()))
return Chain;
if (Level >= AfterLegalizeTypes)
return SDValue();
// If the MSCATTER data type requires splitting and the mask is provided by a
// SETCC, then split both nodes and their operands before legalization. This
// prevents the type legalizer from unrolling SETCC into scalar comparisons
// and enables future optimizations (e.g. min/max pattern matching on X86).
if (Mask.getOpcode() != ISD::SETCC)
return SDValue();
// Check if any splitting is required.
if (TLI.getTypeAction(*DAG.getContext(), Data.getValueType()) !=
TargetLowering::TypeSplitVector)
return SDValue();
SDValue MaskLo, MaskHi;
std::tie(MaskLo, MaskHi) = SplitVSETCC(Mask.getNode(), DAG);
EVT LoVT, HiVT;
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(MSC->getValueType(0));
EVT MemoryVT = MSC->getMemoryVT();
unsigned Alignment = MSC->getOriginalAlignment();
EVT LoMemVT, HiMemVT;
std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
SDValue DataLo, DataHi;
std::tie(DataLo, DataHi) = DAG.SplitVector(Data, DL);
SDValue Scale = MSC->getScale();
SDValue BasePtr = MSC->getBasePtr();
SDValue IndexLo, IndexHi;
std::tie(IndexLo, IndexHi) = DAG.SplitVector(MSC->getIndex(), DL);
MachineMemOperand *MMO = DAG.getMachineFunction().
getMachineMemOperand(MSC->getPointerInfo(),
MachineMemOperand::MOStore, LoMemVT.getStoreSize(),
Alignment, MSC->getAAInfo(), MSC->getRanges());
SDValue OpsLo[] = { Chain, DataLo, MaskLo, BasePtr, IndexLo, Scale };
SDValue Lo = DAG.getMaskedScatter(DAG.getVTList(MVT::Other),
DataLo.getValueType(), DL, OpsLo, MMO);
// The order of the Scatter operations after the split is well defined: the
// "Hi" part comes after the "Lo" part, so the two operations are chained
// one after the other.
SDValue OpsHi[] = { Lo, DataHi, MaskHi, BasePtr, IndexHi, Scale };
return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), DataHi.getValueType(),
DL, OpsHi, MMO);
}
SDValue DAGCombiner::visitMSTORE(SDNode *N) {
MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
SDValue Mask = MST->getMask();
SDValue Data = MST->getValue();
SDValue Chain = MST->getChain();
EVT VT = Data.getValueType();
SDLoc DL(N);
// Zap masked stores with a zero mask.
if (ISD::isBuildVectorAllZeros(Mask.getNode()))
return Chain;
if (Level >= AfterLegalizeTypes)
return SDValue();
// If the MSTORE data type requires splitting and the mask is provided by a
// SETCC, then split both nodes and their operands before legalization. This
// prevents the type legalizer from unrolling SETCC into scalar comparisons
// and enables future optimizations (e.g. min/max pattern matching on X86).
if (Mask.getOpcode() == ISD::SETCC) {
// Check if any splitting is required.
if (TLI.getTypeAction(*DAG.getContext(), VT) !=
TargetLowering::TypeSplitVector)
return SDValue();
SDValue MaskLo, MaskHi, Lo, Hi;
std::tie(MaskLo, MaskHi) = SplitVSETCC(Mask.getNode(), DAG);
SDValue Ptr = MST->getBasePtr();
EVT MemoryVT = MST->getMemoryVT();
unsigned Alignment = MST->getOriginalAlignment();
EVT LoMemVT, HiMemVT;
std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
SDValue DataLo, DataHi;
std::tie(DataLo, DataHi) = DAG.SplitVector(Data, DL);
MachineMemOperand *MMO = DAG.getMachineFunction().
getMachineMemOperand(MST->getPointerInfo(),
MachineMemOperand::MOStore, LoMemVT.getStoreSize(),
Alignment, MST->getAAInfo(), MST->getRanges());
Lo = DAG.getMaskedStore(Chain, DL, DataLo, Ptr, MaskLo, LoMemVT, MMO,
MST->isTruncatingStore(),
MST->isCompressingStore());
Ptr = TLI.IncrementMemoryAddress(Ptr, MaskLo, DL, LoMemVT, DAG,
MST->isCompressingStore());
unsigned HiOffset = LoMemVT.getStoreSize();
MMO = DAG.getMachineFunction().getMachineMemOperand(
MST->getPointerInfo().getWithOffset(HiOffset),
MachineMemOperand::MOStore, HiMemVT.getStoreSize(), Alignment,
MST->getAAInfo(), MST->getRanges());
Hi = DAG.getMaskedStore(Chain, DL, DataHi, Ptr, MaskHi, HiMemVT, MMO,
MST->isTruncatingStore(),
MST->isCompressingStore());
AddToWorklist(Lo.getNode());
AddToWorklist(Hi.getNode());
return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);
}
return SDValue();
}
SDValue DAGCombiner::visitMGATHER(SDNode *N) {
MaskedGatherSDNode *MGT = cast<MaskedGatherSDNode>(N);
SDValue Mask = MGT->getMask();
SDLoc DL(N);
// Zap gathers with a zero mask.
if (ISD::isBuildVectorAllZeros(Mask.getNode()))
return CombineTo(N, MGT->getPassThru(), MGT->getChain());
if (Level >= AfterLegalizeTypes)
return SDValue();
// If the MGATHER result requires splitting and the mask is provided by a
// SETCC, then split both nodes and their operands before legalization. This
// prevents the type legalizer from unrolling SETCC into scalar comparisons
// and enables future optimizations (e.g. min/max pattern matching on X86).
if (Mask.getOpcode() != ISD::SETCC)
return SDValue();
EVT VT = N->getValueType(0);
// Check if any splitting is required.
if (TLI.getTypeAction(*DAG.getContext(), VT) !=
TargetLowering::TypeSplitVector)
return SDValue();
SDValue MaskLo, MaskHi, Lo, Hi;
std::tie(MaskLo, MaskHi) = SplitVSETCC(Mask.getNode(), DAG);
SDValue PassThru = MGT->getPassThru();
SDValue PassThruLo, PassThruHi;
std::tie(PassThruLo, PassThruHi) = DAG.SplitVector(PassThru, DL);
EVT LoVT, HiVT;
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
SDValue Chain = MGT->getChain();
EVT MemoryVT = MGT->getMemoryVT();
unsigned Alignment = MGT->getOriginalAlignment();
EVT LoMemVT, HiMemVT;
std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
SDValue Scale = MGT->getScale();
SDValue BasePtr = MGT->getBasePtr();
SDValue Index = MGT->getIndex();
SDValue IndexLo, IndexHi;
std::tie(IndexLo, IndexHi) = DAG.SplitVector(Index, DL);
MachineMemOperand *MMO = DAG.getMachineFunction().
getMachineMemOperand(MGT->getPointerInfo(),
MachineMemOperand::MOLoad, LoMemVT.getStoreSize(),
Alignment, MGT->getAAInfo(), MGT->getRanges());
SDValue OpsLo[] = { Chain, PassThruLo, MaskLo, BasePtr, IndexLo, Scale };
Lo = DAG.getMaskedGather(DAG.getVTList(LoVT, MVT::Other), LoVT, DL, OpsLo,
MMO);
SDValue OpsHi[] = { Chain, PassThruHi, MaskHi, BasePtr, IndexHi, Scale };
Hi = DAG.getMaskedGather(DAG.getVTList(HiVT, MVT::Other), HiVT, DL, OpsHi,
MMO);
AddToWorklist(Lo.getNode());
AddToWorklist(Hi.getNode());
// Build a factor node to remember that this load is independent of the
// other one.
Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo.getValue(1),
Hi.getValue(1));
// We legalized the chain result; switch anything that used the old chain
// to use the new one.
DAG.ReplaceAllUsesOfValueWith(SDValue(MGT, 1), Chain);
SDValue GatherRes = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
SDValue RetOps[] = { GatherRes, Chain };
return DAG.getMergeValues(RetOps, DL);
}
SDValue DAGCombiner::visitMLOAD(SDNode *N) {
MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N);
SDValue Mask = MLD->getMask();
SDLoc DL(N);
// Zap masked loads with a zero mask.
if (ISD::isBuildVectorAllZeros(Mask.getNode()))
return CombineTo(N, MLD->getPassThru(), MLD->getChain());
if (Level >= AfterLegalizeTypes)
return SDValue();
// If the MLOAD result requires splitting and the mask is provided by a
// SETCC, then split both nodes and their operands before legalization. This
// prevents the type legalizer from unrolling SETCC into scalar comparisons
// and enables future optimizations (e.g. min/max pattern matching on X86).
if (Mask.getOpcode() == ISD::SETCC) {
EVT VT = N->getValueType(0);
// Check if any splitting is required.
if (TLI.getTypeAction(*DAG.getContext(), VT) !=
TargetLowering::TypeSplitVector)
return SDValue();
SDValue MaskLo, MaskHi, Lo, Hi;
std::tie(MaskLo, MaskHi) = SplitVSETCC(Mask.getNode(), DAG);
SDValue PassThru = MLD->getPassThru();
SDValue PassThruLo, PassThruHi;
std::tie(PassThruLo, PassThruHi) = DAG.SplitVector(PassThru, DL);
EVT LoVT, HiVT;
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(MLD->getValueType(0));
SDValue Chain = MLD->getChain();
SDValue Ptr = MLD->getBasePtr();
EVT MemoryVT = MLD->getMemoryVT();
unsigned Alignment = MLD->getOriginalAlignment();
EVT LoMemVT, HiMemVT;
std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
MachineMemOperand *MMO = DAG.getMachineFunction().
getMachineMemOperand(MLD->getPointerInfo(),
MachineMemOperand::MOLoad, LoMemVT.getStoreSize(),
Alignment, MLD->getAAInfo(), MLD->getRanges());
Lo = DAG.getMaskedLoad(LoVT, DL, Chain, Ptr, MaskLo, PassThruLo, LoMemVT,
MMO, ISD::NON_EXTLOAD, MLD->isExpandingLoad());
Ptr = TLI.IncrementMemoryAddress(Ptr, MaskLo, DL, LoMemVT, DAG,
MLD->isExpandingLoad());
unsigned HiOffset = LoMemVT.getStoreSize();
MMO = DAG.getMachineFunction().getMachineMemOperand(
MLD->getPointerInfo().getWithOffset(HiOffset),
MachineMemOperand::MOLoad, HiMemVT.getStoreSize(), Alignment,
MLD->getAAInfo(), MLD->getRanges());
Hi = DAG.getMaskedLoad(HiVT, DL, Chain, Ptr, MaskHi, PassThruHi, HiMemVT,
MMO, ISD::NON_EXTLOAD, MLD->isExpandingLoad());
AddToWorklist(Lo.getNode());
AddToWorklist(Hi.getNode());
// Build a factor node to remember that this load is independent of the
// other one.
Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo.getValue(1),
Hi.getValue(1));
// We legalized the chain result; switch anything that used the old chain
// to use the new one.
DAG.ReplaceAllUsesOfValueWith(SDValue(MLD, 1), Chain);
SDValue LoadRes = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
SDValue RetOps[] = { LoadRes, Chain };
return DAG.getMergeValues(RetOps, DL);
}
return SDValue();
}
/// A vector select of 2 constant vectors can be simplified to math/logic to
/// avoid a variable select instruction and possibly avoid constant loads.
SDValue DAGCombiner::foldVSelectOfConstants(SDNode *N) {
SDValue Cond = N->getOperand(0);
SDValue N1 = N->getOperand(1);
SDValue N2 = N->getOperand(2);
EVT VT = N->getValueType(0);
if (!Cond.hasOneUse() || Cond.getScalarValueSizeInBits() != 1 ||
!TLI.convertSelectOfConstantsToMath(VT) ||
!ISD::isBuildVectorOfConstantSDNodes(N1.getNode()) ||
!ISD::isBuildVectorOfConstantSDNodes(N2.getNode()))
return SDValue();
// Check if we can use the condition value to increment/decrement a single
// constant value. This simplifies a select to an add and removes a constant
// load/materialization from the general case.
bool AllAddOne = true;
bool AllSubOne = true;
unsigned Elts = VT.getVectorNumElements();
for (unsigned i = 0; i != Elts; ++i) {
SDValue N1Elt = N1.getOperand(i);
SDValue N2Elt = N2.getOperand(i);
if (N1Elt.isUndef() || N2Elt.isUndef())
continue;
const APInt &C1 = cast<ConstantSDNode>(N1Elt)->getAPIntValue();
const APInt &C2 = cast<ConstantSDNode>(N2Elt)->getAPIntValue();
if (C1 != C2 + 1)
AllAddOne = false;
if (C1 != C2 - 1)
AllSubOne = false;
}
// Further simplifications for the extra-special cases where the constants are
// all 0 or all -1 should be implemented as folds of these patterns.
SDLoc DL(N);
if (AllAddOne || AllSubOne) {
// vselect <N x i1> Cond, C+1, C --> add (zext Cond), C
// vselect <N x i1> Cond, C-1, C --> add (sext Cond), C
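// A worked example (illustrative): for
//   vselect <4 x i1> %c, <4 x i32> <3,3,3,3>, <4 x i32> <2,2,2,2>
// each C1 equals C2 + 1, so the select becomes add (zext %c), <2,2,2,2>:
// lanes where %c is true add 1 to 2 and produce 3; the rest stay 2.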
auto ExtendOpcode = AllAddOne ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
SDValue ExtendedCond = DAG.getNode(ExtendOpcode, DL, VT, Cond);
return DAG.getNode(ISD::ADD, DL, VT, ExtendedCond, N2);
}
// The general case for select-of-constants:
// vselect <N x i1> Cond, C1, C2 --> xor (and (sext Cond), (C1^C2)), C2
// ...but that only makes sense if a vselect is slower than 2 logic ops, so
// leave that to a machine-specific pass.
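// E.g. (illustrative): vselect %c, <5,5>, <3,3> would become
// xor (and (sext %c), <6,6>), <3,3>, since 5 ^ 3 == 6; true lanes give
// 6 ^ 3 == 5 and false lanes give 0 ^ 3 == 3.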
return SDValue();
}
SDValue DAGCombiner::visitVSELECT(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
SDValue N2 = N->getOperand(2);
EVT VT = N->getValueType(0);
SDLoc DL(N);
if (SDValue V = DAG.simplifySelect(N0, N1, N2))
return V;
// vselect (not Cond), N1, N2 -> vselect Cond, N2, N1
if (SDValue F = extractBooleanFlip(N0, DAG, TLI, false))
return DAG.getSelect(DL, VT, F, N2, N1);
// Canonicalize integer abs.
// vselect (setg[te] X, 0), X, -X ->
// vselect (setgt X, -1), X, -X ->
// vselect (setl[te] X, 0), -X, X ->
// Y = sra (X, size(X)-1); xor (add (X, Y), Y)
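// Worked example (illustrative, i8): for X = -5,
//   Y = sra(X, 7) = -1; add(X, Y) = -6; xor(-6, -1) = 5 = |X|.
// For non-negative X, Y = 0 and the add/xor leave X unchanged.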
if (N0.getOpcode() == ISD::SETCC) {
SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1);
ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
bool isAbs = false;
bool RHSIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
if (((RHSIsAllZeros && (CC == ISD::SETGT || CC == ISD::SETGE)) ||
(ISD::isBuildVectorAllOnes(RHS.getNode()) && CC == ISD::SETGT)) &&
N1 == LHS && N2.getOpcode() == ISD::SUB && N1 == N2.getOperand(1))
isAbs = ISD::isBuildVectorAllZeros(N2.getOperand(0).getNode());
else if ((RHSIsAllZeros && (CC == ISD::SETLT || CC == ISD::SETLE)) &&
N2 == LHS && N1.getOpcode() == ISD::SUB && N2 == N1.getOperand(1))
isAbs = ISD::isBuildVectorAllZeros(N1.getOperand(0).getNode());
if (isAbs) {
EVT VT = LHS.getValueType();
if (TLI.isOperationLegalOrCustom(ISD::ABS, VT))
return DAG.getNode(ISD::ABS, DL, VT, LHS);
SDValue Shift = DAG.getNode(
ISD::SRA, DL, VT, LHS,
DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, VT));
SDValue Add = DAG.getNode(ISD::ADD, DL, VT, LHS, Shift);
AddToWorklist(Shift.getNode());
AddToWorklist(Add.getNode());
return DAG.getNode(ISD::XOR, DL, VT, Add, Shift);
}
// vselect (fcmp lt x, y), x, y -> fminnum x, y
// vselect (fcmp gt x, y), x, y -> fmaxnum x, y
//
// This is OK if we don't care about what happens if either operand is a
// NaN.
//
if (N0.hasOneUse() && isLegalToCombineMinNumMaxNum(DAG, N0.getOperand(0),
N0.getOperand(1), TLI)) {
if (SDValue FMinMax = combineMinNumMaxNum(
DL, VT, N0.getOperand(0), N0.getOperand(1), N1, N2, CC, TLI, DAG))
return FMinMax;
}
// If this select has a condition (setcc) with narrower operands than the
// select, try to widen the compare to match the select width.
// TODO: This should be extended to handle any constant.
// TODO: This could be extended to handle non-loading patterns, but that
// requires thorough testing to avoid regressions.
if (isNullOrNullSplat(RHS)) {
EVT NarrowVT = LHS.getValueType();
EVT WideVT = N1.getValueType().changeVectorElementTypeToInteger();
EVT SetCCVT = getSetCCResultType(LHS.getValueType());
unsigned SetCCWidth = SetCCVT.getScalarSizeInBits();
unsigned WideWidth = WideVT.getScalarSizeInBits();
bool IsSigned = isSignedIntSetCC(CC);
auto LoadExtOpcode = IsSigned ? ISD::SEXTLOAD : ISD::ZEXTLOAD;
if (LHS.getOpcode() == ISD::LOAD && LHS.hasOneUse() &&
SetCCWidth != 1 && SetCCWidth < WideWidth &&
TLI.isLoadExtLegalOrCustom(LoadExtOpcode, WideVT, NarrowVT) &&
TLI.isOperationLegalOrCustom(ISD::SETCC, WideVT)) {
// Both compare operands can be widened for free. The LHS can use an
// extended load, and the RHS is a constant:
// vselect (ext (setcc load(X), C)), N1, N2 -->
// vselect (setcc extload(X), C'), N1, N2
auto ExtOpcode = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
SDValue WideLHS = DAG.getNode(ExtOpcode, DL, WideVT, LHS);
SDValue WideRHS = DAG.getNode(ExtOpcode, DL, WideVT, RHS);
EVT WideSetCCVT = getSetCCResultType(WideVT);
SDValue WideSetCC = DAG.getSetCC(DL, WideSetCCVT, WideLHS, WideRHS, CC);
return DAG.getSelect(DL, N1.getValueType(), WideSetCC, N1, N2);
}
}
}
if (SimplifySelectOps(N, N1, N2))
return SDValue(N, 0); // Don't revisit N.
// Fold (vselect (build_vector all_ones), N1, N2) -> N1
if (ISD::isBuildVectorAllOnes(N0.getNode()))
return N1;
// Fold (vselect (build_vector all_zeros), N1, N2) -> N2
if (ISD::isBuildVectorAllZeros(N0.getNode()))
return N2;
// The ConvertSelectToConcatVector function assumes both of the above
// checks for (vselect (build_vector all{ones,zeros}) ...) have been made
// and addressed.
if (N1.getOpcode() == ISD::CONCAT_VECTORS &&
N2.getOpcode() == ISD::CONCAT_VECTORS &&
ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) {
if (SDValue CV = ConvertSelectToConcatVector(N, DAG))
return CV;
}
if (SDValue V = foldVSelectOfConstants(N))
return V;
return SDValue();
}
SDValue DAGCombiner::visitSELECT_CC(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
SDValue N2 = N->getOperand(2);
SDValue N3 = N->getOperand(3);
SDValue N4 = N->getOperand(4);
ISD::CondCode CC = cast<CondCodeSDNode>(N4)->get();
// fold select_cc lhs, rhs, x, x, cc -> x
if (N2 == N3)
return N2;
// Determine if the condition we're dealing with is constant
if (SDValue SCC = SimplifySetCC(getSetCCResultType(N0.getValueType()), N0, N1,
CC, SDLoc(N), false)) {
AddToWorklist(SCC.getNode());
if (ConstantSDNode *SCCC = dyn_cast<ConstantSDNode>(SCC.getNode())) {
if (!SCCC->isNullValue())
return N2; // cond always true -> true val
else
return N3; // cond always false -> false val
} else if (SCC->isUndef()) {
// When the condition is UNDEF, just return the first operand. This is
// coherent with DAG creation, where no setcc node is created in this case.
return N2;
} else if (SCC.getOpcode() == ISD::SETCC) {
// Fold to a simpler select_cc
SDValue SelectOp = DAG.getNode(
ISD::SELECT_CC, SDLoc(N), N2.getValueType(), SCC.getOperand(0),
SCC.getOperand(1), N2, N3, SCC.getOperand(2));
SelectOp->setFlags(SCC->getFlags());
return SelectOp;
}
}
// If we can fold this based on the true/false value, do so.
if (SimplifySelectOps(N, N2, N3))
return SDValue(N, 0); // Don't revisit N.
// fold select_cc into other things, such as min/max/abs
return SimplifySelectCC(SDLoc(N), N0, N1, N2, N3, CC);
}
SDValue DAGCombiner::visitSETCC(SDNode *N) {
// setcc is very commonly used as an argument to brcond. This pattern
// also lends itself to numerous combines and, as a result, it is desirable
// to keep the argument to a brcond as a setcc for as long as possible.
bool PreferSetCC =
N->hasOneUse() && N->use_begin()->getOpcode() == ISD::BRCOND;
SDValue Combined = SimplifySetCC(
N->getValueType(0), N->getOperand(0), N->getOperand(1),
cast<CondCodeSDNode>(N->getOperand(2))->get(), SDLoc(N), !PreferSetCC);
if (!Combined)
return SDValue();
// If we prefer to have a setcc, and we don't, we'll try our best to
// recreate one using rebuildSetCC.
if (PreferSetCC && Combined.getOpcode() != ISD::SETCC) {
SDValue NewSetCC = rebuildSetCC(Combined);
// We don't have anything interesting to combine to.
if (NewSetCC.getNode() == N)
return SDValue();
if (NewSetCC)
return NewSetCC;
}
return Combined;
}
SDValue DAGCombiner::visitSETCCCARRY(SDNode *N) {
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
SDValue Carry = N->getOperand(2);
SDValue Cond = N->getOperand(3);
// If Carry is false, fold to a regular SETCC.
if (isNullConstant(Carry))
return DAG.getNode(ISD::SETCC, SDLoc(N), N->getVTList(), LHS, RHS, Cond);
return SDValue();
}
/// Try to fold a sext/zext/aext dag node into a ConstantSDNode or
/// a build_vector of constants.
/// This function is called by the DAGCombiner when visiting sext/zext/aext
/// dag nodes (see for example method DAGCombiner::visitSIGN_EXTEND).
/// Vector extends are not folded if operations are legal; this is to
/// avoid introducing illegal build_vector dag nodes.
static SDValue tryToFoldExtendOfConstant(SDNode *N, const TargetLowering &TLI,
SelectionDAG &DAG, bool LegalTypes) {
unsigned Opcode = N->getOpcode();
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
SDLoc DL(N);
assert((Opcode == ISD::SIGN_EXTEND || Opcode == ISD::ZERO_EXTEND ||
Opcode == ISD::ANY_EXTEND || Opcode == ISD::SIGN_EXTEND_VECTOR_INREG ||
Opcode == ISD::ZERO_EXTEND_VECTOR_INREG)
&& "Expected EXTEND dag node in input!");
// fold (sext c1) -> c1
// fold (zext c1) -> c1
// fold (aext c1) -> c1
if (isa<ConstantSDNode>(N0))
return DAG.getNode(Opcode, DL, VT, N0);
// fold (sext (select cond, c1, c2)) -> (select cond, sext c1, sext c2)
// fold (zext (select cond, c1, c2)) -> (select cond, zext c1, zext c2)
// fold (aext (select cond, c1, c2)) -> (select cond, sext c1, sext c2)
if (N0->getOpcode() == ISD::SELECT) {
SDValue Op1 = N0->getOperand(1);
SDValue Op2 = N0->getOperand(2);
if (isa<ConstantSDNode>(Op1) && isa<ConstantSDNode>(Op2) &&
(Opcode != ISD::ZERO_EXTEND || !TLI.isZExtFree(N0.getValueType(), VT))) {
// For any_extend, choose sign extension of the constants to allow a
// possible further transform to sign_extend_inreg, i.e.
//
// t1: i8 = select t0, Constant:i8<-1>, Constant:i8<0>
// t2: i64 = any_extend t1
// -->
// t3: i64 = select t0, Constant:i64<-1>, Constant:i64<0>
// -->
// t4: i64 = sign_extend_inreg t3
unsigned FoldOpc = Opcode;
if (FoldOpc == ISD::ANY_EXTEND)
FoldOpc = ISD::SIGN_EXTEND;
return DAG.getSelect(DL, VT, N0->getOperand(0),
DAG.getNode(FoldOpc, DL, VT, Op1),
DAG.getNode(FoldOpc, DL, VT, Op2));
}
}
// fold (sext (build_vector AllConstants)) -> (build_vector AllConstants)
// fold (zext (build_vector AllConstants)) -> (build_vector AllConstants)
// fold (aext (build_vector AllConstants)) -> (build_vector AllConstants)
EVT SVT = VT.getScalarType();
if (!(VT.isVector() && (!LegalTypes || TLI.isTypeLegal(SVT)) &&
ISD::isBuildVectorOfConstantSDNodes(N0.getNode())))
return SDValue();
// We can fold this node into a build_vector.
unsigned VTBits = SVT.getSizeInBits();
unsigned EVTBits = N0->getValueType(0).getScalarSizeInBits();
SmallVector<SDValue, 8> Elts;
unsigned NumElts = VT.getVectorNumElements();
// For zero-extensions, UNDEF elements are still guaranteed to have their
// upper bits set to zero.
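// E.g. (an assumed example): zext <2 x i8> <undef, 255> to <2 x i16> can
// fold to the constant build_vector <0, 255>; the undef lane must still
// present zero upper bits, so it is pinned to 0 below.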
bool IsZext =
Opcode == ISD::ZERO_EXTEND || Opcode == ISD::ZERO_EXTEND_VECTOR_INREG;
for (unsigned i = 0; i != NumElts; ++i) {
SDValue Op = N0.getOperand(i);
if (Op.isUndef()) {
Elts.push_back(IsZext ? DAG.getConstant(0, DL, SVT) : DAG.getUNDEF(SVT));
continue;
}
SDLoc DL(Op);
// Get the constant value and if needed trunc it to the size of the type.
// Nodes like build_vector might have constants wider than the scalar type.
APInt C = cast<ConstantSDNode>(Op)->getAPIntValue().zextOrTrunc(EVTBits);
if (Opcode == ISD::SIGN_EXTEND || Opcode == ISD::SIGN_EXTEND_VECTOR_INREG)
Elts.push_back(DAG.getConstant(C.sext(VTBits), DL, SVT));
else
Elts.push_back(DAG.getConstant(C.zext(VTBits), DL, SVT));
}
return DAG.getBuildVector(VT, DL, Elts);
}
// ExtendUsesToFormExtLoad - Try to extend the uses of a load to enable this
// transformation:
// "fold ({s|z|a}ext (load x)) -> ({s|z|a}ext (truncate ({s|z|a}extload x)))"
// Returns true if the extensions are possible and the above-mentioned
// transformation is profitable.
static bool ExtendUsesToFormExtLoad(EVT VT, SDNode *N, SDValue N0,
unsigned ExtOpc,
SmallVectorImpl<SDNode *> &ExtendNodes,
const TargetLowering &TLI) {
bool HasCopyToRegUses = false;
bool isTruncFree = TLI.isTruncateFree(VT, N0.getValueType());
for (SDNode::use_iterator UI = N0.getNode()->use_begin(),
UE = N0.getNode()->use_end();
UI != UE; ++UI) {
SDNode *User = *UI;
if (User == N)
continue;
if (UI.getUse().getResNo() != N0.getResNo())
continue;
// FIXME: Only extend SETCC N, N and SETCC N, c for now.
if (ExtOpc != ISD::ANY_EXTEND && User->getOpcode() == ISD::SETCC) {
ISD::CondCode CC = cast<CondCodeSDNode>(User->getOperand(2))->get();
if (ExtOpc == ISD::ZERO_EXTEND && ISD::isSignedIntSetCC(CC))
// Sign bits will be lost after a zext.
return false;
bool Add = false;
for (unsigned i = 0; i != 2; ++i) {
SDValue UseOp = User->getOperand(i);
if (UseOp == N0)
continue;
if (!isa<ConstantSDNode>(UseOp))
return false;
Add = true;
}
if (Add)
ExtendNodes.push_back(User);
continue;
}
// If truncates aren't free and there are users we can't
// extend, it isn't worthwhile.
if (!isTruncFree)
return false;
// Remember if this value is live-out.
if (User->getOpcode() == ISD::CopyToReg)
HasCopyToRegUses = true;
}
if (HasCopyToRegUses) {
bool BothLiveOut = false;
for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
UI != UE; ++UI) {
SDUse &Use = UI.getUse();
if (Use.getResNo() == 0 && Use.getUser()->getOpcode() == ISD::CopyToReg) {
BothLiveOut = true;
break;
}
}
if (BothLiveOut)
// Both unextended and extended values are live out. There had better be
// a good reason for the transformation.
return ExtendNodes.size();
}
return true;
}
void DAGCombiner::ExtendSetCCUses(const SmallVectorImpl<SDNode *> &SetCCs,
SDValue OrigLoad, SDValue ExtLoad,
ISD::NodeType ExtType) {
// Extend SetCC uses if necessary.
SDLoc DL(ExtLoad);
for (SDNode *SetCC : SetCCs) {
SmallVector<SDValue, 4> Ops;
for (unsigned j = 0; j != 2; ++j) {
SDValue SOp = SetCC->getOperand(j);
if (SOp == OrigLoad)
Ops.push_back(ExtLoad);
else
Ops.push_back(DAG.getNode(ExtType, DL, ExtLoad->getValueType(0), SOp));
}
Ops.push_back(SetCC->getOperand(2));
CombineTo(SetCC, DAG.getNode(ISD::SETCC, DL, SetCC->getValueType(0), Ops));
}
}
// FIXME: Bring more similar combines here, common to sext/zext (maybe aext?).
SDValue DAGCombiner::CombineExtLoad(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT DstVT = N->getValueType(0);
EVT SrcVT = N0.getValueType();
assert((N->getOpcode() == ISD::SIGN_EXTEND ||
N->getOpcode() == ISD::ZERO_EXTEND) &&
"Unexpected node type (not an extend)!");
// fold (sext (load x)) to multiple smaller sextloads; same for zext.
// For example, on a target with legal v4i32, but illegal v8i32, turn:
// (v8i32 (sext (v8i16 (load x))))
// into:
// (v8i32 (concat_vectors (v4i32 (sextload x)),
// (v4i32 (sextload (x + 16)))))
// Where uses of the original load, i.e.:
// (v8i16 (load x))
// are replaced with:
// (v8i16 (truncate
// (v8i32 (concat_vectors (v4i32 (sextload x)),
// (v4i32 (sextload (x + 16)))))))
//
// This combine is only applicable to illegal, but splittable, vectors.
// All legal types, and illegal non-vector types, are handled elsewhere.
// This combine is controlled by TargetLowering::isVectorLoadExtDesirable.
//
if (N0->getOpcode() != ISD::LOAD)
return SDValue();
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
if (!ISD::isNON_EXTLoad(LN0) || !ISD::isUNINDEXEDLoad(LN0) ||
!N0.hasOneUse() || LN0->isVolatile() || !DstVT.isVector() ||
!DstVT.isPow2VectorType() || !TLI.isVectorLoadExtDesirable(SDValue(N, 0)))
return SDValue();
SmallVector<SDNode *, 4> SetCCs;
if (!ExtendUsesToFormExtLoad(DstVT, N, N0, N->getOpcode(), SetCCs, TLI))
return SDValue();
ISD::LoadExtType ExtType =
N->getOpcode() == ISD::SIGN_EXTEND ? ISD::SEXTLOAD : ISD::ZEXTLOAD;
// Try to split the vector types to get down to legal types.
EVT SplitSrcVT = SrcVT;
EVT SplitDstVT = DstVT;
while (!TLI.isLoadExtLegalOrCustom(ExtType, SplitDstVT, SplitSrcVT) &&
SplitSrcVT.getVectorNumElements() > 1) {
SplitDstVT = DAG.GetSplitDestVTs(SplitDstVT).first;
SplitSrcVT = DAG.GetSplitDestVTs(SplitSrcVT).first;
}
if (!TLI.isLoadExtLegalOrCustom(ExtType, SplitDstVT, SplitSrcVT))
return SDValue();
SDLoc DL(N);
const unsigned NumSplits =
DstVT.getVectorNumElements() / SplitDstVT.getVectorNumElements();
const unsigned Stride = SplitSrcVT.getStoreSize();
SmallVector<SDValue, 4> Loads;
SmallVector<SDValue, 4> Chains;
SDValue BasePtr = LN0->getBasePtr();
for (unsigned Idx = 0; Idx < NumSplits; Idx++) {
const unsigned Offset = Idx * Stride;
const unsigned Align = MinAlign(LN0->getAlignment(), Offset);
SDValue SplitLoad = DAG.getExtLoad(
ExtType, SDLoc(LN0), SplitDstVT, LN0->getChain(), BasePtr,
LN0->getPointerInfo().getWithOffset(Offset), SplitSrcVT, Align,
LN0->getMemOperand()->getFlags(), LN0->getAAInfo());
BasePtr = DAG.getNode(ISD::ADD, DL, BasePtr.getValueType(), BasePtr,
DAG.getConstant(Stride, DL, BasePtr.getValueType()));
Loads.push_back(SplitLoad.getValue(0));
Chains.push_back(SplitLoad.getValue(1));
}
SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
SDValue NewValue = DAG.getNode(ISD::CONCAT_VECTORS, DL, DstVT, Loads);
// Simplify TF.
AddToWorklist(NewChain.getNode());
CombineTo(N, NewValue);
// Replace uses of the original load (before extension)
// with a truncate of the concatenated sextloaded vectors.
SDValue Trunc =
DAG.getNode(ISD::TRUNCATE, SDLoc(N0), N0.getValueType(), NewValue);
ExtendSetCCUses(SetCCs, N0, NewValue, (ISD::NodeType)N->getOpcode());
CombineTo(N0.getNode(), Trunc, NewChain);
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
// fold (zext (and/or/xor (shl/shr (load x), cst), cst)) ->
// (and/or/xor (shl/shr (zextload x), (zext cst)), (zext cst))
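// Illustrative sketch (assumed types):
//   zext i32 (and (srl (load i16 x), 4), 0xFF)
// can become
//   and (srl (zextload i16 -> i32 x), 4), 0xFF
// moving the extension into the load so the shift and mask run in the
// wider type.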
SDValue DAGCombiner::CombineZExtLogicopShiftLoad(SDNode *N) {
assert(N->getOpcode() == ISD::ZERO_EXTEND);
EVT VT = N->getValueType(0);
EVT OrigVT = N->getOperand(0).getValueType();
if (TLI.isZExtFree(OrigVT, VT))
return SDValue();
// and/or/xor
SDValue N0 = N->getOperand(0);
if (!(N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR ||
N0.getOpcode() == ISD::XOR) ||
N0.getOperand(1).getOpcode() != ISD::Constant ||
(LegalOperations && !TLI.isOperationLegal(N0.getOpcode(), VT)))
return SDValue();
// shl/shr
SDValue N1 = N0->getOperand(0);
if (!(N1.getOpcode() == ISD::SHL || N1.getOpcode() == ISD::SRL) ||
N1.getOperand(1).getOpcode() != ISD::Constant ||
(LegalOperations && !TLI.isOperationLegal(N1.getOpcode(), VT)))
return SDValue();
// load
if (!isa<LoadSDNode>(N1.getOperand(0)))
return SDValue();
LoadSDNode *Load = cast<LoadSDNode>(N1.getOperand(0));
EVT MemVT = Load->getMemoryVT();
if (!TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, MemVT) ||
Load->getExtensionType() == ISD::SEXTLOAD || Load->isIndexed())
return SDValue();
// If the shift op is SHL, the logic op must be AND, otherwise the result
// will be wrong.
if (N1.getOpcode() == ISD::SHL && N0.getOpcode() != ISD::AND)
return SDValue();
if (!N0.hasOneUse() || !N1.hasOneUse())
return SDValue();
SmallVector<SDNode*, 4> SetCCs;
if (!ExtendUsesToFormExtLoad(VT, N1.getNode(), N1.getOperand(0),
ISD::ZERO_EXTEND, SetCCs, TLI))
return SDValue();
// Actually do the transformation.
SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(Load), VT,
Load->getChain(), Load->getBasePtr(),
Load->getMemoryVT(), Load->getMemOperand());
SDLoc DL1(N1);
SDValue Shift = DAG.getNode(N1.getOpcode(), DL1, VT, ExtLoad,
N1.getOperand(1));
APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
Mask = Mask.zext(VT.getSizeInBits());
SDLoc DL0(N0);
SDValue And = DAG.getNode(N0.getOpcode(), DL0, VT, Shift,
DAG.getConstant(Mask, DL0, VT));
ExtendSetCCUses(SetCCs, N1.getOperand(0), ExtLoad, ISD::ZERO_EXTEND);
CombineTo(N, And);
if (SDValue(Load, 0).hasOneUse()) {
DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), ExtLoad.getValue(1));
} else {
SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(Load),
Load->getValueType(0), ExtLoad);
CombineTo(Load, Trunc, ExtLoad.getValue(1));
}
// N0 is dead at this point.
recursivelyDeleteUnusedNodes(N0.getNode());
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
/// If we're narrowing or widening the result of a vector select and the final
/// size is the same size as a setcc (compare) feeding the select, then try to
/// apply the cast operation to the select's operands because matching vector
/// sizes for a select condition and other operands should be more efficient.
SDValue DAGCombiner::matchVSelectOpSizesWithSetCC(SDNode *Cast) {
unsigned CastOpcode = Cast->getOpcode();
assert((CastOpcode == ISD::SIGN_EXTEND || CastOpcode == ISD::ZERO_EXTEND ||
CastOpcode == ISD::TRUNCATE || CastOpcode == ISD::FP_EXTEND ||
CastOpcode == ISD::FP_ROUND) &&
"Unexpected opcode for vector select narrowing/widening");
// We only do this transform before legal ops because the pattern may be
// obfuscated by target-specific operations after legalization. Do not create
// an illegal select op, however, because that may be difficult to lower.
EVT VT = Cast->getValueType(0);
if (LegalOperations || !TLI.isOperationLegalOrCustom(ISD::VSELECT, VT))
return SDValue();
SDValue VSel = Cast->getOperand(0);
if (VSel.getOpcode() != ISD::VSELECT || !VSel.hasOneUse() ||
VSel.getOperand(0).getOpcode() != ISD::SETCC)
return SDValue();
// Does the setcc have the same vector size as the casted select?
SDValue SetCC = VSel.getOperand(0);
EVT SetCCVT = getSetCCResultType(SetCC.getOperand(0).getValueType());
if (SetCCVT.getSizeInBits() != VT.getSizeInBits())
return SDValue();
// cast (vsel (setcc X), A, B) --> vsel (setcc X), (cast A), (cast B)
SDValue A = VSel.getOperand(1);
SDValue B = VSel.getOperand(2);
SDValue CastA, CastB;
SDLoc DL(Cast);
if (CastOpcode == ISD::FP_ROUND) {
// FP_ROUND (fptrunc) has an extra flag operand to pass along.
CastA = DAG.getNode(CastOpcode, DL, VT, A, Cast->getOperand(1));
CastB = DAG.getNode(CastOpcode, DL, VT, B, Cast->getOperand(1));
} else {
CastA = DAG.getNode(CastOpcode, DL, VT, A);
CastB = DAG.getNode(CastOpcode, DL, VT, B);
}
return DAG.getNode(ISD::VSELECT, DL, VT, SetCC, CastA, CastB);
}
// fold ([s|z]ext ([s|z]extload x)) -> ([s|z]ext (truncate ([s|z]extload x)))
// fold ([s|z]ext ( extload x)) -> ([s|z]ext (truncate ([s|z]extload x)))
static SDValue tryToFoldExtOfExtload(SelectionDAG &DAG, DAGCombiner &Combiner,
const TargetLowering &TLI, EVT VT,
bool LegalOperations, SDNode *N,
SDValue N0, ISD::LoadExtType ExtLoadType) {
SDNode *N0Node = N0.getNode();
bool isAExtLoad = (ExtLoadType == ISD::SEXTLOAD) ? ISD::isSEXTLoad(N0Node)
: ISD::isZEXTLoad(N0Node);
if ((!isAExtLoad && !ISD::isEXTLoad(N0Node)) ||
!ISD::isUNINDEXEDLoad(N0Node) || !N0.hasOneUse())
return SDValue();
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
EVT MemVT = LN0->getMemoryVT();
if ((LegalOperations || LN0->isVolatile() || VT.isVector()) &&
!TLI.isLoadExtLegal(ExtLoadType, VT, MemVT))
return SDValue();
SDValue ExtLoad =
DAG.getExtLoad(ExtLoadType, SDLoc(LN0), VT, LN0->getChain(),
LN0->getBasePtr(), MemVT, LN0->getMemOperand());
Combiner.CombineTo(N, ExtLoad);
DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 1), ExtLoad.getValue(1));
if (LN0->use_empty())
Combiner.recursivelyDeleteUnusedNodes(LN0);
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
// fold ([s|z]ext (load x)) -> ([s|z]ext (truncate ([s|z]extload x)))
// Only generate vector extloads when 1) they're legal, and 2) they are
// deemed desirable by the target.
static SDValue tryToFoldExtOfLoad(SelectionDAG &DAG, DAGCombiner &Combiner,
const TargetLowering &TLI, EVT VT,
bool LegalOperations, SDNode *N, SDValue N0,
ISD::LoadExtType ExtLoadType,
ISD::NodeType ExtOpc) {
if (!ISD::isNON_EXTLoad(N0.getNode()) ||
!ISD::isUNINDEXEDLoad(N0.getNode()) ||
((LegalOperations || VT.isVector() ||
cast<LoadSDNode>(N0)->isVolatile()) &&
!TLI.isLoadExtLegal(ExtLoadType, VT, N0.getValueType())))
return {};
bool DoXform = true;
SmallVector<SDNode *, 4> SetCCs;
if (!N0.hasOneUse())
DoXform = ExtendUsesToFormExtLoad(VT, N, N0, ExtOpc, SetCCs, TLI);
if (VT.isVector())
DoXform &= TLI.isVectorLoadExtDesirable(SDValue(N, 0));
if (!DoXform)
return {};
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
SDValue ExtLoad = DAG.getExtLoad(ExtLoadType, SDLoc(LN0), VT, LN0->getChain(),
LN0->getBasePtr(), N0.getValueType(),
LN0->getMemOperand());
Combiner.ExtendSetCCUses(SetCCs, N0, ExtLoad, ExtOpc);
// If the load value is used only by N, replace it via CombineTo N.
bool NoReplaceTrunc = SDValue(LN0, 0).hasOneUse();
Combiner.CombineTo(N, ExtLoad);
if (NoReplaceTrunc) {
DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 1), ExtLoad.getValue(1));
Combiner.recursivelyDeleteUnusedNodes(LN0);
} else {
SDValue Trunc =
DAG.getNode(ISD::TRUNCATE, SDLoc(N0), N0.getValueType(), ExtLoad);
Combiner.CombineTo(LN0, Trunc, ExtLoad.getValue(1));
}
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
static SDValue foldExtendedSignBitTest(SDNode *N, SelectionDAG &DAG,
bool LegalOperations) {
assert((N->getOpcode() == ISD::SIGN_EXTEND ||
N->getOpcode() == ISD::ZERO_EXTEND) && "Expected sext or zext");
SDValue SetCC = N->getOperand(0);
if (LegalOperations || SetCC.getOpcode() != ISD::SETCC ||
!SetCC.hasOneUse() || SetCC.getValueType() != MVT::i1)
return SDValue();
SDValue X = SetCC.getOperand(0);
SDValue Ones = SetCC.getOperand(1);
ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
EVT VT = N->getValueType(0);
EVT XVT = X.getValueType();
// setge X, C is canonicalized to setgt, so we do not need to match that
// pattern. The setlt sibling is folded in SimplifySelectCC() because it does
// not require the 'not' op.
if (CC == ISD::SETGT && isAllOnesConstant(Ones) && VT == XVT) {
// Invert and smear/shift the sign bit:
// sext i1 (setgt iN X, -1) --> sra (not X), (N - 1)
// zext i1 (setgt iN X, -1) --> srl (not X), (N - 1)
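// Worked example (illustrative, i8): X = 0x2A has a clear sign bit, so
// setgt X, -1 is true and sext i1 true = -1; likewise
// sra(not 0x2A, 7) = sra(0xD5, 7) = -1, so both forms agree.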
SDLoc DL(N);
SDValue NotX = DAG.getNOT(DL, X, VT);
SDValue ShiftAmount = DAG.getConstant(VT.getSizeInBits() - 1, DL, VT);
auto ShiftOpcode = N->getOpcode() == ISD::SIGN_EXTEND ? ISD::SRA : ISD::SRL;
return DAG.getNode(ShiftOpcode, DL, VT, NotX, ShiftAmount);
}
return SDValue();
}
SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
SDLoc DL(N);
if (SDValue Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes))
return Res;
// fold (sext (sext x)) -> (sext x)
// fold (sext (aext x)) -> (sext x)
if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND)
return DAG.getNode(ISD::SIGN_EXTEND, DL, VT, N0.getOperand(0));
if (N0.getOpcode() == ISD::TRUNCATE) {
// fold (sext (truncate (load x))) -> (sext (smaller load x))
// fold (sext (truncate (srl (load x), c))) -> (sext (smaller load (x+c/n)))
if (SDValue NarrowLoad = ReduceLoadWidth(N0.getNode())) {
SDNode *oye = N0.getOperand(0).getNode();
if (NarrowLoad.getNode() != N0.getNode()) {
CombineTo(N0.getNode(), NarrowLoad);
// CombineTo deleted the truncate, if needed, but not what's under it.
AddToWorklist(oye);
}
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
// See if the value being truncated is already sign extended. If so, just
// eliminate the trunc/sext pair.
SDValue Op = N0.getOperand(0);
unsigned OpBits = Op.getScalarValueSizeInBits();
unsigned MidBits = N0.getScalarValueSizeInBits();
unsigned DestBits = VT.getScalarSizeInBits();
unsigned NumSignBits = DAG.ComputeNumSignBits(Op);
if (OpBits == DestBits) {
// Op is i32, Mid is i8, and Dest is i32. If Op has more than 24 sign
// bits, it is already sign-extended enough for the destination.
if (NumSignBits > DestBits-MidBits)
return Op;
} else if (OpBits < DestBits) {
// Op is i32, Mid is i8, and Dest is i64. If Op has more than 24 sign
// bits, just sext from i32.
if (NumSignBits > OpBits-MidBits)
return DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Op);
} else {
// Op is i64, Mid is i8, and Dest is i32. If Op has more than 56 sign
// bits, just truncate to i32.
if (NumSignBits > OpBits-MidBits)
return DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
}
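// A worked instance of the above (illustrative): Op is i32 with 30 sign
// bits, Mid is i8, and Dest is i32. DestBits - MidBits = 24 < 30, so the
// trunc/sext pair is a no-op and Op is returned unchanged.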
// fold (sext (truncate x)) -> (sextinreg x).
if (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG,
N0.getValueType())) {
if (OpBits < DestBits)
Op = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N0), VT, Op);
else if (OpBits > DestBits)
Op = DAG.getNode(ISD::TRUNCATE, SDLoc(N0), VT, Op);
return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Op,
DAG.getValueType(N0.getValueType()));
}
}
// Try to simplify (sext (load x)).
if (SDValue foldedExt =
tryToFoldExtOfLoad(DAG, *this, TLI, VT, LegalOperations, N, N0,
ISD::SEXTLOAD, ISD::SIGN_EXTEND))
return foldedExt;
// fold (sext (load x)) to multiple smaller sextloads.
// Only on illegal but splittable vectors.
if (SDValue ExtLoad = CombineExtLoad(N))
return ExtLoad;
// Try to simplify (sext (sextload x)).
if (SDValue foldedExt = tryToFoldExtOfExtload(
DAG, *this, TLI, VT, LegalOperations, N, N0, ISD::SEXTLOAD))
return foldedExt;
// fold (sext (and/or/xor (load x), cst)) ->
// (and/or/xor (sextload x), (sext cst))
if ((N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR ||
N0.getOpcode() == ISD::XOR) &&
isa<LoadSDNode>(N0.getOperand(0)) &&
N0.getOperand(1).getOpcode() == ISD::Constant &&
(!LegalOperations && TLI.isOperationLegal(N0.getOpcode(), VT))) {
LoadSDNode *LN00 = cast<LoadSDNode>(N0.getOperand(0));
EVT MemVT = LN00->getMemoryVT();
if (TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, MemVT) &&
LN00->getExtensionType() != ISD::ZEXTLOAD && LN00->isUnindexed()) {
SmallVector<SDNode*, 4> SetCCs;
bool DoXform = ExtendUsesToFormExtLoad(VT, N0.getNode(), N0.getOperand(0),
ISD::SIGN_EXTEND, SetCCs, TLI);
if (DoXform) {
SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(LN00), VT,
LN00->getChain(), LN00->getBasePtr(),
LN00->getMemoryVT(),
LN00->getMemOperand());
APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
Mask = Mask.sext(VT.getSizeInBits());
SDValue And = DAG.getNode(N0.getOpcode(), DL, VT,
ExtLoad, DAG.getConstant(Mask, DL, VT));
ExtendSetCCUses(SetCCs, N0.getOperand(0), ExtLoad, ISD::SIGN_EXTEND);
bool NoReplaceTruncAnd = !N0.hasOneUse();
bool NoReplaceTrunc = SDValue(LN00, 0).hasOneUse();
CombineTo(N, And);
// If N0 has multiple uses, change other uses as well.
if (NoReplaceTruncAnd) {
SDValue TruncAnd =
DAG.getNode(ISD::TRUNCATE, DL, N0.getValueType(), And);
CombineTo(N0.getNode(), TruncAnd);
}
if (NoReplaceTrunc) {
DAG.ReplaceAllUsesOfValueWith(SDValue(LN00, 1), ExtLoad.getValue(1));
} else {
SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(LN00),
LN00->getValueType(0), ExtLoad);
CombineTo(LN00, Trunc, ExtLoad.getValue(1));
}
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
}
}
if (SDValue V = foldExtendedSignBitTest(N, DAG, LegalOperations))
return V;
if (N0.getOpcode() == ISD::SETCC) {
SDValue N00 = N0.getOperand(0);
SDValue N01 = N0.getOperand(1);
ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
EVT N00VT = N0.getOperand(0).getValueType();
// sext(setcc) -> sext_in_reg(vsetcc) for vectors.
// Only do this before legalize for now.
if (VT.isVector() && !LegalOperations &&
TLI.getBooleanContents(N00VT) ==
TargetLowering::ZeroOrNegativeOneBooleanContent) {
// On some architectures (such as SSE/NEON/etc) the SETCC result type is
// of the same size as the compared operands. Only optimize sext(setcc())
// if this is the case.
EVT SVT = getSetCCResultType(N00VT);
// If we already have the desired type, don't change it.
if (SVT != N0.getValueType()) {
// We know that the # elements of the result is the same as the
// # elements of the compare (and the # elements of the compare result
// for that matter). Check to see that they are the same size. If so,
// we know that the element size of the sext'd result matches the
// element size of the compare operands.
if (VT.getSizeInBits() == SVT.getSizeInBits())
return DAG.getSetCC(DL, VT, N00, N01, CC);
// If the desired elements are smaller or larger than the source
// elements, we can use a matching integer vector type and then
// truncate/sign extend.
EVT MatchingVecType = N00VT.changeVectorElementTypeToInteger();
if (SVT == MatchingVecType) {
SDValue VsetCC = DAG.getSetCC(DL, MatchingVecType, N00, N01, CC);
return DAG.getSExtOrTrunc(VsetCC, DL, VT);
}
}
}
// sext(setcc x, y, cc) -> (select (setcc x, y, cc), T, 0)
// Here, T can be 1 or -1, depending on the type of the setcc and
// getBooleanContents().
unsigned SetCCWidth = N0.getScalarValueSizeInBits();
// To determine the "true" side of the select, we need to know the high bit
// of the value returned by the setcc if it evaluates to true.
// If the type of the setcc is i1, then the true case of the select is just
// sext(i1 1), that is, -1.
// If the type of the setcc is larger (say, i8) then the value of the high
// bit depends on getBooleanContents(), so ask TLI for a real "true" value
// of the appropriate width.
SDValue ExtTrueVal = (SetCCWidth == 1)
? DAG.getAllOnesConstant(DL, VT)
: DAG.getBoolConstant(true, DL, VT, N00VT);
SDValue Zero = DAG.getConstant(0, DL, VT);
if (SDValue SCC =
SimplifySelectCC(DL, N00, N01, ExtTrueVal, Zero, CC, true))
return SCC;
if (!VT.isVector() && !TLI.convertSelectOfConstantsToMath(VT)) {
EVT SetCCVT = getSetCCResultType(N00VT);
// Don't do this transform for i1 because there's a select transform
// that would reverse it.
// TODO: We should not do this transform at all without a target hook
// because a sext is likely cheaper than a select?
if (SetCCVT.getScalarSizeInBits() != 1 &&
(!LegalOperations || TLI.isOperationLegal(ISD::SETCC, N00VT))) {
SDValue SetCC = DAG.getSetCC(DL, SetCCVT, N00, N01, CC);
return DAG.getSelect(DL, VT, SetCC, ExtTrueVal, Zero);
}
}
}
// fold (sext x) -> (zext x) if the sign bit is known zero.
if ((!LegalOperations || TLI.isOperationLegal(ISD::ZERO_EXTEND, VT)) &&
DAG.SignBitIsZero(N0))
return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0);
if (SDValue NewVSel = matchVSelectOpSizesWithSetCC(N))
return NewVSel;
// Eliminate this sign extend by doing a negation in the destination type:
// sext i32 (0 - (zext i8 X to i32)) to i64 --> 0 - (zext i8 X to i64)
if (N0.getOpcode() == ISD::SUB && N0.hasOneUse() &&
isNullOrNullSplat(N0.getOperand(0)) &&
N0.getOperand(1).getOpcode() == ISD::ZERO_EXTEND &&
TLI.isOperationLegalOrCustom(ISD::SUB, VT)) {
SDValue Zext = DAG.getZExtOrTrunc(N0.getOperand(1).getOperand(0), DL, VT);
return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Zext);
}
// Eliminate this sign extend by doing a decrement in the destination type:
// sext i32 ((zext i8 X to i32) + (-1)) to i64 --> (zext i8 X to i64) + (-1)
if (N0.getOpcode() == ISD::ADD && N0.hasOneUse() &&
isAllOnesOrAllOnesSplat(N0.getOperand(1)) &&
N0.getOperand(0).getOpcode() == ISD::ZERO_EXTEND &&
TLI.isOperationLegalOrCustom(ISD::ADD, VT)) {
SDValue Zext = DAG.getZExtOrTrunc(N0.getOperand(0).getOperand(0), DL, VT);
return DAG.getNode(ISD::ADD, DL, VT, Zext, DAG.getAllOnesConstant(DL, VT));
}
return SDValue();
}
// isTruncateOf - If N is a truncate of some other value, return true and
// record the value being truncated in Op and which of Op's bits are
// zero/one in Known.
// This function computes KnownBits to avoid a duplicated call to
// computeKnownBits in the caller.
static bool isTruncateOf(SelectionDAG &DAG, SDValue N, SDValue &Op,
KnownBits &Known) {
if (N->getOpcode() == ISD::TRUNCATE) {
Op = N->getOperand(0);
Known = DAG.computeKnownBits(Op);
return true;
}
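// Alternatively, an i1-producing (setne X, 0) behaves as a truncate to i1
// when X is known to be either 0 or 1; the checks below look for that
// shape and verify it via the known bits of X.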
if (N.getOpcode() != ISD::SETCC ||
N.getValueType().getScalarType() != MVT::i1 ||
cast<CondCodeSDNode>(N.getOperand(2))->get() != ISD::SETNE)
return false;
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
assert(Op0.getValueType() == Op1.getValueType());
if (isNullOrNullSplat(Op0))
Op = Op1;
else if (isNullOrNullSplat(Op1))
Op = Op0;
else
return false;
Known = DAG.computeKnownBits(Op);
return (Known.Zero | 1).isAllOnesValue();
}
SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
if (SDValue Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes))
return Res;
// fold (zext (zext x)) -> (zext x)
// fold (zext (aext x)) -> (zext x)
if (N0.getOpcode() == ISD::ZERO_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND)
return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT,
N0.getOperand(0));
// fold (zext (truncate x)) -> (zext x) or
// (zext (truncate x)) -> (truncate x)
// This is valid when the truncated bits of x are already zero.
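// E.g. (illustrative): zext (trunc i32 %x to i16) to i32 folds back to %x
// when the top 16 bits of %x are already known to be zero.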
SDValue Op;
KnownBits Known;
if (isTruncateOf(DAG, N0, Op, Known)) {
APInt TruncatedBits =
(Op.getScalarValueSizeInBits() == N0.getScalarValueSizeInBits()) ?
APInt(Op.getScalarValueSizeInBits(), 0) :
APInt::getBitsSet(Op.getScalarValueSizeInBits(),
N0.getScalarValueSizeInBits(),
std::min(Op.getScalarValueSizeInBits(),
VT.getScalarSizeInBits()));
if (TruncatedBits.isSubsetOf(Known.Zero))
return DAG.getZExtOrTrunc(Op, SDLoc(N), VT);
}
// fold (zext (truncate x)) -> (and x, mask)
if (N0.getOpcode() == ISD::TRUNCATE) {
// fold (zext (truncate (load x))) -> (zext (smaller load x))
// fold (zext (truncate (srl (load x), c))) -> (zext (smaller load (x+c/n)))
if (SDValue NarrowLoad = ReduceLoadWidth(N0.getNode())) {
SDNode *oye = N0.getOperand(0).getNode();
if (NarrowLoad.getNode() != N0.getNode()) {
CombineTo(N0.getNode(), NarrowLoad);
// CombineTo deleted the truncate, if needed, but not what's under it.
AddToWorklist(oye);
}
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
EVT SrcVT = N0.getOperand(0).getValueType();
EVT MinVT = N0.getValueType();
// Try to mask before the extension to avoid having to generate a larger mask,
// possibly over several sub-vectors.
if (SrcVT.bitsLT(VT) && VT.isVector()) {
if (!LegalOperations || (TLI.isOperationLegal(ISD::AND, SrcVT) &&
TLI.isOperationLegal(ISD::ZERO_EXTEND, VT))) {
SDValue Op = N0.getOperand(0);
Op = DAG.getZeroExtendInReg(Op, SDLoc(N), MinVT.getScalarType());
AddToWorklist(Op.getNode());
SDValue ZExtOrTrunc = DAG.getZExtOrTrunc(Op, SDLoc(N), VT);
// Transfer the debug info; the new node is equivalent to N0.
DAG.transferDbgValues(N0, ZExtOrTrunc);
return ZExtOrTrunc;
}
}
if (!LegalOperations || TLI.isOperationLegal(ISD::AND, VT)) {
SDValue Op = DAG.getAnyExtOrTrunc(N0.getOperand(0), SDLoc(N), VT);
AddToWorklist(Op.getNode());
SDValue And = DAG.getZeroExtendInReg(Op, SDLoc(N), MinVT.getScalarType());
// We may safely transfer the debug info describing the truncate node over
// to the equivalent and operation.
DAG.transferDbgValues(N0, And);
return And;
}
}
// Fold (zext (and (trunc x), cst)) -> (and x, cst),
// if either of the casts is not free.
if (N0.getOpcode() == ISD::AND &&
N0.getOperand(0).getOpcode() == ISD::TRUNCATE &&
N0.getOperand(1).getOpcode() == ISD::Constant &&
(!TLI.isTruncateFree(N0.getOperand(0).getOperand(0).getValueType(),
N0.getValueType()) ||
!TLI.isZExtFree(N0.getValueType(), VT))) {
SDValue X = N0.getOperand(0).getOperand(0);
X = DAG.getAnyExtOrTrunc(X, SDLoc(X), VT);
APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
Mask = Mask.zext(VT.getSizeInBits());
SDLoc DL(N);
return DAG.getNode(ISD::AND, DL, VT,
X, DAG.getConstant(Mask, DL, VT));
}
// Try to simplify (zext (load x)).
if (SDValue foldedExt =
tryToFoldExtOfLoad(DAG, *this, TLI, VT, LegalOperations, N, N0,
ISD::ZEXTLOAD, ISD::ZERO_EXTEND))
return foldedExt;
// fold (zext (load x)) to multiple smaller zextloads.
// Only on illegal but splittable vectors.
if (SDValue ExtLoad = CombineExtLoad(N))
return ExtLoad;
// fold (zext (and/or/xor (load x), cst)) ->
// (and/or/xor (zextload x), (zext cst))
// Unless (and (load x) cst) will match as a zextload already and has
// additional users.
if ((N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR ||
N0.getOpcode() == ISD::XOR) &&
isa<LoadSDNode>(N0.getOperand(0)) &&
N0.getOperand(1).getOpcode() == ISD::Constant &&
(!LegalOperations && TLI.isOperationLegal(N0.getOpcode(), VT))) {
LoadSDNode *LN00 = cast<LoadSDNode>(N0.getOperand(0));
EVT MemVT = LN00->getMemoryVT();
if (TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, MemVT) &&
LN00->getExtensionType() != ISD::SEXTLOAD && LN00->isUnindexed()) {
bool DoXform = true;
SmallVector<SDNode*, 4> SetCCs;
if (!N0.hasOneUse()) {
if (N0.getOpcode() == ISD::AND) {
auto *AndC = cast<ConstantSDNode>(N0.getOperand(1));
EVT LoadResultTy = AndC->getValueType(0);
EVT ExtVT;
if (isAndLoadExtLoad(AndC, LN00, LoadResultTy, ExtVT))
DoXform = false;
}
}
if (DoXform)
DoXform = ExtendUsesToFormExtLoad(VT, N0.getNode(), N0.getOperand(0),
ISD::ZERO_EXTEND, SetCCs, TLI);
if (DoXform) {
SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(LN00), VT,
LN00->getChain(), LN00->getBasePtr(),
LN00->getMemoryVT(),
LN00->getMemOperand());
APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
Mask = Mask.zext(VT.getSizeInBits());
SDLoc DL(N);
SDValue And = DAG.getNode(N0.getOpcode(), DL, VT,
ExtLoad, DAG.getConstant(Mask, DL, VT));
ExtendSetCCUses(SetCCs, N0.getOperand(0), ExtLoad, ISD::ZERO_EXTEND);
bool NoReplaceTruncAnd = !N0.hasOneUse();
bool NoReplaceTrunc = SDValue(LN00, 0).hasOneUse();
CombineTo(N, And);
// If N0 has multiple uses, change other uses as well.
if (NoReplaceTruncAnd) {
SDValue TruncAnd =
DAG.getNode(ISD::TRUNCATE, DL, N0.getValueType(), And);
CombineTo(N0.getNode(), TruncAnd);
}
if (NoReplaceTrunc) {
DAG.ReplaceAllUsesOfValueWith(SDValue(LN00, 1), ExtLoad.getValue(1));
} else {
SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(LN00),
LN00->getValueType(0), ExtLoad);
CombineTo(LN00, Trunc, ExtLoad.getValue(1));
}
return SDValue(N,0); // Return N so it doesn't get rechecked!
}
}
}
// fold (zext (and/or/xor (shl/shr (load x), cst), cst)) ->
// (and/or/xor (shl/shr (zextload x), (zext cst)), (zext cst))
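// Illustrative instance (assumed types): with an i16 load,
//   (zext (and (srl (load i16, p), 8), 0x7F) to i32)
//   -> (and (srl (zextload i16, p to i32), 8), 0x7F)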
if (SDValue ZExtLoad = CombineZExtLogicopShiftLoad(N))
return ZExtLoad;
// Try to simplify (zext (zextload x)).
if (SDValue foldedExt = tryToFoldExtOfExtload(
DAG, *this, TLI, VT, LegalOperations, N, N0, ISD::ZEXTLOAD))
return foldedExt;
if (SDValue V = foldExtendedSignBitTest(N, DAG, LegalOperations))
return V;
if (N0.getOpcode() == ISD::SETCC) {
// Only do this before legalize for now.
if (!LegalOperations && VT.isVector() &&
N0.getValueType().getVectorElementType() == MVT::i1) {
EVT N00VT = N0.getOperand(0).getValueType();
if (getSetCCResultType(N00VT) == N0.getValueType())
return SDValue();
// We know that the # elements of the result is the same as the #
// elements of the compare (and the # elements of the compare result,
// for that matter). Check to see that they are the same size. If so, we
// know that the element size of the zext'd result matches the element
// size of the compare operands.
SDLoc DL(N);
SDValue VecOnes = DAG.getConstant(1, DL, VT);
if (VT.getSizeInBits() == N00VT.getSizeInBits()) {
// zext(setcc) -> (and (vsetcc), (1, 1, ...)) for vectors.
SDValue VSetCC = DAG.getNode(ISD::SETCC, DL, VT, N0.getOperand(0),
N0.getOperand(1), N0.getOperand(2));
return DAG.getNode(ISD::AND, DL, VT, VSetCC, VecOnes);
}
// If the desired elements are smaller or larger than the source
// elements we can use a matching integer vector type and then
// truncate/sign extend.
EVT MatchingVectorType = N00VT.changeVectorElementTypeToInteger();
SDValue VsetCC =
DAG.getNode(ISD::SETCC, DL, MatchingVectorType, N0.getOperand(0),
N0.getOperand(1), N0.getOperand(2));
return DAG.getNode(ISD::AND, DL, VT, DAG.getSExtOrTrunc(VsetCC, DL, VT),
VecOnes);
}
// zext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc
SDLoc DL(N);
if (SDValue SCC = SimplifySelectCC(
DL, N0.getOperand(0), N0.getOperand(1), DAG.getConstant(1, DL, VT),
DAG.getConstant(0, DL, VT),
cast<CondCodeSDNode>(N0.getOperand(2))->get(), true))
return SCC;
}
// (zext (shl/srl (zext x), cst)) -> (shl/srl (zext x), cst)
if ((N0.getOpcode() == ISD::SHL || N0.getOpcode() == ISD::SRL) &&
isa<ConstantSDNode>(N0.getOperand(1)) &&
N0.getOperand(0).getOpcode() == ISD::ZERO_EXTEND &&
N0.hasOneUse()) {
SDValue ShAmt = N0.getOperand(1);
if (N0.getOpcode() == ISD::SHL) {
SDValue InnerZExt = N0.getOperand(0);
// If the original shl may be shifting out bits, do not perform this
// transformation.
unsigned KnownZeroBits = InnerZExt.getValueSizeInBits() -
InnerZExt.getOperand(0).getValueSizeInBits();
if (cast<ConstantSDNode>(ShAmt)->getAPIntValue().ugt(KnownZeroBits))
return SDValue();
}
SDLoc DL(N);
// Ensure that the shift amount is wide enough for the shifted value.
if (VT.getSizeInBits() >= 256)
ShAmt = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, ShAmt);
return DAG.getNode(N0.getOpcode(), DL, VT,
DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0)),
ShAmt);
}
if (SDValue NewVSel = matchVSelectOpSizesWithSetCC(N))
return NewVSel;
return SDValue();
}
SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
if (SDValue Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes))
return Res;
// fold (aext (aext x)) -> (aext x)
// fold (aext (zext x)) -> (zext x)
// fold (aext (sext x)) -> (sext x)
if (N0.getOpcode() == ISD::ANY_EXTEND ||
N0.getOpcode() == ISD::ZERO_EXTEND ||
N0.getOpcode() == ISD::SIGN_EXTEND)
return DAG.getNode(N0.getOpcode(), SDLoc(N), VT, N0.getOperand(0));
// fold (aext (truncate (load x))) -> (aext (smaller load x))
// fold (aext (truncate (srl (load x), c))) -> (aext (small load (x+c/n)))
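// Illustrative instance (assumed types, little-endian): with c = 16,
//   (aext (trunc (srl (load i32, p), 16) to i8) to i32)
// can become an i8 load from p+2 that is then any-extended to i32.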
if (N0.getOpcode() == ISD::TRUNCATE) {
if (SDValue NarrowLoad = ReduceLoadWidth(N0.getNode())) {
SDNode *oye = N0.getOperand(0).getNode();
if (NarrowLoad.getNode() != N0.getNode()) {
CombineTo(N0.getNode(), NarrowLoad);
// CombineTo deleted the truncate, if needed, but not what's under it.
AddToWorklist(oye);
}
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
}
// fold (aext (truncate x))
if (N0.getOpcode() == ISD::TRUNCATE)
return DAG.getAnyExtOrTrunc(N0.getOperand(0), SDLoc(N), VT);
// Fold (aext (and (trunc x), cst)) -> (and x, cst)
// if the trunc is not free.
if (N0.getOpcode() == ISD::AND &&
N0.getOperand(0).getOpcode() == ISD::TRUNCATE &&
N0.getOperand(1).getOpcode() == ISD::Constant &&
!TLI.isTruncateFree(N0.getOperand(0).getOperand(0).getValueType(),
N0.getValueType())) {
SDLoc DL(N);
SDValue X = N0.getOperand(0).getOperand(0);
X = DAG.getAnyExtOrTrunc(X, DL, VT);
APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
Mask = Mask.zext(VT.getSizeInBits());
return DAG.getNode(ISD::AND, DL, VT,
X, DAG.getConstant(Mask, DL, VT));
}
// fold (aext (load x)) -> (aext (truncate (extload x)))
// None of the supported targets knows how to perform load and any_ext
// on vectors in one instruction. We only perform this transformation on
// scalars.
if (ISD::isNON_EXTLoad(N0.getNode()) && !VT.isVector() &&
ISD::isUNINDEXEDLoad(N0.getNode()) &&
TLI.isLoadExtLegal(ISD::EXTLOAD, VT, N0.getValueType())) {
bool DoXform = true;
SmallVector<SDNode*, 4> SetCCs;
if (!N0.hasOneUse())
DoXform = ExtendUsesToFormExtLoad(VT, N, N0, ISD::ANY_EXTEND, SetCCs,
TLI);
if (DoXform) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, SDLoc(N), VT,
LN0->getChain(),
LN0->getBasePtr(), N0.getValueType(),
LN0->getMemOperand());
ExtendSetCCUses(SetCCs, N0, ExtLoad, ISD::ANY_EXTEND);
// If the load value is used only by N, replace it via CombineTo N.
bool NoReplaceTrunc = N0.hasOneUse();
CombineTo(N, ExtLoad);
if (NoReplaceTrunc) {
DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 1), ExtLoad.getValue(1));
recursivelyDeleteUnusedNodes(LN0);
} else {
SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
N0.getValueType(), ExtLoad);
CombineTo(LN0, Trunc, ExtLoad.getValue(1));
}
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
}
// fold (aext (zextload x)) -> (aext (truncate (zextload x)))
// fold (aext (sextload x)) -> (aext (truncate (sextload x)))
// fold (aext ( extload x)) -> (aext (truncate (extload x)))
if (N0.getOpcode() == ISD::LOAD && !ISD::isNON_EXTLoad(N0.getNode()) &&
ISD::isUNINDEXEDLoad(N0.getNode()) && N0.hasOneUse()) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
ISD::LoadExtType ExtType = LN0->getExtensionType();
EVT MemVT = LN0->getMemoryVT();
if (!LegalOperations || TLI.isLoadExtLegal(ExtType, VT, MemVT)) {
SDValue ExtLoad = DAG.getExtLoad(ExtType, SDLoc(N),
VT, LN0->getChain(), LN0->getBasePtr(),
MemVT, LN0->getMemOperand());
CombineTo(N, ExtLoad);
DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 1), ExtLoad.getValue(1));
recursivelyDeleteUnusedNodes(LN0);
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
}
if (N0.getOpcode() == ISD::SETCC) {
// For vectors:
// aext(setcc) -> vsetcc
// aext(setcc) -> truncate(vsetcc)
// aext(setcc) -> aext(vsetcc)
// Only do this before legalize for now.
if (VT.isVector() && !LegalOperations) {
EVT N00VT = N0.getOperand(0).getValueType();
if (getSetCCResultType(N00VT) == N0.getValueType())
return SDValue();
// We know that the # elements of the result is the same as the
// # elements of the compare (and the # elements of the compare result,
// for that matter). Check to see that they are the same size. If so,
// we know that the element size of the aext'd result matches the
// element size of the compare operands.
if (VT.getSizeInBits() == N00VT.getSizeInBits())
return DAG.getSetCC(SDLoc(N), VT, N0.getOperand(0),
N0.getOperand(1),
cast<CondCodeSDNode>(N0.getOperand(2))->get());
// If the desired elements are smaller or larger than the source
// elements we can use a matching integer vector type and then
// truncate/any extend
EVT MatchingVectorType = N00VT.changeVectorElementTypeToInteger();
SDValue VsetCC =
DAG.getSetCC(SDLoc(N), MatchingVectorType, N0.getOperand(0),
N0.getOperand(1),
cast<CondCodeSDNode>(N0.getOperand(2))->get());
return DAG.getAnyExtOrTrunc(VsetCC, SDLoc(N), VT);
}
// aext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc
SDLoc DL(N);
if (SDValue SCC = SimplifySelectCC(
DL, N0.getOperand(0), N0.getOperand(1), DAG.getConstant(1, DL, VT),
DAG.getConstant(0, DL, VT),
cast<CondCodeSDNode>(N0.getOperand(2))->get(), true))
return SCC;
}
return SDValue();
}
SDValue DAGCombiner::visitAssertExt(SDNode *N) {
unsigned Opcode = N->getOpcode();
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT AssertVT = cast<VTSDNode>(N1)->getVT();
// fold (assert?ext (assert?ext x, vt), vt) -> (assert?ext x, vt)
if (N0.getOpcode() == Opcode &&
AssertVT == cast<VTSDNode>(N0.getOperand(1))->getVT())
return N0;
if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() &&
N0.getOperand(0).getOpcode() == Opcode) {
// We have an assert, truncate, assert sandwich. Make one stronger assert
// by applying the smallest asserted type to the larger source type. This
// eliminates the later assert:
// assert (trunc (assert X, i8) to iN), i1 --> trunc (assert X, i1) to iN
// assert (trunc (assert X, i1) to iN), i8 --> trunc (assert X, i1) to iN
SDValue BigA = N0.getOperand(0);
EVT BigA_AssertVT = cast<VTSDNode>(BigA.getOperand(1))->getVT();
assert(BigA_AssertVT.bitsLE(N0.getValueType()) &&
"Asserting zero/sign-extended bits to a type larger than the "
"truncated destination does not provide information");
SDLoc DL(N);
EVT MinAssertVT = AssertVT.bitsLT(BigA_AssertVT) ? AssertVT : BigA_AssertVT;
SDValue MinAssertVTVal = DAG.getValueType(MinAssertVT);
SDValue NewAssert = DAG.getNode(Opcode, DL, BigA.getValueType(),
BigA.getOperand(0), MinAssertVTVal);
return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewAssert);
}
// If we have (AssertZext (truncate (AssertSext X, iX)), iY) and Y is
// smaller than X, just move the AssertZext in front of the truncate and
// drop the AssertSext.
if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() &&
N0.getOperand(0).getOpcode() == ISD::AssertSext &&
Opcode == ISD::AssertZext) {
SDValue BigA = N0.getOperand(0);
EVT BigA_AssertVT = cast<VTSDNode>(BigA.getOperand(1))->getVT();
assert(BigA_AssertVT.bitsLE(N0.getValueType()) &&
"Asserting zero/sign-extended bits to a type larger than the "
"truncated destination does not provide information");
if (AssertVT.bitsLT(BigA_AssertVT)) {
SDLoc DL(N);
SDValue NewAssert = DAG.getNode(Opcode, DL, BigA.getValueType(),
BigA.getOperand(0), N1);
return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewAssert);
}
}
return SDValue();
}
/// If the result of a wider load is shifted right by N bits and then
/// truncated to a narrower type, where N is a multiple of the number of
/// bits of the narrower type, transform it to a narrower load from
/// address + N / (number of bits of the new type). Also narrow the load if
/// the result is masked with an AND to effectively produce a smaller type.
/// If the result is to be extended, also fold the extension to form an
/// extending load.
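///
/// Illustrative instance (assumed types, little-endian):
///   (i32 (trunc (srl (i64 (load p)), 32))) -> (i32 (load p+4))
/// and (and (i32 (load p)), 0xFF) -> (i32 (zextload i8 p)).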
SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
unsigned Opc = N->getOpcode();
ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
EVT ExtVT = VT;
// This transformation isn't valid for vector loads.
if (VT.isVector())
return SDValue();
unsigned ShAmt = 0;
bool HasShiftedOffset = false;
// Special case: SIGN_EXTEND_INREG is basically truncating to ExtVT then
// extending back to VT.
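// Illustrative instance: (sext_in_reg x, i8) on an i32 value behaves like
// truncating x to i8 and sign-extending back to i32, so when x comes from
// a load it can be treated as an i8 SEXTLOAD.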
if (Opc == ISD::SIGN_EXTEND_INREG) {
ExtType = ISD::SEXTLOAD;
ExtVT = cast<VTSDNode>(N->getOperand(1))->getVT();
} else if (Opc == ISD::SRL) {
// Another special case: SRL is basically zero-extending a narrower value,
// or it may be shifting a higher subword, half or byte into the lowest
// bits.
ExtType = ISD::ZEXTLOAD;
N0 = SDValue(N, 0);
auto *LN0 = dyn_cast<LoadSDNode>(N0.getOperand(0));
auto *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1));
if (!N01 || !LN0)
return SDValue();
uint64_t ShiftAmt = N01->getZExtValue();
uint64_t MemoryWidth = LN0->getMemoryVT().getSizeInBits();
if (LN0->getExtensionType() != ISD::SEXTLOAD && MemoryWidth > ShiftAmt)
ExtVT = EVT::getIntegerVT(*DAG.getContext(), MemoryWidth - ShiftAmt);
else
ExtVT = EVT::getIntegerVT(*DAG.getContext(),
VT.getSizeInBits() - ShiftAmt);
} else if (Opc == ISD::AND) {
// An AND with a constant mask is the same as a truncate + zero-extend.
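// Illustrative instance: Mask = 0x00FF yields ActiveBits = 8 (an i8
// zextload of the low byte), while the shifted mask 0xFF00 yields
// ShAmt = 8 and ActiveBits = 8 (an i8 zextload one byte up).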
auto AndC = dyn_cast<ConstantSDNode>(N->getOperand(1));
if (!AndC)
return SDValue();
const APInt &Mask = AndC->getAPIntValue();
unsigned ActiveBits = 0;
if (Mask.isMask()) {
ActiveBits = Mask.countTrailingOnes();
} else if (Mask.isShiftedMask()) {
ShAmt = Mask.countTrailingZeros();
APInt ShiftedMask = Mask.lshr(ShAmt);
ActiveBits = ShiftedMask.countTrailingOnes();
HasShiftedOffset = true;
} else
return SDValue();
ExtType = ISD::ZEXTLOAD;
ExtVT = EVT::getIntegerVT(*DAG.getContext(), ActiveBits);
}
if (N0.getOpcode() == ISD::SRL && N0.hasOneUse()) {
SDValue SRL = N0;
if (auto *ConstShift = dyn_cast<ConstantSDNode>(SRL.getOperand(1))) {
ShAmt = ConstShift->getZExtValue();
unsigned EVTBits = ExtVT.getSizeInBits();
// Is the shift amount a multiple of the size of ExtVT?
if ((ShAmt & (EVTBits-1)) == 0) {
N0 = N0.getOperand(0);
// Is the load width a multiple of the size of ExtVT?
if ((N0.getValueSizeInBits() & (EVTBits-1)) != 0)
return SDValue();
}
// At this point, we must have a load or else we can't do the transform.
if (!isa<LoadSDNode>(N0)) return SDValue();
auto *LN0 = cast<LoadSDNode>(N0);
// Because a SRL must be assumed to *need* to zero-extend the high bits
// (as opposed to anyext the high bits), we can't combine the zextload
// lowering of SRL and an sextload.
if (LN0->getExtensionType() == ISD::SEXTLOAD)
return SDValue();
// If the shift amount is larger than the input type then we're not
// accessing any of the loaded bytes. If the load was a zextload/extload
// then the result of the shift+trunc is zero/undef (handled elsewhere).
if (ShAmt >= LN0->getMemoryVT().getSizeInBits())
return SDValue();
// If the SRL is only used by a masking AND, we may be able to adjust
// the ExtVT to make the AND redundant.
SDNode *Mask = *(SRL->use_begin());
if (Mask->getOpcode() == ISD::AND &&
isa<ConstantSDNode>(Mask->getOperand(1))) {
const APInt &ShiftMask =
cast<ConstantSDNode>(Mask->getOperand(1))->getAPIntValue();
if (ShiftMask.isMask()) {
EVT MaskedVT = EVT::getIntegerVT(*DAG.getContext(),
ShiftMask.countTrailingOnes());
// If the mask is smaller, recompute the type.
if ((ExtVT.getSizeInBits() > MaskedVT.getSizeInBits()) &&
TLI.isLoadExtLegal(ExtType, N0.getValueType(), MaskedVT))
ExtVT = MaskedVT;
}
}
}
}
// If the load is shifted left (and the result isn't shifted back right),
// we can fold the truncate through the shift.
unsigned ShLeftAmt = 0;
if (ShAmt == 0 && N0.getOpcode() == ISD::SHL && N0.hasOneUse() &&
ExtVT == VT && TLI.isNarrowingProfitable(N0.getValueType(), VT)) {
if (ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
ShLeftAmt = N01->getZExtValue();
N0 = N0.getOperand(0);
}
}
// If we haven't found a load, we can't narrow it.
if (!isa<LoadSDNode>(N0))
return SDValue();
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
if (!isLegalNarrowLdSt(LN0, ExtType, ExtVT, ShAmt))
return SDValue();
auto AdjustBigEndianShift = [&](unsigned ShAmt) {
unsigned LVTStoreBits = LN0->getMemoryVT().getStoreSizeInBits();
unsigned EVTStoreBits = ExtVT.getStoreSizeInBits();
return LVTStoreBits - EVTStoreBits - ShAmt;
};
// For big endian targets, we need to adjust the pointer offset to load
// the correct bytes.
if (DAG.getDataLayout().isBigEndian())
ShAmt = AdjustBigEndianShift(ShAmt);
EVT PtrType = N0.getOperand(1).getValueType();
uint64_t PtrOff = ShAmt / 8;
unsigned NewAlign = MinAlign(LN0->getAlignment(), PtrOff);
SDLoc DL(LN0);
// The original load itself didn't wrap, so an offset within it doesn't.
SDNodeFlags Flags;
Flags.setNoUnsignedWrap(true);
SDValue NewPtr = DAG.getNode(ISD::ADD, DL,
PtrType, LN0->getBasePtr(),
DAG.getConstant(PtrOff, DL, PtrType),
Flags);
AddToWorklist(NewPtr.getNode());
SDValue Load;
if (ExtType == ISD::NON_EXTLOAD)
Load = DAG.getLoad(VT, SDLoc(N0), LN0->getChain(), NewPtr,
LN0->getPointerInfo().getWithOffset(PtrOff), NewAlign,
LN0->getMemOperand()->getFlags(), LN0->getAAInfo());
else
Load = DAG.getExtLoad(ExtType, SDLoc(N0), VT, LN0->getChain(), NewPtr,
LN0->getPointerInfo().getWithOffset(PtrOff), ExtVT,
NewAlign, LN0->getMemOperand()->getFlags(),
LN0->getAAInfo());
// Replace the old load's chain with the new load's chain.
WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1));
// Shift the result left, if we've swallowed a left shift.
SDValue Result = Load;
if (ShLeftAmt != 0) {
EVT ShImmTy = getShiftAmountTy(Result.getValueType());
if (!isUIntN(ShImmTy.getSizeInBits(), ShLeftAmt))
ShImmTy = VT;
// If the shift amount is as large as the result size (but, presumably,
// no larger than the source) then the useful bits of the result are
// zero; we can't simply return the shortened shift, because the result
// of that operation is undefined.
SDLoc DL(N0);
if (ShLeftAmt >= VT.getSizeInBits())
Result = DAG.getConstant(0, DL, VT);
else
Result = DAG.getNode(ISD::SHL, DL, VT,
Result, DAG.getConstant(ShLeftAmt, DL, ShImmTy));
}
if (HasShiftedOffset) {
// The shift amount was adjusted above to compute the big-endian load
// offset; applying the same adjustment again recovers the original
// shift amount.
if (DAG.getDataLayout().isBigEndian())
ShAmt = AdjustBigEndianShift(ShAmt);
// We're using a shifted mask, so the load now has an offset. This means
// the data has been loaded into lower bytes than it would have been
// before, so we need to shl the loaded data into the correct position
// in the register.
SDValue ShiftC = DAG.getConstant(ShAmt, DL, VT);
Result = DAG.getNode(ISD::SHL, DL, VT, Result, ShiftC);
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
}
// Return the new loaded value.
return Result;
}
SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N->getValueType(0);
EVT EVT = cast<VTSDNode>(N1)->getVT();
unsigned VTBits = VT.getScalarSizeInBits();
unsigned EVTBits = EVT.getScalarSizeInBits();
if (N0.isUndef())
return DAG.getUNDEF(VT);
// fold (sext_in_reg c1) -> c1
if (DAG.isConstantIntBuildVectorOrConstantInt(N0))
return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT, N0, N1);
// If the input is already sign extended, just drop the extension.
if (DAG.ComputeNumSignBits(N0) >= VTBits-EVTBits+1)
return N0;
// fold (sext_in_reg (sext_in_reg x, VT2), VT1) -> (sext_in_reg x, minVT) pt2
if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
EVT.bitsLT(cast<VTSDNode>(N0.getOperand(1))->getVT()))
return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT,
N0.getOperand(0), N1);
// fold (sext_in_reg (sext x)) -> (sext x)
// fold (sext_in_reg (aext x)) -> (sext x)
// if x is small enough or if we know that x has more than 1 sign bit and the
// sign_extend_inreg is extending from one of them.
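// Illustrative bit counts (assumed for the example): with x : i16
// any-extended to i32 and an i8 sext_in_reg, N00Bits = 16 > EVTBits = 8;
// but if x is known to have at least 9 sign bits, 16 - 9 < 8 holds and a
// plain (sext x to i32) produces the same value.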
if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND) {
SDValue N00 = N0.getOperand(0);
unsigned N00Bits = N00.getScalarValueSizeInBits();
if ((N00Bits <= EVTBits ||
(N00Bits - DAG.ComputeNumSignBits(N00)) < EVTBits) &&
(!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND, VT)))
return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, N00);
}
// fold (sext_in_reg (*_extend_vector_inreg x)) -> (sext_vector_inreg x)
if ((N0.getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG ||
N0.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG ||
N0.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG) &&
N0.getOperand(0).getScalarValueSizeInBits() == EVTBits) {
if (!LegalOperations ||
TLI.isOperationLegal(ISD::SIGN_EXTEND_VECTOR_INREG, VT))
return DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, SDLoc(N), VT,
N0.getOperand(0));
}
// fold (sext_in_reg (zext x)) -> (sext x)
// iff we are extending the source sign bit.
if (N0.getOpcode() == ISD::ZERO_EXTEND) {
SDValue N00 = N0.getOperand(0);
if (N00.getScalarValueSizeInBits() == EVTBits &&
(!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND, VT)))
return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, N00, N1);
}
// fold (sext_in_reg x) -> (zext_in_reg x) if the sign bit is known zero.
if (DAG.MaskedValueIsZero(N0, APInt::getOneBitSet(VTBits, EVTBits - 1)))
return DAG.getZeroExtendInReg(N0, SDLoc(N), EVT.getScalarType());
// fold operands of sext_in_reg based on knowledge that the top bits are not
// demanded.
if (SimplifyDemandedBits(SDValue(N, 0)))
return SDValue(N, 0);
// fold (sext_in_reg (load x)) -> (smaller sextload x)
// fold (sext_in_reg (srl (load x), c)) -> (smaller sextload (x+c/evtbits))
if (SDValue NarrowLoad = ReduceLoadWidth(N))
return NarrowLoad;
// fold (sext_in_reg (srl X, 24), i8) -> (sra X, 24)
// fold (sext_in_reg (srl X, 23), i8) -> (sra X, 23) iff possible.
// We already fold "(sext_in_reg (srl X, 25), i8) -> srl X, 25" above.
if (N0.getOpcode() == ISD::SRL) {
if (auto *ShAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1)))
if (ShAmt->getAPIntValue().ule(VTBits - EVTBits)) {
// We can turn this into an SRA iff the input to the SRL is already sign
// extended enough.
unsigned InSignBits = DAG.ComputeNumSignBits(N0.getOperand(0));
if (((VTBits - EVTBits) - ShAmt->getZExtValue()) < InSignBits)
return DAG.getNode(ISD::SRA, SDLoc(N), VT, N0.getOperand(0),
N0.getOperand(1));
}
}
// fold (sext_inreg (extload x)) -> (sextload x)
// If sextload is not supported by the target, we can only do the combine
// when the load has one use. Doing otherwise can block folding the extload
// with other extends that the target does support.
if (ISD::isEXTLoad(N0.getNode()) &&
ISD::isUNINDEXEDLoad(N0.getNode()) &&
EVT == cast<LoadSDNode>(N0)->getMemoryVT() &&
((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile() &&
N0.hasOneUse()) ||
TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, EVT))) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(N), VT,
LN0->getChain(),
LN0->getBasePtr(), EVT,
LN0->getMemOperand());
CombineTo(N, ExtLoad);
CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
AddToWorklist(ExtLoad.getNode());
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
// fold (sext_inreg (zextload x)) -> (sextload x) iff load has one use
if (ISD::isZEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) &&
N0.hasOneUse() &&
EVT == cast<LoadSDNode>(N0)->getMemoryVT() &&
((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, EVT))) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(N), VT,
LN0->getChain(),
LN0->getBasePtr(), EVT,
LN0->getMemOperand());
CombineTo(N, ExtLoad);
CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
// Form (sext_inreg (bswap >> 16)) or (sext_inreg (rotl (bswap) 16))
if (EVTBits <= 16 && N0.getOpcode() == ISD::OR) {
if (SDValue BSwap = MatchBSwapHWordLow(N0.getNode(), N0.getOperand(0),
N0.getOperand(1), false))
return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT,
BSwap, N1);
}
return SDValue();
}
SDValue DAGCombiner::visitSIGN_EXTEND_VECTOR_INREG(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
if (N0.isUndef())
return DAG.getUNDEF(VT);
if (SDValue Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes))
return Res;
if (SimplifyDemandedVectorElts(SDValue(N, 0)))
return SDValue(N, 0);
return SDValue();
}
SDValue DAGCombiner::visitZERO_EXTEND_VECTOR_INREG(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
if (N0.isUndef())
return DAG.getUNDEF(VT);
if (SDValue Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes))
return Res;
if (SimplifyDemandedVectorElts(SDValue(N, 0)))
return SDValue(N, 0);
return SDValue();
}
SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
EVT SrcVT = N0.getValueType();
bool isLE = DAG.getDataLayout().isLittleEndian();
// noop truncate
if (SrcVT == VT)
return N0;
// fold (truncate (truncate x)) -> (truncate x)
if (N0.getOpcode() == ISD::TRUNCATE)
return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, N0.getOperand(0));
// fold (truncate c1) -> c1
if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) {
SDValue C = DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, N0);
if (C.getNode() != N)
return C;
}
// fold (truncate (ext x)) -> (ext x) or (truncate x) or x
if (N0.getOpcode() == ISD::ZERO_EXTEND ||
N0.getOpcode() == ISD::SIGN_EXTEND ||
N0.getOpcode() == ISD::ANY_EXTEND) {
// if the source is smaller than the dest, we still need an extend.
if (N0.getOperand(0).getValueType().bitsLT(VT))
return DAG.getNode(N0.getOpcode(), SDLoc(N), VT, N0.getOperand(0));
// if the source is larger than the dest, then we just need the truncate.
if (N0.getOperand(0).getValueType().bitsGT(VT))
return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, N0.getOperand(0));
// if the source and dest are the same type, we can drop both the extend
// and the truncate.
return N0.getOperand(0);
}
// If this is anyext(trunc), don't fold it, allow ourselves to be folded.
if (N->hasOneUse() && (N->use_begin()->getOpcode() == ISD::ANY_EXTEND))
return SDValue();
// Fold extract-and-trunc into a narrow extract. For example:
// i64 x = EXTRACT_VECTOR_ELT(v2i64 val, i32 1)
// i32 y = TRUNCATE(i64 x)
// -- becomes --
// v16i8 b = BITCAST (v2i64 val)
// i8 x = EXTRACT_VECTOR_ELT(v16i8 b, i32 8)
//
// Note: We only run this optimization after type legalization (which often
// creates this pattern) and before operation legalization after which
// we need to be more careful about the vector instructions that we generate.
if (N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
LegalTypes && !LegalOperations && N0->hasOneUse() && VT != MVT::i1) {
EVT VecTy = N0.getOperand(0).getValueType();
EVT ExTy = N0.getValueType();
EVT TrTy = N->getValueType(0);
unsigned NumElem = VecTy.getVectorNumElements();
unsigned SizeRatio = ExTy.getSizeInBits()/TrTy.getSizeInBits();
EVT NVT = EVT::getVectorVT(*DAG.getContext(), TrTy, SizeRatio * NumElem);
assert(NVT.getSizeInBits() == VecTy.getSizeInBits() && "Invalid Size");
SDValue EltNo = N0->getOperand(1);
if (isa<ConstantSDNode>(EltNo) && isTypeLegal(NVT)) {
int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
EVT IndexTy = TLI.getVectorIdxTy(DAG.getDataLayout());
int Index = isLE ? (Elt*SizeRatio) : (Elt*SizeRatio + (SizeRatio-1));
SDLoc DL(N);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, TrTy,
DAG.getBitcast(NVT, N0.getOperand(0)),
DAG.getConstant(Index, DL, IndexTy));
}
}
// trunc (select c, a, b) -> select c, (trunc a), (trunc b)
if (N0.getOpcode() == ISD::SELECT && N0.hasOneUse()) {
if ((!LegalOperations || TLI.isOperationLegal(ISD::SELECT, SrcVT)) &&
TLI.isTruncateFree(SrcVT, VT)) {
SDLoc SL(N0);
SDValue Cond = N0.getOperand(0);
SDValue TruncOp0 = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(1));
SDValue TruncOp1 = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(2));
return DAG.getNode(ISD::SELECT, SDLoc(N), VT, Cond, TruncOp0, TruncOp1);
}
}
// trunc (shl x, K) -> shl (trunc x), K => K < VT.getScalarSizeInBits()
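// Illustrative instance: (trunc (shl x:i64, 5) to i32) -> (shl (trunc x
// to i32), 5); valid because truncation commutes with a left shift whose
// amount is below the narrow width.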
if (N0.getOpcode() == ISD::SHL && N0.hasOneUse() &&
(!LegalOperations || TLI.isOperationLegal(ISD::SHL, VT)) &&
TLI.isTypeDesirableForOp(ISD::SHL, VT)) {
SDValue Amt = N0.getOperand(1);
KnownBits Known = DAG.computeKnownBits(Amt);
unsigned Size = VT.getScalarSizeInBits();
if (Known.getBitWidth() - Known.countMinLeadingZeros() <= Log2_32(Size)) {
SDLoc SL(N);
EVT AmtVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout());
SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(0));
if (AmtVT != Amt.getValueType()) {
Amt = DAG.getZExtOrTrunc(Amt, SL, AmtVT);
AddToWorklist(Amt.getNode());
}
return DAG.getNode(ISD::SHL, SL, VT, Trunc, Amt);
}
}
// Attempt to pre-truncate BUILD_VECTOR sources.
if (N0.getOpcode() == ISD::BUILD_VECTOR && !LegalOperations &&
TLI.isTruncateFree(SrcVT.getScalarType(), VT.getScalarType())) {
SDLoc DL(N);
EVT SVT = VT.getScalarType();
SmallVector<SDValue, 8> TruncOps;
for (const SDValue &Op : N0->op_values()) {
SDValue TruncOp = DAG.getNode(ISD::TRUNCATE, DL, SVT, Op);
TruncOps.push_back(TruncOp);
}
return DAG.getBuildVector(VT, DL, TruncOps);
}
// Fold a series of buildvector, bitcast, and truncate if possible.
// For example fold
// (2xi32 trunc (bitcast ((4xi32)buildvector x, x, y, y) 2xi64)) to
// (2xi32 (buildvector x, y)).
if (Level == AfterLegalizeVectorOps && VT.isVector() &&
N0.getOpcode() == ISD::BITCAST && N0.hasOneUse() &&
N0.getOperand(0).getOpcode() == ISD::BUILD_VECTOR &&
N0.getOperand(0).hasOneUse()) {
SDValue BuildVect = N0.getOperand(0);
EVT BuildVectEltTy = BuildVect.getValueType().getVectorElementType();
EVT TruncVecEltTy = VT.getVectorElementType();
// Check that the element types match.
if (BuildVectEltTy == TruncVecEltTy) {
// Now we only need to compute the offset of the truncated elements.
unsigned BuildVecNumElts = BuildVect.getNumOperands();
unsigned TruncVecNumElts = VT.getVectorNumElements();
unsigned TruncEltOffset = BuildVecNumElts / TruncVecNumElts;
assert((BuildVecNumElts % TruncVecNumElts) == 0 &&
"Invalid number of elements");
SmallVector<SDValue, 8> Opnds;
for (unsigned i = 0, e = BuildVecNumElts; i != e; i += TruncEltOffset)
Opnds.push_back(BuildVect.getOperand(i));
return DAG.getBuildVector(VT, SDLoc(N), Opnds);
}
}
// See if we can simplify the input to this truncate through knowledge that
// only the low bits are being used.
// For example "trunc (or (shl x, 8), y)" // -> trunc y
// Currently we only perform this optimization on scalars because vectors
// may have different active low bits.
if (!VT.isVector()) {
APInt Mask =
APInt::getLowBitsSet(N0.getValueSizeInBits(), VT.getSizeInBits());
if (SDValue Shorter = DAG.GetDemandedBits(N0, Mask))
return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Shorter);
}
// fold (truncate (load x)) -> (smaller load x)
// fold (truncate (srl (load x), c)) -> (smaller load (x+c/evtbits))
if (!LegalTypes || TLI.isTypeDesirableForOp(N0.getOpcode(), VT)) {
if (SDValue Reduced = ReduceLoadWidth(N))
return Reduced;
// Handle the case where the load remains an extending load even
// after truncation.
if (N0.hasOneUse() && ISD::isUNINDEXEDLoad(N0.getNode())) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
if (!LN0->isVolatile() &&
LN0->getMemoryVT().getStoreSizeInBits() < VT.getSizeInBits()) {
SDValue NewLoad = DAG.getExtLoad(LN0->getExtensionType(), SDLoc(LN0),
VT, LN0->getChain(), LN0->getBasePtr(),
LN0->getMemoryVT(),
LN0->getMemOperand());
DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), NewLoad.getValue(1));
return NewLoad;
}
}
}
// fold (trunc (concat ... x ...)) -> (concat ..., (trunc x), ...),
// where ... are all 'undef'.
if (N0.getOpcode() == ISD::CONCAT_VECTORS && !LegalTypes) {
SmallVector<EVT, 8> VTs;
SDValue V;
unsigned Idx = 0;
unsigned NumDefs = 0;
for (unsigned i = 0, e = N0.getNumOperands(); i != e; ++i) {
SDValue X = N0.getOperand(i);
if (!X.isUndef()) {
V = X;
Idx = i;
NumDefs++;
}
// Stop if more than one member is non-undef.
if (NumDefs > 1)
break;
VTs.push_back(EVT::getVectorVT(*DAG.getContext(),
VT.getVectorElementType(),
X.getValueType().getVectorNumElements()));
}
if (NumDefs == 0)
return DAG.getUNDEF(VT);
if (NumDefs == 1) {
assert(V.getNode() && "The single defined operand is empty!");
SmallVector<SDValue, 8> Opnds;
for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
if (i != Idx) {
Opnds.push_back(DAG.getUNDEF(VTs[i]));
continue;
}
SDValue NV = DAG.getNode(ISD::TRUNCATE, SDLoc(V), VTs[i], V);
AddToWorklist(NV.getNode());
Opnds.push_back(NV);
}
return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Opnds);
}
}
// Fold truncate of a bitcast of a vector to an extract of the low vector
// element.
//
// e.g. trunc (i64 (bitcast v2i32:x)) -> extract_vector_elt v2i32:x, idx
if (N0.getOpcode() == ISD::BITCAST && !VT.isVector()) {
SDValue VecSrc = N0.getOperand(0);
EVT SrcVT = VecSrc.getValueType();
if (SrcVT.isVector() && SrcVT.getScalarType() == VT &&
(!LegalOperations ||
TLI.isOperationLegal(ISD::EXTRACT_VECTOR_ELT, SrcVT))) {
SDLoc SL(N);
EVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
unsigned Idx = isLE ? 0 : SrcVT.getVectorNumElements() - 1;
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, VT,
VecSrc, DAG.getConstant(Idx, SL, IdxVT));
}
}
// Simplify the operands using demanded-bits information.
if (!VT.isVector() &&
SimplifyDemandedBits(SDValue(N, 0)))
return SDValue(N, 0);
// (trunc adde(X, Y, Carry)) -> (adde trunc(X), trunc(Y), Carry)
// (trunc addcarry(X, Y, Carry)) -> (addcarry trunc(X), trunc(Y), Carry)
// When the adde's carry is not used.
if ((N0.getOpcode() == ISD::ADDE || N0.getOpcode() == ISD::ADDCARRY) &&
N0.hasOneUse() && !N0.getNode()->hasAnyUseOfValue(1) &&
// We only do this for addcarry before operation legalization.
((!LegalOperations && N0.getOpcode() == ISD::ADDCARRY) ||
TLI.isOperationLegal(N0.getOpcode(), VT))) {
SDLoc SL(N);
auto X = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(0));
auto Y = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(1));
auto VTs = DAG.getVTList(VT, N0->getValueType(1));
return DAG.getNode(N0.getOpcode(), SL, VTs, X, Y, N0.getOperand(2));
}
// fold (truncate (extract_subvector(ext x))) ->
// (extract_subvector x)
// TODO: This can be generalized to cover cases where the truncate and extract
// do not fully cancel each other out.
if (!LegalTypes && N0.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
SDValue N00 = N0.getOperand(0);
if (N00.getOpcode() == ISD::SIGN_EXTEND ||
N00.getOpcode() == ISD::ZERO_EXTEND ||
N00.getOpcode() == ISD::ANY_EXTEND) {
if (N00.getOperand(0)->getValueType(0).getVectorElementType() ==
VT.getVectorElementType())
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N0->getOperand(0)), VT,
N00.getOperand(0), N0.getOperand(1));
}
}
if (SDValue NewVSel = matchVSelectOpSizesWithSetCC(N))
return NewVSel;
// Narrow a suitable binary operation with a non-opaque constant operand by
// moving it ahead of the truncate. This is limited to pre-legalization
// because targets may prefer a wider type during later combines and invert
// this transform.
switch (N0.getOpcode()) {
case ISD::ADD:
case ISD::SUB:
case ISD::MUL:
case ISD::AND:
case ISD::OR:
case ISD::XOR:
if (!LegalOperations && N0.hasOneUse() &&
(isConstantOrConstantVector(N0.getOperand(0), true) ||
isConstantOrConstantVector(N0.getOperand(1), true))) {
// TODO: We already restricted this to pre-legalization, but for vectors
// we are extra cautious to not create an unsupported operation.
// Target-specific changes are likely needed to avoid regressions here.
if (VT.isScalarInteger() || TLI.isOperationLegal(N0.getOpcode(), VT)) {
SDLoc DL(N);
SDValue NarrowL = DAG.getNode(ISD::TRUNCATE, DL, VT, N0.getOperand(0));
SDValue NarrowR = DAG.getNode(ISD::TRUNCATE, DL, VT, N0.getOperand(1));
return DAG.getNode(N0.getOpcode(), DL, VT, NarrowL, NarrowR);
}
}
}
return SDValue();
}
static SDNode *getBuildPairElt(SDNode *N, unsigned i) {
SDValue Elt = N->getOperand(i);
if (Elt.getOpcode() != ISD::MERGE_VALUES)
return Elt.getNode();
return Elt.getOperand(Elt.getResNo()).getNode();
}
/// build_pair (load, load) -> load
/// if load locations are consecutive.
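///
/// Illustrative instance (assumed addresses, little-endian): if
/// lo = (i32 load p) and hi = (i32 load p+4) are consecutive, then
///   (build_pair lo, hi) -> (i64 load p)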
SDValue DAGCombiner::CombineConsecutiveLoads(SDNode *N, EVT VT) {
assert(N->getOpcode() == ISD::BUILD_PAIR);
LoadSDNode *LD1 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 0));
LoadSDNode *LD2 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 1));
// A BUILD_PAIR always has the least significant part in elt 0 and the
// most significant part in elt 1, so when combining into one large load
// we need to consider the endianness.
if (DAG.getDataLayout().isBigEndian())
std::swap(LD1, LD2);
if (!LD1 || !LD2 || !ISD::isNON_EXTLoad(LD1) || !LD1->hasOneUse() ||
LD1->getAddressSpace() != LD2->getAddressSpace())
return SDValue();
EVT LD1VT = LD1->getValueType(0);
unsigned LD1Bytes = LD1VT.getStoreSize();
if (ISD::isNON_EXTLoad(LD2) && LD2->hasOneUse() &&
DAG.areNonVolatileConsecutiveLoads(LD2, LD1, LD1Bytes, 1)) {
unsigned Align = LD1->getAlignment();
unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment(
VT.getTypeForEVT(*DAG.getContext()));
if (NewAlign <= Align &&
(!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT)))
return DAG.getLoad(VT, SDLoc(N), LD1->getChain(), LD1->getBasePtr(),
LD1->getPointerInfo(), Align);
}
return SDValue();
}
static unsigned getPPCf128HiElementSelector(const SelectionDAG &DAG) {
// On little-endian machines, bitcasting from ppcf128 to i128 does swap the Hi
// and Lo parts; on big-endian machines it doesn't.
return DAG.getDataLayout().isBigEndian() ? 1 : 0;
}
static SDValue foldBitcastedFPLogic(SDNode *N, SelectionDAG &DAG,
const TargetLowering &TLI) {
// If this is not a bitcast to an FP type or if the target doesn't have
// IEEE754-compliant FP logic, we're done.
EVT VT = N->getValueType(0);
if (!VT.isFloatingPoint() || !TLI.hasBitPreservingFPLogic(VT))
return SDValue();
// TODO: Handle cases where the integer constant is a different scalar
// bitwidth to the FP.
SDValue N0 = N->getOperand(0);
EVT SourceVT = N0.getValueType();
if (VT.getScalarSizeInBits() != SourceVT.getScalarSizeInBits())
return SDValue();
unsigned FPOpcode;
APInt SignMask;
switch (N0.getOpcode()) {
case ISD::AND:
FPOpcode = ISD::FABS;
SignMask = ~APInt::getSignMask(SourceVT.getScalarSizeInBits());
break;
case ISD::XOR:
FPOpcode = ISD::FNEG;
SignMask = APInt::getSignMask(SourceVT.getScalarSizeInBits());
break;
case ISD::OR:
FPOpcode = ISD::FABS;
SignMask = APInt::getSignMask(SourceVT.getScalarSizeInBits());
break;
default:
return SDValue();
}
// Fold (bitcast int (and (bitcast fp X to int), 0x7fff...) to fp) -> fabs X
// Fold (bitcast int (xor (bitcast fp X to int), 0x8000...) to fp) -> fneg X
// Fold (bitcast int (or (bitcast fp X to int), 0x8000...) to fp) ->
// fneg (fabs X)
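// Concretely, for f32 the sign mask is 0x80000000: the AND case matches
// 0x7FFFFFFF (clearing the sign bit -> fabs), while XOR/OR match
// 0x80000000 (flipping or setting it -> fneg / fneg (fabs)).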
SDValue LogicOp0 = N0.getOperand(0);
ConstantSDNode *LogicOp1 = isConstOrConstSplat(N0.getOperand(1), true);
if (LogicOp1 && LogicOp1->getAPIntValue() == SignMask &&
LogicOp0.getOpcode() == ISD::BITCAST &&
LogicOp0.getOperand(0).getValueType() == VT) {
SDValue FPOp = DAG.getNode(FPOpcode, SDLoc(N), VT, LogicOp0.getOperand(0));
NumFPLogicOpsConv++;
if (N0.getOpcode() == ISD::OR)
return DAG.getNode(ISD::FNEG, SDLoc(N), VT, FPOp);
return FPOp;
}
return SDValue();
}
SDValue DAGCombiner::visitBITCAST(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
if (N0.isUndef())
return DAG.getUNDEF(VT);
// If the input is a BUILD_VECTOR with all constant elements, fold this now.
// Only do this before legalize types, unless both types are integer and the
// scalar type is legal. Only do this before legalize ops, since the target
// may be depending on the bitcast.
// First check to see if this is all constant.
// TODO: Support FP bitcasts after legalize types.
if (VT.isVector() &&
(!LegalTypes ||
(!LegalOperations && VT.isInteger() && N0.getValueType().isInteger() &&
TLI.isTypeLegal(VT.getVectorElementType()))) &&
N0.getOpcode() == ISD::BUILD_VECTOR && N0.getNode()->hasOneUse() &&
cast<BuildVectorSDNode>(N0)->isConstant())
return ConstantFoldBITCASTofBUILD_VECTOR(N0.getNode(),
VT.getVectorElementType());
// If the input is a constant, let getNode fold it.
if (isa<ConstantSDNode>(N0) || isa<ConstantFPSDNode>(N0)) {
// If we can't allow illegal operations, we need to check that this is just
// an fp -> int or int -> fp conversion and that the resulting operation
// will be legal.
if (!LegalOperations ||
(isa<ConstantSDNode>(N0) && VT.isFloatingPoint() && !VT.isVector() &&
TLI.isOperationLegal(ISD::ConstantFP, VT)) ||
(isa<ConstantFPSDNode>(N0) && VT.isInteger() && !VT.isVector() &&
TLI.isOperationLegal(ISD::Constant, VT))) {
SDValue C = DAG.getBitcast(VT, N0);
if (C.getNode() != N)
return C;
}
}
// (conv (conv x, t1), t2) -> (conv x, t2)
if (N0.getOpcode() == ISD::BITCAST)
return DAG.getBitcast(VT, N0.getOperand(0));
// fold (conv (load x)) -> (load (conv*)x)
// iff the resulting load doesn't need a higher alignment than the original.
if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
// Do not remove the cast if the types differ in endian layout.
TLI.hasBigEndianPartOrdering(N0.getValueType(), DAG.getDataLayout()) ==
TLI.hasBigEndianPartOrdering(VT, DAG.getDataLayout()) &&
// If the load is volatile, we only want to change the load type if the
// resulting load is legal. Otherwise we might increase the number of
// memory accesses. We don't care if the original type was legal or not
// as we assume software couldn't rely on the number of accesses of an
// illegal type.
((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
TLI.isOperationLegal(ISD::LOAD, VT))) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
if (TLI.isLoadBitCastBeneficial(N0.getValueType(), VT, DAG,
*LN0->getMemOperand())) {
SDValue Load =
DAG.getLoad(VT, SDLoc(N), LN0->getChain(), LN0->getBasePtr(),
LN0->getPointerInfo(), LN0->getAlignment(),
LN0->getMemOperand()->getFlags(), LN0->getAAInfo());
DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1));
return Load;
}
}
if (SDValue V = foldBitcastedFPLogic(N, DAG, TLI))
return V;
// fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
// fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
//
// For ppc_fp128:
// fold (bitcast (fneg x)) ->
// flipbit = signbit
// (xor (bitcast x) (build_pair flipbit, flipbit))
//
// fold (bitcast (fabs x)) ->
// flipbit = (and (extract_element (bitcast x), 0), signbit)
// (xor (bitcast x) (build_pair flipbit, flipbit))
// This often reduces constant pool loads.
if (((N0.getOpcode() == ISD::FNEG && !TLI.isFNegFree(N0.getValueType())) ||
(N0.getOpcode() == ISD::FABS && !TLI.isFAbsFree(N0.getValueType()))) &&
N0.getNode()->hasOneUse() && VT.isInteger() &&
!VT.isVector() && !N0.getValueType().isVector()) {
SDValue NewConv = DAG.getBitcast(VT, N0.getOperand(0));
AddToWorklist(NewConv.getNode());
SDLoc DL(N);
if (N0.getValueType() == MVT::ppcf128 && !LegalTypes) {
assert(VT.getSizeInBits() == 128);
SDValue SignBit = DAG.getConstant(
APInt::getSignMask(VT.getSizeInBits() / 2), SDLoc(N0), MVT::i64);
SDValue FlipBit;
if (N0.getOpcode() == ISD::FNEG) {
FlipBit = SignBit;
AddToWorklist(FlipBit.getNode());
} else {
assert(N0.getOpcode() == ISD::FABS);
SDValue Hi =
DAG.getNode(ISD::EXTRACT_ELEMENT, SDLoc(NewConv), MVT::i64, NewConv,
DAG.getIntPtrConstant(getPPCf128HiElementSelector(DAG),
SDLoc(NewConv)));
AddToWorklist(Hi.getNode());
FlipBit = DAG.getNode(ISD::AND, SDLoc(N0), MVT::i64, Hi, SignBit);
AddToWorklist(FlipBit.getNode());
}
SDValue FlipBits =
DAG.getNode(ISD::BUILD_PAIR, SDLoc(N0), VT, FlipBit, FlipBit);
AddToWorklist(FlipBits.getNode());
return DAG.getNode(ISD::XOR, DL, VT, NewConv, FlipBits);
}
APInt SignBit = APInt::getSignMask(VT.getSizeInBits());
if (N0.getOpcode() == ISD::FNEG)
return DAG.getNode(ISD::XOR, DL, VT,
NewConv, DAG.getConstant(SignBit, DL, VT));
assert(N0.getOpcode() == ISD::FABS);
return DAG.getNode(ISD::AND, DL, VT,
NewConv, DAG.getConstant(~SignBit, DL, VT));
}
// fold (bitconvert (fcopysign cst, x)) ->
// (or (and (bitconvert x), sign), (and cst, (not sign)))
// Note that we don't handle (copysign x, cst) because this can always be
// folded to an fneg or fabs.
//
// For ppc_fp128:
// fold (bitcast (fcopysign cst, x)) ->
// flipbit = (and (extract_element
// (xor (bitcast cst), (bitcast x)), 0),
// signbit)
// (xor (bitcast cst) (build_pair flipbit, flipbit))
if (N0.getOpcode() == ISD::FCOPYSIGN && N0.getNode()->hasOneUse() &&
isa<ConstantFPSDNode>(N0.getOperand(0)) &&
VT.isInteger() && !VT.isVector()) {
unsigned OrigXWidth = N0.getOperand(1).getValueSizeInBits();
EVT IntXVT = EVT::getIntegerVT(*DAG.getContext(), OrigXWidth);
if (isTypeLegal(IntXVT)) {
SDValue X = DAG.getBitcast(IntXVT, N0.getOperand(1));
AddToWorklist(X.getNode());
// If X has a different width than the result/lhs, sext it or truncate it.
unsigned VTWidth = VT.getSizeInBits();
if (OrigXWidth < VTWidth) {
X = DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, X);
AddToWorklist(X.getNode());
} else if (OrigXWidth > VTWidth) {
// To get the sign bit in the right place, we have to shift it right
// before truncating.
SDLoc DL(X);
X = DAG.getNode(ISD::SRL, DL,
X.getValueType(), X,
DAG.getConstant(OrigXWidth-VTWidth, DL,
X.getValueType()));
AddToWorklist(X.getNode());
X = DAG.getNode(ISD::TRUNCATE, SDLoc(X), VT, X);
AddToWorklist(X.getNode());
}
if (N0.getValueType() == MVT::ppcf128 && !LegalTypes) {
APInt SignBit = APInt::getSignMask(VT.getSizeInBits() / 2);
SDValue Cst = DAG.getBitcast(VT, N0.getOperand(0));
AddToWorklist(Cst.getNode());
SDValue X = DAG.getBitcast(VT, N0.getOperand(1));
AddToWorklist(X.getNode());
SDValue XorResult = DAG.getNode(ISD::XOR, SDLoc(N0), VT, Cst, X);
AddToWorklist(XorResult.getNode());
SDValue XorResult64 = DAG.getNode(
ISD::EXTRACT_ELEMENT, SDLoc(XorResult), MVT::i64, XorResult,
DAG.getIntPtrConstant(getPPCf128HiElementSelector(DAG),
SDLoc(XorResult)));
AddToWorklist(XorResult64.getNode());
SDValue FlipBit =
DAG.getNode(ISD::AND, SDLoc(XorResult64), MVT::i64, XorResult64,
DAG.getConstant(SignBit, SDLoc(XorResult64), MVT::i64));
AddToWorklist(FlipBit.getNode());
SDValue FlipBits =
DAG.getNode(ISD::BUILD_PAIR, SDLoc(N0), VT, FlipBit, FlipBit);
AddToWorklist(FlipBits.getNode());
return DAG.getNode(ISD::XOR, SDLoc(N), VT, Cst, FlipBits);
}
APInt SignBit = APInt::getSignMask(VT.getSizeInBits());
X = DAG.getNode(ISD::AND, SDLoc(X), VT,
X, DAG.getConstant(SignBit, SDLoc(X), VT));
AddToWorklist(X.getNode());
SDValue Cst = DAG.getBitcast(VT, N0.getOperand(0));
Cst = DAG.getNode(ISD::AND, SDLoc(Cst), VT,
Cst, DAG.getConstant(~SignBit, SDLoc(Cst), VT));
AddToWorklist(Cst.getNode());
return DAG.getNode(ISD::OR, SDLoc(N), VT, X, Cst);
}
}
// bitconvert(build_pair(ld, ld)) -> ld iff load locations are consecutive.
if (N0.getOpcode() == ISD::BUILD_PAIR)
if (SDValue CombineLD = CombineConsecutiveLoads(N0.getNode(), VT))
return CombineLD;
// Remove double bitcasts from shuffles - this is often a legacy of
// XformToShuffleWithZero being used to combine bitmaskings (of
// float vectors bitcast to integer vectors) into shuffles.
// bitcast(shuffle(bitcast(s0),bitcast(s1))) -> shuffle(s0,s1)
if (Level < AfterLegalizeDAG && TLI.isTypeLegal(VT) && VT.isVector() &&
N0->getOpcode() == ISD::VECTOR_SHUFFLE && N0.hasOneUse() &&
VT.getVectorNumElements() >= N0.getValueType().getVectorNumElements() &&
!(VT.getVectorNumElements() % N0.getValueType().getVectorNumElements())) {
ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N0);
// If an operand is a bitcast, peek through it if it casts from the
// original VT. If an operand is a constant, just bitcast back to the
// original VT.
auto PeekThroughBitcast = [&](SDValue Op) {
if (Op.getOpcode() == ISD::BITCAST &&
Op.getOperand(0).getValueType() == VT)
return SDValue(Op.getOperand(0));
if (Op.isUndef() || ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) ||
ISD::isBuildVectorOfConstantFPSDNodes(Op.getNode()))
return DAG.getBitcast(VT, Op);
return SDValue();
};
// FIXME: If either input vector is bitcast, try to convert the shuffle to
// the result type of this bitcast. This would eliminate at least one
// bitcast. See the transform in InstCombine.
SDValue SV0 = PeekThroughBitcast(N0->getOperand(0));
SDValue SV1 = PeekThroughBitcast(N0->getOperand(1));
if (!(SV0 && SV1))
return SDValue();
int MaskScale =
VT.getVectorNumElements() / N0.getValueType().getVectorNumElements();
SmallVector<int, 8> NewMask;
for (int M : SVN->getMask())
for (int i = 0; i != MaskScale; ++i)
NewMask.push_back(M < 0 ? -1 : M * MaskScale + i);
bool LegalMask = TLI.isShuffleMaskLegal(NewMask, VT);
if (!LegalMask) {
std::swap(SV0, SV1);
ShuffleVectorSDNode::commuteMask(NewMask);
LegalMask = TLI.isShuffleMaskLegal(NewMask, VT);
}
if (LegalMask)
return DAG.getVectorShuffle(VT, SDLoc(N), SV0, SV1, NewMask);
}
return SDValue();
}
SDValue DAGCombiner::visitBUILD_PAIR(SDNode *N) {
EVT VT = N->getValueType(0);
return CombineConsecutiveLoads(N, VT);
}
/// We know that BV is a build_vector node with Constant, ConstantFP or Undef
/// operands. DstEltVT indicates the destination element value type.
SDValue DAGCombiner::
ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
EVT SrcEltVT = BV->getValueType(0).getVectorElementType();
// If this is already the right type, we're done.
if (SrcEltVT == DstEltVT) return SDValue(BV, 0);
unsigned SrcBitSize = SrcEltVT.getSizeInBits();
unsigned DstBitSize = DstEltVT.getSizeInBits();
// If this is a conversion of N elements of one type to N elements of another
// type, convert each element. This handles FP<->INT cases.
if (SrcBitSize == DstBitSize) {
SmallVector<SDValue, 8> Ops;
for (SDValue Op : BV->op_values()) {
// If the vector element type is not legal, the BUILD_VECTOR operands
// are promoted and implicitly truncated. Make that explicit here.
if (Op.getValueType() != SrcEltVT)
Op = DAG.getNode(ISD::TRUNCATE, SDLoc(BV), SrcEltVT, Op);
Ops.push_back(DAG.getBitcast(DstEltVT, Op));
AddToWorklist(Ops.back().getNode());
}
EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT,
BV->getValueType(0).getVectorNumElements());
return DAG.getBuildVector(VT, SDLoc(BV), Ops);
}
// Otherwise, we're growing or shrinking the elements. To avoid having to
// handle annoying details of growing/shrinking FP values, we convert them to
// int first.
if (SrcEltVT.isFloatingPoint()) {
// Convert the input float vector to an int vector where the elements are
// the same size.
EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), SrcEltVT.getSizeInBits());
BV = ConstantFoldBITCASTofBUILD_VECTOR(BV, IntVT).getNode();
SrcEltVT = IntVT;
}
// Now we know the input is an integer vector. If the output is a FP type,
// convert to integer first, then to FP of the right size.
if (DstEltVT.isFloatingPoint()) {
EVT TmpVT = EVT::getIntegerVT(*DAG.getContext(), DstEltVT.getSizeInBits());
SDNode *Tmp = ConstantFoldBITCASTofBUILD_VECTOR(BV, TmpVT).getNode();
// Next, convert to FP elements of the same size.
return ConstantFoldBITCASTofBUILD_VECTOR(Tmp, DstEltVT);
}
SDLoc DL(BV);
// Okay, we know the src/dst types are both integers of differing sizes.
// Handle growing first.
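// Illustrative instance (little-endian, assumed types): growing
// v4i16 -> v2i32 packs two i16 inputs into each i32 output, with the
// lower-indexed input landing in the low half of the result.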
assert(SrcEltVT.isInteger() && DstEltVT.isInteger());
if (SrcBitSize < DstBitSize) {
unsigned NumInputsPerOutput = DstBitSize/SrcBitSize;
SmallVector<SDValue, 8> Ops;
for (unsigned i = 0, e = BV->getNumOperands(); i != e;
i += NumInputsPerOutput) {
bool isLE = DAG.getDataLayout().isLittleEndian();
APInt NewBits = APInt(DstBitSize, 0);
bool EltIsUndef = true;
for (unsigned j = 0; j != NumInputsPerOutput; ++j) {
// Shift the previously computed bits over.
NewBits <<= SrcBitSize;
SDValue Op = BV->getOperand(i+ (isLE ? (NumInputsPerOutput-j-1) : j));
if (Op.isUndef()) continue;
EltIsUndef = false;
NewBits |= cast<ConstantSDNode>(Op)->getAPIntValue().
zextOrTrunc(SrcBitSize).zext(DstBitSize);
}
if (EltIsUndef)
Ops.push_back(DAG.getUNDEF(DstEltVT));
else
Ops.push_back(DAG.getConstant(NewBits, DL, DstEltVT));
}
EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT, Ops.size());
return DAG.getBuildVector(VT, DL, Ops);
}
// Finally, this must be the case where we are shrinking elements: each input
// turns into multiple outputs.
unsigned NumOutputsPerInput = SrcBitSize/DstBitSize;
EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT,
NumOutputsPerInput*BV->getNumOperands());
SmallVector<SDValue, 8> Ops;
for (const SDValue &Op : BV->op_values()) {
if (Op.isUndef()) {
Ops.append(NumOutputsPerInput, DAG.getUNDEF(DstEltVT));
continue;
}
APInt OpVal = cast<ConstantSDNode>(Op)->
getAPIntValue().zextOrTrunc(SrcBitSize);
for (unsigned j = 0; j != NumOutputsPerInput; ++j) {
APInt ThisVal = OpVal.trunc(DstBitSize);
Ops.push_back(DAG.getConstant(ThisVal, DL, DstEltVT));
OpVal.lshrInPlace(DstBitSize);
}
// For big endian targets, swap the order of the pieces of each element.
if (DAG.getDataLayout().isBigEndian())
std::reverse(Ops.end()-NumOutputsPerInput, Ops.end());
}
return DAG.getBuildVector(VT, DL, Ops);
}
static bool isContractable(SDNode *N) {
SDNodeFlags F = N->getFlags();
return F.hasAllowContract() || F.hasAllowReassociation();
}
/// Try to perform FMA combining on a given FADD node.
SDValue DAGCombiner::visitFADDForFMACombine(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N->getValueType(0);
SDLoc SL(N);
const TargetOptions &Options = DAG.getTarget().Options;
// Floating-point multiply-add with intermediate rounding.
bool HasFMAD = (LegalOperations && TLI.isOperationLegal(ISD::FMAD, VT));
// Floating-point multiply-add without intermediate rounding.
bool HasFMA =
TLI.isFMAFasterThanFMulAndFAdd(VT) &&
(!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FMA, VT));
// No valid opcode, do not combine.
if (!HasFMAD && !HasFMA)
return SDValue();
SDNodeFlags Flags = N->getFlags();
bool CanFuse = Options.UnsafeFPMath || isContractable(N);
bool AllowFusionGlobally = (Options.AllowFPOpFusion == FPOpFusion::Fast ||
CanFuse || HasFMAD);
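// In other words: fusion is permitted when fast FP-op fusion or unsafe
// math is enabled globally, when this node carries contract/reassoc
// flags, or when the target has FMAD, whose intermediate rounding makes
// contraction semantics-preserving anyway.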
// If the addition is not contractable, do not combine.
if (!AllowFusionGlobally && !isContractable(N))
return SDValue();
const SelectionDAGTargetInfo *STI = DAG.getSubtarget().getSelectionDAGInfo();
if (STI && STI->generateFMAsInMachineCombiner(OptLevel))
return SDValue();
// Always prefer FMAD to FMA for precision.
unsigned PreferredFusedOpcode = HasFMAD ? ISD::FMAD : ISD::FMA;
bool Aggressive = TLI.enableAggressiveFMAFusion(VT);
// Is the node an FMUL and contractable either due to global flags or
// SDNodeFlags.
auto isContractableFMUL = [AllowFusionGlobally](SDValue N) {
if (N.getOpcode() != ISD::FMUL)
return false;
return AllowFusionGlobally || isContractable(N.getNode());
};
// If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
// prefer to fold the multiply with fewer uses.
if (Aggressive && isContractableFMUL(N0) && isContractableFMUL(N1)) {
if (N0.getNode()->use_size() > N1.getNode()->use_size())
std::swap(N0, N1);
}
// fold (fadd (fmul x, y), z) -> (fma x, y, z)
if (isContractableFMUL(N0) && (Aggressive || N0->hasOneUse())) {
return DAG.getNode(PreferredFusedOpcode, SL, VT,
N0.getOperand(0), N0.getOperand(1), N1, Flags);
}
// fold (fadd x, (fmul y, z)) -> (fma y, z, x)
// Note: Commutes FADD operands.
if (isContractableFMUL(N1) && (Aggressive || N1->hasOneUse())) {
return DAG.getNode(PreferredFusedOpcode, SL, VT,
N1.getOperand(0), N1.getOperand(1), N0, Flags);
}
// Look through FP_EXTEND nodes to do more combining.
// fold (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
if (N0.getOpcode() == ISD::FP_EXTEND) {
SDValue N00 = N0.getOperand(0);
if (isContractableFMUL(N00) &&
TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N00.getValueType())) {
return DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N00.getOperand(0)),
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N00.getOperand(1)), N1, Flags);
}
}
// fold (fadd x, (fpext (fmul y, z))) -> (fma (fpext y), (fpext z), x)
// Note: Commutes FADD operands.
if (N1.getOpcode() == ISD::FP_EXTEND) {
SDValue N10 = N1.getOperand(0);
if (isContractableFMUL(N10) &&
TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N10.getValueType())) {
return DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N10.getOperand(0)),
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N10.getOperand(1)), N0, Flags);
}
}
// More folding opportunities when target permits.
if (Aggressive) {
// fold (fadd (fma x, y, (fmul u, v)), z) -> (fma x, y, (fma u, v, z))
if (CanFuse &&
N0.getOpcode() == PreferredFusedOpcode &&
N0.getOperand(2).getOpcode() == ISD::FMUL &&
N0->hasOneUse() && N0.getOperand(2)->hasOneUse()) {
return DAG.getNode(PreferredFusedOpcode, SL, VT,
N0.getOperand(0), N0.getOperand(1),
DAG.getNode(PreferredFusedOpcode, SL, VT,
N0.getOperand(2).getOperand(0),
N0.getOperand(2).getOperand(1),
N1, Flags), Flags);
}
// fold (fadd x, (fma y, z, (fmul u, v))) -> (fma y, z, (fma u, v, x))
if (CanFuse &&
N1->getOpcode() == PreferredFusedOpcode &&
N1.getOperand(2).getOpcode() == ISD::FMUL &&
N1->hasOneUse() && N1.getOperand(2)->hasOneUse()) {
return DAG.getNode(PreferredFusedOpcode, SL, VT,
N1.getOperand(0), N1.getOperand(1),
DAG.getNode(PreferredFusedOpcode, SL, VT,
N1.getOperand(2).getOperand(0),
N1.getOperand(2).getOperand(1),
N0, Flags), Flags);
}
// fold (fadd (fma x, y, (fpext (fmul u, v))), z)
// -> (fma x, y, (fma (fpext u), (fpext v), z))
auto FoldFAddFMAFPExtFMul = [&] (
SDValue X, SDValue Y, SDValue U, SDValue V, SDValue Z,
SDNodeFlags Flags) {
return DAG.getNode(PreferredFusedOpcode, SL, VT, X, Y,
DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FP_EXTEND, SL, VT, U),
DAG.getNode(ISD::FP_EXTEND, SL, VT, V),
Z, Flags), Flags);
};
if (N0.getOpcode() == PreferredFusedOpcode) {
SDValue N02 = N0.getOperand(2);
if (N02.getOpcode() == ISD::FP_EXTEND) {
SDValue N020 = N02.getOperand(0);
if (isContractableFMUL(N020) &&
TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N020.getValueType())) {
return FoldFAddFMAFPExtFMul(N0.getOperand(0), N0.getOperand(1),
N020.getOperand(0), N020.getOperand(1),
N1, Flags);
}
}
}
// fold (fadd (fpext (fma x, y, (fmul u, v))), z)
// -> (fma (fpext x), (fpext y), (fma (fpext u), (fpext v), z))
// FIXME: This turns two single-precision and one double-precision
// operation into two double-precision operations, which might not be
// interesting for all targets, especially GPUs.
auto FoldFAddFPExtFMAFMul = [&] (
SDValue X, SDValue Y, SDValue U, SDValue V, SDValue Z,
SDNodeFlags Flags) {
return DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FP_EXTEND, SL, VT, X),
DAG.getNode(ISD::FP_EXTEND, SL, VT, Y),
DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FP_EXTEND, SL, VT, U),
DAG.getNode(ISD::FP_EXTEND, SL, VT, V),
Z, Flags), Flags);
};
if (N0.getOpcode() == ISD::FP_EXTEND) {
SDValue N00 = N0.getOperand(0);
if (N00.getOpcode() == PreferredFusedOpcode) {
SDValue N002 = N00.getOperand(2);
if (isContractableFMUL(N002) &&
TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N00.getValueType())) {
return FoldFAddFPExtFMAFMul(N00.getOperand(0), N00.getOperand(1),
N002.getOperand(0), N002.getOperand(1),
N1, Flags);
}
}
}
// fold (fadd x, (fma y, z, (fpext (fmul u, v))))
// -> (fma y, z, (fma (fpext u), (fpext v), x))
if (N1.getOpcode() == PreferredFusedOpcode) {
SDValue N12 = N1.getOperand(2);
if (N12.getOpcode() == ISD::FP_EXTEND) {
SDValue N120 = N12.getOperand(0);
if (isContractableFMUL(N120) &&
TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N120.getValueType())) {
return FoldFAddFMAFPExtFMul(N1.getOperand(0), N1.getOperand(1),
N120.getOperand(0), N120.getOperand(1),
N0, Flags);
}
}
}
// fold (fadd x, (fpext (fma y, z, (fmul u, v))))
// -> (fma (fpext y), (fpext z), (fma (fpext u), (fpext v), x))
// FIXME: This turns two single-precision and one double-precision
// operation into two double-precision operations, which might not be
// interesting for all targets, especially GPUs.
if (N1.getOpcode() == ISD::FP_EXTEND) {
SDValue N10 = N1.getOperand(0);
if (N10.getOpcode() == PreferredFusedOpcode) {
SDValue N102 = N10.getOperand(2);
if (isContractableFMUL(N102) &&
TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N10.getValueType())) {
return FoldFAddFPExtFMAFMul(N10.getOperand(0), N10.getOperand(1),
N102.getOperand(0), N102.getOperand(1),
N0, Flags);
}
}
}
}
return SDValue();
}
/// Try to perform FMA combining on a given FSUB node.
SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N->getValueType(0);
SDLoc SL(N);
const TargetOptions &Options = DAG.getTarget().Options;
// Floating-point multiply-add with intermediate rounding.
bool HasFMAD = (LegalOperations && TLI.isOperationLegal(ISD::FMAD, VT));
// Floating-point multiply-add without intermediate rounding.
bool HasFMA =
TLI.isFMAFasterThanFMulAndFAdd(VT) &&
(!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FMA, VT));
// No valid opcode, do not combine.
if (!HasFMAD && !HasFMA)
return SDValue();
const SDNodeFlags Flags = N->getFlags();
bool CanFuse = Options.UnsafeFPMath || isContractable(N);
bool AllowFusionGlobally = (Options.AllowFPOpFusion == FPOpFusion::Fast ||
CanFuse || HasFMAD);
// If the subtraction is not contractable, do not combine.
if (!AllowFusionGlobally && !isContractable(N))
return SDValue();
const SelectionDAGTargetInfo *STI = DAG.getSubtarget().getSelectionDAGInfo();
if (STI && STI->generateFMAsInMachineCombiner(OptLevel))
return SDValue();
// Always prefer FMAD to FMA for precision.
unsigned PreferredFusedOpcode = HasFMAD ? ISD::FMAD : ISD::FMA;
bool Aggressive = TLI.enableAggressiveFMAFusion(VT);
// Is the node an FMUL and contractable either due to global flags or
// SDNodeFlags?
auto isContractableFMUL = [AllowFusionGlobally](SDValue N) {
if (N.getOpcode() != ISD::FMUL)
return false;
return AllowFusionGlobally || isContractable(N.getNode());
};
// fold (fsub (fmul x, y), z) -> (fma x, y, (fneg z))
if (isContractableFMUL(N0) && (Aggressive || N0->hasOneUse())) {
return DAG.getNode(PreferredFusedOpcode, SL, VT,
N0.getOperand(0), N0.getOperand(1),
DAG.getNode(ISD::FNEG, SL, VT, N1), Flags);
}
// fold (fsub x, (fmul y, z)) -> (fma (fneg y), z, x)
// Note: Commutes FSUB operands.
if (isContractableFMUL(N1) && (Aggressive || N1->hasOneUse())) {
return DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FNEG, SL, VT,
N1.getOperand(0)),
N1.getOperand(1), N0, Flags);
}
// fold (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
if (N0.getOpcode() == ISD::FNEG && isContractableFMUL(N0.getOperand(0)) &&
(Aggressive || (N0->hasOneUse() && N0.getOperand(0).hasOneUse()))) {
SDValue N00 = N0.getOperand(0).getOperand(0);
SDValue N01 = N0.getOperand(0).getOperand(1);
return DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FNEG, SL, VT, N00), N01,
DAG.getNode(ISD::FNEG, SL, VT, N1), Flags);
}
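// Illustrative sketch (not part of the original source): the three FSUB
// folds above correspond to source patterns such as
//
//   x * y - z      // -> (fma x, y, (fneg z))
//   z - x * y      // -> (fma (fneg x), y, z)
//   -(x * y) - z   // -> (fma (fneg x), y, (fneg z))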
// Look through FP_EXTEND nodes to do more combining.
// fold (fsub (fpext (fmul x, y)), z)
// -> (fma (fpext x), (fpext y), (fneg z))
if (N0.getOpcode() == ISD::FP_EXTEND) {
SDValue N00 = N0.getOperand(0);
if (isContractableFMUL(N00) &&
TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N00.getValueType())) {
return DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N00.getOperand(0)),
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N00.getOperand(1)),
DAG.getNode(ISD::FNEG, SL, VT, N1), Flags);
}
}
// fold (fsub x, (fpext (fmul y, z)))
// -> (fma (fneg (fpext y)), (fpext z), x)
// Note: Commutes FSUB operands.
if (N1.getOpcode() == ISD::FP_EXTEND) {
SDValue N10 = N1.getOperand(0);
if (isContractableFMUL(N10) &&
TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N10.getValueType())) {
return DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FNEG, SL, VT,
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N10.getOperand(0))),
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N10.getOperand(1)),
N0, Flags);
}
}
// fold (fsub (fpext (fneg (fmul x, y))), z)
// -> (fneg (fma (fpext x), (fpext y), z))
// Note: This could be removed with appropriate canonicalization of the
// input expression into (fneg (fadd (fpext (fmul x, y)), z)). However, the
// orthogonal flags -fp-contract=fast and -enable-unsafe-fp-math prevent
// us from implementing the canonicalization in visitFSUB.
if (N0.getOpcode() == ISD::FP_EXTEND) {
SDValue N00 = N0.getOperand(0);
if (N00.getOpcode() == ISD::FNEG) {
SDValue N000 = N00.getOperand(0);
if (isContractableFMUL(N000) &&
TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N00.getValueType())) {
return DAG.getNode(ISD::FNEG, SL, VT,
DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N000.getOperand(0)),
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N000.getOperand(1)),
N1, Flags));
}
}
}
// fold (fsub (fneg (fpext (fmul x, y))), z)
// -> (fneg (fma (fpext x), (fpext y), z))
// Note: This could be removed with appropriate canonicalization of the
// input expression into (fneg (fadd (fpext (fmul x, y)), z)). However, the
// orthogonal flags -fp-contract=fast and -enable-unsafe-fp-math prevent
// us from implementing the canonicalization in visitFSUB.
if (N0.getOpcode() == ISD::FNEG) {
SDValue N00 = N0.getOperand(0);
if (N00.getOpcode() == ISD::FP_EXTEND) {
SDValue N000 = N00.getOperand(0);
if (isContractableFMUL(N000) &&
TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N000.getValueType())) {
return DAG.getNode(ISD::FNEG, SL, VT,
DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N000.getOperand(0)),
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N000.getOperand(1)),
N1, Flags));
}
}
}
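// (Sketch: the two blocks above differ only in whether the fneg or the
// fp_extend is the outermost node; both orders must be matched because, as
// the notes explain, the fast-math flag combinations prevent canonicalizing
// one form into the other earlier in visitFSUB.)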
// More folding opportunities when target permits.
if (Aggressive) {
// fold (fsub (fma x, y, (fmul u, v)), z)
// -> (fma x, y, (fma u, v, (fneg z)))
if (CanFuse && N0.getOpcode() == PreferredFusedOpcode &&
isContractableFMUL(N0.getOperand(2)) && N0->hasOneUse() &&
N0.getOperand(2)->hasOneUse()) {
return DAG.getNode(PreferredFusedOpcode, SL, VT,
N0.getOperand(0), N0.getOperand(1),
DAG.getNode(PreferredFusedOpcode, SL, VT,
N0.getOperand(2).getOperand(0),
N0.getOperand(2).getOperand(1),
DAG.getNode(ISD::FNEG, SL, VT,
N1), Flags), Flags);
}
// fold (fsub x, (fma y, z, (fmul u, v)))
// -> (fma (fneg y), z, (fma (fneg u), v, x))
if (CanFuse && N1.getOpcode() == PreferredFusedOpcode &&
isContractableFMUL(N1.getOperand(2))) {
SDValue N20 = N1.getOperand(2).getOperand(0);
SDValue N21 = N1.getOperand(2).getOperand(1);
return DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FNEG, SL, VT,
N1.getOperand(0)),
N1.getOperand(1),
DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FNEG, SL, VT, N20),
N21, N0, Flags), Flags);
}
// fold (fsub (fma x, y, (fpext (fmul u, v))), z)
// -> (fma x, y, (fma (fpext u), (fpext v), (fneg z)))
if (N0.getOpcode() == PreferredFusedOpcode) {
SDValue N02 = N0.getOperand(2);
if (N02.getOpcode() == ISD::FP_EXTEND) {
SDValue N020 = N02.getOperand(0);
if (isContractableFMUL(N020) &&
TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N020.getValueType())) {
return DAG.getNode(PreferredFusedOpcode, SL, VT,
N0.getOperand(0), N0.getOperand(1),
DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N020.getOperand(0)),
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N020.getOperand(1)),
DAG.getNode(ISD::FNEG, SL, VT,
N1), Flags), Flags);
}
}
}
// fold (fsub (fpext (fma x, y, (fmul u, v))), z)
// -> (fma (fpext x), (fpext y),
// (fma (fpext u), (fpext v), (fneg z)))
// FIXME: This turns two single-precision and one double-precision
// operation into two double-precision operations, which might not be
// interesting for all targets, especially GPUs.
if (N0.getOpcode() == ISD::FP_EXTEND) {
SDValue N00 = N0.getOperand(0);
if (N00.getOpcode() == PreferredFusedOpcode) {
SDValue N002 = N00.getOperand(2);
if (isContractableFMUL(N002) &&
TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N00.getValueType())) {
return DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N00.getOperand(0)),
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N00.getOperand(1)),
DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N002.getOperand(0)),
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N002.getOperand(1)),
DAG.getNode(ISD::FNEG, SL, VT,
N1), Flags), Flags);
}
}
}
// fold (fsub x, (fma y, z, (fpext (fmul u, v))))
// -> (fma (fneg y), z, (fma (fneg (fpext u)), (fpext v), x))
if (N1.getOpcode() == PreferredFusedOpcode &&
N1.getOperand(2).getOpcode() == ISD::FP_EXTEND) {
SDValue N120 = N1.getOperand(2).getOperand(0);
if (isContractableFMUL(N120) &&
TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N120.getValueType())) {
SDValue N1200 = N120.getOperand(0);
SDValue N1201 = N120.getOperand(1);
return DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FNEG, SL, VT, N1.getOperand(0)),
N1.getOperand(1),
DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FNEG, SL, VT,
DAG.getNode(ISD::FP_EXTEND, SL,
VT, N1200)),
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N1201),
N0, Flags), Flags);
}
}
// fold (fsub x, (fpext (fma y, z, (fmul u, v))))
// -> (fma (fneg (fpext y)), (fpext z),
// (fma (fneg (fpext u)), (fpext v), x))
// FIXME: This turns two single-precision and one double-precision
// operation into two double-precision operations, which might not be
// interesting for all targets, especially GPUs.
if (N1.getOpcode() == ISD::FP_EXTEND &&
N1.getOperand(0).getOpcode() == PreferredFusedOpcode) {
SDValue CvtSrc = N1.getOperand(0);
SDValue N100 = CvtSrc.getOperand(0);
SDValue N101 = CvtSrc.getOperand(1);
SDValue N102 = CvtSrc.getOperand(2);
if (isContractableFMUL(N102) &&
TLI.isFPExtFoldable(PreferredFusedOpcode, VT, CvtSrc.getValueType())) {
SDValue N1020 = N102.getOperand(0);
SDValue N1021 = N102.getOperand(1);
return DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FNEG, SL, VT,
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N100)),
DAG.getNode(ISD::FP_EXTEND, SL, VT, N101),
DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FNEG, SL, VT,
DAG.getNode(ISD::FP_EXTEND, SL,
VT, N1020)),
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N1021),
N0, Flags), Flags);
}
}
}
return SDValue();
}
/// Try to perform FMA combining on a given FMUL node based on the distributive
/// law x * (y + 1) = x * y + x and variants thereof (commuted versions,
/// subtraction instead of addition).
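/// For example (illustrative, not from the original source), under the
/// conditions checked below this rewrites (fmul (fadd x, 1.0), y) into
/// (fma x, y, y) and (fmul (fsub 1.0, x), y) into (fma (fneg x), y, y).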
SDValue DAGCombiner::visitFMULForFMADistributiveCombine(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N->getValueType(0);
SDLoc SL(N);
const SDNodeFlags Flags = N->getFlags();
assert(N->getOpcode() == ISD::FMUL && "Expected FMUL Operation");
const TargetOptions &Options = DAG.getTarget().Options;
// The transforms below are incorrect when x == 0 and y == inf, because the
// intermediate multiplication produces a nan.
if (!Options.NoInfsFPMath)
return SDValue();
// Floating-point multiply-add without intermediate rounding.
bool HasFMA =
(Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath) &&
TLI.isFMAFasterThanFMulAndFAdd(VT) &&
(!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FMA, VT));
// Floating-point multiply-add with intermediate rounding. This can result
// in a less precise result due to the changed rounding order.
bool HasFMAD = Options.UnsafeFPMath &&
(LegalOperations && TLI.isOperationLegal(ISD::FMAD, VT));
// No valid opcode, do not combine.
if (!HasFMAD && !HasFMA)
return SDValue();
// Always prefer FMAD to FMA for precision.
unsigned PreferredFusedOpcode = HasFMAD ? ISD::FMAD : ISD::FMA;
bool Aggressive = TLI.enableAggressiveFMAFusion(VT);
// fold (fmul (fadd x0, +1.0), y) -> (fma x0, y, y)
// fold (fmul (fadd x0, -1.0), y) -> (fma x0, y, (fneg y))
auto FuseFADD = [&](SDValue X, SDValue Y, const SDNodeFlags Flags) {
if (X.getOpcode() == ISD::FADD && (Aggressive || X->hasOneUse())) {
if (auto *C = isConstOrConstSplatFP(X.getOperand(1), true)) {
if (C->isExactlyValue(+1.0))
return DAG.getNode(PreferredFusedOpcode, SL, VT, X.getOperand(0), Y,
Y, Flags);
if (C->isExactlyValue(-1.0))
return DAG.getNode(PreferredFusedOpcode, SL, VT, X.getOperand(0), Y,
DAG.getNode(ISD::FNEG, SL, VT, Y), Flags);
}
}
return SDValue();
};
if (SDValue FMA = FuseFADD(N0, N1, Flags))
return FMA;
if (SDValue FMA = FuseFADD(N1, N0, Flags))
return FMA;
// fold (fmul (fsub +1.0, x1), y) -> (fma (fneg x1), y, y)
// fold (fmul (fsub -1.0, x1), y) -> (fma (fneg x1), y, (fneg y))
// fold (fmul (fsub x0, +1.0), y) -> (fma x0, y, (fneg y))
// fold (fmul (fsub x0, -1.0), y) -> (fma x0, y, y)
auto FuseFSUB = [&](SDValue X, SDValue Y, const SDNodeFlags Flags) {
if (X.getOpcode() == ISD::FSUB && (Aggressive || X->hasOneUse())) {
if (auto *C0 = isConstOrConstSplatFP(X.getOperand(0), true)) {
if (C0->isExactlyValue(+1.0))
return DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FNEG, SL, VT, X.getOperand(1)), Y,
Y, Flags);
if (C0->isExactlyValue(-1.0))
return DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FNEG, SL, VT, X.getOperand(1)), Y,
DAG.getNode(ISD::FNEG, SL, VT, Y), Flags);
}
if (auto *C1 = isConstOrConstSplatFP(X.getOperand(1), true)) {
if (C1->isExactlyValue(+1.0))
return DAG.getNode(PreferredFusedOpcode, SL, VT, X.getOperand(0), Y,
DAG.getNode(ISD::FNEG, SL, VT, Y), Flags);
if (C1->isExactlyValue(-1.0))
return DAG.getNode(PreferredFusedOpcode, SL, VT, X.getOperand(0), Y,
Y, Flags);
}
}
return SDValue();
};
if (SDValue FMA = FuseFSUB(N0, N1, Flags))
return FMA;
if (SDValue FMA = FuseFSUB(N1, N0, Flags))
return FMA;
return SDValue();
}
SDValue DAGCombiner::visitFADD(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
bool N0CFP = isConstantFPBuildVectorOrConstantFP(N0);
bool N1CFP = isConstantFPBuildVectorOrConstantFP(N1);
EVT VT = N->getValueType(0);
SDLoc DL(N);
const TargetOptions &Options = DAG.getTarget().Options;
const SDNodeFlags Flags = N->getFlags();
// fold vector ops
if (VT.isVector())
if (SDValue FoldedVOp = SimplifyVBinOp(N))
return FoldedVOp;
// fold (fadd c1, c2) -> c1 + c2
if (N0CFP && N1CFP)
return DAG.getNode(ISD::FADD, DL, VT, N0, N1, Flags);
// canonicalize constant to RHS
if (N0CFP && !N1CFP)
return DAG.getNode(ISD::FADD, DL, VT, N1, N0, Flags);
// N0 + -0.0 --> N0 (also allowed with +0.0 and fast-math)
ConstantFPSDNode *N1C = isConstOrConstSplatFP(N1, true);
if (N1C && N1C->isZero())
if (N1C->isNegative() || Options.UnsafeFPMath || Flags.hasNoSignedZeros())
return N0;
if (SDValue NewSel = foldBinOpIntoSelect(N))
return NewSel;
// fold (fadd A, (fneg B)) -> (fsub A, B)
if ((!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FSUB, VT)) &&
isNegatibleForFree(N1, LegalOperations, TLI, &Options, ForCodeSize) == 2)
return DAG.getNode(ISD::FSUB, DL, VT, N0,
GetNegatedExpression(N1, DAG, LegalOperations,
ForCodeSize), Flags);
// fold (fadd (fneg A), B) -> (fsub B, A)
if ((!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FSUB, VT)) &&
isNegatibleForFree(N0, LegalOperations, TLI, &Options, ForCodeSize) == 2)
return DAG.getNode(ISD::FSUB, DL, VT, N1,
GetNegatedExpression(N0, DAG, LegalOperations,
ForCodeSize), Flags);
auto isFMulNegTwo = [](SDValue FMul) {
if (!FMul.hasOneUse() || FMul.getOpcode() != ISD::FMUL)
return false;
auto *C = isConstOrConstSplatFP(FMul.getOperand(1), true);
return C && C->isExactlyValue(-2.0);
};
// fadd (fmul B, -2.0), A --> fsub A, (fadd B, B)
if (isFMulNegTwo(N0)) {
SDValue B = N0.getOperand(0);
SDValue Add = DAG.getNode(ISD::FADD, DL, VT, B, B, Flags);
return DAG.getNode(ISD::FSUB, DL, VT, N1, Add, Flags);
}
// fadd A, (fmul B, -2.0) --> fsub A, (fadd B, B)
if (isFMulNegTwo(N1)) {
SDValue B = N1.getOperand(0);
SDValue Add = DAG.getNode(ISD::FADD, DL, VT, B, B, Flags);
return DAG.getNode(ISD::FSUB, DL, VT, N0, Add, Flags);
}
// No FP constant should be created after legalization as Instruction
// Selection pass has a hard time dealing with FP constants.
bool AllowNewConst = (Level < AfterLegalizeDAG);
// If nnan is enabled, fold lots of things.
if ((Options.NoNaNsFPMath || Flags.hasNoNaNs()) && AllowNewConst) {
// If allowed, fold (fadd (fneg x), x) -> 0.0
if (N0.getOpcode() == ISD::FNEG && N0.getOperand(0) == N1)
return DAG.getConstantFP(0.0, DL, VT);
// If allowed, fold (fadd x, (fneg x)) -> 0.0
if (N1.getOpcode() == ISD::FNEG && N1.getOperand(0) == N0)
return DAG.getConstantFP(0.0, DL, VT);
}
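// (Sketch: x + (-x) is NaN rather than +0.0 when x is NaN or an infinity,
// e.g. inf + (-inf) == NaN, so folding to a constant zero above is justified
// only when NaNs are assumed not to occur.)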
// If 'unsafe math' is enabled, or both the reassoc and nsz flags are set,
// fold lots of things.
// TODO: break out portions of the transformations below for which Unsafe is
// considered and which do not require both nsz and reassoc
if ((Options.UnsafeFPMath ||
(Flags.hasAllowReassociation() && Flags.hasNoSignedZeros())) &&
AllowNewConst) {
// fadd (fadd x, c1), c2 -> fadd x, c1 + c2
if (N1CFP && N0.getOpcode() == ISD::FADD &&
isConstantFPBuildVectorOrConstantFP(N0.getOperand(1))) {
SDValue NewC = DAG.getNode(ISD::FADD, DL, VT, N0.getOperand(1), N1, Flags);
return DAG.getNode(ISD::FADD, DL, VT, N0.getOperand(0), NewC, Flags);
}
// We can fold chains of FADD's of the same value into multiplications.
// This transform is not safe in general because we are reducing the number
// of rounding steps.
if (TLI.isOperationLegalOrCustom(ISD::FMUL, VT) && !N0CFP && !N1CFP) {
if (N0.getOpcode() == ISD::FMUL) {
bool CFP00 = isConstantFPBuildVectorOrConstantFP(N0.getOperand(0));
bool CFP01 = isConstantFPBuildVectorOrConstantFP(N0.getOperand(1));
// (fadd (fmul x, c), x) -> (fmul x, c+1)
if (CFP01 && !CFP00 && N0.getOperand(0) == N1) {
SDValue NewCFP = DAG.getNode(ISD::FADD, DL, VT, N0.getOperand(1),
DAG.getConstantFP(1.0, DL, VT), Flags);
return DAG.getNode(ISD::FMUL, DL, VT, N1, NewCFP, Flags);
}
// (fadd (fmul x, c), (fadd x, x)) -> (fmul x, c+2)
if (CFP01 && !CFP00 && N1.getOpcode() == ISD::FADD &&
N1.getOperand(0) == N1.getOperand(1) &&
N0.getOperand(0) == N1.getOperand(0)) {
SDValue NewCFP = DAG.getNode(ISD::FADD, DL, VT, N0.getOperand(1),
DAG.getConstantFP(2.0, DL, VT), Flags);
return DAG.getNode(ISD::FMUL, DL, VT, N0.getOperand(0), NewCFP, Flags);
}
}
if (N1.getOpcode() == ISD::FMUL) {
bool CFP10 = isConstantFPBuildVectorOrConstantFP(N1.getOperand(0));
bool CFP11 = isConstantFPBuildVectorOrConstantFP(N1.getOperand(1));
// (fadd x, (fmul x, c)) -> (fmul x, c+1)
if (CFP11 && !CFP10 && N1.getOperand(0) == N0) {
SDValue NewCFP = DAG.getNode(ISD::FADD, DL, VT, N1.getOperand(1),
DAG.getConstantFP(1.0, DL, VT), Flags);
return DAG.getNode(ISD::FMUL, DL, VT, N0, NewCFP, Flags);
}
// (fadd (fadd x, x), (fmul x, c)) -> (fmul x, c+2)
if (CFP11 && !CFP10 && N0.getOpcode() == ISD::FADD &&
N0.getOperand(0) == N0.getOperand(1) &&
N1.getOperand(0) == N0.getOperand(0)) {
SDValue NewCFP = DAG.getNode(ISD::FADD, DL, VT, N1.getOperand(1),
DAG.getConstantFP(2.0, DL, VT), Flags);
return DAG.getNode(ISD::FMUL, DL, VT, N1.getOperand(0), NewCFP, Flags);
}
}
if (N0.getOpcode() == ISD::FADD) {
bool CFP00 = isConstantFPBuildVectorOrConstantFP(N0.getOperand(0));
// (fadd (fadd x, x), x) -> (fmul x, 3.0)
if (!CFP00 && N0.getOperand(0) == N0.getOperand(1) &&
(N0.getOperand(0) == N1)) {
return DAG.getNode(ISD::FMUL, DL, VT,
N1, DAG.getConstantFP(3.0, DL, VT), Flags);
}
}
if (N1.getOpcode() == ISD::FADD) {
bool CFP10 = isConstantFPBuildVectorOrConstantFP(N1.getOperand(0));
// (fadd x, (fadd x, x)) -> (fmul x, 3.0)
if (!CFP10 && N1.getOperand(0) == N1.getOperand(1) &&
N1.getOperand(0) == N0) {
return DAG.getNode(ISD::FMUL, DL, VT,
N0, DAG.getConstantFP(3.0, DL, VT), Flags);
}
}
// (fadd (fadd x, x), (fadd x, x)) -> (fmul x, 4.0)
if (N0.getOpcode() == ISD::FADD && N1.getOpcode() == ISD::FADD &&
N0.getOperand(0) == N0.getOperand(1) &&
N1.getOperand(0) == N1.getOperand(1) &&
N0.getOperand(0) == N1.getOperand(0)) {
return DAG.getNode(ISD::FMUL, DL, VT, N0.getOperand(0),
DAG.getConstantFP(4.0, DL, VT), Flags);
}
}
} // enable-unsafe-fp-math
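// Illustrative sketch (not from the original source): under the flags
// checked above, the reassociation block turns repeated additions into a
// single multiply, e.g.
//
//   x + x + x            // -> (fmul x, 3.0)
//   (x + x) + (x + x)    // -> (fmul x, 4.0)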
// FADD -> FMA combines:
if (SDValue Fused = visitFADDForFMACombine(N)) {
AddToWorklist(Fused.getNode());
return Fused;
}
return SDValue();
}
SDValue DAGCombiner::visitFSUB(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
ConstantFPSDNode *N0CFP = isConstOrConstSplatFP(N0, true);
ConstantFPSDNode *N1CFP = isConstOrConstSplatFP(N1, true);
EVT VT = N->getValueType(0);
SDLoc DL(N);
const TargetOptions &Options = DAG.getTarget().Options;
const SDNodeFlags Flags = N->getFlags();
// fold vector ops
if (VT.isVector())
if (SDValue FoldedVOp = SimplifyVBinOp(N))
return FoldedVOp;
// fold (fsub c1, c2) -> c1-c2
if (N0CFP && N1CFP)
return DAG.getNode(ISD::FSUB, DL, VT, N0, N1, Flags);
if (SDValue NewSel = foldBinOpIntoSelect(N))
return NewSel;
// (fsub A, 0) -> A
if (N1CFP && N1CFP->isZero()) {
if (!N1CFP->isNegative() || Options.UnsafeFPMath ||
Flags.hasNoSignedZeros()) {
return N0;
}
}
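// (Sketch: A - (+0.0) is always A, so that case folds unconditionally, but
// A - (-0.0) is A + 0.0, which yields +0.0 rather than -0.0 when A == -0.0;
// hence the nsz/unsafe-math requirement for the negative-zero case above.)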
if (N0 == N1) {
// (fsub x, x) -> 0.0
if (Options.NoNaNsFPMath || Flags.hasNoNaNs())
return DAG.getConstantFP(0.0f, DL, VT);
}
// (fsub -0.0, N1) -> -N1
// NOTE: It is safe to transform an FSUB(-0.0,X) into an FNEG(X), since the
// FSUB does not specify the sign bit of a NaN. Also note that for
// the same reason, the inverse transform is not safe, unless fast math
// flags are in play.
if (N0CFP && N0CFP->isZero()) {
if (N0CFP->isNegative() ||
(Options.NoSignedZerosFPMath || Flags.hasNoSignedZeros())) {
if (isNegatibleForFree(N1, LegalOperations, TLI, &Options, ForCodeSize))
return GetNegatedExpression(N1, DAG, LegalOperations, ForCodeSize);
if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
return DAG.getNode(ISD::FNEG, DL, VT, N1, Flags);
}
}
if ((Options.UnsafeFPMath ||
(Flags.hasAllowReassociation() && Flags.hasNoSignedZeros()))
&& N1.getOpcode() == ISD::FADD) {
// X - (X + Y) -> -Y
if (N0 == N1->getOperand(0))
return DAG.getNode(ISD::FNEG, DL, VT, N1->getOperand(1), Flags);
// X - (Y + X) -> -Y
if (N0 == N1->getOperand(1))
return DAG.getNode(ISD::FNEG, DL, VT, N1->getOperand(0), Flags);
}
// fold (fsub A, (fneg B)) -> (fadd A, B)
if (isNegatibleForFree(N1, LegalOperations, TLI, &Options, ForCodeSize))
return DAG.getNode(ISD::FADD, DL, VT, N0,
GetNegatedExpression(N1, DAG, LegalOperations,
ForCodeSize), Flags);
// FSUB -> FMA combines:
if (SDValue Fused = visitFSUBForFMACombine(N)) {
AddToWorklist(Fused.getNode());
return Fused;
}
return SDValue();
}
SDValue DAGCombiner::visitFMUL(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
ConstantFPSDNode *N0CFP = isConstOrConstSplatFP(N0, true);
ConstantFPSDNode *N1CFP = isConstOrConstSplatFP(N1, true);
EVT VT = N->getValueType(0);
SDLoc DL(N);
const TargetOptions &Options = DAG.getTarget().Options;
const SDNodeFlags Flags = N->getFlags();
// fold vector ops
if (VT.isVector()) {
// This just handles C1 * C2 for vectors. Other vector folds are below.
if (SDValue FoldedVOp = SimplifyVBinOp(N))
return FoldedVOp;
}
// fold (fmul c1, c2) -> c1*c2
if (N0CFP && N1CFP)
return DAG.getNode(ISD::FMUL, DL, VT, N0, N1, Flags);
// canonicalize constant to RHS
if (isConstantFPBuildVectorOrConstantFP(N0) &&
!isConstantFPBuildVectorOrConstantFP(N1))
return DAG.getNode(ISD::FMUL, DL, VT, N1, N0, Flags);
// fold (fmul A, 1.0) -> A
if (N1CFP && N1CFP->isExactlyValue(1.0))
return N0;
if (SDValue NewSel = foldBinOpIntoSelect(N))
return NewSel;
if ((Options.NoNaNsFPMath && Options.NoSignedZerosFPMath) ||
(Flags.hasNoNaNs() && Flags.hasNoSignedZeros())) {
// fold (fmul A, 0) -> 0
if (N1CFP && N1CFP->isZero())
return N1;
}
if (Options.UnsafeFPMath || Flags.hasAllowReassociation()) {
// fmul (fmul X, C1), C2 -> fmul X, C1 * C2
if (isConstantFPBuildVectorOrConstantFP(N1) &&
N0.getOpcode() == ISD::FMUL) {
SDValue N00 = N0.getOperand(0);
SDValue N01 = N0.getOperand(1);
// Avoid an infinite loop by making sure that N00 is not a constant
// (the inner multiply has not been constant folded yet).
if (isConstantFPBuildVectorOrConstantFP(N01) &&
!isConstantFPBuildVectorOrConstantFP(N00)) {
SDValue MulConsts = DAG.getNode(ISD::FMUL, DL, VT, N01, N1, Flags);
return DAG.getNode(ISD::FMUL, DL, VT, N00, MulConsts, Flags);
}
}
// Match a special-case: we convert X * 2.0 into fadd.
// fmul (fadd X, X), C -> fmul X, 2.0 * C
if (N0.getOpcode() == ISD::FADD && N0.hasOneUse() &&
N0.getOperand(0) == N0.getOperand(1)) {
const SDValue Two = DAG.getConstantFP(2.0, DL, VT);
SDValue MulConsts = DAG.getNode(ISD::FMUL, DL, VT, Two, N1, Flags);
return DAG.getNode(ISD::FMUL, DL, VT, N0.getOperand(0), MulConsts, Flags);
}
}
// fold (fmul X, 2.0) -> (fadd X, X)
if (N1CFP && N1CFP->isExactlyValue(+2.0))
return DAG.getNode(ISD::FADD, DL, VT, N0, N0, Flags);
// fold (fmul X, -1.0) -> (fneg X)
if (N1CFP && N1CFP->isExactlyValue(-1.0))
if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
return DAG.getNode(ISD::FNEG, DL, VT, N0);
// fold (fmul (fneg X), (fneg Y)) -> (fmul X, Y)
if (char LHSNeg = isNegatibleForFree(N0, LegalOperations, TLI, &Options,
ForCodeSize)) {
if (char RHSNeg = isNegatibleForFree(N1, LegalOperations, TLI, &Options,
ForCodeSize)) {
// Both can be negated for free, check to see if at least one is cheaper
// negated.
if (LHSNeg == 2 || RHSNeg == 2)
return DAG.getNode(ISD::FMUL, DL, VT,
GetNegatedExpression(N0, DAG, LegalOperations,
ForCodeSize),
GetNegatedExpression(N1, DAG, LegalOperations,
ForCodeSize),
Flags);
}
}
// fold (fmul X, (select (fcmp X > 0.0), -1.0, 1.0)) -> (fneg (fabs X))
// fold (fmul X, (select (fcmp X > 0.0), 1.0, -1.0)) -> (fabs X)
if (Flags.hasNoNaNs() && Flags.hasNoSignedZeros() &&
(N0.getOpcode() == ISD::SELECT || N1.getOpcode() == ISD::SELECT) &&
TLI.isOperationLegal(ISD::FABS, VT)) {
SDValue Select = N0, X = N1;
if (Select.getOpcode() != ISD::SELECT)
std::swap(Select, X);
SDValue Cond = Select.getOperand(0);
auto TrueOpnd = dyn_cast<ConstantFPSDNode>(Select.getOperand(1));
auto FalseOpnd = dyn_cast<ConstantFPSDNode>(Select.getOperand(2));
if (TrueOpnd && FalseOpnd &&
Cond.getOpcode() == ISD::SETCC && Cond.getOperand(0) == X &&
isa<ConstantFPSDNode>(Cond.getOperand(1)) &&
cast<ConstantFPSDNode>(Cond.getOperand(1))->isExactlyValue(0.0)) {
ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
switch (CC) {
default: break;
case ISD::SETOLT:
case ISD::SETULT:
case ISD::SETOLE:
case ISD::SETULE:
case ISD::SETLT:
case ISD::SETLE:
std::swap(TrueOpnd, FalseOpnd);
LLVM_FALLTHROUGH;
case ISD::SETOGT:
case ISD::SETUGT:
case ISD::SETOGE:
case ISD::SETUGE:
case ISD::SETGT:
case ISD::SETGE:
if (TrueOpnd->isExactlyValue(-1.0) && FalseOpnd->isExactlyValue(1.0) &&
TLI.isOperationLegal(ISD::FNEG, VT))
return DAG.getNode(ISD::FNEG, DL, VT,
DAG.getNode(ISD::FABS, DL, VT, X));
if (TrueOpnd->isExactlyValue(1.0) && FalseOpnd->isExactlyValue(-1.0))
return DAG.getNode(ISD::FABS, DL, VT, X);
break;
}
}
}
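// Illustrative sketch (not from the original source): the select fold above
// matches a common hand-written sign/abs idiom, e.g.
//
//   x * (x > 0.0f ? 1.0f : -1.0f)   // -> (fabs x)
//
// provided the fmul carries the nnan and nsz flags.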
// FMUL -> FMA combines:
if (SDValue Fused = visitFMULForFMADistributiveCombine(N)) {
AddToWorklist(Fused.getNode());
return Fused;
}
return SDValue();
}
SDValue DAGCombiner::visitFMA(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
SDValue N2 = N->getOperand(2);
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
EVT VT = N->getValueType(0);
SDLoc DL(N);
const TargetOptions &Options = DAG.getTarget().Options;
// FMA nodes have flags that propagate to the created nodes.
const SDNodeFlags Flags = N->getFlags();
bool UnsafeFPMath = Options.UnsafeFPMath || isContractable(N);
// Constant fold FMA.
if (isa<ConstantFPSDNode>(N0) &&
isa<ConstantFPSDNode>(N1) &&
isa<ConstantFPSDNode>(N2)) {
return DAG.getNode(ISD::FMA, DL, VT, N0, N1, N2);
}
if (UnsafeFPMath) {
if (N0CFP && N0CFP->isZero())
return N2;
if (N1CFP && N1CFP->isZero())
return N2;
}
// TODO: The FMA node should have flags that propagate to these nodes.
if (N0CFP && N0CFP->isExactlyValue(1.0))
return DAG.getNode(ISD::FADD, SDLoc(N), VT, N1, N2);
if (N1CFP && N1CFP->isExactlyValue(1.0))
return DAG.getNode(ISD::FADD, SDLoc(N), VT, N0, N2);
// Canonicalize (fma c, x, y) -> (fma x, c, y)
if (isConstantFPBuildVectorOrConstantFP(N0) &&
!isConstantFPBuildVectorOrConstantFP(N1))
return DAG.getNode(ISD::FMA, SDLoc(N), VT, N1, N0, N2);
if (UnsafeFPMath) {
// (fma x, c1, (fmul x, c2)) -> (fmul x, c1+c2)
if (N2.getOpcode() == ISD::FMUL && N0 == N2.getOperand(0) &&
isConstantFPBuildVectorOrConstantFP(N1) &&
isConstantFPBuildVectorOrConstantFP(N2.getOperand(1))) {
return DAG.getNode(ISD::FMUL, DL, VT, N0,
DAG.getNode(ISD::FADD, DL, VT, N1, N2.getOperand(1),
Flags), Flags);
}
// (fma (fmul x, c1), c2, y) -> (fma x, c1*c2, y)
if (N0.getOpcode() == ISD::FMUL &&
isConstantFPBuildVectorOrConstantFP(N1) &&
isConstantFPBuildVectorOrConstantFP(N0.getOperand(1))) {
return DAG.getNode(ISD::FMA, DL, VT,
N0.getOperand(0),
DAG.getNode(ISD::FMUL, DL, VT, N1, N0.getOperand(1),
Flags),
N2);
}
}
// (fma x, 1, y) -> (fadd x, y)
// (fma x, -1, y) -> (fadd (fneg x), y)
if (N1CFP) {
if (N1CFP->isExactlyValue(1.0))
// TODO: The FMA node should have flags that propagate to this node.
return DAG.getNode(ISD::FADD, DL, VT, N0, N2);
if (N1CFP->isExactlyValue(-1.0) &&
(!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))) {
SDValue RHSNeg = DAG.getNode(ISD::FNEG, DL, VT, N0);
AddToWorklist(RHSNeg.getNode());
// TODO: The FMA node should have flags that propagate to this node.
return DAG.getNode(ISD::FADD, DL, VT, N2, RHSNeg);
}
// fma (fneg x), K, y -> fma x, -K, y
if (N0.getOpcode() == ISD::FNEG &&
(TLI.isOperationLegal(ISD::ConstantFP, VT) ||
(N1.hasOneUse() && !TLI.isFPImmLegal(N1CFP->getValueAPF(), VT,
ForCodeSize)))) {
return DAG.getNode(ISD::FMA, DL, VT, N0.getOperand(0),
DAG.getNode(ISD::FNEG, DL, VT, N1, Flags), N2);
}
}
if (UnsafeFPMath) {
// (fma x, c, x) -> (fmul x, (c+1))
if (N1CFP && N0 == N2) {
return DAG.getNode(ISD::FMUL, DL, VT, N0,
DAG.getNode(ISD::FADD, DL, VT, N1,
DAG.getConstantFP(1.0, DL, VT), Flags),
Flags);
}
// (fma x, c, (fneg x)) -> (fmul x, (c-1))
if (N1CFP && N2.getOpcode() == ISD::FNEG && N2.getOperand(0) == N0) {
return DAG.getNode(ISD::FMUL, DL, VT, N0,
DAG.getNode(ISD::FADD, DL, VT, N1,
DAG.getConstantFP(-1.0, DL, VT), Flags),
Flags);
}
}
return SDValue();
}
// Combine multiple FDIVs with the same divisor into multiple FMULs by the
// reciprocal.
// E.g., (a / D; b / D;) -> (recip = 1.0 / D; a * recip; b * recip)
// Notice that this is not always beneficial. One reason is different targets
// may have different costs for FDIV and FMUL, so sometimes the cost of two
// FDIVs may be lower than the cost of one FDIV and two FMULs. Another reason
// is that the critical path is increased from "one FDIV" to "one FDIV + one FMUL".
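// For example (illustrative, not from the original source), with reciprocal
// transforms allowed:
//   a / d; b / d; c / d;
// becomes
//   t = 1.0 / d; a * t; b * t; c * t;
// once enough users of the divisor are found to satisfy the target's
// combineRepeatedFPDivisors() threshold.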
SDValue DAGCombiner::combineRepeatedFPDivisors(SDNode *N) {
// TODO: Limit this transform based on optsize/minsize - it always creates at
// least 1 extra instruction. But the perf win may be substantial enough
// that only minsize should restrict this.
bool UnsafeMath = DAG.getTarget().Options.UnsafeFPMath;
const SDNodeFlags Flags = N->getFlags();
if (!UnsafeMath && !Flags.hasAllowReciprocal())
return SDValue();
// Skip if current node is a reciprocal/fneg-reciprocal.
SDValue N0 = N->getOperand(0);
ConstantFPSDNode *N0CFP = isConstOrConstSplatFP(N0, /* AllowUndefs */ true);
if (N0CFP && (N0CFP->isExactlyValue(1.0) || N0CFP->isExactlyValue(-1.0)))
return SDValue();
// Exit early if the target does not want this transform or if there can't
// possibly be enough uses of the divisor to make the transform worthwhile.
SDValue N1 = N->getOperand(1);
unsigned MinUses = TLI.combineRepeatedFPDivisors();
// For splat vectors, scale the number of uses by the splat factor. If we can
// convert the division into a scalar op, that will likely be much faster.
unsigned NumElts = 1;
EVT VT = N->getValueType(0);
if (VT.isVector() && DAG.isSplatValue(N1))
NumElts = VT.getVectorNumElements();
if (!MinUses || (N1->use_size() * NumElts) < MinUses)
return SDValue();
// Find all FDIV users of the same divisor.
// Use a set because duplicates may be present in the user list.
SetVector<SDNode *> Users;
for (auto *U : N1->uses()) {
if (U->getOpcode() == ISD::FDIV && U->getOperand(1) == N1) {
// This division is eligible for optimization only if global unsafe math
// is enabled or if this division allows reciprocal formation.
if (UnsafeMath || U->getFlags().hasAllowReciprocal())
Users.insert(U);
}
}
// Now that we have the actual number of divisor uses, make sure it meets
// the minimum threshold specified by the target.
if ((Users.size() * NumElts) < MinUses)
return SDValue();
SDLoc DL(N);
SDValue FPOne = DAG.getConstantFP(1.0, DL, VT);
SDValue Reciprocal = DAG.getNode(ISD::FDIV, DL, VT, FPOne, N1, Flags);
// Dividend / Divisor -> Dividend * Reciprocal
for (auto *U : Users) {
SDValue Dividend = U->getOperand(0);
if (Dividend != FPOne) {
SDValue NewNode = DAG.getNode(ISD::FMUL, SDLoc(U), VT, Dividend,
Reciprocal, Flags);
CombineTo(U, NewNode);
} else if (U != Reciprocal.getNode()) {
// In the absence of fast-math-flags, this user node is always the
// same node as Reciprocal, but with FMF they may be different nodes.
CombineTo(U, Reciprocal);
}
}
return SDValue(N, 0); // N was replaced.
}
SDValue DAGCombiner::visitFDIV(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
EVT VT = N->getValueType(0);
SDLoc DL(N);
const TargetOptions &Options = DAG.getTarget().Options;
SDNodeFlags Flags = N->getFlags();
// fold vector ops
if (VT.isVector())
if (SDValue FoldedVOp = SimplifyVBinOp(N))
return FoldedVOp;
// fold (fdiv c1, c2) -> c1/c2
if (N0CFP && N1CFP)
return DAG.getNode(ISD::FDIV, SDLoc(N), VT, N0, N1, Flags);
if (SDValue NewSel = foldBinOpIntoSelect(N))
return NewSel;
if (SDValue V = combineRepeatedFPDivisors(N))
return V;
if (Options.UnsafeFPMath || Flags.hasAllowReciprocal()) {
// fold (fdiv X, c2) -> fmul X, 1/c2 if losing precision is acceptable.
if (N1CFP) {
// Compute the reciprocal 1.0 / c2.
const APFloat &N1APF = N1CFP->getValueAPF();
APFloat Recip(N1APF.getSemantics(), 1); // 1.0
APFloat::opStatus st = Recip.divide(N1APF, APFloat::rmNearestTiesToEven);
// Only do the transform if the reciprocal is a legal fp immediate that
// isn't too nasty (eg NaN, denormal, ...).
if ((st == APFloat::opOK || st == APFloat::opInexact) && // Not too nasty
(!LegalOperations ||
// FIXME: custom lowering of ConstantFP might fail (see e.g. ARM
// backend)... we should handle this gracefully after Legalize.
// TLI.isOperationLegalOrCustom(ISD::ConstantFP, VT) ||
TLI.isOperationLegal(ISD::ConstantFP, VT) ||
TLI.isFPImmLegal(Recip, VT, ForCodeSize)))
return DAG.getNode(ISD::FMUL, DL, VT, N0,
DAG.getConstantFP(Recip, DL, VT), Flags);
}
// If this FDIV is part of a reciprocal square root, it may be folded
// into a target-specific square root estimate instruction.
if (N1.getOpcode() == ISD::FSQRT) {
if (SDValue RV = buildRsqrtEstimate(N1.getOperand(0), Flags)) {
return DAG.getNode(ISD::FMUL, DL, VT, N0, RV, Flags);
}
} else if (N1.getOpcode() == ISD::FP_EXTEND &&
N1.getOperand(0).getOpcode() == ISD::FSQRT) {
if (SDValue RV = buildRsqrtEstimate(N1.getOperand(0).getOperand(0),
Flags)) {
RV = DAG.getNode(ISD::FP_EXTEND, SDLoc(N1), VT, RV);
AddToWorklist(RV.getNode());
return DAG.getNode(ISD::FMUL, DL, VT, N0, RV, Flags);
}
} else if (N1.getOpcode() == ISD::FP_ROUND &&
N1.getOperand(0).getOpcode() == ISD::FSQRT) {
if (SDValue RV = buildRsqrtEstimate(N1.getOperand(0).getOperand(0),
Flags)) {
RV = DAG.getNode(ISD::FP_ROUND, SDLoc(N1), VT, RV, N1.getOperand(1));
AddToWorklist(RV.getNode());
return DAG.getNode(ISD::FMUL, DL, VT, N0, RV, Flags);
}
} else if (N1.getOpcode() == ISD::FMUL) {
// Look through an FMUL. Even though this won't remove the FDIV directly,
// it's still worthwhile to get rid of the FSQRT if possible.
SDValue SqrtOp;
SDValue OtherOp;
if (N1.getOperand(0).getOpcode() == ISD::FSQRT) {
SqrtOp = N1.getOperand(0);
OtherOp = N1.getOperand(1);
} else if (N1.getOperand(1).getOpcode() == ISD::FSQRT) {
SqrtOp = N1.getOperand(1);
OtherOp = N1.getOperand(0);
}
if (SqrtOp.getNode()) {
// We found a FSQRT, so try to make this fold:
// x / (y * sqrt(z)) -> x * (rsqrt(z) / y)
if (SDValue RV = buildRsqrtEstimate(SqrtOp.getOperand(0), Flags)) {
RV = DAG.getNode(ISD::FDIV, SDLoc(N1), VT, RV, OtherOp, Flags);
AddToWorklist(RV.getNode());
return DAG.getNode(ISD::FMUL, DL, VT, N0, RV, Flags);
}
}
}
// Fold into a reciprocal estimate and multiply instead of a real divide.
if (SDValue RV = BuildReciprocalEstimate(N1, Flags)) {
AddToWorklist(RV.getNode());
return DAG.getNode(ISD::FMUL, DL, VT, N0, RV, Flags);
}
}
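// Illustrative sketch (not from the original source): the rsqrt folds above
// target expressions such as
//
//   x / sqrtf(z)          // -> x * rsqrt(z)
//   x / (y * sqrtf(z))    // -> x * (rsqrt(z) / y)
//
// when the target provides a reciprocal square-root estimate.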
// (fdiv (fneg X), (fneg Y)) -> (fdiv X, Y)
if (char LHSNeg = isNegatibleForFree(N0, LegalOperations, TLI, &Options,
ForCodeSize)) {
if (char RHSNeg = isNegatibleForFree(N1, LegalOperations, TLI, &Options,
ForCodeSize)) {
// Both can be negated for free, check to see if at least one is cheaper
// negated.
if (LHSNeg == 2 || RHSNeg == 2)
return DAG.getNode(ISD::FDIV, SDLoc(N), VT,
GetNegatedExpression(N0, DAG, LegalOperations,
ForCodeSize),
GetNegatedExpression(N1, DAG, LegalOperations,
ForCodeSize),
Flags);
}
}
return SDValue();
}
SDValue DAGCombiner::visitFREM(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
EVT VT = N->getValueType(0);
// fold (frem c1, c2) -> fmod(c1,c2)
if (N0CFP && N1CFP)
return DAG.getNode(ISD::FREM, SDLoc(N), VT, N0, N1, N->getFlags());
if (SDValue NewSel = foldBinOpIntoSelect(N))
return NewSel;
return SDValue();
}
SDValue DAGCombiner::visitFSQRT(SDNode *N) {
SDNodeFlags Flags = N->getFlags();
if (!DAG.getTarget().Options.UnsafeFPMath &&
!Flags.hasApproximateFuncs())
return SDValue();
SDValue N0 = N->getOperand(0);
if (TLI.isFsqrtCheap(N0, DAG))
return SDValue();
// FSQRT nodes have flags that propagate to the created nodes.
return buildSqrtEstimate(N0, Flags);
}
/// copysign(x, fp_extend(y)) -> copysign(x, y)
/// copysign(x, fp_round(y)) -> copysign(x, y)
static inline bool CanCombineFCOPYSIGN_EXTEND_ROUND(SDNode *N) {
SDValue N1 = N->getOperand(1);
if ((N1.getOpcode() == ISD::FP_EXTEND ||
N1.getOpcode() == ISD::FP_ROUND)) {
// Do not optimize out type conversion of f128 type yet.
// For some targets like x86_64, configuration is changed to keep one f128
// value in one SSE register, but instruction selection cannot handle
// FCOPYSIGN on SSE registers yet.
EVT N1VT = N1->getValueType(0);
EVT N1Op0VT = N1->getOperand(0).getValueType();
return (N1VT == N1Op0VT || N1Op0VT != MVT::f128);
}
return false;
}
SDValue DAGCombiner::visitFCOPYSIGN(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
bool N0CFP = isConstantFPBuildVectorOrConstantFP(N0);
bool N1CFP = isConstantFPBuildVectorOrConstantFP(N1);
EVT VT = N->getValueType(0);
if (N0CFP && N1CFP) // Constant fold
return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT, N0, N1);
if (ConstantFPSDNode *N1C = isConstOrConstSplatFP(N->getOperand(1))) {
const APFloat &V = N1C->getValueAPF();
// copysign(x, c1) -> fabs(x) iff ispos(c1)
// copysign(x, c1) -> fneg(fabs(x)) iff isneg(c1)
if (!V.isNegative()) {
if (!LegalOperations || TLI.isOperationLegal(ISD::FABS, VT))
return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0);
} else {
if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
return DAG.getNode(ISD::FNEG, SDLoc(N), VT,
DAG.getNode(ISD::FABS, SDLoc(N0), VT, N0));
}
}
// copysign(fabs(x), y) -> copysign(x, y)
// copysign(fneg(x), y) -> copysign(x, y)
// copysign(copysign(x,z), y) -> copysign(x, y)
if (N0.getOpcode() == ISD::FABS || N0.getOpcode() == ISD::FNEG ||
N0.getOpcode() == ISD::FCOPYSIGN)
return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT, N0.getOperand(0), N1);
// copysign(x, abs(y)) -> abs(x)
if (N1.getOpcode() == ISD::FABS)
return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0);
// copysign(x, copysign(y,z)) -> copysign(x, z)
if (N1.getOpcode() == ISD::FCOPYSIGN)
return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT, N0, N1.getOperand(1));
// copysign(x, fp_extend(y)) -> copysign(x, y)
// copysign(x, fp_round(y)) -> copysign(x, y)
if (CanCombineFCOPYSIGN_EXTEND_ROUND(N))
return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT, N0, N1.getOperand(0));
return SDValue();
}
SDValue DAGCombiner::visitFPOW(SDNode *N) {
ConstantFPSDNode *ExponentC = isConstOrConstSplatFP(N->getOperand(1));
if (!ExponentC)
return SDValue();
// Try to convert x ** (1/3) into cube root.
// TODO: Handle the various flavors of long double.
// TODO: Since we're approximating, we don't need an exact 1/3 exponent.
// Some range near 1/3 should be fine.
EVT VT = N->getValueType(0);
if ((VT == MVT::f32 && ExponentC->getValueAPF().isExactlyValue(1.0f/3.0f)) ||
(VT == MVT::f64 && ExponentC->getValueAPF().isExactlyValue(1.0/3.0))) {
// pow(-0.0, 1/3) = +0.0; cbrt(-0.0) = -0.0.
// pow(-inf, 1/3) = +inf; cbrt(-inf) = -inf.
// pow(-val, 1/3) = nan; cbrt(-val) = -cbrt(val).
// For regular numbers, rounding may cause the results to differ.
// Therefore, we require { nsz ninf nnan afn } for this transform.
// TODO: We could select out the special cases if we don't have nsz/ninf.
SDNodeFlags Flags = N->getFlags();
if (!Flags.hasNoSignedZeros() || !Flags.hasNoInfs() || !Flags.hasNoNaNs() ||
!Flags.hasApproximateFuncs())
return SDValue();
// Do not create a cbrt() libcall if the target does not have it, and do not
// turn a pow that has lowering support into a cbrt() libcall.
if (!DAG.getLibInfo().has(LibFunc_cbrt) ||
(!DAG.getTargetLoweringInfo().isOperationExpand(ISD::FPOW, VT) &&
DAG.getTargetLoweringInfo().isOperationExpand(ISD::FCBRT, VT)))
return SDValue();
return DAG.getNode(ISD::FCBRT, SDLoc(N), VT, N->getOperand(0), Flags);
}
// Try to convert x ** (1/4) and x ** (3/4) into square roots.
// x ** (1/2) is canonicalized to sqrt, so we do not bother with that case.
// TODO: This could be extended (using a target hook) to handle smaller
// power-of-2 fractional exponents.
bool ExponentIs025 = ExponentC->getValueAPF().isExactlyValue(0.25);
bool ExponentIs075 = ExponentC->getValueAPF().isExactlyValue(0.75);
if (ExponentIs025 || ExponentIs075) {
// pow(-0.0, 0.25) = +0.0; sqrt(sqrt(-0.0)) = -0.0.
// pow(-inf, 0.25) = +inf; sqrt(sqrt(-inf)) = NaN.
// pow(-0.0, 0.75) = +0.0; sqrt(-0.0) * sqrt(sqrt(-0.0)) = +0.0.
// pow(-inf, 0.75) = +inf; sqrt(-inf) * sqrt(sqrt(-inf)) = NaN.
// For regular numbers, rounding may cause the results to differ.
// Therefore, we require { nsz ninf afn } for this transform.
// TODO: We could select out the special cases if we don't have nsz/ninf.
SDNodeFlags Flags = N->getFlags();
// We only need no signed zeros for the 0.25 case.
if ((!Flags.hasNoSignedZeros() && ExponentIs025) || !Flags.hasNoInfs() ||
!Flags.hasApproximateFuncs())
return SDValue();
// Don't double the number of libcalls. We are trying to inline fast code.
if (!DAG.getTargetLoweringInfo().isOperationLegalOrCustom(ISD::FSQRT, VT))
return SDValue();
// Assume that libcalls are the smallest code.
// TODO: This restriction should probably be lifted for vectors.
if (DAG.getMachineFunction().getFunction().hasOptSize())
return SDValue();
// pow(X, 0.25) --> sqrt(sqrt(X))
SDLoc DL(N);
SDValue Sqrt = DAG.getNode(ISD::FSQRT, DL, VT, N->getOperand(0), Flags);
SDValue SqrtSqrt = DAG.getNode(ISD::FSQRT, DL, VT, Sqrt, Flags);
if (ExponentIs025)
return SqrtSqrt;
// pow(X, 0.75) --> sqrt(X) * sqrt(sqrt(X))
return DAG.getNode(ISD::FMUL, DL, VT, Sqrt, SqrtSqrt, Flags);
}
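// (Sketch: the 0.75 case above relies on the identity
// x**(3/4) == x**(1/2) * x**(1/4) == sqrt(x) * sqrt(sqrt(x)), which lets
// both exponents share the sqrt(sqrt(X)) node.)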
return SDValue();
}
static SDValue foldFPToIntToFP(SDNode *N, SelectionDAG &DAG,
const TargetLowering &TLI) {
// This optimization is guarded by a function attribute because it may produce
// unexpected results. I.e., programs may be relying on the platform-specific
// undefined behavior when the float-to-int conversion overflows.
const Function &F = DAG.getMachineFunction().getFunction();
Attribute StrictOverflow = F.getFnAttribute("strict-float-cast-overflow");
if (StrictOverflow.getValueAsString().equals("false"))
return SDValue();
// We only do this if the target has legal ftrunc. Otherwise, we'd likely be
// replacing casts with a libcall. We also must be allowed to ignore -0.0
// because FTRUNC will return -0.0 for inputs in (-1.0, -0.0), but using
// integer
// conversions would return +0.0.
// FIXME: We should be able to use node-level FMF here.
// TODO: If strict math, should we use FABS (+ range check for signed cast)?
EVT VT = N->getValueType(0);
if (!TLI.isOperationLegal(ISD::FTRUNC, VT) ||
!DAG.getTarget().Options.NoSignedZerosFPMath)
return SDValue();
// fptosi/fptoui round towards zero, so converting from FP to integer and
// back is the same as an 'ftrunc': [us]itofp (fpto[us]i X) --> ftrunc X
SDValue N0 = N->getOperand(0);
if (N->getOpcode() == ISD::SINT_TO_FP && N0.getOpcode() == ISD::FP_TO_SINT &&
N0.getOperand(0).getValueType() == VT)
return DAG.getNode(ISD::FTRUNC, SDLoc(N), VT, N0.getOperand(0));
if (N->getOpcode() == ISD::UINT_TO_FP && N0.getOpcode() == ISD::FP_TO_UINT &&
N0.getOperand(0).getValueType() == VT)
return DAG.getNode(ISD::FTRUNC, SDLoc(N), VT, N0.getOperand(0));
return SDValue();
}
SDValue DAGCombiner::visitSINT_TO_FP(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
EVT OpVT = N0.getValueType();
// [us]itofp(undef) = 0, because the result value is bounded.
if (N0.isUndef())
return DAG.getConstantFP(0.0, SDLoc(N), VT);
// fold (sint_to_fp c1) -> c1fp
if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
// ...but only if the target supports immediate floating-point values
(!LegalOperations ||
TLI.isOperationLegalOrCustom(ISD::ConstantFP, VT)))
return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, N0);
// If the input is a legal type, and SINT_TO_FP is not legal on this target,
// but UINT_TO_FP is legal on this target, try to convert.
if (!hasOperation(ISD::SINT_TO_FP, OpVT) &&
hasOperation(ISD::UINT_TO_FP, OpVT)) {
// If the sign bit is known to be zero, we can change this to UINT_TO_FP.
if (DAG.SignBitIsZero(N0))
return DAG.getNode(ISD::UINT_TO_FP, SDLoc(N), VT, N0);
}
// The next optimizations are desirable only if SELECT_CC can be lowered.
if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT) || !LegalOperations) {
// fold (sint_to_fp (setcc x, y, cc)) -> (select_cc x, y, -1.0, 0.0, cc)
if (N0.getOpcode() == ISD::SETCC && N0.getValueType() == MVT::i1 &&
!VT.isVector() &&
(!LegalOperations ||
TLI.isOperationLegalOrCustom(ISD::ConstantFP, VT))) {
SDLoc DL(N);
SDValue Ops[] =
{ N0.getOperand(0), N0.getOperand(1),
DAG.getConstantFP(-1.0, DL, VT), DAG.getConstantFP(0.0, DL, VT),
N0.getOperand(2) };
return DAG.getNode(ISD::SELECT_CC, DL, VT, Ops);
}
// fold (sint_to_fp (zext (setcc x, y, cc))) ->
// (select_cc x, y, 1.0, 0.0, cc)
if (N0.getOpcode() == ISD::ZERO_EXTEND &&
N0.getOperand(0).getOpcode() == ISD::SETCC && !VT.isVector() &&
(!LegalOperations ||
TLI.isOperationLegalOrCustom(ISD::ConstantFP, VT))) {
SDLoc DL(N);
SDValue Ops[] =
{ N0.getOperand(0).getOperand(0), N0.getOperand(0).getOperand(1),
DAG.getConstantFP(1.0, DL, VT), DAG.getConstantFP(0.0, DL, VT),
N0.getOperand(0).getOperand(2) };
return DAG.getNode(ISD::SELECT_CC, DL, VT, Ops);
}
}
if (SDValue FTrunc = foldFPToIntToFP(N, DAG, TLI))
return FTrunc;
return SDValue();
}
SDValue DAGCombiner::visitUINT_TO_FP(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
EVT OpVT = N0.getValueType();
// [us]itofp(undef) = 0, because the result value is bounded.
if (N0.isUndef())
return DAG.getConstantFP(0.0, SDLoc(N), VT);
// fold (uint_to_fp c1) -> c1fp
if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
// ...but only if the target supports immediate floating-point values
(!LegalOperations ||
TLI.isOperationLegalOrCustom(ISD::ConstantFP, VT)))
return DAG.getNode(ISD::UINT_TO_FP, SDLoc(N), VT, N0);
// If the input is a legal type, and UINT_TO_FP is not legal on this target,
// but SINT_TO_FP is legal on this target, try to convert.
if (!hasOperation(ISD::UINT_TO_FP, OpVT) &&
hasOperation(ISD::SINT_TO_FP, OpVT)) {
// If the sign bit is known to be zero, we can change this to SINT_TO_FP.
if (DAG.SignBitIsZero(N0))
return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, N0);
}
// The next optimizations are desirable only if SELECT_CC can be lowered.
if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT) || !LegalOperations) {
// fold (uint_to_fp (setcc x, y, cc)) -> (select_cc x, y, 1.0, 0.0, cc)
if (N0.getOpcode() == ISD::SETCC && !VT.isVector() &&
(!LegalOperations ||
TLI.isOperationLegalOrCustom(ISD::ConstantFP, VT))) {
SDLoc DL(N);
SDValue Ops[] =
{ N0.getOperand(0), N0.getOperand(1),
DAG.getConstantFP(1.0, DL, VT), DAG.getConstantFP(0.0, DL, VT),
N0.getOperand(2) };
return DAG.getNode(ISD::SELECT_CC, DL, VT, Ops);
}
}
if (SDValue FTrunc = foldFPToIntToFP(N, DAG, TLI))
return FTrunc;
return SDValue();
}
// Fold (fp_to_{s/u}int ({s/u}int_to_fp x)) -> zext x, sext x, trunc x, or x
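// For example (illustrative, not from the original source): for an i16
// value x, (fp_to_sint (sint_to_fp x)) over f32 folds to x itself, because
// every 16-bit integer is exactly representable in f32's 24-bit
// significand.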
static SDValue FoldIntToFPToInt(SDNode *N, SelectionDAG &DAG) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
if (N0.getOpcode() != ISD::UINT_TO_FP && N0.getOpcode() != ISD::SINT_TO_FP)
return SDValue();
SDValue Src = N0.getOperand(0);
EVT SrcVT = Src.getValueType();
bool IsInputSigned = N0.getOpcode() == ISD::SINT_TO_FP;
bool IsOutputSigned = N->getOpcode() == ISD::FP_TO_SINT;
// We can safely assume the conversion won't overflow the output range,
// because (for example) (uint8_t)18293.f is undefined behavior.
// Since we can assume the conversion won't overflow, our decision as to
// whether the input will fit in the float should depend on the minimum
// of the input range and output range.
// This means this is also safe for a signed input and unsigned output, since
// a negative input would lead to undefined behavior.
unsigned InputSize = (int)SrcVT.getScalarSizeInBits() - IsInputSigned;
unsigned OutputSize = (int)VT.getScalarSizeInBits() - IsOutputSigned;
unsigned ActualSize = std::min(InputSize, OutputSize);
const fltSemantics &sem = DAG.EVTToAPFloatSemantics(N0.getValueType());
// We can only fold away the float conversion if the input range can be
// represented exactly in the float range.
if (APFloat::semanticsPrecision(sem) >= ActualSize) {
if (VT.getScalarSizeInBits() > SrcVT.getScalarSizeInBits()) {
unsigned ExtOp = IsInputSigned && IsOutputSigned ? ISD::SIGN_EXTEND
: ISD::ZERO_EXTEND;
return DAG.getNode(ExtOp, SDLoc(N), VT, Src);
}
if (VT.getScalarSizeInBits() < SrcVT.getScalarSizeInBits())
return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Src);
return DAG.getBitcast(VT, Src);
}
return SDValue();
}
SDValue DAGCombiner::visitFP_TO_SINT(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
// fold (fp_to_sint undef) -> undef
if (N0.isUndef())
return DAG.getUNDEF(VT);
// fold (fp_to_sint c1fp) -> c1
if (isConstantFPBuildVectorOrConstantFP(N0))
return DAG.getNode(ISD::FP_TO_SINT, SDLoc(N), VT, N0);
return FoldIntToFPToInt(N, DAG);
}
SDValue DAGCombiner::visitFP_TO_UINT(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
// fold (fp_to_uint undef) -> undef
if (N0.isUndef())
return DAG.getUNDEF(VT);
// fold (fp_to_uint c1fp) -> c1
if (isConstantFPBuildVectorOrConstantFP(N0))
return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), VT, N0);
return FoldIntToFPToInt(N, DAG);
}
SDValue DAGCombiner::visitFP_ROUND(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
EVT VT = N->getValueType(0);
// fold (fp_round c1fp) -> c1fp
if (N0CFP)
return DAG.getNode(ISD::FP_ROUND, SDLoc(N), VT, N0, N1);
// fold (fp_round (fp_extend x)) -> x
if (N0.getOpcode() == ISD::FP_EXTEND && VT == N0.getOperand(0).getValueType())
return N0.getOperand(0);
// fold (fp_round (fp_round x)) -> (fp_round x)
if (N0.getOpcode() == ISD::FP_ROUND) {
const bool NIsTrunc = N->getConstantOperandVal(1) == 1;
const bool N0IsTrunc = N0.getConstantOperandVal(1) == 1;
// Skip this folding if it results in an fp_round from f80 to f16.
//
// f80 to f16 always generates an expensive (and as yet, unimplemented)
// libcall to __truncxfhf2 instead of selecting native f16 conversion
// instructions from f32 or f64. Moreover, the first (value-preserving)
// fp_round from f80 to either f32 or f64 may become a NOP in platforms like
// x86.
if (N0.getOperand(0).getValueType() == MVT::f80 && VT == MVT::f16)
return SDValue();
// If the first fp_round isn't a value preserving truncation, it might
// introduce a tie in the second fp_round, that wouldn't occur in the
// single-step fp_round we want to fold to.
// In other words, double rounding isn't the same as rounding.
// Also, this is a value preserving truncation iff both fp_round's are.
if (DAG.getTarget().Options.UnsafeFPMath || N0IsTrunc) {
SDLoc DL(N);
return DAG.getNode(ISD::FP_ROUND, DL, VT, N0.getOperand(0),
DAG.getIntPtrConstant(NIsTrunc && N0IsTrunc, DL));
}
}
// fold (fp_round (copysign X, Y)) -> (copysign (fp_round X), Y)
if (N0.getOpcode() == ISD::FCOPYSIGN && N0.getNode()->hasOneUse()) {
SDValue Tmp = DAG.getNode(ISD::FP_ROUND, SDLoc(N0), VT,
N0.getOperand(0), N1);
AddToWorklist(Tmp.getNode());
return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT,
Tmp, N0.getOperand(1));
}
if (SDValue NewVSel = matchVSelectOpSizesWithSetCC(N))
return NewVSel;
return SDValue();
}
SDValue DAGCombiner::visitFP_ROUND_INREG(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
// fold (fp_round_inreg c1fp) -> c1fp
if (N0CFP && isTypeLegal(EVT)) {
SDLoc DL(N);
SDValue Round = DAG.getConstantFP(*N0CFP->getConstantFPValue(), DL, EVT);
return DAG.getNode(ISD::FP_EXTEND, DL, VT, Round);
}
return SDValue();
}
SDValue DAGCombiner::visitFP_EXTEND(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
// If this is fp_round(fp_extend), don't fold it; allow ourselves to be folded.
if (N->hasOneUse() &&
N->use_begin()->getOpcode() == ISD::FP_ROUND)
return SDValue();
// fold (fp_extend c1fp) -> c1fp
if (isConstantFPBuildVectorOrConstantFP(N0))
return DAG.getNode(ISD::FP_EXTEND, SDLoc(N), VT, N0);
// fold (fp_extend (fp16_to_fp op)) -> (fp16_to_fp op)
if (N0.getOpcode() == ISD::FP16_TO_FP &&
TLI.getOperationAction(ISD::FP16_TO_FP, VT) == TargetLowering::Legal)
return DAG.getNode(ISD::FP16_TO_FP, SDLoc(N), VT, N0.getOperand(0));
// Turn fp_extend(fp_round(X, 1)) -> X since the fp_round doesn't affect the
// value of X.
if (N0.getOpcode() == ISD::FP_ROUND
&& N0.getConstantOperandVal(1) == 1) {
SDValue In = N0.getOperand(0);
if (In.getValueType() == VT) return In;
if (VT.bitsLT(In.getValueType()))
return DAG.getNode(ISD::FP_ROUND, SDLoc(N), VT,
In, N0.getOperand(1));
return DAG.getNode(ISD::FP_EXTEND, SDLoc(N), VT, In);
}
// fold (fpext (load x)) -> (fpext (fptrunc (extload x)))
if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
TLI.isLoadExtLegal(ISD::EXTLOAD, VT, N0.getValueType())) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, SDLoc(N), VT,
LN0->getChain(),
LN0->getBasePtr(), N0.getValueType(),
LN0->getMemOperand());
CombineTo(N, ExtLoad);
CombineTo(N0.getNode(),
DAG.getNode(ISD::FP_ROUND, SDLoc(N0),
N0.getValueType(), ExtLoad,
DAG.getIntPtrConstant(1, SDLoc(N0))),
ExtLoad.getValue(1));
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
if (SDValue NewVSel = matchVSelectOpSizesWithSetCC(N))
return NewVSel;
return SDValue();
}
SDValue DAGCombiner::visitFCEIL(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
// fold (fceil c1) -> fceil(c1)
if (isConstantFPBuildVectorOrConstantFP(N0))
return DAG.getNode(ISD::FCEIL, SDLoc(N), VT, N0);
return SDValue();
}
SDValue DAGCombiner::visitFTRUNC(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
// fold (ftrunc c1) -> ftrunc(c1)
if (isConstantFPBuildVectorOrConstantFP(N0))
return DAG.getNode(ISD::FTRUNC, SDLoc(N), VT, N0);
// fold ftrunc (known rounded int x) -> x
// ftrunc is a part of fptosi/fptoui expansion on some targets, so this is
// likely to be generated when extracting an integer from a rounded floating value.
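// For instance (illustrative), (ftrunc (ffloor X)) -> (ffloor X): the
// ffloor result is already an integer value, so a further ftrunc is a no-op.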
switch (N0.getOpcode()) {
default: break;
case ISD::FRINT:
case ISD::FTRUNC:
case ISD::FNEARBYINT:
case ISD::FFLOOR:
case ISD::FCEIL:
return N0;
}
return SDValue();
}
SDValue DAGCombiner::visitFFLOOR(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
// fold (ffloor c1) -> ffloor(c1)
if (isConstantFPBuildVectorOrConstantFP(N0))
return DAG.getNode(ISD::FFLOOR, SDLoc(N), VT, N0);
return SDValue();
}
// FIXME: FNEG and FABS have a lot in common; refactor.
SDValue DAGCombiner::visitFNEG(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
// Constant fold FNEG.
if (isConstantFPBuildVectorOrConstantFP(N0))
return DAG.getNode(ISD::FNEG, SDLoc(N), VT, N0);
if (isNegatibleForFree(N0, LegalOperations, DAG.getTargetLoweringInfo(),
&DAG.getTarget().Options, ForCodeSize))
return GetNegatedExpression(N0, DAG, LegalOperations, ForCodeSize);
// Transform fneg(bitconvert(x)) -> bitconvert(x ^ sign) to avoid loading
// constant pool values.
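// For example (illustrative), for a scalar f32 bitcast from i32 this emits
//   (fneg (bitcast i32 X)) -> (bitcast (xor X, 0x80000000))
// which flips only the IEEE sign bit.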
if (!TLI.isFNegFree(VT) &&
N0.getOpcode() == ISD::BITCAST &&
N0.getNode()->hasOneUse()) {
SDValue Int = N0.getOperand(0);
EVT IntVT = Int.getValueType();
if (IntVT.isInteger() && !IntVT.isVector()) {
APInt SignMask;
if (N0.getValueType().isVector()) {
// For a vector, get a mask such as 0x80... per scalar element
// and splat it.
SignMask = APInt::getSignMask(N0.getScalarValueSizeInBits());
SignMask = APInt::getSplat(IntVT.getSizeInBits(), SignMask);
} else {
// For a scalar, just generate 0x80...
SignMask = APInt::getSignMask(IntVT.getSizeInBits());
}
SDLoc DL0(N0);
Int = DAG.getNode(ISD::XOR, DL0, IntVT, Int,
DAG.getConstant(SignMask, DL0, IntVT));
AddToWorklist(Int.getNode());
return DAG.getBitcast(VT, Int);
}
}
// (fneg (fmul c, x)) -> (fmul -c, x)
if (N0.getOpcode() == ISD::FMUL &&
(N0.getNode()->hasOneUse() || !TLI.isFNegFree(VT))) {
ConstantFPSDNode *CFP1 = dyn_cast<ConstantFPSDNode>(N0.getOperand(1));
if (CFP1) {
APFloat CVal = CFP1->getValueAPF();
CVal.changeSign();
if (Level >= AfterLegalizeDAG &&
(TLI.isFPImmLegal(CVal, VT, ForCodeSize) ||
TLI.isOperationLegal(ISD::ConstantFP, VT)))
return DAG.getNode(
ISD::FMUL, SDLoc(N), VT, N0.getOperand(0),
DAG.getNode(ISD::FNEG, SDLoc(N), VT, N0.getOperand(1)),
N0->getFlags());
}
}
return SDValue();
}
static SDValue visitFMinMax(SelectionDAG &DAG, SDNode *N,
APFloat (*Op)(const APFloat &, const APFloat &)) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N->getValueType(0);
const ConstantFPSDNode *N0CFP = isConstOrConstSplatFP(N0);
const ConstantFPSDNode *N1CFP = isConstOrConstSplatFP(N1);
if (N0CFP && N1CFP) {
const APFloat &C0 = N0CFP->getValueAPF();
const APFloat &C1 = N1CFP->getValueAPF();
return DAG.getConstantFP(Op(C0, C1), SDLoc(N), VT);
}
// Canonicalize to constant on RHS.
if (isConstantFPBuildVectorOrConstantFP(N0) &&
!isConstantFPBuildVectorOrConstantFP(N1))
return DAG.getNode(N->getOpcode(), SDLoc(N), VT, N1, N0);
return SDValue();
}
SDValue DAGCombiner::visitFMINNUM(SDNode *N) {
return visitFMinMax(DAG, N, minnum);
}
SDValue DAGCombiner::visitFMAXNUM(SDNode *N) {
return visitFMinMax(DAG, N, maxnum);
}
SDValue DAGCombiner::visitFMINIMUM(SDNode *N) {
return visitFMinMax(DAG, N, minimum);
}
SDValue DAGCombiner::visitFMAXIMUM(SDNode *N) {
return visitFMinMax(DAG, N, maximum);
}
SDValue DAGCombiner::visitFABS(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
// fold (fabs c1) -> fabs(c1)
if (isConstantFPBuildVectorOrConstantFP(N0))
return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0);
// fold (fabs (fabs x)) -> (fabs x)
if (N0.getOpcode() == ISD::FABS)
return N->getOperand(0);
// fold (fabs (fneg x)) -> (fabs x)
// fold (fabs (fcopysign x, y)) -> (fabs x)
if (N0.getOpcode() == ISD::FNEG || N0.getOpcode() == ISD::FCOPYSIGN)
return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0.getOperand(0));
// fabs(bitcast(x)) -> bitcast(x & ~sign) to avoid constant pool loads.
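// For example (illustrative), for a scalar f32 bitcast from i32 this emits
//   (fabs (bitcast i32 X)) -> (bitcast (and X, 0x7fffffff))
// which clears only the IEEE sign bit.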
if (!TLI.isFAbsFree(VT) && N0.getOpcode() == ISD::BITCAST && N0.hasOneUse()) {
SDValue Int = N0.getOperand(0);
EVT IntVT = Int.getValueType();
if (IntVT.isInteger() && !IntVT.isVector()) {
APInt SignMask;
if (N0.getValueType().isVector()) {
// For a vector, get a mask such as 0x7f... per scalar element
// and splat it.
SignMask = ~APInt::getSignMask(N0.getScalarValueSizeInBits());
SignMask = APInt::getSplat(IntVT.getSizeInBits(), SignMask);
} else {
// For a scalar, just generate 0x7f...
SignMask = ~APInt::getSignMask(IntVT.getSizeInBits());
}
SDLoc DL(N0);
Int = DAG.getNode(ISD::AND, DL, IntVT, Int,
DAG.getConstant(SignMask, DL, IntVT));
AddToWorklist(Int.getNode());
return DAG.getBitcast(N->getValueType(0), Int);
}
}
return SDValue();
}
SDValue DAGCombiner::visitBRCOND(SDNode *N) {
SDValue Chain = N->getOperand(0);
SDValue N1 = N->getOperand(1);
SDValue N2 = N->getOperand(2);
// If N is a constant we could fold this into a fallthrough or unconditional
// branch. However that doesn't happen very often in normal code, because
// Instcombine/SimplifyCFG should have handled the available opportunities.
// If we did this folding here, it would be necessary to update the
// MachineBasicBlock CFG, which is awkward.
// fold a brcond with a setcc condition into a BR_CC node if BR_CC is legal
// on the target.
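// For example (illustrative):
//   brcond chain, (setcc seteq, %a, %b), %dest
//     -> br_cc chain, seteq, %a, %b, %dest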
if (N1.getOpcode() == ISD::SETCC &&
TLI.isOperationLegalOrCustom(ISD::BR_CC,
N1.getOperand(0).getValueType())) {
return DAG.getNode(ISD::BR_CC, SDLoc(N), MVT::Other,
Chain, N1.getOperand(2),
N1.getOperand(0), N1.getOperand(1), N2);
}
if (N1.hasOneUse()) {
if (SDValue NewN1 = rebuildSetCC(N1))
return DAG.getNode(ISD::BRCOND, SDLoc(N), MVT::Other, Chain, NewN1, N2);
}
return SDValue();
}
SDValue DAGCombiner::rebuildSetCC(SDValue N) {
if (N.getOpcode() == ISD::SRL ||
(N.getOpcode() == ISD::TRUNCATE &&
(N.getOperand(0).hasOneUse() &&
N.getOperand(0).getOpcode() == ISD::SRL))) {
// Look past the truncate.
if (N.getOpcode() == ISD::TRUNCATE)
N = N.getOperand(0);
// Match this pattern so that we can generate simpler code:
//
// %a = ...
// %b = and i32 %a, 2
// %c = srl i32 %b, 1
// brcond i32 %c ...
//
// into
//
// %a = ...
// %b = and i32 %a, 2
// %c = setcc eq %b, 0
// brcond %c ...
//
// This applies only when the AND constant value has one bit set and the
// SRL constant is equal to the log2 of the AND constant. The back-end is
// smart enough to convert the result into a TEST/JMP sequence.
SDValue Op0 = N.getOperand(0);
SDValue Op1 = N.getOperand(1);
if (Op0.getOpcode() == ISD::AND && Op1.getOpcode() == ISD::Constant) {
SDValue AndOp1 = Op0.getOperand(1);
if (AndOp1.getOpcode() == ISD::Constant) {
const APInt &AndConst = cast<ConstantSDNode>(AndOp1)->getAPIntValue();
if (AndConst.isPowerOf2() &&
cast<ConstantSDNode>(Op1)->getAPIntValue() == AndConst.logBase2()) {
SDLoc DL(N);
return DAG.getSetCC(DL, getSetCCResultType(Op0.getValueType()),
Op0, DAG.getConstant(0, DL, Op0.getValueType()),
ISD::SETNE);
}
}
}
}
// Transform br(xor(x, y)) -> br(x != y)
// Transform br(xor(xor(x,y), 1)) -> br (x == y)
if (N.getOpcode() == ISD::XOR) {
// Because we may call this on a speculatively constructed
// SimplifiedSetCC Node, we need to simplify this node first.
// Ideally this should be folded into SimplifySetCC and not
// here. For now, grab a handle to N so we don't lose it from
// replacements internal to the visit.
HandleSDNode XORHandle(N);
while (N.getOpcode() == ISD::XOR) {
SDValue Tmp = visitXOR(N.getNode());
// No simplification done.
if (!Tmp.getNode())
break;
// Returning N is a form of in-visit replacement that may invalidate
// N. Grab the value from the handle.
if (Tmp.getNode() == N.getNode())
N = XORHandle.getValue();
else // Node simplified. Try simplifying again.
N = Tmp;
}
if (N.getOpcode() != ISD::XOR)
return N;
SDNode *TheXor = N.getNode();
SDValue Op0 = TheXor->getOperand(0);
SDValue Op1 = TheXor->getOperand(1);
if (Op0.getOpcode() != ISD::SETCC && Op1.getOpcode() != ISD::SETCC) {
bool Equal = false;
if (isOneConstant(Op0) && Op0.hasOneUse() &&
Op0.getOpcode() == ISD::XOR) {
TheXor = Op0.getNode();
Equal = true;
}
EVT SetCCVT = N.getValueType();
if (LegalTypes)
SetCCVT = getSetCCResultType(SetCCVT);
// Replace the uses of XOR with SETCC
return DAG.getSetCC(SDLoc(TheXor), SetCCVT, Op0, Op1,
Equal ? ISD::SETEQ : ISD::SETNE);
}
}
return SDValue();
}
// Operand List for BR_CC: Chain, CondCC, CondLHS, CondRHS, DestBB.
//
SDValue DAGCombiner::visitBR_CC(SDNode *N) {
CondCodeSDNode *CC = cast<CondCodeSDNode>(N->getOperand(1));
SDValue CondLHS = N->getOperand(2), CondRHS = N->getOperand(3);
// If N is a constant we could fold this into a fallthrough or unconditional
// branch. However that doesn't happen very often in normal code, because
// Instcombine/SimplifyCFG should have handled the available opportunities.
// If we did this folding here, it would be necessary to update the
// MachineBasicBlock CFG, which is awkward.
// Use SimplifySetCC to simplify SETCC's.
SDValue Simp = SimplifySetCC(getSetCCResultType(CondLHS.getValueType()),
CondLHS, CondRHS, CC->get(), SDLoc(N),
false);
if (Simp.getNode()) AddToWorklist(Simp.getNode());
// fold to a simpler setcc
if (Simp.getNode() && Simp.getOpcode() == ISD::SETCC)
return DAG.getNode(ISD::BR_CC, SDLoc(N), MVT::Other,
N->getOperand(0), Simp.getOperand(2),
Simp.getOperand(0), Simp.getOperand(1),
N->getOperand(4));
return SDValue();
}
/// Return true if 'Use' is a load or a store that uses N as its base pointer
/// and that N may be folded in the load / store addressing mode.
static bool canFoldInAddressingMode(SDNode *N, SDNode *Use,
SelectionDAG &DAG,
const TargetLowering &TLI) {
EVT VT;
unsigned AS;
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Use)) {
if (LD->isIndexed() || LD->getBasePtr().getNode() != N)
return false;
VT = LD->getMemoryVT();
AS = LD->getAddressSpace();
} else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(Use)) {
if (ST->isIndexed() || ST->getBasePtr().getNode() != N)
return false;
VT = ST->getMemoryVT();
AS = ST->getAddressSpace();
} else
return false;
TargetLowering::AddrMode AM;
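// Illustrative mapping (a sketch): (add %reg, 16) yields {HasBaseReg,
// BaseOffs = 16}, (sub %reg, 16) yields {HasBaseReg, BaseOffs = -16}, and
// (add %reg1, %reg2) yields {HasBaseReg, Scale = 1}.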
if (N->getOpcode() == ISD::ADD) {
AM.HasBaseReg = true;
ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
if (Offset)
// [reg +/- imm]
AM.BaseOffs = Offset->getSExtValue();
else
// [reg +/- reg]
AM.Scale = 1;
} else if (N->getOpcode() == ISD::SUB) {
AM.HasBaseReg = true;
ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
if (Offset)
// [reg +/- imm]
AM.BaseOffs = -Offset->getSExtValue();
else
// [reg +/- reg]
AM.Scale = 1;
} else
return false;
return TLI.isLegalAddressingMode(DAG.getDataLayout(), AM,
VT.getTypeForEVT(*DAG.getContext()), AS);
}
/// Try turning a load/store into a pre-indexed load/store when the base
/// pointer is an add or subtract and it has other uses besides the load/store.
/// After the transformation, the new indexed load/store has effectively folded
/// the add/subtract in and all of its other uses are redirected to the
/// new load/store.
bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) {
if (Level < AfterLegalizeDAG)
return false;
bool isLoad = true;
SDValue Ptr;
EVT VT;
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
if (LD->isIndexed())
return false;
VT = LD->getMemoryVT();
if (!TLI.isIndexedLoadLegal(ISD::PRE_INC, VT) &&
!TLI.isIndexedLoadLegal(ISD::PRE_DEC, VT))
return false;
Ptr = LD->getBasePtr();
} else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
if (ST->isIndexed())
return false;
VT = ST->getMemoryVT();
if (!TLI.isIndexedStoreLegal(ISD::PRE_INC, VT) &&
!TLI.isIndexedStoreLegal(ISD::PRE_DEC, VT))
return false;
Ptr = ST->getBasePtr();
isLoad = false;
} else {
return false;
}
// If the pointer is not an add/sub, or if it doesn't have multiple uses, bail
// out. There is no reason to make this a preinc/predec.
if ((Ptr.getOpcode() != ISD::ADD && Ptr.getOpcode() != ISD::SUB) ||
Ptr.getNode()->hasOneUse())
return false;
// Ask the target to do addressing mode selection.
SDValue BasePtr;
SDValue Offset;
ISD::MemIndexedMode AM = ISD::UNINDEXED;
if (!TLI.getPreIndexedAddressParts(N, BasePtr, Offset, AM, DAG))
return false;
// Backends without true r+i pre-indexed forms may need to pass a
// constant base with a variable offset so that constant coercion
// will work with the patterns in canonical form.
bool Swapped = false;
if (isa<ConstantSDNode>(BasePtr)) {
std::swap(BasePtr, Offset);
Swapped = true;
}
// Don't create an indexed load / store with zero offset.
if (isNullConstant(Offset))
return false;
// Try turning it into a pre-indexed load / store except when:
// 1) The new base ptr is a frame index.
// 2) If N is a store and the new base ptr is either the same as or is a
// predecessor of the value being stored.
// 3) Another use of old base ptr is a predecessor of N. If ptr is folded
// that would create a cycle.
// 4) All uses are load / store ops that use it as old base ptr.
// Check #1. Preinc'ing a frame index would require copying the stack pointer
// (plus the implicit offset) to a register to preinc anyway.
if (isa<FrameIndexSDNode>(BasePtr) || isa<RegisterSDNode>(BasePtr))
return false;
// Check #2.
if (!isLoad) {
SDValue Val = cast<StoreSDNode>(N)->getValue();
// Would require a copy.
if (Val == BasePtr)
return false;
// Would create a cycle.
if (Val == Ptr || Ptr->isPredecessorOf(Val.getNode()))
return false;
}
// Caches for hasPredecessorHelper.
SmallPtrSet<const SDNode *, 32> Visited;
SmallVector<const SDNode *, 16> Worklist;
Worklist.push_back(N);
// If the offset is a constant, there may be other adds of constants that
// can be folded with this one. We should do this to avoid having to keep
// a copy of the original base pointer.
SmallVector<SDNode *, 16> OtherUses;
if (isa<ConstantSDNode>(Offset))
for (SDNode::use_iterator UI = BasePtr.getNode()->use_begin(),
UE = BasePtr.getNode()->use_end();
UI != UE; ++UI) {
SDUse &Use = UI.getUse();
// Skip the use that is Ptr and uses of other results from BasePtr's
// node (important for nodes that return multiple results).
if (Use.getUser() == Ptr.getNode() || Use != BasePtr)
continue;
if (SDNode::hasPredecessorHelper(Use.getUser(), Visited, Worklist))
continue;
if (Use.getUser()->getOpcode() != ISD::ADD &&
Use.getUser()->getOpcode() != ISD::SUB) {
OtherUses.clear();
break;
}
SDValue Op1 = Use.getUser()->getOperand((UI.getOperandNo() + 1) & 1);
if (!isa<ConstantSDNode>(Op1)) {
OtherUses.clear();
break;
}
// FIXME: In some cases, we can be smarter about this.
if (Op1.getValueType() != Offset.getValueType()) {
OtherUses.clear();
break;
}
OtherUses.push_back(Use.getUser());
}
if (Swapped)
std::swap(BasePtr, Offset);
// Now check for #3 and #4.
bool RealUse = false;
for (SDNode *Use : Ptr.getNode()->uses()) {
if (Use == N)
continue;
if (SDNode::hasPredecessorHelper(Use, Visited, Worklist))
return false;
// If Ptr may be folded in addressing mode of other use, then it's
// not profitable to do this transformation.
if (!canFoldInAddressingMode(Ptr.getNode(), Use, DAG, TLI))
RealUse = true;
}
if (!RealUse)
return false;
SDValue Result;
if (isLoad)
Result = DAG.getIndexedLoad(SDValue(N,0), SDLoc(N),
BasePtr, Offset, AM);
else
Result = DAG.getIndexedStore(SDValue(N,0), SDLoc(N),
BasePtr, Offset, AM);
++PreIndexedNodes;
++NodesCombined;
LLVM_DEBUG(dbgs() << "\nReplacing.4 "; N->dump(&DAG); dbgs() << "\nWith: ";
Result.getNode()->dump(&DAG); dbgs() << '\n');
WorklistRemover DeadNodes(*this);
if (isLoad) {
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0));
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2));
} else {
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(1));
}
// Finally, since the node is now dead, remove it from the graph.
deleteAndRecombine(N);
if (Swapped)
std::swap(BasePtr, Offset);
// Replace other uses of BasePtr that can be updated to use Ptr
for (unsigned i = 0, e = OtherUses.size(); i != e; ++i) {
unsigned OffsetIdx = 1;
if (OtherUses[i]->getOperand(OffsetIdx).getNode() == BasePtr.getNode())
OffsetIdx = 0;
assert(OtherUses[i]->getOperand(!OffsetIdx).getNode() ==
BasePtr.getNode() && "Expected BasePtr operand");
// We need to replace ptr0 in the following expression:
// x0 * offset0 + y0 * ptr0 = t0
// knowing that
// x1 * offset1 + y1 * ptr0 = t1 (the indexed load/store)
//
// where x0, x1, y0 and y1 in {-1, 1} are given by the types of the
// indexed load/store and the expression that needs to be re-written.
//
// Therefore, we have:
// t0 = (x0 * offset0 - x1 * y0 * y1 * offset1) + (y0 * y1) * t1
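// Worked example (illustrative): for a PRE_INC load t1 = ptr0 + offset1
// (x1 = y1 = 1) and another use t0 = ptr0 - offset0 (x0 = -1, y0 = 1),
// this yields t0 = (-offset0 - offset1) + t1; e.g. with offset0 = 8 and
// offset1 = 4, t0 = t1 - 12.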
ConstantSDNode *CN =
cast<ConstantSDNode>(OtherUses[i]->getOperand(OffsetIdx));
int X0, X1, Y0, Y1;
const APInt &Offset0 = CN->getAPIntValue();
APInt Offset1 = cast<ConstantSDNode>(Offset)->getAPIntValue();
X0 = (OtherUses[i]->getOpcode() == ISD::SUB && OffsetIdx == 1) ? -1 : 1;
Y0 = (OtherUses[i]->getOpcode() == ISD::SUB && OffsetIdx == 0) ? -1 : 1;
X1 = (AM == ISD::PRE_DEC && !Swapped) ? -1 : 1;
Y1 = (AM == ISD::PRE_DEC && Swapped) ? -1 : 1;
unsigned Opcode = (Y0 * Y1 < 0) ? ISD::SUB : ISD::ADD;
APInt CNV = Offset0;
if (X0 < 0) CNV = -CNV;
if (X1 * Y0 * Y1 < 0) CNV = CNV + Offset1;
else CNV = CNV - Offset1;
SDLoc DL(OtherUses[i]);
// We can now generate the new expression.
SDValue NewOp1 = DAG.getConstant(CNV, DL, CN->getValueType(0));
SDValue NewOp2 = Result.getValue(isLoad ? 1 : 0);
SDValue NewUse = DAG.getNode(Opcode,
DL,
OtherUses[i]->getValueType(0), NewOp1, NewOp2);
DAG.ReplaceAllUsesOfValueWith(SDValue(OtherUses[i], 0), NewUse);
deleteAndRecombine(OtherUses[i]);
}
// Replace the uses of Ptr with uses of the updated base value.
DAG.ReplaceAllUsesOfValueWith(Ptr, Result.getValue(isLoad ? 1 : 0));
deleteAndRecombine(Ptr.getNode());
AddToWorklist(Result.getNode());
return true;
}
/// Try to combine a load/store with an add/sub of the base pointer node into a
/// post-indexed load/store. The transformation effectively folds the
/// add/subtract into the new indexed load/store, and all of its other uses are
/// redirected to the new load/store.
bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) {
if (Level < AfterLegalizeDAG)
return false;
bool isLoad = true;
SDValue Ptr;
EVT VT;
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
if (LD->isIndexed())
return false;
VT = LD->getMemoryVT();
if (!TLI.isIndexedLoadLegal(ISD::POST_INC, VT) &&
!TLI.isIndexedLoadLegal(ISD::POST_DEC, VT))
return false;
Ptr = LD->getBasePtr();
} else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
if (ST->isIndexed())
return false;
VT = ST->getMemoryVT();
if (!TLI.isIndexedStoreLegal(ISD::POST_INC, VT) &&
!TLI.isIndexedStoreLegal(ISD::POST_DEC, VT))
return false;
Ptr = ST->getBasePtr();
isLoad = false;
} else {
return false;
}
if (Ptr.getNode()->hasOneUse())
return false;
for (SDNode *Op : Ptr.getNode()->uses()) {
if (Op == N ||
(Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB))
continue;
SDValue BasePtr;
SDValue Offset;
ISD::MemIndexedMode AM = ISD::UNINDEXED;
if (TLI.getPostIndexedAddressParts(N, Op, BasePtr, Offset, AM, DAG)) {
// Don't create an indexed load / store with zero offset.
if (isNullConstant(Offset))
continue;
// Try turning it into a post-indexed load / store except when
// 1) All uses are load / store ops that use it as base ptr (and
// it may be folded into the addressing mode).
// 2) Op must be independent of N, i.e. Op is neither a predecessor
// nor a successor of N. Otherwise, if Op is folded that would
// create a cycle.
if (isa<FrameIndexSDNode>(BasePtr) || isa<RegisterSDNode>(BasePtr))
continue;
// Check for #1.
bool TryNext = false;
for (SDNode *Use : BasePtr.getNode()->uses()) {
if (Use == Ptr.getNode())
continue;
// If all the uses are load / store addresses, then don't do the
// transformation.
if (Use->getOpcode() == ISD::ADD || Use->getOpcode() == ISD::SUB){
bool RealUse = false;
for (SDNode *UseUse : Use->uses()) {
if (!canFoldInAddressingMode(Use, UseUse, DAG, TLI))
RealUse = true;
}
if (!RealUse) {
TryNext = true;
break;
}
}
}
if (TryNext)
continue;
// Check for #2.
SmallPtrSet<const SDNode *, 32> Visited;
SmallVector<const SDNode *, 8> Worklist;
// Ptr is predecessor to both N and Op.
Visited.insert(Ptr.getNode());
Worklist.push_back(N);
Worklist.push_back(Op);
if (!SDNode::hasPredecessorHelper(N, Visited, Worklist) &&
!SDNode::hasPredecessorHelper(Op, Visited, Worklist)) {
SDValue Result = isLoad
? DAG.getIndexedLoad(SDValue(N,0), SDLoc(N),
BasePtr, Offset, AM)
: DAG.getIndexedStore(SDValue(N,0), SDLoc(N),
BasePtr, Offset, AM);
++PostIndexedNodes;
++NodesCombined;
LLVM_DEBUG(dbgs() << "\nReplacing.5 "; N->dump(&DAG);
dbgs() << "\nWith: "; Result.getNode()->dump(&DAG);
dbgs() << '\n');
WorklistRemover DeadNodes(*this);
if (isLoad) {
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0));
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2));
} else {
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(1));
}
// Finally, since the node is now dead, remove it from the graph.
deleteAndRecombine(N);
// Replace the uses of Use with uses of the updated base value.
DAG.ReplaceAllUsesOfValueWith(SDValue(Op, 0),
Result.getValue(isLoad ? 1 : 0));
deleteAndRecombine(Op);
return true;
}
}
}
return false;
}
/// Return the base-pointer arithmetic from an indexed \p LD.
SDValue DAGCombiner::SplitIndexingFromLoad(LoadSDNode *LD) {
ISD::MemIndexedMode AM = LD->getAddressingMode();
assert(AM != ISD::UNINDEXED);
SDValue BP = LD->getOperand(1);
SDValue Inc = LD->getOperand(2);
// Some backends use TargetConstants for load offsets, but don't expect
// TargetConstants in general ADD nodes. We can convert these constants into
// regular Constants (if the constant is not opaque).
assert((Inc.getOpcode() != ISD::TargetConstant ||
!cast<ConstantSDNode>(Inc)->isOpaque()) &&
"Cannot split out indexing using opaque target constants");
if (Inc.getOpcode() == ISD::TargetConstant) {
ConstantSDNode *ConstInc = cast<ConstantSDNode>(Inc);
Inc = DAG.getConstant(*ConstInc->getConstantIntValue(), SDLoc(Inc),
ConstInc->getValueType(0));
}
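// For example (illustrative), a PRE_INC load with base BP and increment 8
// splits into (add BP, 8), while the PRE_DEC and POST_DEC forms split into
// (sub BP, 8).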
unsigned Opc =
(AM == ISD::PRE_INC || AM == ISD::POST_INC ? ISD::ADD : ISD::SUB);
return DAG.getNode(Opc, SDLoc(LD), BP.getSimpleValueType(), BP, Inc);
}
static inline int numVectorEltsOrZero(EVT T) {
return T.isVector() ? T.getVectorNumElements() : 0;
}
bool DAGCombiner::getTruncatedStoreValue(StoreSDNode *ST, SDValue &Val) {
Val = ST->getValue();
EVT STType = Val.getValueType();
EVT STMemType = ST->getMemoryVT();
if (STType == STMemType)
return true;
if (isTypeLegal(STMemType))
return false; // fail.
if (STType.isFloatingPoint() && STMemType.isFloatingPoint() &&
TLI.isOperationLegal(ISD::FTRUNC, STMemType)) {
Val = DAG.getNode(ISD::FTRUNC, SDLoc(ST), STMemType, Val);
return true;
}
if (numVectorEltsOrZero(STType) == numVectorEltsOrZero(STMemType) &&
STType.isInteger() && STMemType.isInteger()) {
Val = DAG.getNode(ISD::TRUNCATE, SDLoc(ST), STMemType, Val);
return true;
}
if (STType.getSizeInBits() == STMemType.getSizeInBits()) {
Val = DAG.getBitcast(STMemType, Val);
return true;
}
return false; // fail.
}
bool DAGCombiner::extendLoadedValueToExtension(LoadSDNode *LD, SDValue &Val) {
EVT LDMemType = LD->getMemoryVT();
EVT LDType = LD->getValueType(0);
assert(Val.getValueType() == LDMemType &&
"Attempting to extend value of non-matching type");
if (LDType == LDMemType)
return true;
if (LDMemType.isInteger() && LDType.isInteger()) {
switch (LD->getExtensionType()) {
case ISD::NON_EXTLOAD:
Val = DAG.getBitcast(LDType, Val);
return true;
case ISD::EXTLOAD:
Val = DAG.getNode(ISD::ANY_EXTEND, SDLoc(LD), LDType, Val);
return true;
case ISD::SEXTLOAD:
Val = DAG.getNode(ISD::SIGN_EXTEND, SDLoc(LD), LDType, Val);
return true;
case ISD::ZEXTLOAD:
Val = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(LD), LDType, Val);
return true;
}
}
return false;
}
SDValue DAGCombiner::ForwardStoreValueToDirectLoad(LoadSDNode *LD) {
if (OptLevel == CodeGenOpt::None || LD->isVolatile())
return SDValue();
SDValue Chain = LD->getOperand(0);
StoreSDNode *ST = dyn_cast<StoreSDNode>(Chain.getNode());
if (!ST || ST->isVolatile())
return SDValue();
EVT LDType = LD->getValueType(0);
EVT LDMemType = LD->getMemoryVT();
EVT STMemType = ST->getMemoryVT();
EVT STType = ST->getValue().getValueType();
BaseIndexOffset BasePtrLD = BaseIndexOffset::match(LD, DAG);
BaseIndexOffset BasePtrST = BaseIndexOffset::match(ST, DAG);
int64_t Offset;
if (!BasePtrST.equalBaseIndex(BasePtrLD, DAG, Offset))
return SDValue();
// Normalize for endianness. After this, Offset=0 denotes that the least
// significant bit in the loaded value maps to the least significant bit in
// the stored value. With Offset=n (for n > 0) the loaded value starts at the
// nth least significant byte of the stored value.
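// For example (illustrative): for an i32 store and an i8 load of the same
// address, the address-based offset is 0, but on a big-endian target that
// byte is the most significant one, so Offset is normalized to
// (32 - 8) / 8 - 0 = 3.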
if (DAG.getDataLayout().isBigEndian())
Offset = (STMemType.getStoreSizeInBits() -
LDMemType.getStoreSizeInBits()) / 8 - Offset;
// Check that the stored value covers all bits that are loaded.
bool STCoversLD =
(Offset >= 0) &&
(Offset * 8 + LDMemType.getSizeInBits() <= STMemType.getSizeInBits());
auto ReplaceLd = [&](LoadSDNode *LD, SDValue Val, SDValue Chain) -> SDValue {
if (LD->isIndexed()) {
bool IsSub = (LD->getAddressingMode() == ISD::PRE_DEC ||
LD->getAddressingMode() == ISD::POST_DEC);
unsigned Opc = IsSub ? ISD::SUB : ISD::ADD;
SDValue Idx = DAG.getNode(Opc, SDLoc(LD), LD->getOperand(1).getValueType(),
LD->getOperand(1), LD->getOperand(2));
SDValue Ops[] = {Val, Idx, Chain};
return CombineTo(LD, Ops, 3);
}
return CombineTo(LD, Val, Chain);
};
if (!STCoversLD)
return SDValue();
// Memory as copy space (potentially masked).
if (Offset == 0 && LDType == STType && STMemType == LDMemType) {
// Simple case: Direct non-truncating forwarding
if (LDType.getSizeInBits() == LDMemType.getSizeInBits())
return ReplaceLd(LD, ST->getValue(), Chain);
// Can we model the truncate and extension with an and mask?
if (STType.isInteger() && LDMemType.isInteger() && !STType.isVector() &&
!LDMemType.isVector() && LD->getExtensionType() != ISD::SEXTLOAD) {
// Mask to size of LDMemType
auto Mask =
DAG.getConstant(APInt::getLowBitsSet(STType.getSizeInBits(),
STMemType.getSizeInBits()),
SDLoc(ST), STType);
auto Val = DAG.getNode(ISD::AND, SDLoc(LD), LDType, ST->getValue(), Mask);
return ReplaceLd(LD, Val, Chain);
}
}
// TODO: Deal with nonzero offset.
if (LD->getBasePtr().isUndef() || Offset != 0)
return SDValue();
// Model necessary truncations / extensions.
SDValue Val;
// Truncate the value to the stored memory size.
do {
if (!getTruncatedStoreValue(ST, Val))
continue;
if (!isTypeLegal(LDMemType))
continue;
if (STMemType != LDMemType) {
// TODO: Support vectors? This requires extract_subvector/bitcast.
if (!STMemType.isVector() && !LDMemType.isVector() &&
STMemType.isInteger() && LDMemType.isInteger())
Val = DAG.getNode(ISD::TRUNCATE, SDLoc(LD), LDMemType, Val);
else
continue;
}
if (!extendLoadedValueToExtension(LD, Val))
continue;
return ReplaceLd(LD, Val, Chain);
} while (false);
// On failure, clean up dead nodes we may have created.
if (Val->use_empty())
deleteAndRecombine(Val.getNode());
return SDValue();
}
SDValue DAGCombiner::visitLOAD(SDNode *N) {
LoadSDNode *LD = cast<LoadSDNode>(N);
SDValue Chain = LD->getChain();
SDValue Ptr = LD->getBasePtr();
// If load is not volatile and there are no uses of the loaded value (and
// the updated indexed value in case of indexed loads), change uses of the
// chain value into uses of the chain input (i.e. delete the dead load).
if (!LD->isVolatile()) {
if (N->getValueType(1) == MVT::Other) {
// Unindexed loads.
if (!N->hasAnyUseOfValue(0)) {
// It's not safe to use the two value CombineTo variant here. e.g.
// v1, chain2 = load chain1, loc
// v2, chain3 = load chain2, loc
// v3 = add v2, c
// Now we replace use of chain2 with chain1. This makes the second load
// isomorphic to the one we are deleting, and thus makes this load live.
LLVM_DEBUG(dbgs() << "\nReplacing.6 "; N->dump(&DAG);
dbgs() << "\nWith chain: "; Chain.getNode()->dump(&DAG);
dbgs() << "\n");
WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Chain);
AddUsersToWorklist(Chain.getNode());
if (N->use_empty())
deleteAndRecombine(N);
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
} else {
// Indexed loads.
assert(N->getValueType(2) == MVT::Other && "Malformed indexed loads?");
// If this load has an opaque TargetConstant offset, then we cannot split
// the indexing into an add/sub directly (that TargetConstant may not be
// valid for a different type of node, and we cannot convert an opaque
// target constant into a regular constant).
bool HasOTCInc = LD->getOperand(2).getOpcode() == ISD::TargetConstant &&
cast<ConstantSDNode>(LD->getOperand(2))->isOpaque();
if (!N->hasAnyUseOfValue(0) &&
((MaySplitLoadIndex && !HasOTCInc) || !N->hasAnyUseOfValue(1))) {
SDValue Undef = DAG.getUNDEF(N->getValueType(0));
SDValue Index;
if (N->hasAnyUseOfValue(1) && MaySplitLoadIndex && !HasOTCInc) {
Index = SplitIndexingFromLoad(LD);
// Try to fold the base pointer arithmetic into subsequent loads and
// stores.
AddUsersToWorklist(N);
} else
Index = DAG.getUNDEF(N->getValueType(1));
LLVM_DEBUG(dbgs() << "\nReplacing.7 "; N->dump(&DAG);
dbgs() << "\nWith: "; Undef.getNode()->dump(&DAG);
dbgs() << " and 2 other values\n");
WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Undef);
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Index);
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 2), Chain);
deleteAndRecombine(N);
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
}
}
// If this load is directly stored, replace the load value with the stored
// value.
if (auto V = ForwardStoreValueToDirectLoad(LD))
return V;
// Try to infer better alignment information than the load already has.
if (OptLevel != CodeGenOpt::None && LD->isUnindexed()) {
if (unsigned Align = DAG.InferPtrAlignment(Ptr)) {
if (Align > LD->getAlignment() && LD->getSrcValueOffset() % Align == 0) {
SDValue NewLoad = DAG.getExtLoad(
LD->getExtensionType(), SDLoc(N), LD->getValueType(0), Chain, Ptr,
LD->getPointerInfo(), LD->getMemoryVT(), Align,
LD->getMemOperand()->getFlags(), LD->getAAInfo());
// NewLoad will always be N as we are only refining the alignment
assert(NewLoad.getNode() == N);
(void)NewLoad;
}
}
}
if (LD->isUnindexed()) {
// Walk up chain skipping non-aliasing memory nodes.
SDValue BetterChain = FindBetterChain(LD, Chain);
// If there is a better chain.
if (Chain != BetterChain) {
SDValue ReplLoad;
// Replace the chain to avoid the dependency.
if (LD->getExtensionType() == ISD::NON_EXTLOAD) {
ReplLoad = DAG.getLoad(N->getValueType(0), SDLoc(LD),
BetterChain, Ptr, LD->getMemOperand());
} else {
ReplLoad = DAG.getExtLoad(LD->getExtensionType(), SDLoc(LD),
LD->getValueType(0),
BetterChain, Ptr, LD->getMemoryVT(),
LD->getMemOperand());
}
// Create token factor to keep old chain connected.
SDValue Token = DAG.getNode(ISD::TokenFactor, SDLoc(N),
MVT::Other, Chain, ReplLoad.getValue(1));
// Replace uses with load result and token factor
return CombineTo(N, ReplLoad.getValue(0), Token);
}
}
// Try transforming N to an indexed load.
if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
return SDValue(N, 0);
// Try to slice up N to more direct loads if the slices are mapped to
// different register banks or pairing can take place.
if (SliceUpLoad(N))
return SDValue(N, 0);
return SDValue();
}
namespace {
/// Helper structure used to slice a load in smaller loads.
/// Basically a slice is obtained from the following sequence:
/// Origin = load Ty1, Base
/// Shift = srl Ty1 Origin, CstTy Amount
/// Inst = trunc Shift to Ty2
///
/// Then, it will be rewritten into:
/// Slice = load SliceTy, Base + SliceOffset
/// [Inst = zext Slice to Ty2], only if SliceTy <> Ty2
///
/// SliceTy is deduced from the number of bits that are actually used to
/// build Inst.
struct LoadedSlice {
/// Helper structure used to compute the cost of a slice.
struct Cost {
/// Are we optimizing for code size?
bool ForCodeSize;
/// Various costs.
unsigned Loads = 0;
unsigned Truncates = 0;
unsigned CrossRegisterBanksCopies = 0;
unsigned ZExts = 0;
unsigned Shift = 0;
Cost(bool ForCodeSize = false) : ForCodeSize(ForCodeSize) {}
/// Get the cost of one isolated slice.
Cost(const LoadedSlice &LS, bool ForCodeSize = false)
: ForCodeSize(ForCodeSize), Loads(1) {
EVT TruncType = LS.Inst->getValueType(0);
EVT LoadedType = LS.getLoadedType();
if (TruncType != LoadedType &&
!LS.DAG->getTargetLoweringInfo().isZExtFree(LoadedType, TruncType))
ZExts = 1;
}
/// Account for slicing gain in the current cost.
/// Slicing provides a few gains, like removing a shift or a
/// truncate. This method allows growing the cost of the original
/// load with the gain from this slice.
void addSliceGain(const LoadedSlice &LS) {
// Each slice saves a truncate.
const TargetLowering &TLI = LS.DAG->getTargetLoweringInfo();
if (!TLI.isTruncateFree(LS.Inst->getOperand(0).getValueType(),
LS.Inst->getValueType(0)))
++Truncates;
// If there is a shift amount, this slice gets rid of it.
if (LS.Shift)
++Shift;
// If this slice can merge a cross register bank copy, account for it.
if (LS.canMergeExpensiveCrossRegisterBankCopy())
++CrossRegisterBanksCopies;
}
Cost &operator+=(const Cost &RHS) {
Loads += RHS.Loads;
Truncates += RHS.Truncates;
CrossRegisterBanksCopies += RHS.CrossRegisterBanksCopies;
ZExts += RHS.ZExts;
Shift += RHS.Shift;
return *this;
}
bool operator==(const Cost &RHS) const {
return Loads == RHS.Loads && Truncates == RHS.Truncates &&
CrossRegisterBanksCopies == RHS.CrossRegisterBanksCopies &&
ZExts == RHS.ZExts && Shift == RHS.Shift;
}
bool operator!=(const Cost &RHS) const { return !(*this == RHS); }
bool operator<(const Cost &RHS) const {
// Assume cross register banks copies are as expensive as loads.
// FIXME: Do we want some more target hooks?
unsigned ExpensiveOpsLHS = Loads + CrossRegisterBanksCopies;
unsigned ExpensiveOpsRHS = RHS.Loads + RHS.CrossRegisterBanksCopies;
// Unless we are optimizing for code size, consider the
// expensive operation first.
if (!ForCodeSize && ExpensiveOpsLHS != ExpensiveOpsRHS)
return ExpensiveOpsLHS < ExpensiveOpsRHS;
return (Truncates + ZExts + Shift + ExpensiveOpsLHS) <
(RHS.Truncates + RHS.ZExts + RHS.Shift + ExpensiveOpsRHS);
}
bool operator>(const Cost &RHS) const { return RHS < *this; }
bool operator<=(const Cost &RHS) const { return !(RHS < *this); }
bool operator>=(const Cost &RHS) const { return !(*this < RHS); }
};
// The last instruction that represents the slice. This should be a
// truncate instruction.
SDNode *Inst;
// The original load instruction.
LoadSDNode *Origin;
// The right shift amount in bits from the original load.
unsigned Shift;
// The DAG from which Origin came.
// This is used to get some contextual information about legal types, etc.
SelectionDAG *DAG;
LoadedSlice(SDNode *Inst = nullptr, LoadSDNode *Origin = nullptr,
unsigned Shift = 0, SelectionDAG *DAG = nullptr)
: Inst(Inst), Origin(Origin), Shift(Shift), DAG(DAG) {}
/// Get the bits used in a chunk of bits \p BitWidth large.
/// \return Result is \p BitWidth bits wide, with used bits set to 1 and
/// unused bits set to 0.
APInt getUsedBits() const {
// Reproduce the trunc(lshr) sequence:
// - Start from the truncated value.
// - Zero extend to the desired bit width.
// - Shift left.
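// For example (illustrative): for an i32 origin truncated to i8 after a
// right shift of 8, UsedBits = zext(0xff) << 8 = 0x0000ff00.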
assert(Origin && "No original load to compare against.");
unsigned BitWidth = Origin->getValueSizeInBits(0);
assert(Inst && "This slice is not bound to an instruction");
assert(Inst->getValueSizeInBits(0) <= BitWidth &&
"Extracted slice is bigger than the whole type!");
APInt UsedBits(Inst->getValueSizeInBits(0), 0);
UsedBits.setAllBits();
UsedBits = UsedBits.zext(BitWidth);
UsedBits <<= Shift;
return UsedBits;
}
/// Get the size of the slice to be loaded in bytes.
unsigned getLoadedSize() const {
unsigned SliceSize = getUsedBits().countPopulation();
assert(!(SliceSize & 0x7) && "Size is not a multiple of a byte.");
return SliceSize / 8;
}
/// Get the type that will be loaded for this slice.
/// Note: This may not be the final type for the slice.
EVT getLoadedType() const {
assert(DAG && "Missing context");
LLVMContext &Ctxt = *DAG->getContext();
return EVT::getIntegerVT(Ctxt, getLoadedSize() * 8);
}
/// Get the alignment of the load used for this slice.
unsigned getAlignment() const {
unsigned Alignment = Origin->getAlignment();
uint64_t Offset = getOffsetFromBase();
if (Offset != 0)
Alignment = MinAlign(Alignment, Alignment + Offset);
return Alignment;
}
/// Check if this slice can be rewritten with legal operations.
bool isLegal() const {
// An invalid slice is not legal.
if (!Origin || !Inst || !DAG)
return false;
// Offsets are for indexed loads only; we do not handle that.
if (!Origin->getOffset().isUndef())
return false;
const TargetLowering &TLI = DAG->getTargetLoweringInfo();
// Check that the type is legal.
EVT SliceType = getLoadedType();
if (!TLI.isTypeLegal(SliceType))
return false;
// Check that the load is legal for this type.
if (!TLI.isOperationLegal(ISD::LOAD, SliceType))
return false;
// Check that the offset can be computed.
// 1. Check its type.
EVT PtrType = Origin->getBasePtr().getValueType();
if (PtrType == MVT::Untyped || PtrType.isExtended())
return false;
// 2. Check that it fits in the immediate.
if (!TLI.isLegalAddImmediate(getOffsetFromBase()))
return false;
// 3. Check that the computation is legal.
if (!TLI.isOperationLegal(ISD::ADD, PtrType))
return false;
// Check that the zext is legal if it needs one.
EVT TruncateType = Inst->getValueType(0);
if (TruncateType != SliceType &&
!TLI.isOperationLegal(ISD::ZERO_EXTEND, TruncateType))
return false;
return true;
}
/// Get the offset in bytes of this slice in the original chunk of
/// bits.
/// \pre DAG != nullptr.
uint64_t getOffsetFromBase() const {
assert(DAG && "Missing context.");
bool IsBigEndian = DAG->getDataLayout().isBigEndian();
assert(!(Shift & 0x7) && "Shifts not aligned on Bytes are not supported.");
uint64_t Offset = Shift / 8;
unsigned TySizeInBytes = Origin->getValueSizeInBits(0) / 8;
assert(!(Origin->getValueSizeInBits(0) & 0x7) &&
"The size of the original loaded type is not a multiple of a"
" byte.");
// If Offset is bigger than TySizeInBytes, it means we are loading all
// zeros. This should have been optimized away earlier in the process.
assert(TySizeInBytes > Offset &&
"Invalid shift amount for given loaded size");
if (IsBigEndian)
Offset = TySizeInBytes - Offset - getLoadedSize();
return Offset;
}
/// Generate the sequence of instructions to load the slice
/// represented by this object and redirect the uses of this slice to
/// this new sequence of instructions.
/// \pre this->Inst && this->Origin are valid Instructions and this
/// object passed the legal check: LoadedSlice::isLegal returned true.
/// \return The last instruction of the sequence used to load the slice.
SDValue loadSlice() const {
assert(Inst && Origin && "Unable to replace a non-existing slice.");
const SDValue &OldBaseAddr = Origin->getBasePtr();
SDValue BaseAddr = OldBaseAddr;
// Get the offset in that chunk of bytes w.r.t. the endianness.
int64_t Offset = static_cast<int64_t>(getOffsetFromBase());
assert(Offset >= 0 && "Offset too big to fit in int64_t!");
if (Offset) {
// BaseAddr = BaseAddr + Offset.
EVT ArithType = BaseAddr.getValueType();
SDLoc DL(Origin);
BaseAddr = DAG->getNode(ISD::ADD, DL, ArithType, BaseAddr,
DAG->getConstant(Offset, DL, ArithType));
}
// Create the type of the loaded slice according to its size.
EVT SliceType = getLoadedType();
// Create the load for the slice.
SDValue LastInst =
DAG->getLoad(SliceType, SDLoc(Origin), Origin->getChain(), BaseAddr,
Origin->getPointerInfo().getWithOffset(Offset),
getAlignment(), Origin->getMemOperand()->getFlags());
// If the final type is not the same as the loaded type, this means that
// we have to pad with zero. Create a zero extend for that.
EVT FinalType = Inst->getValueType(0);
if (SliceType != FinalType)
LastInst =
DAG->getNode(ISD::ZERO_EXTEND, SDLoc(LastInst), FinalType, LastInst);
return LastInst;
}
/// Check if this slice can be merged with an expensive cross register
/// bank copy. E.g.,
/// i = load i32
/// f = bitcast i32 i to float
bool canMergeExpensiveCrossRegisterBankCopy() const {
if (!Inst || !Inst->hasOneUse())
return false;
SDNode *Use = *Inst->use_begin();
if (Use->getOpcode() != ISD::BITCAST)
return false;
assert(DAG && "Missing context");
const TargetLowering &TLI = DAG->getTargetLoweringInfo();
EVT ResVT = Use->getValueType(0);
const TargetRegisterClass *ResRC =
TLI.getRegClassFor(ResVT.getSimpleVT(), Use->isDivergent());
const TargetRegisterClass *ArgRC =
TLI.getRegClassFor(Use->getOperand(0).getValueType().getSimpleVT(),
Use->getOperand(0)->isDivergent());
if (ArgRC == ResRC || !TLI.isOperationLegal(ISD::LOAD, ResVT))
return false;
// At this point, we know that we perform a cross-register-bank copy.
// Check if it is expensive.
const TargetRegisterInfo *TRI = DAG->getSubtarget().getRegisterInfo();
// Assume bitcasts are cheap, unless both register classes do not
// explicitly share a common subclass.
if (!TRI || TRI->getCommonSubClass(ArgRC, ResRC))
return false;
// Check if it will be merged with the load.
// 1. Check the alignment constraint.
unsigned RequiredAlignment = DAG->getDataLayout().getABITypeAlignment(
ResVT.getTypeForEVT(*DAG->getContext()));
if (RequiredAlignment > getAlignment())
return false;
// 2. Check that the load is a legal operation for that type.
if (!TLI.isOperationLegal(ISD::LOAD, ResVT))
return false;
// 3. Check that we do not have a zext in the way.
if (Inst->getValueType(0) != getLoadedType())
return false;
return true;
}
};
} // end anonymous namespace
/// Check that all bits set in \p UsedBits form a dense region, i.e.,
/// \p UsedBits looks like 0..0 1..1 0..0.
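/// For example (illustrative), 0x0000ff00 is dense while 0x00ff00ff is not.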
static bool areUsedBitsDense(const APInt &UsedBits) {
// If all the bits are one, this is dense!
if (UsedBits.isAllOnesValue())
return true;
// Get rid of the unused bits on the right.
APInt NarrowedUsedBits = UsedBits.lshr(UsedBits.countTrailingZeros());
// Get rid of the unused bits on the left.
if (NarrowedUsedBits.countLeadingZeros())
NarrowedUsedBits = NarrowedUsedBits.trunc(NarrowedUsedBits.getActiveBits());
// Check that the chunk of bits is completely used.
return NarrowedUsedBits.isAllOnesValue();
}
/// Check whether or not \p First and \p Second are next to each other
/// in memory. This means that there is no hole between the bits loaded
/// by \p First and the bits loaded by \p Second.
static bool areSlicesNextToEachOther(const LoadedSlice &First,
const LoadedSlice &Second) {
assert(First.Origin == Second.Origin && First.Origin &&
"Unable to match different memory origins.");
APInt UsedBits = First.getUsedBits();
assert((UsedBits & Second.getUsedBits()) == 0 &&
"Slices are not supposed to overlap.");
UsedBits |= Second.getUsedBits();
return areUsedBitsDense(UsedBits);
}
/// Adjust the \p GlobalLSCost according to the target
/// pairing capabilities and the layout of the slices.
/// \pre \p GlobalLSCost should account for at least as many loads as
/// there are in the slices in \p LoadedSlices.
static void adjustCostForPairing(SmallVectorImpl<LoadedSlice> &LoadedSlices,
LoadedSlice::Cost &GlobalLSCost) {
unsigned NumberOfSlices = LoadedSlices.size();
// If there is less than 2 elements, no pairing is possible.
if (NumberOfSlices < 2)
return;
// Sort the slices so that elements that are likely to be next to each
// other in memory are next to each other in the list.
llvm::sort(LoadedSlices, [](const LoadedSlice &LHS, const LoadedSlice &RHS) {
assert(LHS.Origin == RHS.Origin && "Different bases not implemented.");
return LHS.getOffsetFromBase() < RHS.getOffsetFromBase();
});
const TargetLowering &TLI = LoadedSlices[0].DAG->getTargetLoweringInfo();
// First (resp. Second) is the first (resp. second) potential candidate
// to be placed in a paired load.
const LoadedSlice *First = nullptr;
const LoadedSlice *Second = nullptr;
for (unsigned CurrSlice = 0; CurrSlice < NumberOfSlices; ++CurrSlice,
// Set the beginning of the pair.
First = Second) {
Second = &LoadedSlices[CurrSlice];
// If First is NULL, it means we start a new pair.
// Get to the next slice.
if (!First)
continue;
EVT LoadedType = First->getLoadedType();
// If the types of the slices are different, we cannot pair them.
if (LoadedType != Second->getLoadedType())
continue;
// Check if the target supplies paired loads for this type.
unsigned RequiredAlignment = 0;
if (!TLI.hasPairedLoad(LoadedType, RequiredAlignment)) {
// Move to the next pair; this type is hopeless.
Second = nullptr;
continue;
}
// Check if we meet the alignment requirement.
if (RequiredAlignment > First->getAlignment())
continue;
// Check that both loads are next to each other in memory.
if (!areSlicesNextToEachOther(*First, *Second))
continue;
assert(GlobalLSCost.Loads > 0 && "We save more loads than we created!");
--GlobalLSCost.Loads;
// Move to the next pair.
Second = nullptr;
}
}
/// Check the profitability of all involved LoadedSlice objects.
/// Currently, it is considered profitable if there are exactly two
/// involved slices (1) which are (2) next to each other in memory, and
/// whose cost (\see LoadedSlice::Cost) is smaller than the original load (3).
///
/// Note: The order of the elements in \p LoadedSlices may be modified, but not
/// the elements themselves.
///
/// FIXME: When the cost model will be mature enough, we can relax
/// constraints (1) and (2).
static bool isSlicingProfitable(SmallVectorImpl<LoadedSlice> &LoadedSlices,
const APInt &UsedBits, bool ForCodeSize) {
unsigned NumberOfSlices = LoadedSlices.size();
if (StressLoadSlicing)
return NumberOfSlices > 1;
// Check (1).
if (NumberOfSlices != 2)
return false;
// Check (2).
if (!areUsedBitsDense(UsedBits))
return false;
// Check (3).
LoadedSlice::Cost OrigCost(ForCodeSize), GlobalSlicingCost(ForCodeSize);
// The original code has one big load.
OrigCost.Loads = 1;
for (unsigned CurrSlice = 0; CurrSlice < NumberOfSlices; ++CurrSlice) {
const LoadedSlice &LS = LoadedSlices[CurrSlice];
// Accumulate the cost of all the slices.
LoadedSlice::Cost SliceCost(LS, ForCodeSize);
GlobalSlicingCost += SliceCost;
// Account as cost in the original configuration the gain obtained
// with the current slices.
OrigCost.addSliceGain(LS);
}
// If the target supports paired load, adjust the cost accordingly.
adjustCostForPairing(LoadedSlices, GlobalSlicingCost);
return OrigCost > GlobalSlicingCost;
}
/// If the given load, \p LI, is used only by trunc or trunc(lshr)
/// operations, split it in the various pieces being extracted.
///
/// This sort of thing is introduced by SROA.
/// This slicing takes care not to insert overlapping loads.
/// \pre LI is a simple load (i.e., not an atomic or volatile load).
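/// For example (illustrative), an i64 load used only as (trunc X to i32) and
/// (trunc (lshr X, 32) to i32) may be rewritten as two i32 loads, at Base and
/// at Base + 4 on a little-endian target, when the cost model deems it
/// profitable.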
bool DAGCombiner::SliceUpLoad(SDNode *N) {
if (Level < AfterLegalizeDAG)
return false;
LoadSDNode *LD = cast<LoadSDNode>(N);
if (LD->isVolatile() || !ISD::isNormalLoad(LD) ||
!LD->getValueType(0).isInteger())
return false;
// Keep track of already used bits to detect overlapping values.
// In that case, we will just abort the transformation.
APInt UsedBits(LD->getValueSizeInBits(0), 0);
SmallVector<LoadedSlice, 4> LoadedSlices;
// Check if this load is used as several smaller chunks of bits.
// Basically, look for uses in trunc or trunc(lshr) and record a new chain
// of computation for each trunc.
for (SDNode::use_iterator UI = LD->use_begin(), UIEnd = LD->use_end();
UI != UIEnd; ++UI) {
// Skip the uses of the chain.
if (UI.getUse().getResNo() != 0)
continue;
SDNode *User = *UI;
unsigned Shift = 0;
// Check if this is a trunc(lshr).
if (User->getOpcode() == ISD::SRL && User->hasOneUse() &&
isa<ConstantSDNode>(User->getOperand(1))) {
Shift = User->getConstantOperandVal(1);
User = *User->use_begin();
}
// At this point, User is a truncate in both the trunc and the
// trunc(lshr) cases.
if (User->getOpcode() != ISD::TRUNCATE)
return false;
// The width of the type must be a power of 2 and at least 8 bits.
// Otherwise the load cannot be represented in LLVM IR.
// Moreover, if the shift amount is not a multiple of 8 bits, the slice
// straddles byte boundaries. We do not support that.
unsigned Width = User->getValueSizeInBits(0);
if (Width < 8 || !isPowerOf2_32(Width) || (Shift & 0x7))
return false;
// Build the slice for this chain of computations.
LoadedSlice LS(User, LD, Shift, &DAG);
APInt CurrentUsedBits = LS.getUsedBits();
// Check if this slice overlaps with another.
if ((CurrentUsedBits & UsedBits) != 0)
return false;
// Update the bits used globally.
UsedBits |= CurrentUsedBits;
// Check if the new slice would be legal.
if (!LS.isLegal())
return false;
// Record the slice.
LoadedSlices.push_back(LS);
}
// Abort slicing if it does not seem to be profitable.
if (!isSlicingProfitable(LoadedSlices, UsedBits, ForCodeSize))
return false;
++SlicedLoads;
// Rewrite each chain to use an independent load.
// By construction, each chain can be represented by a unique load.
// Prepare the argument for the new token factor for all the slices.
SmallVector<SDValue, 8> ArgChains;
for (SmallVectorImpl<LoadedSlice>::const_iterator
LSIt = LoadedSlices.begin(),
LSItEnd = LoadedSlices.end();
LSIt != LSItEnd; ++LSIt) {
SDValue SliceInst = LSIt->loadSlice();
CombineTo(LSIt->Inst, SliceInst, true);
if (SliceInst.getOpcode() != ISD::LOAD)
SliceInst = SliceInst.getOperand(0);
assert(SliceInst->getOpcode() == ISD::LOAD &&
"It takes more than a zext to get to the loaded slice!!");
ArgChains.push_back(SliceInst.getValue(1));
}
SDValue Chain = DAG.getNode(ISD::TokenFactor, SDLoc(LD), MVT::Other,
ArgChains);
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Chain);
AddToWorklist(Chain.getNode());
return true;
}
/// Check to see if V is (and (load ptr), imm), where the load has
/// specific bytes cleared out. If so, return the number of bytes being masked
/// out and the shift amount.
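/// For example (illustrative): for an i32 V = (and (load Ptr), 0xffff00ff),
/// byte 1 is being cleared, so the result is {1, 1}: one byte masked out,
/// shifted by one byte.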
static std::pair<unsigned, unsigned>
CheckForMaskedLoad(SDValue V, SDValue Ptr, SDValue Chain) {
std::pair<unsigned, unsigned> Result(0, 0);
// Check for the structure we're looking for.
if (V->getOpcode() != ISD::AND ||
!isa<ConstantSDNode>(V->getOperand(1)) ||
!ISD::isNormalLoad(V->getOperand(0).getNode()))
return Result;
// Check the chain and pointer.
LoadSDNode *LD = cast<LoadSDNode>(V->getOperand(0));
if (LD->getBasePtr() != Ptr) return Result; // Not from same pointer.
// This only handles simple types.
if (V.getValueType() != MVT::i16 &&
V.getValueType() != MVT::i32 &&
V.getValueType() != MVT::i64)
return Result;
// Check the constant mask. Invert it so that the bits being masked out are
// 0 and the bits being kept are 1. Use getSExtValue so that leading bits
// follow the sign bit for uniformity.
uint64_t NotMask = ~cast<ConstantSDNode>(V->getOperand(1))->getSExtValue();
unsigned NotMaskLZ = countLeadingZeros(NotMask);
if (NotMaskLZ & 7) return Result; // Must be multiple of a byte.
unsigned NotMaskTZ = countTrailingZeros(NotMask);
if (NotMaskTZ & 7) return Result; // Must be multiple of a byte.
if (NotMaskLZ == 64) return Result; // All zero mask.
// See if we have a contiguous run of bits. If so, we have 0*1+0*
if (countTrailingOnes(NotMask >> NotMaskTZ) + NotMaskTZ + NotMaskLZ != 64)
return Result;
// Adjust NotMaskLZ down to be from the actual size of the int instead of i64.
if (V.getValueType() != MVT::i64 && NotMaskLZ)
NotMaskLZ -= 64-V.getValueSizeInBits();
unsigned MaskedBytes = (V.getValueSizeInBits()-NotMaskLZ-NotMaskTZ)/8;
switch (MaskedBytes) {
case 1:
case 2:
case 4: break;
default: return Result; // All one mask, or 5-byte mask.
}
// Verify that the masked run of bytes starts at a byte offset that is a
// multiple of the mask size, so the narrowed access is aligned the same as
// the access width.
if (NotMaskTZ && NotMaskTZ/8 % MaskedBytes) return Result;
// For narrowing to be valid, the load must be the memory operation
// immediately preceding the store.
if (LD == Chain.getNode())
; // ok.
else if (Chain->getOpcode() == ISD::TokenFactor &&
SDValue(LD, 1).hasOneUse()) {
// LD has only 1 chain use, so there are no indirect dependencies.
bool isOk = false;
for (const SDValue &ChainOp : Chain->op_values())
if (ChainOp.getNode() == LD) {
isOk = true;
break;
}
if (!isOk)
return Result;
} else
return Result; // Fail.
Result.first = MaskedBytes;
Result.second = NotMaskTZ/8;
return Result;
}
/// Check to see if IVal is something that provides a value as specified by
/// MaskInfo. If so, replace the specified store with a narrower store of
/// truncated IVal.
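/// For example (illustrative): with MaskInfo = {1, 1} and an i32 IVal on a
/// little-endian target, the store becomes a one-byte store of
/// (trunc (srl IVal, 8)) at Ptr + 1.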
static SDNode *
ShrinkLoadReplaceStoreWithStore(const std::pair<unsigned, unsigned> &MaskInfo,
SDValue IVal, StoreSDNode *St,
DAGCombiner *DC) {
unsigned NumBytes = MaskInfo.first;
unsigned ByteShift = MaskInfo.second;
SelectionDAG &DAG = DC->getDAG();
// Check to see if IVal is all zeros in the part being masked in by the 'or'
// that uses this. If not, this is not a replacement.
APInt Mask = ~APInt::getBitsSet(IVal.getValueSizeInBits(),
ByteShift*8, (ByteShift+NumBytes)*8);
if (!DAG.MaskedValueIsZero(IVal, Mask)) return nullptr;
// Check that it is legal on the target to do this. It is legal if the new
// VT we're shrinking to (i8/i16/i32) is legal or we're still before type
// legalization.
MVT VT = MVT::getIntegerVT(NumBytes*8);
if (!DC->isTypeLegal(VT))
return nullptr;
// Okay, we can do this! Replace the 'St' store with a store of IVal that is
// shifted by ByteShift and truncated down to NumBytes.
if (ByteShift) {
SDLoc DL(IVal);
IVal = DAG.getNode(ISD::SRL, DL, IVal.getValueType(), IVal,
DAG.getConstant(ByteShift*8, DL,
DC->getShiftAmountTy(IVal.getValueType())));
}
// Figure out the offset for the store and the alignment of the access.
unsigned StOffset;
unsigned NewAlign = St->getAlignment();
if (DAG.getDataLayout().isLittleEndian())
StOffset = ByteShift;
else
StOffset = IVal.getValueType().getStoreSize() - ByteShift - NumBytes;
SDValue Ptr = St->getBasePtr();
if (StOffset) {
SDLoc DL(IVal);
Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(),
Ptr, DAG.getConstant(StOffset, DL, Ptr.getValueType()));
NewAlign = MinAlign(NewAlign, StOffset);
}
// Truncate down to the new size.
IVal = DAG.getNode(ISD::TRUNCATE, SDLoc(IVal), VT, IVal);
++OpsNarrowed;
return DAG
.getStore(St->getChain(), SDLoc(St), IVal, Ptr,
St->getPointerInfo().getWithOffset(StOffset), NewAlign)
.getNode();
}
/// Look for sequence of load / op / store where op is one of 'or', 'xor', and
/// 'and' of immediates. If 'op' is only touching some of the loaded bits, try
/// narrowing the load and store if it would end up being a win for performance
/// or code size.
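/// For example (illustrative, target permitting),
///   store (or (load P), 0x00ff0000), P
/// on a little-endian target can be narrowed to an i8 load / or 0xff / store
/// sequence at P + 2.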
SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
StoreSDNode *ST = cast<StoreSDNode>(N);
if (ST->isVolatile())
return SDValue();
SDValue Chain = ST->getChain();
SDValue Value = ST->getValue();
SDValue Ptr = ST->getBasePtr();
EVT VT = Value.getValueType();
if (ST->isTruncatingStore() || VT.isVector() || !Value.hasOneUse())
return SDValue();
unsigned Opc = Value.getOpcode();
// If this is "store (or X, Y), P" and X is "(and (load P), cst)", where cst
// is a byte mask indicating a consecutive number of bytes, check to see if
// Y is known to provide just those bytes. If so, we try to replace the
// load + replace + store sequence with a single (narrower) store, which makes
// the load dead.
if (Opc == ISD::OR) {
std::pair<unsigned, unsigned> MaskedLoad;
MaskedLoad = CheckForMaskedLoad(Value.getOperand(0), Ptr, Chain);
if (MaskedLoad.first)
if (SDNode *NewST = ShrinkLoadReplaceStoreWithStore(MaskedLoad,
Value.getOperand(1), ST,this))
return SDValue(NewST, 0);
// Or is commutative, so try swapping X and Y.
MaskedLoad = CheckForMaskedLoad(Value.getOperand(1), Ptr, Chain);
if (MaskedLoad.first)
if (SDNode *NewST = ShrinkLoadReplaceStoreWithStore(MaskedLoad,
Value.getOperand(0), ST,this))
return SDValue(NewST, 0);
}
if ((Opc != ISD::OR && Opc != ISD::XOR && Opc != ISD::AND) ||
Value.getOperand(1).getOpcode() != ISD::Constant)
return SDValue();
SDValue N0 = Value.getOperand(0);
if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
Chain == SDValue(N0.getNode(), 1)) {
LoadSDNode *LD = cast<LoadSDNode>(N0);
if (LD->getBasePtr() != Ptr ||
LD->getPointerInfo().getAddrSpace() !=
ST->getPointerInfo().getAddrSpace())
return SDValue();
// Find the type to narrow it the load / op / store to.
SDValue N1 = Value.getOperand(1);
unsigned BitWidth = N1.getValueSizeInBits();
APInt Imm = cast<ConstantSDNode>(N1)->getAPIntValue();
if (Opc == ISD::AND)
Imm ^= APInt::getAllOnesValue(BitWidth);
if (Imm == 0 || Imm.isAllOnesValue())
return SDValue();
unsigned ShAmt = Imm.countTrailingZeros();
unsigned MSB = BitWidth - Imm.countLeadingZeros() - 1;
unsigned NewBW = NextPowerOf2(MSB - ShAmt);
EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), NewBW);
// The narrowing should be profitable, the load/store operation should be
// legal (or custom) and the store size should be equal to the NewVT width.
while (NewBW < BitWidth &&
(NewVT.getStoreSizeInBits() != NewBW ||
!TLI.isOperationLegalOrCustom(Opc, NewVT) ||
!TLI.isNarrowingProfitable(VT, NewVT))) {
NewBW = NextPowerOf2(NewBW);
NewVT = EVT::getIntegerVT(*DAG.getContext(), NewBW);
}
if (NewBW >= BitWidth)
return SDValue();
// If the lsb of the changed bits does not fall on a NewBW boundary,
// realign ShAmt down to the previous boundary.
if (ShAmt % NewBW)
ShAmt = (((ShAmt + NewBW - 1) / NewBW) * NewBW) - NewBW;
APInt Mask = APInt::getBitsSet(BitWidth, ShAmt,
std::min(BitWidth, ShAmt + NewBW));
if ((Imm & Mask) == Imm) {
APInt NewImm = (Imm & Mask).lshr(ShAmt).trunc(NewBW);
if (Opc == ISD::AND)
NewImm ^= APInt::getAllOnesValue(NewBW);
uint64_t PtrOff = ShAmt / 8;
// For big endian targets, we need to adjust the pointer offset to
// load the correct bytes.
if (DAG.getDataLayout().isBigEndian())
PtrOff = (BitWidth + 7 - NewBW) / 8 - PtrOff;
unsigned NewAlign = MinAlign(LD->getAlignment(), PtrOff);
Type *NewVTTy = NewVT.getTypeForEVT(*DAG.getContext());
if (NewAlign < DAG.getDataLayout().getABITypeAlignment(NewVTTy))
return SDValue();
SDValue NewPtr = DAG.getNode(ISD::ADD, SDLoc(LD),
Ptr.getValueType(), Ptr,
DAG.getConstant(PtrOff, SDLoc(LD),
Ptr.getValueType()));
SDValue NewLD =
DAG.getLoad(NewVT, SDLoc(N0), LD->getChain(), NewPtr,
LD->getPointerInfo().getWithOffset(PtrOff), NewAlign,
LD->getMemOperand()->getFlags(), LD->getAAInfo());
SDValue NewVal = DAG.getNode(Opc, SDLoc(Value), NewVT, NewLD,
DAG.getConstant(NewImm, SDLoc(Value),
NewVT));
SDValue NewST =
DAG.getStore(Chain, SDLoc(N), NewVal, NewPtr,
ST->getPointerInfo().getWithOffset(PtrOff), NewAlign);
AddToWorklist(NewPtr.getNode());
AddToWorklist(NewLD.getNode());
AddToWorklist(NewVal.getNode());
WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), NewLD.getValue(1));
++OpsNarrowed;
return NewST;
}
}
return SDValue();
}
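// Worked example for the narrowing above (hypothetical, little-endian): for
//   store i32 (or (load p), 0x00100000), p
// Imm gives ShAmt = 20 and MSB = 20; NewBW settles on the legal i8, ShAmt
// is realigned down to 16, and the sequence becomes
//   t = load i8, p + 2
//   store i8 (or t, 0x10), p + 2
// replacing a 4-byte read-modify-write with a 1-byte one.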
/// For a given floating point load / store pair, if the load value isn't used
/// by any other operations, then consider transforming the pair to integer
/// load / store operations if the target deems the transformation profitable.
SDValue DAGCombiner::TransformFPLoadStorePair(SDNode *N) {
StoreSDNode *ST = cast<StoreSDNode>(N);
SDValue Value = ST->getValue();
if (ISD::isNormalStore(ST) && ISD::isNormalLoad(Value.getNode()) &&
Value.hasOneUse()) {
LoadSDNode *LD = cast<LoadSDNode>(Value);
EVT VT = LD->getMemoryVT();
if (!VT.isFloatingPoint() ||
VT != ST->getMemoryVT() ||
LD->isNonTemporal() ||
ST->isNonTemporal() ||
LD->getPointerInfo().getAddrSpace() != 0 ||
ST->getPointerInfo().getAddrSpace() != 0)
return SDValue();
EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
if (!TLI.isOperationLegal(ISD::LOAD, IntVT) ||
!TLI.isOperationLegal(ISD::STORE, IntVT) ||
!TLI.isDesirableToTransformToIntegerOp(ISD::LOAD, VT) ||
!TLI.isDesirableToTransformToIntegerOp(ISD::STORE, VT))
return SDValue();
unsigned LDAlign = LD->getAlignment();
unsigned STAlign = ST->getAlignment();
Type *IntVTTy = IntVT.getTypeForEVT(*DAG.getContext());
unsigned ABIAlign = DAG.getDataLayout().getABITypeAlignment(IntVTTy);
if (LDAlign < ABIAlign || STAlign < ABIAlign)
return SDValue();
SDValue NewLD =
DAG.getLoad(IntVT, SDLoc(Value), LD->getChain(), LD->getBasePtr(),
LD->getPointerInfo(), LDAlign);
SDValue NewST =
DAG.getStore(ST->getChain(), SDLoc(N), NewLD, ST->getBasePtr(),
ST->getPointerInfo(), STAlign);
AddToWorklist(NewLD.getNode());
AddToWorklist(NewST.getNode());
WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesOfValueWith(Value.getValue(1), NewLD.getValue(1));
++LdStFP2Int;
return NewST;
}
return SDValue();
}
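// A minimal sketch of the transform above (hypothetical): a plain f32 copy
//   store f32 (load f32, src), dst
// becomes, when the target deems integer traffic profitable for this width,
//   store i32 (load i32, src), dst
// avoiding a round trip through the floating point register file.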
// This is a helper function for visitMUL to check the profitability
// of folding (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2).
// MulNode is the original multiply, AddNode is (add x, c1),
// and ConstNode is c2.
//
// If the (add x, c1) has multiple uses, we could increase
// the number of adds if we make this transformation.
// It would only be worth doing this if we can remove a
// multiply in the process. Check for that here.
// To illustrate:
// (A + c1) * c3
// (A + c2) * c3
// We're checking for cases where we have common "c3 * A" expressions.
bool DAGCombiner::isMulAddWithConstProfitable(SDNode *MulNode,
SDValue &AddNode,
SDValue &ConstNode) {
APInt Val;
// If the add only has one use, this would be OK to do.
if (AddNode.getNode()->hasOneUse())
return true;
// Walk all the users of the constant with which we're multiplying.
for (SDNode *Use : ConstNode->uses()) {
if (Use == MulNode) // This use is the one we're on right now. Skip it.
continue;
if (Use->getOpcode() == ISD::MUL) { // We have another multiply use.
SDNode *OtherOp;
SDNode *MulVar = AddNode.getOperand(0).getNode();
// OtherOp is what we're multiplying against the constant.
if (Use->getOperand(0) == ConstNode)
OtherOp = Use->getOperand(1).getNode();
else
OtherOp = Use->getOperand(0).getNode();
// Check to see if the multiply is by the same operand as our "add".
//
// ConstNode = CONST
// Use = ConstNode * A <-- visiting Use. OtherOp is A.
// ...
// AddNode = (A + c1) <-- MulVar is A.
// = AddNode * ConstNode <-- the instruction we are visiting now.
//
// If we make this transformation, we will have a common
// multiply (ConstNode * A) that we can save.
if (OtherOp == MulVar)
return true;
// Now check to see if a future expansion will give us a common
// multiply.
//
// ConstNode = CONST
// AddNode = (A + c1)
// ... = AddNode * ConstNode <-- the instruction we are visiting now.
// ...
// OtherOp = (A + c2)
// Use = OtherOp * ConstNode <-- visiting Use.
//
// If we make this transformation, we will have a common
// multiply (CONST * A) after we also do the same transformation
// to the "Use" instruction.
if (OtherOp->getOpcode() == ISD::ADD &&
DAG.isConstantIntBuildVectorOrConstantInt(OtherOp->getOperand(1)) &&
OtherOp->getOperand(0).getNode() == MulVar)
return true;
}
}
// Didn't find a case where this would be profitable.
return false;
}
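// Sketch of the profitable case (hypothetical values):
//   t1 = (A + 1) * 5   <-- MulNode being combined
//   t2 = (A + 2) * 5   <-- another multiply use of the constant 5
// Folding both to (A * 5) + c1 * 5 lets t1 and t2 share the single
// multiply A * 5, so the extra add introduced by the transform pays off.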
SDValue DAGCombiner::getMergeStoreChains(SmallVectorImpl<MemOpLink> &StoreNodes,
unsigned NumStores) {
SmallVector<SDValue, 8> Chains;
SmallPtrSet<const SDNode *, 8> Visited;
SDLoc StoreDL(StoreNodes[0].MemNode);
for (unsigned i = 0; i < NumStores; ++i) {
Visited.insert(StoreNodes[i].MemNode);
}
// Don't include chains that are children (i.e. other candidate stores) or
// repeated nodes.
for (unsigned i = 0; i < NumStores; ++i) {
if (Visited.insert(StoreNodes[i].MemNode->getChain().getNode()).second)
Chains.push_back(StoreNodes[i].MemNode->getChain());
}
assert(Chains.size() > 0 && "Chain should have generated a chain");
return DAG.getTokenFactor(StoreDL, Chains);
}
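// Illustrative note (hypothetical chains): if the candidates' incoming
// chains are C0, C0 and C1, the Visited set drops the duplicate C0 and any
// chain that is itself one of the stores being merged, and the merged
// store receives the chain TokenFactor(C0, C1).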
bool DAGCombiner::MergeStoresOfConstantsOrVecElts(
SmallVectorImpl<MemOpLink> &StoreNodes, EVT MemVT, unsigned NumStores,
bool IsConstantSrc, bool UseVector, bool UseTrunc) {
// Make sure we have something to merge.
if (NumStores < 2)
return false;
// The debug location for the new (merged) node.
SDLoc DL(StoreNodes[0].MemNode);
int64_t ElementSizeBits = MemVT.getStoreSizeInBits();
unsigned SizeInBits = NumStores * ElementSizeBits;
unsigned NumMemElts = MemVT.isVector() ? MemVT.getVectorNumElements() : 1;
EVT StoreTy;
if (UseVector) {
unsigned Elts = NumStores * NumMemElts;
// Get the type for the merged vector store.
StoreTy = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(), Elts);
} else
StoreTy = EVT::getIntegerVT(*DAG.getContext(), SizeInBits);
SDValue StoredVal;
if (UseVector) {
if (IsConstantSrc) {
SmallVector<SDValue, 8> BuildVector;
for (unsigned I = 0; I != NumStores; ++I) {
StoreSDNode *St = cast<StoreSDNode>(StoreNodes[I].MemNode);
SDValue Val = St->getValue();
// If constant is of the wrong type, convert it now.
if (MemVT != Val.getValueType()) {
Val = peekThroughBitcasts(Val);
// Deal with constants of wrong size.
if (ElementSizeBits != Val.getValueSizeInBits()) {
EVT IntMemVT =
EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits());
if (isa<ConstantFPSDNode>(Val)) {
// Not clear how to truncate FP values.
return false;
} else if (auto *C = dyn_cast<ConstantSDNode>(Val))
Val = DAG.getConstant(C->getAPIntValue()
.zextOrTrunc(Val.getValueSizeInBits())
.zextOrTrunc(ElementSizeBits),
SDLoc(C), IntMemVT);
}
// Bitcast back to MemVT so the value has the correct type.
Val = DAG.getBitcast(MemVT, Val);
}
BuildVector.push_back(Val);
}
StoredVal = DAG.getNode(MemVT.isVector() ? ISD::CONCAT_VECTORS
: ISD::BUILD_VECTOR,
DL, StoreTy, BuildVector);
} else {
SmallVector<SDValue, 8> Ops;
for (unsigned i = 0; i < NumStores; ++i) {
StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
SDValue Val = peekThroughBitcasts(St->getValue());
// All operands of BUILD_VECTOR / CONCAT_VECTORS must be of
// type MemVT. If the underlying value is not the correct
// type, but it is an extraction of an appropriate vector we
// can recast Val to be of the correct type. This may require
// converting between EXTRACT_VECTOR_ELT and
// EXTRACT_SUBVECTOR.
if ((MemVT != Val.getValueType()) &&
(Val.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
Val.getOpcode() == ISD::EXTRACT_SUBVECTOR)) {
EVT MemVTScalarTy = MemVT.getScalarType();
// We may need to add a bitcast here to get types to line up.
if (MemVTScalarTy != Val.getValueType().getScalarType()) {
Val = DAG.getBitcast(MemVT, Val);
} else {
unsigned OpC = MemVT.isVector() ? ISD::EXTRACT_SUBVECTOR
: ISD::EXTRACT_VECTOR_ELT;
SDValue Vec = Val.getOperand(0);
SDValue Idx = Val.getOperand(1);
Val = DAG.getNode(OpC, SDLoc(Val), MemVT, Vec, Idx);
}
}
Ops.push_back(Val);
}
// Build the extracted vector elements back into a vector.
StoredVal = DAG.getNode(MemVT.isVector() ? ISD::CONCAT_VECTORS
: ISD::BUILD_VECTOR,
DL, StoreTy, Ops);
}
} else {
// We should always use a vector store when merging extracted vector
// elements, so this path implies a store of constants.
assert(IsConstantSrc && "Merged vector elements should use vector store");
APInt StoreInt(SizeInBits, 0);
// Construct a single integer constant which is made of the smaller
// constant inputs.
bool IsLE = DAG.getDataLayout().isLittleEndian();
for (unsigned i = 0; i < NumStores; ++i) {
unsigned Idx = IsLE ? (NumStores - 1 - i) : i;
StoreSDNode *St = cast<StoreSDNode>(StoreNodes[Idx].MemNode);
SDValue Val = St->getValue();
Val = peekThroughBitcasts(Val);
StoreInt <<= ElementSizeBits;
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val)) {
StoreInt |= C->getAPIntValue()
.zextOrTrunc(ElementSizeBits)
.zextOrTrunc(SizeInBits);
} else if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Val)) {
StoreInt |= C->getValueAPF()
.bitcastToAPInt()
.zextOrTrunc(ElementSizeBits)
.zextOrTrunc(SizeInBits);
// If fp truncation is necessary, give up for now.
if (MemVT.getSizeInBits() != ElementSizeBits)
return false;
} else {
llvm_unreachable("Invalid constant element type");
}
}
// Create the new Load and Store operations.
StoredVal = DAG.getConstant(StoreInt, DL, StoreTy);
}
LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
SDValue NewChain = getMergeStoreChains(StoreNodes, NumStores);
// Make sure we use a trunc store if it's necessary to be legal.
SDValue NewStore;
if (!UseTrunc) {
NewStore = DAG.getStore(NewChain, DL, StoredVal, FirstInChain->getBasePtr(),
FirstInChain->getPointerInfo(),
FirstInChain->getAlignment());
} else { // Must be realized as a trunc store
EVT LegalizedStoredValTy =
TLI.getTypeToTransformTo(*DAG.getContext(), StoredVal.getValueType());
unsigned LegalizedStoreSize = LegalizedStoredValTy.getSizeInBits();
ConstantSDNode *C = cast<ConstantSDNode>(StoredVal);
SDValue ExtendedStoreVal =
DAG.getConstant(C->getAPIntValue().zextOrTrunc(LegalizedStoreSize), DL,
LegalizedStoredValTy);
NewStore = DAG.getTruncStore(
NewChain, DL, ExtendedStoreVal, FirstInChain->getBasePtr(),
FirstInChain->getPointerInfo(), StoredVal.getValueType() /*TVT*/,
FirstInChain->getAlignment(),
FirstInChain->getMemOperand()->getFlags());
}
// Replace all merged stores with the new store.
for (unsigned i = 0; i < NumStores; ++i)
CombineTo(StoreNodes[i].MemNode, NewStore);
AddToWorklist(NewChain.getNode());
return true;
}
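// Worked example of the constant path above (hypothetical, little-endian):
// the four consecutive byte stores
//   store i8 0x12, p; store i8 0x34, p+1; store i8 0x56, p+2; store i8 0x78, p+3
// merge into the single store
//   store i32 0x78563412, p
// The accumulation loop visits the constants in reverse order on
// little-endian targets so that byte 0 of StoreInt lands at offset 0.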
void DAGCombiner::getStoreMergeCandidates(
StoreSDNode *St, SmallVectorImpl<MemOpLink> &StoreNodes,
SDNode *&RootNode) {
// This holds the base pointer, index, and the offset in bytes from the base
// pointer.
BaseIndexOffset BasePtr = BaseIndexOffset::match(St, DAG);
EVT MemVT = St->getMemoryVT();
SDValue Val = peekThroughBitcasts(St->getValue());
// We must have a base and an offset.
if (!BasePtr.getBase().getNode())
return;
// Do not handle stores to undef base pointers.
if (BasePtr.getBase().isUndef())
return;
bool IsConstantSrc = isa<ConstantSDNode>(Val) || isa<ConstantFPSDNode>(Val);
bool IsExtractVecSrc = (Val.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
Val.getOpcode() == ISD::EXTRACT_SUBVECTOR);
bool IsLoadSrc = isa<LoadSDNode>(Val);
BaseIndexOffset LBasePtr;
// Match on the load's base pointer if relevant.
EVT LoadVT;
if (IsLoadSrc) {
auto *Ld = cast<LoadSDNode>(Val);
LBasePtr = BaseIndexOffset::match(Ld, DAG);
LoadVT = Ld->getMemoryVT();
// Load and store should be the same type.
if (MemVT != LoadVT)
return;
// Loads must only have one use.
if (!Ld->hasNUsesOfValue(1, 0))
return;
// The memory operands must not be volatile/indexed.
if (Ld->isVolatile() || Ld->isIndexed())
return;
}
auto CandidateMatch = [&](StoreSDNode *Other, BaseIndexOffset &Ptr,
int64_t &Offset) -> bool {
// The memory operands must not be volatile/indexed.
if (Other->isVolatile() || Other->isIndexed())
return false;
// Don't mix temporal stores with non-temporal stores.
if (St->isNonTemporal() != Other->isNonTemporal())
return false;
SDValue OtherBC = peekThroughBitcasts(Other->getValue());
// Allow merging constants of different types as integers.
bool NoTypeMatch = (MemVT.isInteger()) ? !MemVT.bitsEq(Other->getMemoryVT())
: Other->getMemoryVT() != MemVT;
if (IsLoadSrc) {
if (NoTypeMatch)
return false;
// The Load's Base Ptr must also match
if (LoadSDNode *OtherLd = dyn_cast<LoadSDNode>(OtherBC)) {
BaseIndexOffset LPtr = BaseIndexOffset::match(OtherLd, DAG);
if (LoadVT != OtherLd->getMemoryVT())
return false;
// Loads must only have one use.
if (!OtherLd->hasNUsesOfValue(1, 0))
return false;
// The memory operands must not be volatile/indexed.
if (OtherLd->isVolatile() || OtherLd->isIndexed())
return false;
// Don't mix temporal loads with non-temporal loads.
if (cast<LoadSDNode>(Val)->isNonTemporal() != OtherLd->isNonTemporal())
return false;
if (!(LBasePtr.equalBaseIndex(LPtr, DAG)))
return false;
} else
return false;
}
if (IsConstantSrc) {
if (NoTypeMatch)
return false;
if (!(isa<ConstantSDNode>(OtherBC) || isa<ConstantFPSDNode>(OtherBC)))
return false;
}
if (IsExtractVecSrc) {
// Do not merge truncated stores here.
if (Other->isTruncatingStore())
return false;
if (!MemVT.bitsEq(OtherBC.getValueType()))
return false;
if (OtherBC.getOpcode() != ISD::EXTRACT_VECTOR_ELT &&
OtherBC.getOpcode() != ISD::EXTRACT_SUBVECTOR)
return false;
}
Ptr = BaseIndexOffset::match(Other, DAG);
return (BasePtr.equalBaseIndex(Ptr, DAG, Offset));
};
// We are looking for a root node which is an ancestor to all mergeable
// stores. We search up through a load, to our root and then down
// through all children. For instance we will find Store{1,2,3} if
// St is Store1, Store2, or Store3 where the root is not a load,
// which is always true for non-volatile ops. TODO: Expand
// the search to find all valid candidates through multiple layers of loads.
//
// Root
// |-------|-------|
// Load Load Store3
// | |
// Store1 Store2
//
// FIXME: We should be able to climb and
// descend TokenFactors to find candidates as well.
RootNode = St->getChain().getNode();
unsigned NumNodesExplored = 0;
if (LoadSDNode *Ldn = dyn_cast<LoadSDNode>(RootNode)) {
RootNode = Ldn->getChain().getNode();
for (auto I = RootNode->use_begin(), E = RootNode->use_end();
I != E && NumNodesExplored < 1024; ++I, ++NumNodesExplored)
if (I.getOperandNo() == 0 && isa<LoadSDNode>(*I)) // walk down chain
for (auto I2 = (*I)->use_begin(), E2 = (*I)->use_end(); I2 != E2; ++I2)
if (I2.getOperandNo() == 0)
if (StoreSDNode *OtherST = dyn_cast<StoreSDNode>(*I2)) {
BaseIndexOffset Ptr;
int64_t PtrDiff;
if (CandidateMatch(OtherST, Ptr, PtrDiff))
StoreNodes.push_back(MemOpLink(OtherST, PtrDiff));
}
} else
for (auto I = RootNode->use_begin(), E = RootNode->use_end();
I != E && NumNodesExplored < 1024; ++I, ++NumNodesExplored)
if (I.getOperandNo() == 0)
if (StoreSDNode *OtherST = dyn_cast<StoreSDNode>(*I)) {
BaseIndexOffset Ptr;
int64_t PtrDiff;
if (CandidateMatch(OtherST, Ptr, PtrDiff))
StoreNodes.push_back(MemOpLink(OtherST, PtrDiff));
}
}
// We need to check that merging these stores does not cause a loop in
// the DAG. Any store candidate may depend on another candidate
// indirectly through its operand (we already consider dependencies
// through the chain). Check in parallel by searching up from
// non-chain operands of candidates.
bool DAGCombiner::checkMergeStoreCandidatesForDependencies(
SmallVectorImpl<MemOpLink> &StoreNodes, unsigned NumStores,
SDNode *RootNode) {
// FIXME: We should be able to truncate a full search of
// predecessors by doing a BFS and keeping tabs on the originating
// stores from which worklist nodes come, in a similar way to
// TokenFactor simplification.
SmallPtrSet<const SDNode *, 32> Visited;
SmallVector<const SDNode *, 8> Worklist;
// RootNode is a predecessor to all candidates so we need not search
// past it. Add RootNode (peeking through TokenFactors). Do not count
// these towards the size check.
Worklist.push_back(RootNode);
while (!Worklist.empty()) {
auto N = Worklist.pop_back_val();
if (!Visited.insert(N).second)
continue; // Already present in Visited.
if (N->getOpcode() == ISD::TokenFactor) {
for (SDValue Op : N->ops())
Worklist.push_back(Op.getNode());
}
}
// Don't count pruning nodes towards max.
unsigned int Max = 1024 + Visited.size();
// Search Ops of store candidates.
for (unsigned i = 0; i < NumStores; ++i) {
SDNode *N = StoreNodes[i].MemNode;
// Of the 4 Store Operands:
// * Chain (Op 0) -> We have already considered these
// in candidate selection, so they can be
// safely ignored.
// * Value (Op 1) -> Cycles may happen (e.g. through load chains)
// * Address (Op 2) -> Merged addresses may only vary by a fixed constant,
// but aren't necessarily from the same base node, so
// cycles are possible (e.g. via an indexed store).
// * (Op 3) -> Represents the pre- or post-indexing offset (or undef for
// non-indexed stores). Not constant on all targets (e.g. ARM),
// and so can participate in a cycle.
for (unsigned j = 1; j < N->getNumOperands(); ++j)
Worklist.push_back(N->getOperand(j).getNode());
}
// Search through DAG. We can stop early if we find a store node.
for (unsigned i = 0; i < NumStores; ++i)
if (SDNode::hasPredecessorHelper(StoreNodes[i].MemNode, Visited, Worklist,
Max))
return false;
return true;
}
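// Example of the cycle this guards against (hypothetical): with candidate
// stores S1 and S2, if S2's stored value is computed from a load whose
// chain passes through S1, then a merged store replacing both would be
// simultaneously a predecessor and a successor of that load, creating an
// illegal cycle in the DAG; hasPredecessorHelper detects this case.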
bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) {
if (OptLevel == CodeGenOpt::None)
return false;
EVT MemVT = St->getMemoryVT();
int64_t ElementSizeBytes = MemVT.getStoreSize();
unsigned NumMemElts = MemVT.isVector() ? MemVT.getVectorNumElements() : 1;
if (MemVT.getSizeInBits() * 2 > MaximumLegalStoreInBits)
return false;
bool NoVectors = DAG.getMachineFunction().getFunction().hasFnAttribute(
Attribute::NoImplicitFloat);
// This function cannot currently deal with non-byte-sized memory sizes.
if (ElementSizeBytes * 8 != MemVT.getSizeInBits())
return false;
if (!MemVT.isSimple())
return false;
// Perform an early exit check. Do not bother looking at stored values that
// are not constants, loads, or extracted vector elements.
SDValue StoredVal = peekThroughBitcasts(St->getValue());
bool IsLoadSrc = isa<LoadSDNode>(StoredVal);
bool IsConstantSrc = isa<ConstantSDNode>(StoredVal) ||
isa<ConstantFPSDNode>(StoredVal);
bool IsExtractVecSrc = (StoredVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
StoredVal.getOpcode() == ISD::EXTRACT_SUBVECTOR);
bool IsNonTemporalStore = St->isNonTemporal();
bool IsNonTemporalLoad =
IsLoadSrc && cast<LoadSDNode>(StoredVal)->isNonTemporal();
if (!IsConstantSrc && !IsLoadSrc && !IsExtractVecSrc)
return false;
SmallVector<MemOpLink, 8> StoreNodes;
SDNode *RootNode;
// Find potential store merge candidates by searching through chain sub-DAG
getStoreMergeCandidates(St, StoreNodes, RootNode);
// Check if there is anything to merge.
if (StoreNodes.size() < 2)
return false;
// Sort the memory operands according to their distance from the
// base pointer.
llvm::sort(StoreNodes, [](MemOpLink LHS, MemOpLink RHS) {
return LHS.OffsetFromBase < RHS.OffsetFromBase;
});
// Store merging attempts to merge the lowest-addressed stores first. This
// generally works out, since on success the remaining stores are checked
// after the first collection of stores is merged. However, in the
// case that a non-mergeable store is found first, e.g., {p[-2],
// p[0], p[1], p[2], p[3]}, we would fail and miss the subsequent
// mergeable cases. To prevent this, we prune such stores from the
// front of StoreNodes here.
bool RV = false;
while (StoreNodes.size() > 1) {
unsigned StartIdx = 0;
while ((StartIdx + 1 < StoreNodes.size()) &&
StoreNodes[StartIdx].OffsetFromBase + ElementSizeBytes !=
StoreNodes[StartIdx + 1].OffsetFromBase)
++StartIdx;
// Bail if we don't have enough candidates to merge.
if (StartIdx + 1 >= StoreNodes.size())
return RV;
if (StartIdx)
StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + StartIdx);
// Scan the memory operations on the chain and find the first
// non-consecutive store memory address.
unsigned NumConsecutiveStores = 1;
int64_t StartAddress = StoreNodes[0].OffsetFromBase;
// Check that the addresses are consecutive starting from the second
// element in the list of stores.
for (unsigned i = 1, e = StoreNodes.size(); i < e; ++i) {
int64_t CurrAddress = StoreNodes[i].OffsetFromBase;
if (CurrAddress - StartAddress != (ElementSizeBytes * i))
break;
NumConsecutiveStores = i + 1;
}
if (NumConsecutiveStores < 2) {
StoreNodes.erase(StoreNodes.begin(),
StoreNodes.begin() + NumConsecutiveStores);
continue;
}
LLVMContext &Context = *DAG.getContext();
const DataLayout &DL = DAG.getDataLayout();
// Store the constants into memory as one consecutive store.
if (IsConstantSrc) {
while (NumConsecutiveStores >= 2) {
LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
unsigned FirstStoreAS = FirstInChain->getAddressSpace();
unsigned FirstStoreAlign = FirstInChain->getAlignment();
unsigned LastLegalType = 1;
unsigned LastLegalVectorType = 1;
bool LastIntegerTrunc = false;
bool NonZero = false;
unsigned FirstZeroAfterNonZero = NumConsecutiveStores;
for (unsigned i = 0; i < NumConsecutiveStores; ++i) {
StoreSDNode *ST = cast<StoreSDNode>(StoreNodes[i].MemNode);
SDValue StoredVal = ST->getValue();
bool IsElementZero = false;
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(StoredVal))
IsElementZero = C->isNullValue();
else if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(StoredVal))
IsElementZero = C->getConstantFPValue()->isNullValue();
if (IsElementZero) {
if (NonZero && FirstZeroAfterNonZero == NumConsecutiveStores)
FirstZeroAfterNonZero = i;
}
NonZero |= !IsElementZero;
// Find a legal type for the constant store.
unsigned SizeInBits = (i + 1) * ElementSizeBytes * 8;
EVT StoreTy = EVT::getIntegerVT(Context, SizeInBits);
bool IsFast = false;
// Break early when size is too large to be legal.
if (StoreTy.getSizeInBits() > MaximumLegalStoreInBits)
break;
if (TLI.isTypeLegal(StoreTy) &&
TLI.canMergeStoresTo(FirstStoreAS, StoreTy, DAG) &&
TLI.allowsMemoryAccess(Context, DL, StoreTy,
*FirstInChain->getMemOperand(), &IsFast) &&
IsFast) {
LastIntegerTrunc = false;
LastLegalType = i + 1;
// Or check whether a truncstore is legal.
} else if (TLI.getTypeAction(Context, StoreTy) ==
TargetLowering::TypePromoteInteger) {
EVT LegalizedStoredValTy =
TLI.getTypeToTransformTo(Context, StoredVal.getValueType());
if (TLI.isTruncStoreLegal(LegalizedStoredValTy, StoreTy) &&
TLI.canMergeStoresTo(FirstStoreAS, LegalizedStoredValTy, DAG) &&
TLI.allowsMemoryAccess(Context, DL, StoreTy,
*FirstInChain->getMemOperand(),
&IsFast) &&
IsFast) {
LastIntegerTrunc = true;
LastLegalType = i + 1;
}
}
// We only use vectors if the constant is known to be zero or the
// target allows it and the function is not marked with the
// noimplicitfloat attribute.
if ((!NonZero ||
TLI.storeOfVectorConstantIsCheap(MemVT, i + 1, FirstStoreAS)) &&
!NoVectors) {
// Find a legal type for the vector store.
unsigned Elts = (i + 1) * NumMemElts;
EVT Ty = EVT::getVectorVT(Context, MemVT.getScalarType(), Elts);
if (TLI.isTypeLegal(Ty) && TLI.isTypeLegal(MemVT) &&
TLI.canMergeStoresTo(FirstStoreAS, Ty, DAG) &&
TLI.allowsMemoryAccess(
Context, DL, Ty, *FirstInChain->getMemOperand(), &IsFast) &&
IsFast)
LastLegalVectorType = i + 1;
}
}
bool UseVector = (LastLegalVectorType > LastLegalType) && !NoVectors;
unsigned NumElem = (UseVector) ? LastLegalVectorType : LastLegalType;
// Check if we found a legal integer type that creates a meaningful
// merge.
if (NumElem < 2) {
// We know that candidate stores are in order and of correct
// shape. While there is no mergeable sequence from the
// beginning, one may start later in the sequence. The only
// reason a merge of size N could have failed where another of
// the same size would not have, is if the alignment has
// improved or we've dropped a non-zero value. Drop as many
// candidates as we can here.
unsigned NumSkip = 1;
while (
(NumSkip < NumConsecutiveStores) &&
(NumSkip < FirstZeroAfterNonZero) &&
(StoreNodes[NumSkip].MemNode->getAlignment() <= FirstStoreAlign))
NumSkip++;
StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumSkip);
NumConsecutiveStores -= NumSkip;
continue;
}
// Check that we can merge these candidates without causing a cycle.
if (!checkMergeStoreCandidatesForDependencies(StoreNodes, NumElem,
RootNode)) {
StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumElem);
NumConsecutiveStores -= NumElem;
continue;
}
RV |= MergeStoresOfConstantsOrVecElts(StoreNodes, MemVT, NumElem, true,
UseVector, LastIntegerTrunc);
// Remove merged stores for next iteration.
StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumElem);
NumConsecutiveStores -= NumElem;
}
continue;
}
// When extracting multiple vector elements, try to store them
// in one vector store rather than a sequence of scalar stores.
if (IsExtractVecSrc) {
// Keep looping over the consecutive stores while merging succeeds.
while (NumConsecutiveStores >= 2) {
LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
unsigned FirstStoreAS = FirstInChain->getAddressSpace();
unsigned FirstStoreAlign = FirstInChain->getAlignment();
unsigned NumStoresToMerge = 1;
for (unsigned i = 0; i < NumConsecutiveStores; ++i) {
// Find a legal type for the vector store.
unsigned Elts = (i + 1) * NumMemElts;
EVT Ty =
EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(), Elts);
bool IsFast;
// Break early when size is too large to be legal.
if (Ty.getSizeInBits() > MaximumLegalStoreInBits)
break;
if (TLI.isTypeLegal(Ty) &&
TLI.canMergeStoresTo(FirstStoreAS, Ty, DAG) &&
TLI.allowsMemoryAccess(Context, DL, Ty,
*FirstInChain->getMemOperand(), &IsFast) &&
IsFast)
NumStoresToMerge = i + 1;
}
// Check if we found a legal vector type that creates a meaningful
// merge.
if (NumStoresToMerge < 2) {
// We know that candidate stores are in order and of correct
// shape. While there is no mergeable sequence from the
// beginning, one may start later in the sequence. The only
// reason a merge of size N could have failed where another of
// the same size would not have, is if the alignment has
// improved. Drop as many candidates as we can here.
unsigned NumSkip = 1;
while (
(NumSkip < NumConsecutiveStores) &&
(StoreNodes[NumSkip].MemNode->getAlignment() <= FirstStoreAlign))
NumSkip++;
StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumSkip);
NumConsecutiveStores -= NumSkip;
continue;
}
// Check that we can merge these candidates without causing a cycle.
if (!checkMergeStoreCandidatesForDependencies(
StoreNodes, NumStoresToMerge, RootNode)) {
StoreNodes.erase(StoreNodes.begin(),
StoreNodes.begin() + NumStoresToMerge);
NumConsecutiveStores -= NumStoresToMerge;
continue;
}
RV |= MergeStoresOfConstantsOrVecElts(
StoreNodes, MemVT, NumStoresToMerge, false, true, false);
StoreNodes.erase(StoreNodes.begin(),
StoreNodes.begin() + NumStoresToMerge);
NumConsecutiveStores -= NumStoresToMerge;
}
continue;
}
// Below we handle the case of multiple consecutive stores that
// come from multiple consecutive loads. We merge them into a single
// wide load and a single wide store.
// Look for load nodes which are used by the stored values.
SmallVector<MemOpLink, 8> LoadNodes;
// Find acceptable loads. Loads need to have the same chain (token factor),
// must not be zext, volatile, or indexed, and they must be consecutive.
BaseIndexOffset LdBasePtr;
for (unsigned i = 0; i < NumConsecutiveStores; ++i) {
StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
SDValue Val = peekThroughBitcasts(St->getValue());
LoadSDNode *Ld = cast<LoadSDNode>(Val);
BaseIndexOffset LdPtr = BaseIndexOffset::match(Ld, DAG);
// If this is not the first ptr that we check.
int64_t LdOffset = 0;
if (LdBasePtr.getBase().getNode()) {
// The base ptr must be the same.
if (!LdBasePtr.equalBaseIndex(LdPtr, DAG, LdOffset))
break;
} else {
// Check that all other base pointers are the same as this one.
LdBasePtr = LdPtr;
}
// We found a potential memory operand to merge.
LoadNodes.push_back(MemOpLink(Ld, LdOffset));
}
while (NumConsecutiveStores >= 2 && LoadNodes.size() >= 2) {
// If we have load/store pair instructions and we only have two values,
// don't bother merging.
unsigned RequiredAlignment;
if (LoadNodes.size() == 2 &&
TLI.hasPairedLoad(MemVT, RequiredAlignment) &&
StoreNodes[0].MemNode->getAlignment() >= RequiredAlignment) {
StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + 2);
LoadNodes.erase(LoadNodes.begin(), LoadNodes.begin() + 2);
break;
}
LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
unsigned FirstStoreAS = FirstInChain->getAddressSpace();
unsigned FirstStoreAlign = FirstInChain->getAlignment();
LoadSDNode *FirstLoad = cast<LoadSDNode>(LoadNodes[0].MemNode);
unsigned FirstLoadAlign = FirstLoad->getAlignment();
// Scan the memory operations on the chain and find the first
// non-consecutive load memory address. These variables hold the index in
// the store node array.
unsigned LastConsecutiveLoad = 1;
// This variable refers to the size and not index in the array.
unsigned LastLegalVectorType = 1;
unsigned LastLegalIntegerType = 1;
bool isDereferenceable = true;
bool DoIntegerTruncate = false;
StartAddress = LoadNodes[0].OffsetFromBase;
SDValue FirstChain = FirstLoad->getChain();
for (unsigned i = 1; i < LoadNodes.size(); ++i) {
// All loads must share the same chain.
if (LoadNodes[i].MemNode->getChain() != FirstChain)
break;
int64_t CurrAddress = LoadNodes[i].OffsetFromBase;
if (CurrAddress - StartAddress != (ElementSizeBytes * i))
break;
LastConsecutiveLoad = i;
if (isDereferenceable && !LoadNodes[i].MemNode->isDereferenceable())
isDereferenceable = false;
// Find a legal type for the vector store.
unsigned Elts = (i + 1) * NumMemElts;
EVT StoreTy = EVT::getVectorVT(Context, MemVT.getScalarType(), Elts);
// Break early when size is too large to be legal.
if (StoreTy.getSizeInBits() > MaximumLegalStoreInBits)
break;
bool IsFastSt, IsFastLd;
if (TLI.isTypeLegal(StoreTy) &&
TLI.canMergeStoresTo(FirstStoreAS, StoreTy, DAG) &&
TLI.allowsMemoryAccess(Context, DL, StoreTy,
*FirstInChain->getMemOperand(), &IsFastSt) &&
IsFastSt &&
TLI.allowsMemoryAccess(Context, DL, StoreTy,
*FirstLoad->getMemOperand(), &IsFastLd) &&
IsFastLd) {
LastLegalVectorType = i + 1;
}
// Find a legal type for the integer store.
unsigned SizeInBits = (i + 1) * ElementSizeBytes * 8;
StoreTy = EVT::getIntegerVT(Context, SizeInBits);
if (TLI.isTypeLegal(StoreTy) &&
TLI.canMergeStoresTo(FirstStoreAS, StoreTy, DAG) &&
TLI.allowsMemoryAccess(Context, DL, StoreTy,
*FirstInChain->getMemOperand(), &IsFastSt) &&
IsFastSt &&
TLI.allowsMemoryAccess(Context, DL, StoreTy,
*FirstLoad->getMemOperand(), &IsFastLd) &&
IsFastLd) {
LastLegalIntegerType = i + 1;
DoIntegerTruncate = false;
// Or check whether a truncstore and extload is legal.
} else if (TLI.getTypeAction(Context, StoreTy) ==
TargetLowering::TypePromoteInteger) {
EVT LegalizedStoredValTy = TLI.getTypeToTransformTo(Context, StoreTy);
if (TLI.isTruncStoreLegal(LegalizedStoredValTy, StoreTy) &&
TLI.canMergeStoresTo(FirstStoreAS, LegalizedStoredValTy, DAG) &&
TLI.isLoadExtLegal(ISD::ZEXTLOAD, LegalizedStoredValTy,
StoreTy) &&
TLI.isLoadExtLegal(ISD::SEXTLOAD, LegalizedStoredValTy,
StoreTy) &&
TLI.isLoadExtLegal(ISD::EXTLOAD, LegalizedStoredValTy, StoreTy) &&
TLI.allowsMemoryAccess(Context, DL, StoreTy,
*FirstInChain->getMemOperand(),
&IsFastSt) &&
IsFastSt &&
TLI.allowsMemoryAccess(Context, DL, StoreTy,
*FirstLoad->getMemOperand(), &IsFastLd) &&
IsFastLd) {
LastLegalIntegerType = i + 1;
DoIntegerTruncate = true;
}
}
}
// Only use vector types if the vector type is larger than the integer
// type. If they are the same, use integers.
bool UseVectorTy =
LastLegalVectorType > LastLegalIntegerType && !NoVectors;
unsigned LastLegalType =
std::max(LastLegalVectorType, LastLegalIntegerType);
// We add +1 here because the LastXXX variables refer to a position (index)
// while NumElem refers to a count (array size).
unsigned NumElem =
std::min(NumConsecutiveStores, LastConsecutiveLoad + 1);
NumElem = std::min(LastLegalType, NumElem);
if (NumElem < 2) {
// We know that candidate stores are in order and of correct
// shape. While there is no mergeable sequence from the
// beginning, one may start later in the sequence. The only
// reason a merge of size N could have failed where another of
// the same size would not have is if the alignment of either
// the load or store has improved. Drop as many candidates as we
// can here.
unsigned NumSkip = 1;
while ((NumSkip < LoadNodes.size()) &&
(LoadNodes[NumSkip].MemNode->getAlignment() <= FirstLoadAlign) &&
(StoreNodes[NumSkip].MemNode->getAlignment() <= FirstStoreAlign))
NumSkip++;
StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumSkip);
LoadNodes.erase(LoadNodes.begin(), LoadNodes.begin() + NumSkip);
NumConsecutiveStores -= NumSkip;
continue;
}
// Check that we can merge these candidates without causing a cycle.
if (!checkMergeStoreCandidatesForDependencies(StoreNodes, NumElem,
RootNode)) {
StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumElem);
LoadNodes.erase(LoadNodes.begin(), LoadNodes.begin() + NumElem);
NumConsecutiveStores -= NumElem;
continue;
}
// Find if it is better to use vectors or integers to load and store
// to memory.
EVT JointMemOpVT;
if (UseVectorTy) {
// Find a legal type for the vector store.
unsigned Elts = NumElem * NumMemElts;
JointMemOpVT = EVT::getVectorVT(Context, MemVT.getScalarType(), Elts);
} else {
unsigned SizeInBits = NumElem * ElementSizeBytes * 8;
JointMemOpVT = EVT::getIntegerVT(Context, SizeInBits);
}
SDLoc LoadDL(LoadNodes[0].MemNode);
SDLoc StoreDL(StoreNodes[0].MemNode);
// The merged loads are required to have the same incoming chain, so
// using the first's chain is acceptable.
SDValue NewStoreChain = getMergeStoreChains(StoreNodes, NumElem);
AddToWorklist(NewStoreChain.getNode());
MachineMemOperand::Flags LdMMOFlags =
isDereferenceable ? MachineMemOperand::MODereferenceable
: MachineMemOperand::MONone;
if (IsNonTemporalLoad)
LdMMOFlags |= MachineMemOperand::MONonTemporal;
MachineMemOperand::Flags StMMOFlags =
IsNonTemporalStore ? MachineMemOperand::MONonTemporal
: MachineMemOperand::MONone;
SDValue NewLoad, NewStore;
if (UseVectorTy || !DoIntegerTruncate) {
NewLoad =
DAG.getLoad(JointMemOpVT, LoadDL, FirstLoad->getChain(),
FirstLoad->getBasePtr(), FirstLoad->getPointerInfo(),
FirstLoadAlign, LdMMOFlags);
NewStore = DAG.getStore(
NewStoreChain, StoreDL, NewLoad, FirstInChain->getBasePtr(),
FirstInChain->getPointerInfo(), FirstStoreAlign, StMMOFlags);
} else { // This must be the truncstore/extload case
EVT ExtendedTy =
TLI.getTypeToTransformTo(*DAG.getContext(), JointMemOpVT);
NewLoad = DAG.getExtLoad(ISD::EXTLOAD, LoadDL, ExtendedTy,
FirstLoad->getChain(), FirstLoad->getBasePtr(),
FirstLoad->getPointerInfo(), JointMemOpVT,
FirstLoadAlign, LdMMOFlags);
NewStore = DAG.getTruncStore(NewStoreChain, StoreDL, NewLoad,
FirstInChain->getBasePtr(),
FirstInChain->getPointerInfo(),
JointMemOpVT, FirstInChain->getAlignment(),
FirstInChain->getMemOperand()->getFlags());
}
// Transfer chain users from old loads to the new load.
for (unsigned i = 0; i < NumElem; ++i) {
LoadSDNode *Ld = cast<LoadSDNode>(LoadNodes[i].MemNode);
DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1),
SDValue(NewLoad.getNode(), 1));
}
// Replace all of the stores with the new store. Recursively remove the
// corresponding value if it's no longer used.
for (unsigned i = 0; i < NumElem; ++i) {
SDValue Val = StoreNodes[i].MemNode->getOperand(1);
CombineTo(StoreNodes[i].MemNode, NewStore);
if (Val.getNode()->use_empty())
recursivelyDeleteUnusedNodes(Val.getNode());
}
RV = true;
StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumElem);
LoadNodes.erase(LoadNodes.begin(), LoadNodes.begin() + NumElem);
NumConsecutiveStores -= NumElem;
}
}
return RV;
}
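// End-to-end sketch of the load/store path above (hypothetical): the four
// byte copies
//   a[0] = b[0]; a[1] = b[1]; a[2] = b[2]; a[3] = b[3];
// become a single wide pair once a 32-bit access is legal and fast:
//   t = load i32, b
//   store i32 t, a
// with the chain users of the four original loads moved to the new load.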
SDValue DAGCombiner::replaceStoreChain(StoreSDNode *ST, SDValue BetterChain) {
SDLoc SL(ST);
SDValue ReplStore;
// Replace the chain to avoid dependency.
if (ST->isTruncatingStore()) {
ReplStore = DAG.getTruncStore(BetterChain, SL, ST->getValue(),
ST->getBasePtr(), ST->getMemoryVT(),
ST->getMemOperand());
} else {
ReplStore = DAG.getStore(BetterChain, SL, ST->getValue(), ST->getBasePtr(),
ST->getMemOperand());
}
// Create token to keep both nodes around.
SDValue Token = DAG.getNode(ISD::TokenFactor, SL,
MVT::Other, ST->getChain(), ReplStore);
// Make sure the new and old chains are cleaned up.
AddToWorklist(Token.getNode());
// Don't add users to work list.
return CombineTo(ST, Token, false);
}
SDValue DAGCombiner::replaceStoreOfFPConstant(StoreSDNode *ST) {
SDValue Value = ST->getValue();
if (Value.getOpcode() == ISD::TargetConstantFP)
return SDValue();
SDLoc DL(ST);
SDValue Chain = ST->getChain();
SDValue Ptr = ST->getBasePtr();
const ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Value);
// NOTE: If the original store is volatile, this transform must not increase
// the number of stores. For example, on x86-32 an f64 can be stored in one
// processor operation but an i64 (which is not legal) requires two. So the
// transform should not be done in this case.
SDValue Tmp;
switch (CFP->getSimpleValueType(0).SimpleTy) {
default:
llvm_unreachable("Unknown FP type");
case MVT::f16: // We don't do this for these yet.
case MVT::f80:
case MVT::f128:
case MVT::ppcf128:
return SDValue();
case MVT::f32:
if ((isTypeLegal(MVT::i32) && !LegalOperations && !ST->isVolatile()) ||
TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) {
Tmp = DAG.getConstant((uint32_t)CFP->getValueAPF().
bitcastToAPInt().getZExtValue(), SDLoc(CFP),
MVT::i32);
return DAG.getStore(Chain, DL, Tmp, Ptr, ST->getMemOperand());
}
return SDValue();
case MVT::f64:
if ((TLI.isTypeLegal(MVT::i64) && !LegalOperations &&
!ST->isVolatile()) ||
TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i64)) {
Tmp = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt().
getZExtValue(), SDLoc(CFP), MVT::i64);
return DAG.getStore(Chain, DL, Tmp,
Ptr, ST->getMemOperand());
}
if (!ST->isVolatile() &&
TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) {
// Many FP stores are not made apparent until after legalize, e.g. for
// argument passing. Since this is so common, custom legalize the
// 64-bit integer store into two 32-bit stores.
uint64_t Val = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
SDValue Lo = DAG.getConstant(Val & 0xFFFFFFFF, SDLoc(CFP), MVT::i32);
SDValue Hi = DAG.getConstant(Val >> 32, SDLoc(CFP), MVT::i32);
if (DAG.getDataLayout().isBigEndian())
std::swap(Lo, Hi);
unsigned Alignment = ST->getAlignment();
MachineMemOperand::Flags MMOFlags = ST->getMemOperand()->getFlags();
AAMDNodes AAInfo = ST->getAAInfo();
SDValue St0 = DAG.getStore(Chain, DL, Lo, Ptr, ST->getPointerInfo(),
ST->getAlignment(), MMOFlags, AAInfo);
Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
DAG.getConstant(4, DL, Ptr.getValueType()));
Alignment = MinAlign(Alignment, 4U);
SDValue St1 = DAG.getStore(Chain, DL, Hi, Ptr,
ST->getPointerInfo().getWithOffset(4),
Alignment, MMOFlags, AAInfo);
return DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
St0, St1);
}
return SDValue();
}
}
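// Example of the f64 split above (hypothetical, little-endian 32-bit
// target): storing the f64 constant 1.0 (bit pattern 0x3FF0000000000000)
// becomes
//   store i32 0x00000000, p       ; low half
//   store i32 0x3FF00000, p + 4   ; high half
// joined by a TokenFactor, since a single i64 store is not legal there.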
SDValue DAGCombiner::visitSTORE(SDNode *N) {
StoreSDNode *ST = cast<StoreSDNode>(N);
SDValue Chain = ST->getChain();
SDValue Value = ST->getValue();
SDValue Ptr = ST->getBasePtr();
// If this is a store of a bit convert, store the input value if the
// resultant store does not need a higher alignment than the original.
if (Value.getOpcode() == ISD::BITCAST && !ST->isTruncatingStore() &&
ST->isUnindexed()) {
EVT SVT = Value.getOperand(0).getValueType();
// If the store is volatile, we only want to change the store type if the
// resulting store is legal. Otherwise we might increase the number of
// memory accesses. We don't care if the original type was legal or not
// as we assume software couldn't rely on the number of accesses of an
// illegal type.
if (((!LegalOperations && !ST->isVolatile()) ||
TLI.isOperationLegal(ISD::STORE, SVT)) &&
TLI.isStoreBitCastBeneficial(Value.getValueType(), SVT,
DAG, *ST->getMemOperand())) {
return DAG.getStore(Chain, SDLoc(N), Value.getOperand(0), Ptr,
ST->getPointerInfo(), ST->getAlignment(),
ST->getMemOperand()->getFlags(), ST->getAAInfo());
}
}
// Turn 'store undef, Ptr' -> nothing.
if (Value.isUndef() && ST->isUnindexed())
return Chain;
// Try to infer better alignment information than the store already has.
if (OptLevel != CodeGenOpt::None && ST->isUnindexed()) {
if (unsigned Align = DAG.InferPtrAlignment(Ptr)) {
if (Align > ST->getAlignment() && ST->getSrcValueOffset() % Align == 0) {
SDValue NewStore =
DAG.getTruncStore(Chain, SDLoc(N), Value, Ptr, ST->getPointerInfo(),
ST->getMemoryVT(), Align,
ST->getMemOperand()->getFlags(), ST->getAAInfo());
// NewStore will always be N as we are only refining the alignment
assert(NewStore.getNode() == N);
(void)NewStore;
}
}
}
// Try transforming a pair of floating point load / store ops to integer
// load / store ops.
if (SDValue NewST = TransformFPLoadStorePair(N))
return NewST;
// Try transforming several stores into STORE (BSWAP).
if (SDValue Store = MatchStoreCombine(ST))
return Store;
if (ST->isUnindexed()) {
// Walk up chain skipping non-aliasing memory nodes, on this store and any
// adjacent stores.
if (findBetterNeighborChains(ST)) {
// replaceStoreChain uses CombineTo, which handles all of the worklist
// manipulation. Return the original node so nothing else is done.
return SDValue(ST, 0);
}
Chain = ST->getChain();
}
// FIXME: is there such a thing as a truncating indexed store?
if (ST->isTruncatingStore() && ST->isUnindexed() &&
Value.getValueType().isInteger() &&
(!isa<ConstantSDNode>(Value) ||
!cast<ConstantSDNode>(Value)->isOpaque())) {
APInt TruncDemandedBits =
APInt::getLowBitsSet(Value.getScalarValueSizeInBits(),
ST->getMemoryVT().getScalarSizeInBits());
// See if we can simplify the input to this truncstore with knowledge that
// only the low bits are being used. For example:
// "truncstore (or (shl x, 8), y), i8" -> "truncstore y, i8"
SDValue Shorter = DAG.GetDemandedBits(Value, TruncDemandedBits);
AddToWorklist(Value.getNode());
if (Shorter)
return DAG.getTruncStore(Chain, SDLoc(N), Shorter, Ptr, ST->getMemoryVT(),
ST->getMemOperand());
// Otherwise, see if we can simplify the operation with
// SimplifyDemandedBits, which only works if the value has a single use.
if (SimplifyDemandedBits(Value, TruncDemandedBits)) {
// Re-visit the store if anything changed and the store hasn't been merged
// with another node (in which case N is deleted). SimplifyDemandedBits
// will add Value's node back to the worklist if necessary, but we also
// need to re-visit the Store node itself.
if (N->getOpcode() != ISD::DELETED_NODE)
AddToWorklist(N);
return SDValue(N, 0);
}
}
// If this is a load followed by a store to the same location, then the store
// is dead/noop.
if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Value)) {
if (Ld->getBasePtr() == Ptr && ST->getMemoryVT() == Ld->getMemoryVT() &&
ST->isUnindexed() && !ST->isVolatile() &&
// There can't be any side effects between the load and store, such as
// a call or store.
Chain.reachesChainWithoutSideEffects(SDValue(Ld, 1))) {
// The store is dead, remove it.
return Chain;
}
}
if (StoreSDNode *ST1 = dyn_cast<StoreSDNode>(Chain)) {
if (ST->isUnindexed() && !ST->isVolatile() && ST1->isUnindexed() &&
!ST1->isVolatile()) {
if (ST1->getBasePtr() == Ptr && ST1->getValue() == Value &&
ST->getMemoryVT() == ST1->getMemoryVT()) {
// If this is a store followed by a store with the same value to the
// same location, then the store is dead/noop.
return Chain;
}
if (OptLevel != CodeGenOpt::None && ST1->hasOneUse() &&
!ST1->getBasePtr().isUndef()) {
const BaseIndexOffset STBase = BaseIndexOffset::match(ST, DAG);
const BaseIndexOffset ChainBase = BaseIndexOffset::match(ST1, DAG);
unsigned STBitSize = ST->getMemoryVT().getSizeInBits();
unsigned ChainBitSize = ST1->getMemoryVT().getSizeInBits();
// If the preceding store (ST1) writes to a subset of the current
// store's location and no other node is chained to that store, we can
// effectively drop the preceding store. Do not remove stores to undef as
// they may be used as data sinks.
if (STBase.contains(DAG, STBitSize, ChainBase, ChainBitSize)) {
CombineTo(ST1, ST1->getChain());
return SDValue();
}
// If ST stores to a subset of preceding store's write set, we may be
// able to fold ST's value into the preceding stored value. As we know
// the other uses of ST1's chain are unconcerned with ST, this folding
// will not affect those nodes.
int64_t BitOffset;
if (ChainBase.contains(DAG, ChainBitSize, STBase, STBitSize,
BitOffset)) {
SDValue ChainValue = ST1->getValue();
if (auto *C1 = dyn_cast<ConstantSDNode>(ChainValue)) {
if (auto *C = dyn_cast<ConstantSDNode>(Value)) {
APInt Val = C1->getAPIntValue();
APInt InsertVal = C->getAPIntValue().zextOrTrunc(STBitSize);
// FIXME: Handle Big-endian mode.
if (!DAG.getDataLayout().isBigEndian()) {
Val.insertBits(InsertVal, BitOffset);
SDValue NewSDVal =
DAG.getConstant(Val, SDLoc(C), ChainValue.getValueType(),
C1->isTargetOpcode(), C1->isOpaque());
SDNode *NewST1 = DAG.UpdateNodeOperands(
ST1, ST1->getChain(), NewSDVal, ST1->getOperand(2),
ST1->getOperand(3));
return CombineTo(ST, SDValue(NewST1, 0));
}
}
}
} // End ST subset of ST1 case.
}
}
}
// If this is an FP_ROUND or TRUNC followed by a store, fold this into a
// truncating store. We can do this even if this is already a truncstore.
if ((Value.getOpcode() == ISD::FP_ROUND || Value.getOpcode() == ISD::TRUNCATE)
&& Value.getNode()->hasOneUse() && ST->isUnindexed() &&
TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(),
ST->getMemoryVT())) {
return DAG.getTruncStore(Chain, SDLoc(N), Value.getOperand(0),
Ptr, ST->getMemoryVT(), ST->getMemOperand());
}
// Always perform this optimization before types are legal. If the target
// prefers, also try this after legalization to catch stores that were created
// by intrinsics or other nodes.
if (!LegalTypes || (TLI.mergeStoresAfterLegalization(ST->getMemoryVT()))) {
while (true) {
// There can be multiple store sequences on the same chain.
// Keep trying to merge store sequences until we are unable to do so
// or until we merge the last store on the chain.
bool Changed = MergeConsecutiveStores(ST);
if (!Changed) break;
// Return N, as the merge only uses CombineTo and no worklist
// cleanup is necessary.
if (N->getOpcode() == ISD::DELETED_NODE || !isa<StoreSDNode>(N))
return SDValue(N, 0);
}
}
// Try transforming N to an indexed store.
if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
return SDValue(N, 0);
// Turn 'store float 1.0, Ptr' -> 'store int 0x12345678, Ptr'
//
// Make sure to do this only after attempting to merge stores in order to
// avoid changing the types of some subset of stores due to visit order,
// preventing their merging.
if (isa<ConstantFPSDNode>(ST->getValue())) {
if (SDValue NewSt = replaceStoreOfFPConstant(ST))
return NewSt;
}
if (SDValue NewSt = splitMergedValStore(ST))
return NewSt;
return ReduceLoadOpStoreWidth(N);
}
SDValue DAGCombiner::visitLIFETIME_END(SDNode *N) {
const auto *LifetimeEnd = cast<LifetimeSDNode>(N);
if (!LifetimeEnd->hasOffset())
return SDValue();
const BaseIndexOffset LifetimeEndBase(N->getOperand(1), SDValue(),
LifetimeEnd->getOffset(), false);
// We walk up the chains to find stores.
SmallVector<SDValue, 8> Chains = {N->getOperand(0)};
while (!Chains.empty()) {
SDValue Chain = Chains.back();
Chains.pop_back();
if (!Chain.hasOneUse())
continue;
switch (Chain.getOpcode()) {
case ISD::TokenFactor:
for (unsigned Nops = Chain.getNumOperands(); Nops;)
Chains.push_back(Chain.getOperand(--Nops));
break;
case ISD::LIFETIME_START:
case ISD::LIFETIME_END:
// We can forward past any lifetime start/end that can be proven not to
// alias the node.
if (!isAlias(Chain.getNode(), N))
Chains.push_back(Chain.getOperand(0));
break;
case ISD::STORE: {
StoreSDNode *ST = dyn_cast<StoreSDNode>(Chain);
if (ST->isVolatile() || ST->isIndexed())
continue;
const BaseIndexOffset StoreBase = BaseIndexOffset::match(ST, DAG);
// If we store purely within object bounds just before its lifetime ends,
// we can remove the store.
if (LifetimeEndBase.contains(DAG, LifetimeEnd->getSize() * 8, StoreBase,
ST->getMemoryVT().getStoreSizeInBits())) {
LLVM_DEBUG(dbgs() << "\nRemoving store:"; StoreBase.dump();
dbgs() << "\nwithin LIFETIME_END of : ";
LifetimeEndBase.dump(); dbgs() << "\n");
CombineTo(ST, ST->getChain());
return SDValue(N, 0);
}
}
}
}
return SDValue();
}
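// Hypothetical example: given the chain
//   store i32 0, %buf
//   lifetime.end(4 bytes starting at %buf)
// the store writes purely within the object whose lifetime is ending and
// can never be observed afterwards, so the walk above deletes it.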
/// For the instruction sequence of store below, F and I values
/// are bundled together as an i64 value before being stored into memory.
/// Sometimes it is more efficient to generate separate stores for F and I,
/// which can remove the bitwise instructions or sink them to colder places.
///
/// (store (or (zext (bitcast F to i32) to i64),
/// (shl (zext I to i64), 32)), addr) -->
/// (store F, addr) and (store I, addr+4)
///
/// Similarly, splitting for other merged stores can also be beneficial, like:
/// For pair of {i32, i32}, i64 store --> two i32 stores.
/// For pair of {i32, i16}, i64 store --> two i32 stores.
/// For pair of {i16, i16}, i32 store --> two i16 stores.
/// For pair of {i16, i8}, i32 store --> two i16 stores.
/// For pair of {i8, i8}, i16 store --> two i8 stores.
///
/// We allow each target to determine specifically which kind of splitting is
/// supported.
///
/// The store patterns are commonly seen from the simple code snippet below
/// if only std::make_pair(...) is SROA-transformed before being inlined into hoo.
/// void goo(const std::pair<int, float> &);
/// void hoo() {
/// ...
/// goo(std::make_pair(tmp, ftmp));
/// ...
/// }
///
SDValue DAGCombiner::splitMergedValStore(StoreSDNode *ST) {
if (OptLevel == CodeGenOpt::None)
return SDValue();
SDValue Val = ST->getValue();
SDLoc DL(ST);
// Match OR operand.
if (!Val.getValueType().isScalarInteger() || Val.getOpcode() != ISD::OR)
return SDValue();
// Match SHL operand and get Lower and Higher parts of Val.
SDValue Op1 = Val.getOperand(0);
SDValue Op2 = Val.getOperand(1);
SDValue Lo, Hi;
if (Op1.getOpcode() != ISD::SHL) {
std::swap(Op1, Op2);
if (Op1.getOpcode() != ISD::SHL)
return SDValue();
}
Lo = Op2;
Hi = Op1.getOperand(0);
if (!Op1.hasOneUse())
return SDValue();
// Match shift amount to HalfValBitSize.
unsigned HalfValBitSize = Val.getValueSizeInBits() / 2;
ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(Op1.getOperand(1));
if (!ShAmt || ShAmt->getAPIntValue() != HalfValBitSize)
return SDValue();
// Lo and Hi are zero-extended from an integer whose size is less than or
// equal to HalfValBitSize (e.g. i32 or smaller zero-extended to i64).
if (Lo.getOpcode() != ISD::ZERO_EXTEND || !Lo.hasOneUse() ||
!Lo.getOperand(0).getValueType().isScalarInteger() ||
Lo.getOperand(0).getValueSizeInBits() > HalfValBitSize ||
Hi.getOpcode() != ISD::ZERO_EXTEND || !Hi.hasOneUse() ||
!Hi.getOperand(0).getValueType().isScalarInteger() ||
Hi.getOperand(0).getValueSizeInBits() > HalfValBitSize)
return SDValue();
// Use the EVTs of the low and high parts before any bitcast as the input
// to the target query.
EVT LowTy = (Lo.getOperand(0).getOpcode() == ISD::BITCAST)
? Lo.getOperand(0).getValueType()
: Lo.getValueType();
EVT HighTy = (Hi.getOperand(0).getOpcode() == ISD::BITCAST)
? Hi.getOperand(0).getValueType()
: Hi.getValueType();
if (!TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy))
return SDValue();
// Start to split store.
unsigned Alignment = ST->getAlignment();
MachineMemOperand::Flags MMOFlags = ST->getMemOperand()->getFlags();
AAMDNodes AAInfo = ST->getAAInfo();
// Change the sizes of Lo and Hi's value types to HalfValBitSize.
EVT VT = EVT::getIntegerVT(*DAG.getContext(), HalfValBitSize);
Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Lo.getOperand(0));
Hi = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Hi.getOperand(0));
SDValue Chain = ST->getChain();
SDValue Ptr = ST->getBasePtr();
// Lower value store.
SDValue St0 = DAG.getStore(Chain, DL, Lo, Ptr, ST->getPointerInfo(),
ST->getAlignment(), MMOFlags, AAInfo);
Ptr =
DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
DAG.getConstant(HalfValBitSize / 8, DL, Ptr.getValueType()));
// Higher value store.
SDValue St1 =
DAG.getStore(St0, DL, Hi, Ptr,
ST->getPointerInfo().getWithOffset(HalfValBitSize / 8),
Alignment / 2, MMOFlags, AAInfo);
return St1;
}
/// Convert a disguised subvector insertion into a shuffle:
/// insert_vector_elt V, (bitcast X from vector type), IdxC -->
/// bitcast(shuffle (bitcast V), (extended X), Mask)
/// Note: We do not use an insert_subvector node because that requires a legal
/// subvector type.
SDValue DAGCombiner::combineInsertEltToShuffle(SDNode *N, unsigned InsIndex) {
SDValue InsertVal = N->getOperand(1);
if (InsertVal.getOpcode() != ISD::BITCAST || !InsertVal.hasOneUse() ||
!InsertVal.getOperand(0).getValueType().isVector())
return SDValue();
SDValue SubVec = InsertVal.getOperand(0);
SDValue DestVec = N->getOperand(0);
EVT SubVecVT = SubVec.getValueType();
EVT VT = DestVec.getValueType();
unsigned NumSrcElts = SubVecVT.getVectorNumElements();
unsigned ExtendRatio = VT.getSizeInBits() / SubVecVT.getSizeInBits();
unsigned NumMaskVals = ExtendRatio * NumSrcElts;
// Step 1: Create a shuffle mask that implements this insert operation. The
// vector that we are inserting into will be operand 0 of the shuffle, so
// those elements are just 'i'. The inserted subvector is in the first
// positions of operand 1 of the shuffle. Example:
// insert v4i32 V, (v2i16 X), 2 --> shuffle v8i16 V', X', {0,1,2,3,8,9,6,7}
SmallVector<int, 16> Mask(NumMaskVals);
for (unsigned i = 0; i != NumMaskVals; ++i) {
if (i / NumSrcElts == InsIndex)
Mask[i] = (i % NumSrcElts) + NumMaskVals;
else
Mask[i] = i;
}
// Bail out if the target cannot handle the shuffle we want to create.
EVT SubVecEltVT = SubVecVT.getVectorElementType();
EVT ShufVT = EVT::getVectorVT(*DAG.getContext(), SubVecEltVT, NumMaskVals);
if (!TLI.isShuffleMaskLegal(Mask, ShufVT))
return SDValue();
// Step 2: Create a wide vector from the inserted source vector by appending
// undefined elements. This is the same size as our destination vector.
SDLoc DL(N);
SmallVector<SDValue, 8> ConcatOps(ExtendRatio, DAG.getUNDEF(SubVecVT));
ConcatOps[0] = SubVec;
SDValue PaddedSubV = DAG.getNode(ISD::CONCAT_VECTORS, DL, ShufVT, ConcatOps);
// Step 3: Shuffle in the padded subvector.
SDValue DestVecBC = DAG.getBitcast(ShufVT, DestVec);
SDValue Shuf = DAG.getVectorShuffle(ShufVT, DL, DestVecBC, PaddedSubV, Mask);
AddToWorklist(PaddedSubV.getNode());
AddToWorklist(DestVecBC.getNode());
AddToWorklist(Shuf.getNode());
return DAG.getBitcast(VT, Shuf);
}
SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
SDValue InVec = N->getOperand(0);
SDValue InVal = N->getOperand(1);
SDValue EltNo = N->getOperand(2);
SDLoc DL(N);
// If the inserted element is an UNDEF, just use the input vector.
if (InVal.isUndef())
return InVec;
EVT VT = InVec.getValueType();
unsigned NumElts = VT.getVectorNumElements();
// Remove redundant insertions:
// (insert_vector_elt x (extract_vector_elt x idx) idx) -> x
if (InVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
InVec == InVal.getOperand(0) && EltNo == InVal.getOperand(1))
return InVec;
auto *IndexC = dyn_cast<ConstantSDNode>(EltNo);
if (!IndexC) {
// If this is a variable insert into an undef vector, it might be better to splat:
// inselt undef, InVal, EltNo --> build_vector < InVal, InVal, ... >
if (InVec.isUndef() && TLI.shouldSplatInsEltVarIndex(VT)) {
SmallVector<SDValue, 8> Ops(NumElts, InVal);
return DAG.getBuildVector(VT, DL, Ops);
}
return SDValue();
}
// We must know which element is being inserted for folds below here.
unsigned Elt = IndexC->getZExtValue();
if (SDValue Shuf = combineInsertEltToShuffle(N, Elt))
return Shuf;
// Canonicalize insert_vector_elt dag nodes.
// Example:
// (insert_vector_elt (insert_vector_elt A, Idx0), Idx1)
// -> (insert_vector_elt (insert_vector_elt A, Idx1), Idx0)
//
// Do this only if the child insert_vector node has one use; also
// do this only if indices are both constants and Idx1 < Idx0.
if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT && InVec.hasOneUse()
&& isa<ConstantSDNode>(InVec.getOperand(2))) {
unsigned OtherElt = InVec.getConstantOperandVal(2);
if (Elt < OtherElt) {
// Swap nodes.
SDValue NewOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT,
InVec.getOperand(0), InVal, EltNo);
AddToWorklist(NewOp.getNode());
return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(InVec.getNode()),
VT, NewOp, InVec.getOperand(1), InVec.getOperand(2));
}
}
// If we can't generate a legal BUILD_VECTOR, exit.
if (LegalOperations && !TLI.isOperationLegal(ISD::BUILD_VECTOR, VT))
return SDValue();
// Check that the operand is a BUILD_VECTOR (or UNDEF, which can essentially
// be converted to a BUILD_VECTOR). Fill in the Ops vector with the
// vector elements.
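// For illustration (hypothetical operands):
// (insert_vector_elt (build_vector A, B, C, D), Val, 2)
// --> (build_vector A, B, Val, D)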
SmallVector<SDValue, 8> Ops;
// Do not combine these two vectors if the output vector will not replace
// the input vector.
if (InVec.getOpcode() == ISD::BUILD_VECTOR && InVec.hasOneUse()) {
Ops.append(InVec.getNode()->op_begin(),
InVec.getNode()->op_end());
} else if (InVec.isUndef()) {
Ops.append(NumElts, DAG.getUNDEF(InVal.getValueType()));
} else {
return SDValue();
}
assert(Ops.size() == NumElts && "Unexpected vector size");
// Insert the element
if (Elt < Ops.size()) {
// All the operands of BUILD_VECTOR must have the same type;
// we enforce that here.
EVT OpVT = Ops[0].getValueType();
Ops[Elt] = OpVT.isInteger() ? DAG.getAnyExtOrTrunc(InVal, DL, OpVT) : InVal;
}
// Return the new vector
return DAG.getBuildVector(VT, DL, Ops);
}
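/// Replace an extract_vector_elt of a vector load with a scalar load of just
/// the extracted element, when alignment allows it and the target agrees to
/// reduce the load width. For example (hypothetical types):
/// (i32 (extract_vector_elt (v4i32 load $addr), 2)) --> (i32 load $addr+8)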
SDValue DAGCombiner::scalarizeExtractedVectorLoad(SDNode *EVE, EVT InVecVT,
SDValue EltNo,
LoadSDNode *OriginalLoad) {
assert(!OriginalLoad->isVolatile());
EVT ResultVT = EVE->getValueType(0);
EVT VecEltVT = InVecVT.getVectorElementType();
unsigned Align = OriginalLoad->getAlignment();
unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment(
VecEltVT.getTypeForEVT(*DAG.getContext()));
if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VecEltVT))
return SDValue();
// Match the extending-load decision made below: if the extract result is
// wider than the vector element, the replacement load is an extending load.
ISD::LoadExtType ExtTy = ResultVT.bitsGT(VecEltVT) ?
ISD::EXTLOAD : ISD::NON_EXTLOAD;
if (!TLI.shouldReduceLoadWidth(OriginalLoad, ExtTy, VecEltVT))
return SDValue();
Align = NewAlign;
SDValue NewPtr = OriginalLoad->getBasePtr();
SDValue Offset;
EVT PtrType = NewPtr.getValueType();
MachinePointerInfo MPI;
SDLoc DL(EVE);
if (auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo)) {
int Elt = ConstEltNo->getZExtValue();
unsigned PtrOff = VecEltVT.getSizeInBits() * Elt / 8;
Offset = DAG.getConstant(PtrOff, DL, PtrType);
MPI = OriginalLoad->getPointerInfo().getWithOffset(PtrOff);
} else {
Offset = DAG.getZExtOrTrunc(EltNo, DL, PtrType);
Offset = DAG.getNode(
ISD::MUL, DL, PtrType, Offset,
DAG.getConstant(VecEltVT.getStoreSize(), DL, PtrType));
// Discard the pointer info except the address space because the memory
// operand can't represent this new access since the offset is variable.
MPI = MachinePointerInfo(OriginalLoad->getPointerInfo().getAddrSpace());
}
NewPtr = DAG.getNode(ISD::ADD, DL, PtrType, NewPtr, Offset);
// The replacement we need to do here is a little tricky: we need to
// replace an extractelement of a load with a load.
// Use ReplaceAllUsesOfValuesWith to do the replacement.
// Note that this replacement assumes that the extractvalue is the only
// use of the load; that's okay because we don't want to perform this
// transformation in other cases anyway.
SDValue Load;
SDValue Chain;
if (ResultVT.bitsGT(VecEltVT)) {
// If the result type of vextract is wider than the load, then issue an
// extending load instead.
ISD::LoadExtType ExtType = TLI.isLoadExtLegal(ISD::ZEXTLOAD, ResultVT,
VecEltVT)
? ISD::ZEXTLOAD
: ISD::EXTLOAD;
Load = DAG.getExtLoad(ExtType, SDLoc(EVE), ResultVT,
OriginalLoad->getChain(), NewPtr, MPI, VecEltVT,
Align, OriginalLoad->getMemOperand()->getFlags(),
OriginalLoad->getAAInfo());
Chain = Load.getValue(1);
} else {
Load = DAG.getLoad(VecEltVT, SDLoc(EVE), OriginalLoad->getChain(), NewPtr,
MPI, Align, OriginalLoad->getMemOperand()->getFlags(),
OriginalLoad->getAAInfo());
Chain = Load.getValue(1);
if (ResultVT.bitsLT(VecEltVT))
Load = DAG.getNode(ISD::TRUNCATE, SDLoc(EVE), ResultVT, Load);
else
Load = DAG.getBitcast(ResultVT, Load);
}
WorklistRemover DeadNodes(*this);
SDValue From[] = { SDValue(EVE, 0), SDValue(OriginalLoad, 1) };
SDValue To[] = { Load, Chain };
DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
// Since we're explicitly calling ReplaceAllUses, add the new node to the
// worklist explicitly as well.
AddToWorklist(Load.getNode());
AddUsersToWorklist(Load.getNode()); // Add users too
// Make sure to revisit this node to clean it up; it will usually be dead.
AddToWorklist(EVE);
++OpsNarrowed;
return SDValue(EVE, 0);
}
/// Transform a vector binary operation into a scalar binary operation by moving
/// the math/logic after an extract element of a vector.
static SDValue scalarizeExtractedBinop(SDNode *ExtElt, SelectionDAG &DAG,
bool LegalOperations) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SDValue Vec = ExtElt->getOperand(0);
SDValue Index = ExtElt->getOperand(1);
auto *IndexC = dyn_cast<ConstantSDNode>(Index);
if (!IndexC || !TLI.isBinOp(Vec.getOpcode()) || !Vec.hasOneUse() ||
Vec.getNode()->getNumValues() != 1)
return SDValue();
// Targets may want to avoid this to prevent an expensive register transfer.
if (!TLI.shouldScalarizeBinop(Vec))
return SDValue();
// Extracting an element of a vector constant is constant-folded, so this
// transform is just replacing a vector op with a scalar op while moving the
// extract.
SDValue Op0 = Vec.getOperand(0);
SDValue Op1 = Vec.getOperand(1);
if (isAnyConstantBuildVector(Op0, true) ||
isAnyConstantBuildVector(Op1, true)) {
// extractelt (binop X, C), IndexC --> binop (extractelt X, IndexC), C'
// extractelt (binop C, X), IndexC --> binop C', (extractelt X, IndexC)
SDLoc DL(ExtElt);
EVT VT = ExtElt->getValueType(0);
SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Op0, Index);
SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Op1, Index);
return DAG.getNode(Vec.getOpcode(), DL, VT, Ext0, Ext1);
}
return SDValue();
}
SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
SDValue VecOp = N->getOperand(0);
SDValue Index = N->getOperand(1);
EVT ScalarVT = N->getValueType(0);
EVT VecVT = VecOp.getValueType();
if (VecOp.isUndef())
return DAG.getUNDEF(ScalarVT);
// (extract_vector_elt (insert_vector_elt vec, val, idx), idx) -> val
//
// This only really matters if the index is non-constant since other combines
// on the constant elements already work.
SDLoc DL(N);
if (VecOp.getOpcode() == ISD::INSERT_VECTOR_ELT &&
Index == VecOp.getOperand(2)) {
SDValue Elt = VecOp.getOperand(1);
return VecVT.isInteger() ? DAG.getAnyExtOrTrunc(Elt, DL, ScalarVT) : Elt;
}
// (vextract (scalar_to_vector val), 0) -> val
if (VecOp.getOpcode() == ISD::SCALAR_TO_VECTOR) {
// Check if the result type doesn't match the inserted element type. A
// SCALAR_TO_VECTOR may truncate the inserted element and the
// EXTRACT_VECTOR_ELT may widen the extracted vector.
SDValue InOp = VecOp.getOperand(0);
if (InOp.getValueType() != ScalarVT) {
assert(InOp.getValueType().isInteger() && ScalarVT.isInteger());
return DAG.getSExtOrTrunc(InOp, DL, ScalarVT);
}
return InOp;
}
// extract_vector_elt of out-of-bounds element -> UNDEF
auto *IndexC = dyn_cast<ConstantSDNode>(Index);
unsigned NumElts = VecVT.getVectorNumElements();
if (IndexC && IndexC->getAPIntValue().uge(NumElts))
return DAG.getUNDEF(ScalarVT);
// extract_vector_elt (build_vector x, y), 1 -> y
if (IndexC && VecOp.getOpcode() == ISD::BUILD_VECTOR &&
TLI.isTypeLegal(VecVT) &&
(VecOp.hasOneUse() || TLI.aggressivelyPreferBuildVectorSources(VecVT))) {
SDValue Elt = VecOp.getOperand(IndexC->getZExtValue());
EVT InEltVT = Elt.getValueType();
// Sometimes build_vector's scalar input types do not match result type.
if (ScalarVT == InEltVT)
return Elt;
// TODO: It may be useful to truncate (when the truncate is free) if the
// build_vector implicitly converts.
}
// TODO: These transforms should not require the 'hasOneUse' restriction, but
// there are regressions on multiple targets without it. We can end up with a
// mess of scalar and vector code if we reduce only part of the DAG to scalar.
if (IndexC && VecOp.getOpcode() == ISD::BITCAST && VecVT.isInteger() &&
VecOp.hasOneUse()) {
// The vector index of the LSBs of the source depends on endianness.
bool IsLE = DAG.getDataLayout().isLittleEndian();
unsigned ExtractIndex = IndexC->getZExtValue();
// extract_elt (v2i32 (bitcast i64:x)), BCTruncElt -> i32 (trunc i64:x)
unsigned BCTruncElt = IsLE ? 0 : NumElts - 1;
SDValue BCSrc = VecOp.getOperand(0);
if (ExtractIndex == BCTruncElt && BCSrc.getValueType().isScalarInteger())
return DAG.getNode(ISD::TRUNCATE, DL, ScalarVT, BCSrc);
if (LegalTypes && BCSrc.getValueType().isInteger() &&
BCSrc.getOpcode() == ISD::SCALAR_TO_VECTOR) {
// ext_elt (bitcast (scalar_to_vec i64 X to v2i64) to v4i32), TruncElt -->
// trunc i64 X to i32
SDValue X = BCSrc.getOperand(0);
assert(X.getValueType().isScalarInteger() && ScalarVT.isScalarInteger() &&
"Extract element and scalar to vector can't change element type "
"from FP to integer.");
unsigned XBitWidth = X.getValueSizeInBits();
unsigned VecEltBitWidth = VecVT.getScalarSizeInBits();
BCTruncElt = IsLE ? 0 : XBitWidth / VecEltBitWidth - 1;
// An extract element return value type can be wider than its vector
// operand element type. In that case, the high bits are undefined, so
// it's possible that we may need to extend rather than truncate.
if (ExtractIndex == BCTruncElt && XBitWidth > VecEltBitWidth) {
assert(XBitWidth % VecEltBitWidth == 0 &&
"Scalar bitwidth must be a multiple of vector element bitwidth");
return DAG.getAnyExtOrTrunc(X, DL, ScalarVT);
}
}
}
if (SDValue BO = scalarizeExtractedBinop(N, DAG, LegalOperations))
return BO;
// Transform: (EXTRACT_VECTOR_ELT( VECTOR_SHUFFLE )) -> EXTRACT_VECTOR_ELT.
// We only perform this optimization before the op legalization phase because
// we may introduce new vector instructions which are not backed by TD
// patterns. For example, on AVX this could mean extracting elements from a
// wide vector without using extract_subvector. However, if we can find an underlying
// scalar value, then we can always use that.
if (IndexC && VecOp.getOpcode() == ISD::VECTOR_SHUFFLE) {
auto *Shuf = cast<ShuffleVectorSDNode>(VecOp);
// Find the new index to extract from.
int OrigElt = Shuf->getMaskElt(IndexC->getZExtValue());
// Extracting an undef index is undef.
if (OrigElt == -1)
return DAG.getUNDEF(ScalarVT);
// Select the right vector half to extract from.
SDValue SVInVec;
if (OrigElt < (int)NumElts) {
SVInVec = VecOp.getOperand(0);
} else {
SVInVec = VecOp.getOperand(1);
OrigElt -= NumElts;
}
if (SVInVec.getOpcode() == ISD::BUILD_VECTOR) {
SDValue InOp = SVInVec.getOperand(OrigElt);
if (InOp.getValueType() != ScalarVT) {
assert(InOp.getValueType().isInteger() && ScalarVT.isInteger());
InOp = DAG.getSExtOrTrunc(InOp, DL, ScalarVT);
}
return InOp;
}
// FIXME: We should handle recursing on other vector shuffles and
// scalar_to_vector here as well.
if (!LegalOperations ||
// FIXME: Should really be just isOperationLegalOrCustom.
TLI.isOperationLegal(ISD::EXTRACT_VECTOR_ELT, VecVT) ||
TLI.isOperationExpand(ISD::VECTOR_SHUFFLE, VecVT)) {
EVT IndexTy = TLI.getVectorIdxTy(DAG.getDataLayout());
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ScalarVT, SVInVec,
DAG.getConstant(OrigElt, DL, IndexTy));
}
}
// If only EXTRACT_VECTOR_ELT nodes use the source vector we can
// simplify it based on the (valid) extraction indices.
if (llvm::all_of(VecOp->uses(), [&](SDNode *Use) {
return Use->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
Use->getOperand(0) == VecOp &&
isa<ConstantSDNode>(Use->getOperand(1));
})) {
APInt DemandedElts = APInt::getNullValue(NumElts);
for (SDNode *Use : VecOp->uses()) {
auto *CstElt = cast<ConstantSDNode>(Use->getOperand(1));
if (CstElt->getAPIntValue().ult(NumElts))
DemandedElts.setBit(CstElt->getZExtValue());
}
if (SimplifyDemandedVectorElts(VecOp, DemandedElts, true)) {
// We simplified the vector operand of this extract element. If this
// extract is not dead, visit it again so it is folded properly.
if (N->getOpcode() != ISD::DELETED_NODE)
AddToWorklist(N);
return SDValue(N, 0);
}
}
// Everything under here is trying to match an extract of a loaded value.
// If the result of the load has to be truncated, then it's not necessarily
// profitable.
bool BCNumEltsChanged = false;
EVT ExtVT = VecVT.getVectorElementType();
EVT LVT = ExtVT;
if (ScalarVT.bitsLT(LVT) && !TLI.isTruncateFree(LVT, ScalarVT))
return SDValue();
if (VecOp.getOpcode() == ISD::BITCAST) {
// Don't duplicate a load with other uses.
if (!VecOp.hasOneUse())
return SDValue();
EVT BCVT = VecOp.getOperand(0).getValueType();
if (!BCVT.isVector() || ExtVT.bitsGT(BCVT.getVectorElementType()))
return SDValue();
if (NumElts != BCVT.getVectorNumElements())
BCNumEltsChanged = true;
VecOp = VecOp.getOperand(0);
ExtVT = BCVT.getVectorElementType();
}
// extract (vector load $addr), i --> load $addr + i * size
if (!LegalOperations && !IndexC && VecOp.hasOneUse() &&
ISD::isNormalLoad(VecOp.getNode()) &&
!Index->hasPredecessor(VecOp.getNode())) {
auto *VecLoad = dyn_cast<LoadSDNode>(VecOp);
if (VecLoad && !VecLoad->isVolatile())
return scalarizeExtractedVectorLoad(N, VecVT, Index, VecLoad);
}
// Perform only after legalization to ensure build_vector / vector_shuffle
// optimizations have already been done.
if (!LegalOperations || !IndexC)
return SDValue();
// (vextract (v4f32 load $addr), c) -> (f32 load $addr+c*size)
// (vextract (v4f32 s2v (f32 load $addr)), c) -> (f32 load $addr+c*size)
// (vextract (v4f32 shuffle (load $addr), <1,u,u,u>), 0) -> (f32 load $addr)
int Elt = IndexC->getZExtValue();
LoadSDNode *LN0 = nullptr;
if (ISD::isNormalLoad(VecOp.getNode())) {
LN0 = cast<LoadSDNode>(VecOp);
} else if (VecOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
VecOp.getOperand(0).getValueType() == ExtVT &&
ISD::isNormalLoad(VecOp.getOperand(0).getNode())) {
// Don't duplicate a load with other uses.
if (!VecOp.hasOneUse())
return SDValue();
LN0 = cast<LoadSDNode>(VecOp.getOperand(0));
}
if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(VecOp)) {
// (vextract (vector_shuffle (load $addr), v2, <1, u, u, u>), 1)
// =>
// (load $addr+1*size)
// Don't duplicate a load with other uses.
if (!VecOp.hasOneUse())
return SDValue();
// If the bit convert changed the number of elements, it is unsafe
// to examine the mask.
if (BCNumEltsChanged)
return SDValue();
// Select the input vector, guarding against an out-of-range extract index.
int Idx = (Elt > (int)NumElts) ? -1 : Shuf->getMaskElt(Elt);
VecOp = (Idx < (int)NumElts) ? VecOp.getOperand(0) : VecOp.getOperand(1);
if (VecOp.getOpcode() == ISD::BITCAST) {
// Don't duplicate a load with other uses.
if (!VecOp.hasOneUse())
return SDValue();
VecOp = VecOp.getOperand(0);
}
if (ISD::isNormalLoad(VecOp.getNode())) {
LN0 = cast<LoadSDNode>(VecOp);
Elt = (Idx < (int)NumElts) ? Idx : Idx - (int)NumElts;
Index = DAG.getConstant(Elt, DL, Index.getValueType());
}
}
// Make sure we found a non-volatile load and the extractelement is
// the only use.
if (!LN0 || !LN0->hasNUsesOfValue(1,0) || LN0->isVolatile())
return SDValue();
// If Idx was -1 above, Elt is going to be -1, so just return undef.
if (Elt == -1)
return DAG.getUNDEF(LVT);
return scalarizeExtractedVectorLoad(N, VecVT, Index, LN0);
}
// Simplify (build_vec (ext X)) to (bitcast (build_vec X))
SDValue DAGCombiner::reduceBuildVecExtToExtBuildVec(SDNode *N) {
// We perform this optimization post type-legalization because
// the type-legalizer often scalarizes integer-promoted vectors.
// Performing this optimization before may create bit-casts which
// will be type-legalized to complex code sequences.
// We perform this optimization only before the operation legalizer because we
// may introduce illegal operations.
if (Level != AfterLegalizeVectorOps && Level != AfterLegalizeTypes)
return SDValue();
unsigned NumInScalars = N->getNumOperands();
SDLoc DL(N);
EVT VT = N->getValueType(0);
// Check to see if this is a BUILD_VECTOR of a bunch of values
// which come from any_extend or zero_extend nodes. If so, we can create
// a new BUILD_VECTOR using bit-casts which may enable other BUILD_VECTOR
// optimizations. We do not handle sign-extend because we can't fill the sign
// using shuffles.
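// For illustration (hypothetical little-endian DAG):
// (v4i32 build_vector (zext i16 A), (zext i16 B), (zext i16 C), (zext i16 D))
// --> (v4i32 bitcast (v8i16 build_vector A, 0, B, 0, C, 0, D, 0))
// If all inputs were any_extend, the filler elements would be undef instead
// of zero.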
EVT SourceType = MVT::Other;
bool AllAnyExt = true;
for (unsigned i = 0; i != NumInScalars; ++i) {
SDValue In = N->getOperand(i);
// Ignore undef inputs.
if (In.isUndef()) continue;
bool AnyExt = In.getOpcode() == ISD::ANY_EXTEND;
bool ZeroExt = In.getOpcode() == ISD::ZERO_EXTEND;
// Abort if the element is not an extension.
if (!ZeroExt && !AnyExt) {
SourceType = MVT::Other;
break;
}
// The input is a ZeroExt or AnyExt. Check the original type.
EVT InTy = In.getOperand(0).getValueType();
// Check that all of the widened source types are the same.
if (SourceType == MVT::Other)
// First time.
SourceType = InTy;
else if (InTy != SourceType) {
// Multiple source types. Abort.
SourceType = MVT::Other;
break;
}
// Check if all of the extends are ANY_EXTENDs.
AllAnyExt &= AnyExt;
}
// In order to have valid types, all of the inputs must be extended from the
// same source type and all of the inputs must be any or zero extend.
// Scalar sizes must be a power of two.
EVT OutScalarTy = VT.getScalarType();
bool ValidTypes = SourceType != MVT::Other &&
isPowerOf2_32(OutScalarTy.getSizeInBits()) &&
isPowerOf2_32(SourceType.getSizeInBits());
// Create a new simpler BUILD_VECTOR sequence which other optimizations can
// turn into a single shuffle instruction.
if (!ValidTypes)
return SDValue();
bool isLE = DAG.getDataLayout().isLittleEndian();
unsigned ElemRatio = OutScalarTy.getSizeInBits()/SourceType.getSizeInBits();
assert(ElemRatio > 1 && "Invalid element size ratio");
SDValue Filler = AllAnyExt ? DAG.getUNDEF(SourceType):
DAG.getConstant(0, DL, SourceType);
unsigned NewBVElems = ElemRatio * VT.getVectorNumElements();
SmallVector<SDValue, 8> Ops(NewBVElems, Filler);
// Populate the new build_vector
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
SDValue Cast = N->getOperand(i);
assert((Cast.getOpcode() == ISD::ANY_EXTEND ||
Cast.getOpcode() == ISD::ZERO_EXTEND ||
Cast.isUndef()) && "Invalid cast opcode");
SDValue In;
if (Cast.isUndef())
In = DAG.getUNDEF(SourceType);
else
In = Cast->getOperand(0);
unsigned Index = isLE ? (i * ElemRatio) :
(i * ElemRatio + (ElemRatio - 1));
assert(Index < Ops.size() && "Invalid index");
Ops[Index] = In;
}
// The type of the new BUILD_VECTOR node.
EVT VecVT = EVT::getVectorVT(*DAG.getContext(), SourceType, NewBVElems);
assert(VecVT.getSizeInBits() == VT.getSizeInBits() &&
"Invalid vector size");
// Check if the new vector type is legal.
if (!isTypeLegal(VecVT) ||
(!TLI.isOperationLegal(ISD::BUILD_VECTOR, VecVT) &&
TLI.isOperationLegal(ISD::BUILD_VECTOR, VT)))
return SDValue();
// Make the new BUILD_VECTOR.
SDValue BV = DAG.getBuildVector(VecVT, DL, Ops);
// The new BUILD_VECTOR node has the potential to be further optimized.
AddToWorklist(BV.getNode());
// Bitcast to the desired type.
return DAG.getBitcast(VT, BV);
}
SDValue DAGCombiner::createBuildVecShuffle(const SDLoc &DL, SDNode *N,
ArrayRef<int> VectorMask,
SDValue VecIn1, SDValue VecIn2,
unsigned LeftIdx, bool DidSplitVec) {
MVT IdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
SDValue ZeroIdx = DAG.getConstant(0, DL, IdxTy);
EVT VT = N->getValueType(0);
EVT InVT1 = VecIn1.getValueType();
EVT InVT2 = VecIn2.getNode() ? VecIn2.getValueType() : InVT1;
unsigned NumElems = VT.getVectorNumElements();
unsigned ShuffleNumElems = NumElems;
// If we artificially split a vector in two already, then the offsets in the
// operands will all be based off of VecIn1, even those in VecIn2.
unsigned Vec2Offset = DidSplitVec ? 0 : InVT1.getVectorNumElements();
// We can't generate a shuffle node with mismatched input and output types.
// Try to make the types match the type of the output.
if (InVT1 != VT || InVT2 != VT) {
if ((VT.getSizeInBits() % InVT1.getSizeInBits() == 0) && InVT1 == InVT2) {
// If the output vector length is a multiple of both input lengths,
// we can concatenate them and pad the rest with undefs.
unsigned NumConcats = VT.getSizeInBits() / InVT1.getSizeInBits();
assert(NumConcats >= 2 && "Concat needs at least two inputs!");
SmallVector<SDValue, 2> ConcatOps(NumConcats, DAG.getUNDEF(InVT1));
ConcatOps[0] = VecIn1;
ConcatOps[1] = VecIn2 ? VecIn2 : DAG.getUNDEF(InVT1);
VecIn1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps);
VecIn2 = SDValue();
} else if (InVT1.getSizeInBits() == VT.getSizeInBits() * 2) {
if (!TLI.isExtractSubvectorCheap(VT, InVT1, NumElems))
return SDValue();
if (!VecIn2.getNode()) {
// If we only have one input vector, and it's twice the size of the
// output, split it in two.
VecIn2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, VecIn1,
DAG.getConstant(NumElems, DL, IdxTy));
VecIn1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, VecIn1, ZeroIdx);
// Since we now have shorter input vectors, adjust the offset of the
// second vector's start.
Vec2Offset = NumElems;
} else if (InVT2.getSizeInBits() <= InVT1.getSizeInBits()) {
// VecIn1 is wider than the output, and we have another, possibly
// smaller input. Pad the smaller input with undefs, shuffle at the
// input vector width, and extract the output.
// The shuffle type is different than VT, so check legality again.
if (LegalOperations &&
!TLI.isOperationLegal(ISD::VECTOR_SHUFFLE, InVT1))
return SDValue();
// Legalizing INSERT_SUBVECTOR is tricky - you basically have to
// lower it back into a BUILD_VECTOR. So if the inserted type is
// illegal, don't even try.
if (InVT1 != InVT2) {
if (!TLI.isTypeLegal(InVT2))
return SDValue();
VecIn2 = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InVT1,
DAG.getUNDEF(InVT1), VecIn2, ZeroIdx);
}
ShuffleNumElems = NumElems * 2;
} else {
// Both VecIn1 and VecIn2 are wider than the output, and VecIn2 is wider
// than VecIn1. We can't handle this for now - this case will disappear
// when we start sorting the vectors by type.
return SDValue();
}
} else if (InVT2.getSizeInBits() * 2 == VT.getSizeInBits() &&
InVT1.getSizeInBits() == VT.getSizeInBits()) {
SmallVector<SDValue, 2> ConcatOps(2, DAG.getUNDEF(InVT2));
ConcatOps[0] = VecIn2;
VecIn2 = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps);
} else {
// TODO: Support cases where the length mismatch isn't exactly by a
// factor of 2.
// TODO: Move this check upwards, so that if we have bad type
// mismatches, we don't create any DAG nodes.
return SDValue();
}
}
// Initialize mask to undef.
SmallVector<int, 8> Mask(ShuffleNumElems, -1);
// Only need to run up to the number of elements actually used, not the
// total number of elements in the shuffle - if we are shuffling a wider
// vector, the high lanes should be set to undef.
for (unsigned i = 0; i != NumElems; ++i) {
if (VectorMask[i] <= 0)
continue;
unsigned ExtIndex = N->getOperand(i).getConstantOperandVal(1);
if (VectorMask[i] == (int)LeftIdx) {
Mask[i] = ExtIndex;
} else if (VectorMask[i] == (int)LeftIdx + 1) {
Mask[i] = Vec2Offset + ExtIndex;
}
}
// The types of the input vectors may have changed above.
InVT1 = VecIn1.getValueType();
// If we already have a VecIn2, it should have the same type as VecIn1.
// If we don't, get an undef/zero vector of the appropriate type.
VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(InVT1);
assert(InVT1 == VecIn2.getValueType() && "Unexpected second input type.");
SDValue Shuffle = DAG.getVectorShuffle(InVT1, DL, VecIn1, VecIn2, Mask);
if (ShuffleNumElems > NumElems)
Shuffle = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Shuffle, ZeroIdx);
return Shuffle;
}
static SDValue reduceBuildVecToShuffleWithZero(SDNode *BV, SelectionDAG &DAG) {
assert(BV->getOpcode() == ISD::BUILD_VECTOR && "Expected build vector");
// First, determine where the build vector is not undef.
// TODO: We could extend this to handle zero elements as well as undefs.
int NumBVOps = BV->getNumOperands();
int ZextElt = -1;
for (int i = 0; i != NumBVOps; ++i) {
SDValue Op = BV->getOperand(i);
if (Op.isUndef())
continue;
if (ZextElt == -1)
ZextElt = i;
else
return SDValue();
}
// Bail out if there's no non-undef element.
if (ZextElt == -1)
return SDValue();
// The build vector contains some number of undef elements and exactly
// one other element. That other element must be a zero-extended scalar
// extracted from a vector at a constant index to turn this into a shuffle.
// Also, require that the build vector does not implicitly truncate/extend
// its elements.
// TODO: This could be enhanced to allow ANY_EXTEND as well as ZERO_EXTEND.
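// For illustration (hypothetical DAG, little-endian lane order):
// (v2i64 build_vector (zext (extractelt (v4i32 V), 1) to i64), undef)
// --> (v2i64 bitcast (shuffle V, (v4i32 zero), <1,4,u,u>))
// Element 1 of V supplies the low half of lane 0, and element 4 (the first
// element of the zero vector) supplies the high half.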
EVT VT = BV->getValueType(0);
SDValue Zext = BV->getOperand(ZextElt);
if (Zext.getOpcode() != ISD::ZERO_EXTEND || !Zext.hasOneUse() ||
Zext.getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
!isa<ConstantSDNode>(Zext.getOperand(0).getOperand(1)) ||
Zext.getValueSizeInBits() != VT.getScalarSizeInBits())
return SDValue();
// The zero-extend width must be a multiple of the source size, and we must
// be building a vector of the same size as the source of the extract element.
SDValue Extract = Zext.getOperand(0);
unsigned DestSize = Zext.getValueSizeInBits();
unsigned SrcSize = Extract.getValueSizeInBits();
if (DestSize % SrcSize != 0 ||
Extract.getOperand(0).getValueSizeInBits() != VT.getSizeInBits())
return SDValue();
// Create a shuffle mask that will combine the extracted element with zeros
// and undefs.
int ZextRatio = DestSize / SrcSize;
int NumMaskElts = NumBVOps * ZextRatio;
SmallVector<int, 32> ShufMask(NumMaskElts, -1);
for (int i = 0; i != NumMaskElts; ++i) {
if (i / ZextRatio == ZextElt) {
// The low bits of the (potentially translated) extracted element map to
// the source vector. The high bits map to zero. We will use a zero vector
// as the 2nd source operand of the shuffle, so use the 1st element of
// that vector (mask value is number-of-elements) for the high bits.
if (i % ZextRatio == 0)
ShufMask[i] = Extract.getConstantOperandVal(1);
else
ShufMask[i] = NumMaskElts;
}
// Undef elements of the build vector remain undef because we initialize
// the shuffle mask with -1.
}
// Turn this into a shuffle with zero if that's legal.
EVT VecVT = Extract.getOperand(0).getValueType();
if (!DAG.getTargetLoweringInfo().isShuffleMaskLegal(ShufMask, VecVT))
return SDValue();
// buildvec undef, ..., (zext (extractelt V, IndexC)), undef... -->
// bitcast (shuffle V, ZeroVec, VectorMask)
SDLoc DL(BV);
SDValue ZeroVec = DAG.getConstant(0, DL, VecVT);
SDValue Shuf = DAG.getVectorShuffle(VecVT, DL, Extract.getOperand(0), ZeroVec,
ShufMask);
return DAG.getBitcast(VT, Shuf);
}
// Check to see if this is a BUILD_VECTOR of a bunch of EXTRACT_VECTOR_ELT
// operations. If the types of the vectors we're extracting from allow it,
// turn this into a vector_shuffle node.
SDValue DAGCombiner::reduceBuildVecToShuffle(SDNode *N) {
SDLoc DL(N);
EVT VT = N->getValueType(0);
// Only type-legal BUILD_VECTOR nodes are converted to shuffle nodes.
if (!isTypeLegal(VT))
return SDValue();
if (SDValue V = reduceBuildVecToShuffleWithZero(N, DAG))
return V;
// May only combine to shuffle after legalize if shuffle is legal.
if (LegalOperations && !TLI.isOperationLegal(ISD::VECTOR_SHUFFLE, VT))
return SDValue();
bool UsesZeroVector = false;
unsigned NumElems = N->getNumOperands();
// Record, for each element of the newly built vector, which input vector
// that element comes from. -1 stands for undef, 0 for the zero vector,
// and positive values for the input vectors.
// VectorMask maps each element to its vector number, and VecIn maps vector
// numbers to their initial SDValues.
SmallVector<int, 8> VectorMask(NumElems, -1);
SmallVector<SDValue, 8> VecIn;
VecIn.push_back(SDValue());
for (unsigned i = 0; i != NumElems; ++i) {
SDValue Op = N->getOperand(i);
if (Op.isUndef())
continue;
// See if we can use a blend with a zero vector.
// TODO: Should we generalize this to a blend with an arbitrary constant
// vector?
if (isNullConstant(Op) || isNullFPConstant(Op)) {
UsesZeroVector = true;
VectorMask[i] = 0;
continue;
}
// Not an undef or zero. If the input is something other than an
// EXTRACT_VECTOR_ELT with an in-range constant index, bail out.
if (Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
!isa<ConstantSDNode>(Op.getOperand(1)))
return SDValue();
SDValue ExtractedFromVec = Op.getOperand(0);
const APInt &ExtractIdx = Op.getConstantOperandAPInt(1);
if (ExtractIdx.uge(ExtractedFromVec.getValueType().getVectorNumElements()))
return SDValue();
// All inputs must have the same element type as the output.
if (VT.getVectorElementType() !=
ExtractedFromVec.getValueType().getVectorElementType())
return SDValue();
// Have we seen this input vector before?
// The vectors are expected to be tiny (usually 1 or 2 elements), so using
// a map back from SDValues to numbers isn't worth it.
unsigned Idx = std::distance(
VecIn.begin(), std::find(VecIn.begin(), VecIn.end(), ExtractedFromVec));
if (Idx == VecIn.size())
VecIn.push_back(ExtractedFromVec);
VectorMask[i] = Idx;
}
// If we didn't find at least one input vector, bail out.
if (VecIn.size() < 2)
return SDValue();
// If all the operands of the BUILD_VECTOR extract from the same
// vector, then split that vector efficiently based on the maximum
// vector access index, and adjust VectorMask and
// VecIn accordingly.
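// For illustration (hypothetical types): building a v4i32 from elements
// {2, 11, 3, 12} of one v16i32 source gives MaxIndex = 12 and
// NearestPow2 = 16, so the source is split into two v8i32 halves and the
// mask entries are renumbered to vector 1 (index < 8) or vector 2.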
bool DidSplitVec = false;
if (VecIn.size() == 2) {
unsigned MaxIndex = 0;
unsigned NearestPow2 = 0;
SDValue Vec = VecIn.back();
EVT InVT = Vec.getValueType();
MVT IdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
SmallVector<unsigned, 8> IndexVec(NumElems, 0);
for (unsigned i = 0; i < NumElems; i++) {
if (VectorMask[i] <= 0)
continue;
unsigned Index = N->getOperand(i).getConstantOperandVal(1);
IndexVec[i] = Index;
MaxIndex = std::max(MaxIndex, Index);
}
NearestPow2 = PowerOf2Ceil(MaxIndex);
if (InVT.isSimple() && NearestPow2 > 2 && MaxIndex < NearestPow2 &&
NumElems * 2 < NearestPow2) {
unsigned SplitSize = NearestPow2 / 2;
EVT SplitVT = EVT::getVectorVT(*DAG.getContext(),
InVT.getVectorElementType(), SplitSize);
if (TLI.isTypeLegal(SplitVT)) {
SDValue VecIn2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, Vec,
DAG.getConstant(SplitSize, DL, IdxTy));
SDValue VecIn1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, Vec,
DAG.getConstant(0, DL, IdxTy));
VecIn.pop_back();
VecIn.push_back(VecIn1);
VecIn.push_back(VecIn2);
DidSplitVec = true;
for (unsigned i = 0; i < NumElems; i++) {
if (VectorMask[i] <= 0)
continue;
VectorMask[i] = (IndexVec[i] < SplitSize) ? 1 : 2;
}
}
}
}
// TODO: We want to sort the vectors by descending length, so that adjacent
// pairs have similar length, and the longer vector is always first in the
// pair.
// TODO: Should this fire if some of the input vectors has illegal type (like
// it does now), or should we let legalization run its course first?
// Shuffle phase:
// Take pairs of vectors, and shuffle them so that the result has elements
// from these vectors in the correct places.
// For example, given:
// t10: i32 = extract_vector_elt t1, Constant:i64<0>
// t11: i32 = extract_vector_elt t2, Constant:i64<0>
// t12: i32 = extract_vector_elt t3, Constant:i64<0>
// t13: i32 = extract_vector_elt t1, Constant:i64<1>
// t14: v4i32 = BUILD_VECTOR t10, t11, t12, t13
// We will generate:
// t20: v4i32 = vector_shuffle<0,4,u,1> t1, t2
// t21: v4i32 = vector_shuffle<u,u,0,u> t3, undef
SmallVector<SDValue, 4> Shuffles;
for (unsigned In = 0, Len = (VecIn.size() / 2); In < Len; ++In) {
unsigned LeftIdx = 2 * In + 1;
SDValue VecLeft = VecIn[LeftIdx];
SDValue VecRight =
(LeftIdx + 1) < VecIn.size() ? VecIn[LeftIdx + 1] : SDValue();
if (SDValue Shuffle = createBuildVecShuffle(DL, N, VectorMask, VecLeft,
VecRight, LeftIdx, DidSplitVec))
Shuffles.push_back(Shuffle);
else
return SDValue();
}
// If we need the zero vector as an "ingredient" in the blend tree, add it
// to the list of shuffles.
if (UsesZeroVector)
Shuffles.push_back(VT.isInteger() ? DAG.getConstant(0, DL, VT)
: DAG.getConstantFP(0.0, DL, VT));
// If we only have one shuffle, we're done.
if (Shuffles.size() == 1)
return Shuffles[0];
// Update the vector mask to point to the post-shuffle vectors.
for (int &Vec : VectorMask)
if (Vec == 0)
Vec = Shuffles.size() - 1;
else
Vec = (Vec - 1) / 2;
// More than one shuffle. Generate a binary tree of blends, e.g. if from
// the previous step we got the set of shuffles t10, t11, t12, t13, we will
// generate:
// t10: v8i32 = vector_shuffle<0,8,u,u,u,u,u,u> t1, t2
// t11: v8i32 = vector_shuffle<u,u,0,8,u,u,u,u> t3, t4
// t12: v8i32 = vector_shuffle<u,u,u,u,0,8,u,u> t5, t6
// t13: v8i32 = vector_shuffle<u,u,u,u,u,u,0,8> t7, t8
// t20: v8i32 = vector_shuffle<0,1,10,11,u,u,u,u> t10, t11
// t21: v8i32 = vector_shuffle<u,u,u,u,4,5,14,15> t12, t13
// t30: v8i32 = vector_shuffle<0,1,2,3,12,13,14,15> t20, t21
// Make sure the initial size of the shuffle list is even.
if (Shuffles.size() % 2)
Shuffles.push_back(DAG.getUNDEF(VT));
for (unsigned CurSize = Shuffles.size(); CurSize > 1; CurSize /= 2) {
if (CurSize % 2) {
Shuffles[CurSize] = DAG.getUNDEF(VT);
CurSize++;
}
for (unsigned In = 0, Len = CurSize / 2; In < Len; ++In) {
int Left = 2 * In;
int Right = 2 * In + 1;
SmallVector<int, 8> Mask(NumElems, -1);
for (unsigned i = 0; i != NumElems; ++i) {
if (VectorMask[i] == Left) {
Mask[i] = i;
VectorMask[i] = In;
} else if (VectorMask[i] == Right) {
Mask[i] = i + NumElems;
VectorMask[i] = In;
}
}
Shuffles[In] =
DAG.getVectorShuffle(VT, DL, Shuffles[Left], Shuffles[Right], Mask);
}
}
return Shuffles[0];
}
// Try to turn a build vector of zero extends of extract vector elts into
// a vector zero extend and possibly an extract subvector.
// TODO: Support sign extend?
// TODO: Allow undef elements?
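// For illustration (hypothetical DAG):
// (v4i32 build_vector (zext (extractelt (v8i16 X), 4)),
// (zext (extractelt X, 5)), (zext (extractelt X, 6)),
// (zext (extractelt X, 7)))
// --> (v4i32 zero_extend (v4i16 extract_subvector X, 4))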
SDValue DAGCombiner::convertBuildVecZextToZext(SDNode *N) {
if (LegalOperations)
return SDValue();
EVT VT = N->getValueType(0);
bool FoundZeroExtend = false;
SDValue Op0 = N->getOperand(0);
auto checkElem = [&](SDValue Op) -> int64_t {
unsigned Opc = Op.getOpcode();
FoundZeroExtend |= (Opc == ISD::ZERO_EXTEND);
if ((Opc == ISD::ZERO_EXTEND || Opc == ISD::ANY_EXTEND) &&
Op.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
Op0.getOperand(0).getOperand(0) == Op.getOperand(0).getOperand(0))
if (auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(0).getOperand(1)))
return C->getZExtValue();
return -1;
};
// Make sure the first element matches
// (zext (extract_vector_elt X, C))
int64_t Offset = checkElem(Op0);
if (Offset < 0)
return SDValue();
unsigned NumElems = N->getNumOperands();
SDValue In = Op0.getOperand(0).getOperand(0);
EVT InSVT = In.getValueType().getScalarType();
EVT InVT = EVT::getVectorVT(*DAG.getContext(), InSVT, NumElems);
// Don't create an illegal input type after type legalization.
if (LegalTypes && !TLI.isTypeLegal(InVT))
return SDValue();
// Ensure all the elements come from the same vector and are adjacent.
for (unsigned i = 1; i != NumElems; ++i) {
if ((Offset + i) != checkElem(N->getOperand(i)))
return SDValue();
}
SDLoc DL(N);
In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InVT, In,
Op0.getOperand(0).getOperand(1));
return DAG.getNode(FoundZeroExtend ? ISD::ZERO_EXTEND : ISD::ANY_EXTEND, DL,
VT, In);
}
SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
EVT VT = N->getValueType(0);
// A vector built entirely of undefs is undef.
if (ISD::allOperandsUndef(N))
return DAG.getUNDEF(VT);
// If this is a splat of a bitcast from another vector, change to a
// concat_vector.
// For example:
// (build_vector (i64 (bitcast (v2i32 X))), (i64 (bitcast (v2i32 X)))) ->
// (v2i64 (bitcast (concat_vectors (v2i32 X), (v2i32 X))))
//
// If X is a build_vector itself, the concat can become a larger build_vector.
// TODO: Maybe this is useful for non-splat too?
if (!LegalOperations) {
if (SDValue Splat = cast<BuildVectorSDNode>(N)->getSplatValue()) {
Splat = peekThroughBitcasts(Splat);
EVT SrcVT = Splat.getValueType();
if (SrcVT.isVector()) {
unsigned NumElts = N->getNumOperands() * SrcVT.getVectorNumElements();
EVT NewVT = EVT::getVectorVT(*DAG.getContext(),
SrcVT.getVectorElementType(), NumElts);
if (!LegalTypes || TLI.isTypeLegal(NewVT)) {
SmallVector<SDValue, 8> Ops(N->getNumOperands(), Splat);
SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N),
NewVT, Ops);
return DAG.getBitcast(VT, Concat);
}
}
}
}
// Check if we can express the BUILD_VECTOR via a subvector extract.
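// For illustration (hypothetical DAG):
// (v2i32 build_vector (extractelt (v4i32 X), 2), (extractelt X, 3))
// --> (v2i32 extract_subvector X, 2)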
if (!LegalTypes && (N->getNumOperands() > 1)) {
SDValue Op0 = N->getOperand(0);
auto checkElem = [&](SDValue Op) -> uint64_t {
if ((Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT) &&
(Op0.getOperand(0) == Op.getOperand(0)))
if (auto CNode = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
return CNode->getZExtValue();
return -1;
};
int Offset = checkElem(Op0);
for (unsigned i = 0; i < N->getNumOperands(); ++i) {
if (Offset + i != checkElem(N->getOperand(i))) {
Offset = -1;
break;
}
}
if ((Offset == 0) &&
(Op0.getOperand(0).getValueType() == N->getValueType(0)))
return Op0.getOperand(0);
if ((Offset != -1) &&
((Offset % N->getValueType(0).getVectorNumElements()) ==
0)) // IDX must be multiple of output size.
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), N->getValueType(0),
Op0.getOperand(0), Op0.getOperand(1));
}
if (SDValue V = convertBuildVecZextToZext(N))
return V;
if (SDValue V = reduceBuildVecExtToExtBuildVec(N))
return V;
if (SDValue V = reduceBuildVecToShuffle(N))
return V;
return SDValue();
}
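// Fold a CONCAT_VECTORS whose operands are all bitcasts from scalars (or
// undef) into a BUILD_VECTOR of those scalars, bitcast to the result type.
// This only fires when the operand vector type is not legal.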
static SDValue combineConcatVectorOfScalars(SDNode *N, SelectionDAG &DAG) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
EVT OpVT = N->getOperand(0).getValueType();
// If the operands are legal vectors, leave them alone.
if (TLI.isTypeLegal(OpVT))
return SDValue();
SDLoc DL(N);
EVT VT = N->getValueType(0);
SmallVector<SDValue, 8> Ops;
EVT SVT = EVT::getIntegerVT(*DAG.getContext(), OpVT.getSizeInBits());
SDValue ScalarUndef = DAG.getNode(ISD::UNDEF, DL, SVT);
// Keep track of what we encounter.
bool AnyInteger = false;
bool AnyFP = false;
for (const SDValue &Op : N->ops()) {
if (ISD::BITCAST == Op.getOpcode() &&
!Op.getOperand(0).getValueType().isVector())
Ops.push_back(Op.getOperand(0));
else if (ISD::UNDEF == Op.getOpcode())
Ops.push_back(ScalarUndef);
else
return SDValue();
// Note whether we encounter an integer or floating point scalar.
// If it's neither, bail out, it could be something weird like x86mmx.
EVT LastOpVT = Ops.back().getValueType();
if (LastOpVT.isFloatingPoint())
AnyFP = true;
else if (LastOpVT.isInteger())
AnyInteger = true;
else
return SDValue();
}
// If any of the operands is a floating point scalar bitcast to a vector,
// use floating point types throughout, and bitcast everything.
// Replace UNDEFs by another scalar UNDEF node, of the final desired type.
if (AnyFP) {
SVT = EVT::getFloatingPointVT(OpVT.getSizeInBits());
ScalarUndef = DAG.getNode(ISD::UNDEF, DL, SVT);
if (AnyInteger) {
for (SDValue &Op : Ops) {
if (Op.getValueType() == SVT)
continue;
if (Op.isUndef())
Op = ScalarUndef;
else
Op = DAG.getBitcast(SVT, Op);
}
}
}
EVT VecVT = EVT::getVectorVT(*DAG.getContext(), SVT,
VT.getSizeInBits() / SVT.getSizeInBits());
return DAG.getBitcast(VT, DAG.getBuildVector(VecVT, DL, Ops));
}
// Check to see if this is a CONCAT_VECTORS of a bunch of EXTRACT_SUBVECTOR
// operations. If so, and if the EXTRACT_SUBVECTOR vector inputs come from at
// most two distinct vectors the same size as the result, attempt to turn this
// into a legal shuffle.
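// For illustration (hypothetical v8i32 vectors X and Y):
// (v8i32 concat_vectors (v4i32 extract_subvector X, 0),
// (v4i32 extract_subvector Y, 0))
// --> (v8i32 vector_shuffle X, Y, <0,1,2,3,8,9,10,11>)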
static SDValue combineConcatVectorOfExtracts(SDNode *N, SelectionDAG &DAG) {
EVT VT = N->getValueType(0);
EVT OpVT = N->getOperand(0).getValueType();
int NumElts = VT.getVectorNumElements();
int NumOpElts = OpVT.getVectorNumElements();
SDValue SV0 = DAG.getUNDEF(VT), SV1 = DAG.getUNDEF(VT);
SmallVector<int, 8> Mask;
for (SDValue Op : N->ops()) {
Op = peekThroughBitcasts(Op);
// UNDEF nodes convert to UNDEF shuffle mask values.
if (Op.isUndef()) {
Mask.append((unsigned)NumOpElts, -1);
continue;
}
if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR)
return SDValue();
// What vector are we extracting the subvector from and at what index?
SDValue ExtVec = Op.getOperand(0);
// We want the EVT of the original extraction to correctly scale the
// extraction index.
EVT ExtVT = ExtVec.getValueType();
ExtVec = peekThroughBitcasts(ExtVec);
// UNDEF nodes convert to UNDEF shuffle mask values.
if (ExtVec.isUndef()) {
Mask.append((unsigned)NumOpElts, -1);
continue;
}
if (!isa<ConstantSDNode>(Op.getOperand(1)))
return SDValue();
int ExtIdx = Op.getConstantOperandVal(1);
// Ensure that we are extracting a subvector from a vector the same
// size as the result.
if (ExtVT.getSizeInBits() != VT.getSizeInBits())
return SDValue();
// Scale the subvector index to account for any bitcast.
int NumExtElts = ExtVT.getVectorNumElements();
if (0 == (NumExtElts % NumElts))
ExtIdx /= (NumExtElts / NumElts);
else if (0 == (NumElts % NumExtElts))
ExtIdx *= (NumElts / NumExtElts);
else
return SDValue();
// At most we can reference 2 inputs in the final shuffle.
if (SV0.isUndef() || SV0 == ExtVec) {
SV0 = ExtVec;
for (int i = 0; i != NumOpElts; ++i)
Mask.push_back(i + ExtIdx);
} else if (SV1.isUndef() || SV1 == ExtVec) {
SV1 = ExtVec;
for (int i = 0; i != NumOpElts; ++i)
Mask.push_back(i + ExtIdx + NumElts);
} else {
return SDValue();
}
}
if (!DAG.getTargetLoweringInfo().isShuffleMaskLegal(Mask, VT))
return SDValue();
return DAG.getVectorShuffle(VT, SDLoc(N), DAG.getBitcast(VT, SV0),
DAG.getBitcast(VT, SV1), Mask);
}
SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
// If we only have one input vector, we don't need to do any concatenation.
if (N->getNumOperands() == 1)
return N->getOperand(0);
// Check if all of the operands are undefs.
EVT VT = N->getValueType(0);
if (ISD::allOperandsUndef(N))
return DAG.getUNDEF(VT);
// Optimize concat_vectors where all but the first of the vectors are undef.
if (std::all_of(std::next(N->op_begin()), N->op_end(), [](const SDValue &Op) {
return Op.isUndef();
})) {
SDValue In = N->getOperand(0);
assert(In.getValueType().isVector() && "Must concat vectors");
SDValue Scalar = peekThroughOneUseBitcasts(In);
// concat_vectors(scalar_to_vector(scalar), undef) ->
// scalar_to_vector(scalar)
if (!LegalOperations && Scalar.getOpcode() == ISD::SCALAR_TO_VECTOR &&
Scalar.hasOneUse()) {
EVT SVT = Scalar.getValueType().getVectorElementType();
if (SVT == Scalar.getOperand(0).getValueType())
Scalar = Scalar.getOperand(0);
}
// concat_vectors(scalar, undef) -> scalar_to_vector(scalar)
if (!Scalar.getValueType().isVector()) {
// If the bitcast type isn't legal, it might be a trunc of a legal type;
// look through the trunc so we can still do the transform:
// concat_vectors(trunc(scalar), undef) -> scalar_to_vector(scalar)
if (Scalar->getOpcode() == ISD::TRUNCATE &&
!TLI.isTypeLegal(Scalar.getValueType()) &&
TLI.isTypeLegal(Scalar->getOperand(0).getValueType()))
Scalar = Scalar->getOperand(0);
EVT SclTy = Scalar.getValueType();
if (!SclTy.isFloatingPoint() && !SclTy.isInteger())
return SDValue();
// Bail out if the vector size is not a multiple of the scalar size.
if (VT.getSizeInBits() % SclTy.getSizeInBits())
return SDValue();
unsigned VNTNumElms = VT.getSizeInBits() / SclTy.getSizeInBits();
if (VNTNumElms < 2)
return SDValue();
EVT NVT = EVT::getVectorVT(*DAG.getContext(), SclTy, VNTNumElms);
if (!TLI.isTypeLegal(NVT) || !TLI.isTypeLegal(Scalar.getValueType()))
return SDValue();
SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), NVT, Scalar);
return DAG.getBitcast(VT, Res);
}
}
// Fold any combination of BUILD_VECTOR or UNDEF nodes into one BUILD_VECTOR.
// We have already tested above for an UNDEF only concatenation.
// fold (concat_vectors (BUILD_VECTOR A, B, ...), (BUILD_VECTOR C, D, ...))
// -> (BUILD_VECTOR A, B, ..., C, D, ...)
auto IsBuildVectorOrUndef = [](const SDValue &Op) {
return ISD::UNDEF == Op.getOpcode() || ISD::BUILD_VECTOR == Op.getOpcode();
};
if (llvm::all_of(N->ops(), IsBuildVectorOrUndef)) {
SmallVector<SDValue, 8> Opnds;
EVT SVT = VT.getScalarType();
EVT MinVT = SVT;
if (!SVT.isFloatingPoint()) {
// If the BUILD_VECTOR nodes are built from integers, they may have different
// operand types. Get the smallest type and truncate all operands to it.
bool FoundMinVT = false;
for (const SDValue &Op : N->ops())
if (ISD::BUILD_VECTOR == Op.getOpcode()) {
EVT OpSVT = Op.getOperand(0).getValueType();
MinVT = (!FoundMinVT || OpSVT.bitsLE(MinVT)) ? OpSVT : MinVT;
FoundMinVT = true;
}
assert(FoundMinVT && "Concat vector type mismatch");
}
for (const SDValue &Op : N->ops()) {
EVT OpVT = Op.getValueType();
unsigned NumElts = OpVT.getVectorNumElements();
if (ISD::UNDEF == Op.getOpcode())
Opnds.append(NumElts, DAG.getUNDEF(MinVT));
if (ISD::BUILD_VECTOR == Op.getOpcode()) {
if (SVT.isFloatingPoint()) {
assert(SVT == OpVT.getScalarType() && "Concat vector type mismatch");
Opnds.append(Op->op_begin(), Op->op_begin() + NumElts);
} else {
for (unsigned i = 0; i != NumElts; ++i)
Opnds.push_back(
DAG.getNode(ISD::TRUNCATE, SDLoc(N), MinVT, Op.getOperand(i)));
}
}
}
assert(VT.getVectorNumElements() == Opnds.size() &&
"Concat vector type mismatch");
return DAG.getBuildVector(VT, SDLoc(N), Opnds);
}
// Fold CONCAT_VECTORS of only bitcast scalars (or undef) to BUILD_VECTOR.
if (SDValue V = combineConcatVectorOfScalars(N, DAG))
return V;
// Fold CONCAT_VECTORS of EXTRACT_SUBVECTOR (or undef) to VECTOR_SHUFFLE.
if (Level < AfterLegalizeVectorOps && TLI.isTypeLegal(VT))
if (SDValue V = combineConcatVectorOfExtracts(N, DAG))
return V;
// Type legalization of vectors and DAG canonicalization of VECTOR_SHUFFLE
// nodes often generate nop CONCAT_VECTORS nodes.
// Scan the CONCAT_VECTORS operands and look for CONCAT operations that
// place the incoming vectors at the exact same location.
SDValue SingleSource = SDValue();
unsigned PartNumElem = N->getOperand(0).getValueType().getVectorNumElements();
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
SDValue Op = N->getOperand(i);
if (Op.isUndef())
continue;
// Check if this is the identity extract:
if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR)
return SDValue();
// Find the single incoming vector for the extract_subvector.
if (SingleSource.getNode()) {
if (Op.getOperand(0) != SingleSource)
return SDValue();
} else {
SingleSource = Op.getOperand(0);
// Check the source type is the same as the type of the result.
// If not, this concat may extend the vector, so we cannot
// optimize it away.
if (SingleSource.getValueType() != N->getValueType(0))
return SDValue();
}
auto *CS = dyn_cast<ConstantSDNode>(Op.getOperand(1));
// The extract index must be constant.
if (!CS)
return SDValue();
// Check that we are reading from the identity index.
unsigned IdentityIndex = i * PartNumElem;
if (CS->getAPIntValue() != IdentityIndex)
return SDValue();
}
if (SingleSource.getNode())
return SingleSource;
return SDValue();
}
static SDValue narrowInsertExtractVectorBinOp(SDNode *Extract,
SelectionDAG &DAG) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SDValue BinOp = Extract->getOperand(0);
unsigned BinOpcode = BinOp.getOpcode();
if (!TLI.isBinOp(BinOpcode) || BinOp.getNode()->getNumValues() != 1)
return SDValue();
SDValue Bop0 = BinOp.getOperand(0), Bop1 = BinOp.getOperand(1);
SDValue Index = Extract->getOperand(1);
EVT VT = Extract->getValueType(0);
// Helper that peeks through INSERT_SUBVECTOR/CONCAT_VECTORS to find
// if the source subvector is the same type as the one being extracted.
auto GetSubVector = [VT, Index](SDValue V) -> SDValue {
if (V.getOpcode() == ISD::INSERT_SUBVECTOR &&
V.getOperand(1).getValueType() == VT && V.getOperand(2) == Index) {
return V.getOperand(1);
}
auto *IndexC = dyn_cast<ConstantSDNode>(Index);
if (IndexC && V.getOpcode() == ISD::CONCAT_VECTORS &&
V.getOperand(0).getValueType() == VT &&
(IndexC->getZExtValue() % VT.getVectorNumElements()) == 0) {
uint64_t SubIdx = IndexC->getZExtValue() / VT.getVectorNumElements();
return V.getOperand(SubIdx);
}
return SDValue();
};
SDValue Sub0 = GetSubVector(Bop0);
SDValue Sub1 = GetSubVector(Bop1);
// TODO: We could handle the case where only 1 operand is being inserted by
// creating an extract of the other operand, but that requires checking
// number of uses and/or costs.
if (!Sub0 || !Sub1 || !TLI.isOperationLegalOrCustom(BinOpcode, VT))
return SDValue();
// We are inserting both operands of the wide binop only to extract back
// to the narrow vector size. Eliminate all of the insert/extract:
// ext (binop (ins ?, X, Index), (ins ?, Y, Index)), Index --> binop X, Y
return DAG.getNode(BinOpcode, SDLoc(Extract), VT, Sub0, Sub1,
BinOp->getFlags());
}
/// If we are extracting a subvector produced by a wide binary operator, try
/// to use a narrow binary operator and/or avoid concatenation and extraction.
static SDValue narrowExtractedVectorBinOp(SDNode *Extract, SelectionDAG &DAG) {
// TODO: Refactor with the caller (visitEXTRACT_SUBVECTOR), so we can share
// some of these bailouts with other transforms.
if (SDValue V = narrowInsertExtractVectorBinOp(Extract, DAG))
return V;
// The extract index must be a constant, so we can map it to a concat operand.
auto *ExtractIndexC = dyn_cast<ConstantSDNode>(Extract->getOperand(1));
if (!ExtractIndexC)
return SDValue();
// We are looking for an optionally bitcasted wide vector binary operator
// feeding an extract subvector.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SDValue BinOp = peekThroughBitcasts(Extract->getOperand(0));
unsigned BOpcode = BinOp.getOpcode();
if (!TLI.isBinOp(BOpcode) || BinOp.getNode()->getNumValues() != 1)
return SDValue();
// The binop must be a vector type, so we can extract some fraction of it.
EVT WideBVT = BinOp.getValueType();
if (!WideBVT.isVector())
return SDValue();
EVT VT = Extract->getValueType(0);
unsigned ExtractIndex = ExtractIndexC->getZExtValue();
assert(ExtractIndex % VT.getVectorNumElements() == 0 &&
"Extract index is not a multiple of the vector length.");
// Bail out if this is not a proper multiple width extraction.
unsigned WideWidth = WideBVT.getSizeInBits();
unsigned NarrowWidth = VT.getSizeInBits();
if (WideWidth % NarrowWidth != 0)
return SDValue();
// Bail out if we are extracting a fraction of a single operation. This can
// occur because we potentially looked through a bitcast of the binop.
unsigned NarrowingRatio = WideWidth / NarrowWidth;
unsigned WideNumElts = WideBVT.getVectorNumElements();
if (WideNumElts % NarrowingRatio != 0)
return SDValue();
// Bail out if the target does not support a narrower version of the binop.
EVT NarrowBVT = EVT::getVectorVT(*DAG.getContext(), WideBVT.getScalarType(),
WideNumElts / NarrowingRatio);
if (!TLI.isOperationLegalOrCustomOrPromote(BOpcode, NarrowBVT))
return SDValue();
// If extraction is cheap, we don't need to look at the binop operands
// for concat ops. The narrow binop alone makes this transform profitable.
// We can't just reuse the original extract index operand because we may have
// bitcasted.
unsigned ConcatOpNum = ExtractIndex / VT.getVectorNumElements();
unsigned ExtBOIdx = ConcatOpNum * NarrowBVT.getVectorNumElements();
EVT ExtBOIdxVT = Extract->getOperand(1).getValueType();
if (TLI.isExtractSubvectorCheap(NarrowBVT, WideBVT, ExtBOIdx) &&
BinOp.hasOneUse() && Extract->getOperand(0)->hasOneUse()) {
// extract (binop B0, B1), N --> binop (extract B0, N), (extract B1, N)
SDLoc DL(Extract);
SDValue NewExtIndex = DAG.getConstant(ExtBOIdx, DL, ExtBOIdxVT);
SDValue X = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, NarrowBVT,
BinOp.getOperand(0), NewExtIndex);
SDValue Y = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, NarrowBVT,
BinOp.getOperand(1), NewExtIndex);
SDValue NarrowBinOp = DAG.getNode(BOpcode, DL, NarrowBVT, X, Y,
BinOp.getNode()->getFlags());
return DAG.getBitcast(VT, NarrowBinOp);
}
// Only handle the case where we are doubling and then halving. A larger ratio
// may require more than two narrow binops to replace the wide binop.
if (NarrowingRatio != 2)
return SDValue();
// TODO: The motivating case for this transform is an x86 AVX1 target. That
// target has temptingly almost legal versions of bitwise logic ops in 256-bit
// flavors, but no other 256-bit integer support. This could be extended to
// handle any binop, but that may require fixing/adding other folds to avoid
// codegen regressions.
if (BOpcode != ISD::AND && BOpcode != ISD::OR && BOpcode != ISD::XOR)
return SDValue();
// We need at least one concatenation operation of a binop operand to make
// this transform worthwhile. The concat must double the input vector sizes.
auto GetSubVector = [ConcatOpNum](SDValue V) -> SDValue {
if (V.getOpcode() == ISD::CONCAT_VECTORS && V.getNumOperands() == 2)
return V.getOperand(ConcatOpNum);
return SDValue();
};
SDValue SubVecL = GetSubVector(peekThroughBitcasts(BinOp.getOperand(0)));
SDValue SubVecR = GetSubVector(peekThroughBitcasts(BinOp.getOperand(1)));
if (SubVecL || SubVecR) {
// If a binop operand was not the result of a concat, we must extract a
// half-sized operand for our new narrow binop:
// extract (binop (concat X1, X2), (concat Y1, Y2)), N --> binop XN, YN
// extract (binop (concat X1, X2), Y), N --> binop XN, (extract Y, IndexC)
// extract (binop X, (concat Y1, Y2)), N --> binop (extract X, IndexC), YN
SDLoc DL(Extract);
SDValue IndexC = DAG.getConstant(ExtBOIdx, DL, ExtBOIdxVT);
SDValue X = SubVecL ? DAG.getBitcast(NarrowBVT, SubVecL)
: DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, NarrowBVT,
BinOp.getOperand(0), IndexC);
SDValue Y = SubVecR ? DAG.getBitcast(NarrowBVT, SubVecR)
: DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, NarrowBVT,
BinOp.getOperand(1), IndexC);
SDValue NarrowBinOp = DAG.getNode(BOpcode, DL, NarrowBVT, X, Y);
return DAG.getBitcast(VT, NarrowBinOp);
}
return SDValue();
}
/// If we are extracting a subvector from a wide vector load, convert to a
/// narrow load to eliminate the extraction:
/// (extract_subvector (load wide vector)) --> (load narrow vector)
static SDValue narrowExtractedVectorLoad(SDNode *Extract, SelectionDAG &DAG) {
// TODO: Add support for big-endian. The offset calculation must be adjusted.
if (DAG.getDataLayout().isBigEndian())
return SDValue();
auto *Ld = dyn_cast<LoadSDNode>(Extract->getOperand(0));
auto *ExtIdx = dyn_cast<ConstantSDNode>(Extract->getOperand(1));
if (!Ld || Ld->getExtensionType() || Ld->isVolatile() || !ExtIdx)
return SDValue();
// Allow targets to opt-out.
EVT VT = Extract->getValueType(0);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (!TLI.shouldReduceLoadWidth(Ld, Ld->getExtensionType(), VT))
return SDValue();
// The narrow load will be offset from the base address of the old load if
// we are extracting from something besides index 0 (little-endian).
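// For example (hypothetical types), extracting the upper v2f64 half (index 2)
// of a v4f64 load advances the base address by 2 * 8 = 16 bytes.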
SDLoc DL(Extract);
SDValue BaseAddr = Ld->getOperand(1);
unsigned Offset = ExtIdx->getZExtValue() * VT.getScalarType().getStoreSize();
// TODO: Use "BaseIndexOffset" to make this more effective.
SDValue NewAddr = DAG.getMemBasePlusOffset(BaseAddr, Offset, DL);
MachineFunction &MF = DAG.getMachineFunction();
MachineMemOperand *MMO = MF.getMachineMemOperand(Ld->getMemOperand(), Offset,
VT.getStoreSize());
SDValue NewLd = DAG.getLoad(VT, DL, Ld->getChain(), NewAddr, MMO);
DAG.makeEquivalentMemoryOrdering(Ld, NewLd);
return NewLd;
}
SDValue DAGCombiner::visitEXTRACT_SUBVECTOR(SDNode *N) {
EVT NVT = N->getValueType(0);
SDValue V = N->getOperand(0);
// Extract from UNDEF is UNDEF.
if (V.isUndef())
return DAG.getUNDEF(NVT);
if (TLI.isOperationLegalOrCustomOrPromote(ISD::LOAD, NVT))
if (SDValue NarrowLoad = narrowExtractedVectorLoad(N, DAG))
return NarrowLoad;
// Combine an extract of an extract into a single extract_subvector.
// ext (ext X, C), 0 --> ext X, C
SDValue Index = N->getOperand(1);
if (isNullConstant(Index) && V.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
V.hasOneUse() && isa<ConstantSDNode>(V.getOperand(1))) {
if (TLI.isExtractSubvectorCheap(NVT, V.getOperand(0).getValueType(),
V.getConstantOperandVal(1)) &&
TLI.isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, NVT)) {
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), NVT, V.getOperand(0),
V.getOperand(1));
}
}
// Try to move vector bitcast after extract_subv by scaling extraction index:
// extract_subv (bitcast X), Index --> bitcast (extract_subv X, Index')
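// For illustration (hypothetical DAG):
// (v4i32 extract_subvector (v8i32 bitcast (v16i16 X)), 4)
// --> (v4i32 bitcast (v8i16 extract_subvector X, 8))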
if (isa<ConstantSDNode>(Index) && V.getOpcode() == ISD::BITCAST &&
V.getOperand(0).getValueType().isVector()) {
SDValue SrcOp = V.getOperand(0);
EVT SrcVT = SrcOp.getValueType();
unsigned SrcNumElts = SrcVT.getVectorNumElements();
unsigned DestNumElts = V.getValueType().getVectorNumElements();
if ((SrcNumElts % DestNumElts) == 0) {
unsigned SrcDestRatio = SrcNumElts / DestNumElts;
unsigned NewExtNumElts = NVT.getVectorNumElements() * SrcDestRatio;
EVT NewExtVT = EVT::getVectorVT(*DAG.getContext(), SrcVT.getScalarType(),
NewExtNumElts);
if (TLI.isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, NewExtVT)) {
unsigned IndexValScaled = N->getConstantOperandVal(1) * SrcDestRatio;
SDLoc DL(N);
SDValue NewIndex = DAG.getIntPtrConstant(IndexValScaled, DL);
SDValue NewExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, NewExtVT,
V.getOperand(0), NewIndex);
return DAG.getBitcast(NVT, NewExtract);
}
}
// TODO - handle (DestNumElts % SrcNumElts) == 0
}
// Combine:
// (extract_subvec (concat V1, V2, ...), i)
// Into:
// Vi if possible
// Only operand 0 is checked, as 'concat' assumes all inputs are of the same
// type.
if (V.getOpcode() == ISD::CONCAT_VECTORS && isa<ConstantSDNode>(Index) &&
V.getOperand(0).getValueType() == NVT) {
unsigned Idx = N->getConstantOperandVal(1);
unsigned NumElems = NVT.getVectorNumElements();
assert((Idx % NumElems) == 0 &&
"IDX in concat is not a multiple of the result vector length.");
return V->getOperand(Idx / NumElems);
}
V = peekThroughBitcasts(V);
// If the input is a build vector, try to make a smaller build vector.
if (V.getOpcode() == ISD::BUILD_VECTOR) {
if (auto *IdxC = dyn_cast<ConstantSDNode>(Index)) {
EVT InVT = V.getValueType();
unsigned ExtractSize = NVT.getSizeInBits();
unsigned EltSize = InVT.getScalarSizeInBits();
// Only do this if we won't split any elements.
if (ExtractSize % EltSize == 0) {
unsigned NumElems = ExtractSize / EltSize;
EVT EltVT = InVT.getVectorElementType();
EVT ExtractVT = NumElems == 1 ? EltVT
: EVT::getVectorVT(*DAG.getContext(),
EltVT, NumElems);
if ((Level < AfterLegalizeDAG ||
(NumElems == 1 ||
TLI.isOperationLegal(ISD::BUILD_VECTOR, ExtractVT))) &&
(!LegalTypes || TLI.isTypeLegal(ExtractVT))) {
unsigned IdxVal = IdxC->getZExtValue();
IdxVal *= NVT.getScalarSizeInBits();
IdxVal /= EltSize;
if (NumElems == 1) {
SDValue Src = V->getOperand(IdxVal);
if (EltVT != Src.getValueType())
Src = DAG.getNode(ISD::TRUNCATE, SDLoc(N), EltVT, Src);
return DAG.getBitcast(NVT, Src);
}
// Extract the pieces from the original build_vector.
SDValue BuildVec = DAG.getBuildVector(
ExtractVT, SDLoc(N), V->ops().slice(IdxVal, NumElems));
return DAG.getBitcast(NVT, BuildVec);
}
}
}
}
if (V.getOpcode() == ISD::INSERT_SUBVECTOR) {
// Handle only simple case where vector being inserted and vector
// being extracted are of same size.
EVT SmallVT = V.getOperand(1).getValueType();
if (!NVT.bitsEq(SmallVT))
return SDValue();
// Only handle cases where both indexes are constants.
auto *ExtIdx = dyn_cast<ConstantSDNode>(Index);
auto *InsIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
if (InsIdx && ExtIdx) {
// Combine:
// (extract_subvec (insert_subvec V1, V2, InsIdx), ExtIdx)
// Into:
// indices are equal or bit offsets are equal => V1
// otherwise => (extract_subvec V1, ExtIdx)
if (InsIdx->getZExtValue() * SmallVT.getScalarSizeInBits() ==
ExtIdx->getZExtValue() * NVT.getScalarSizeInBits())
return DAG.getBitcast(NVT, V.getOperand(1));
return DAG.getNode(
ISD::EXTRACT_SUBVECTOR, SDLoc(N), NVT,
DAG.getBitcast(N->getOperand(0).getValueType(), V.getOperand(0)),
Index);
}
}
if (SDValue NarrowBOp = narrowExtractedVectorBinOp(N, DAG))
return NarrowBOp;
if (SimplifyDemandedVectorElts(SDValue(N, 0)))
return SDValue(N, 0);
return SDValue();
}
/// Try to convert a wide shuffle of concatenated vectors into 2 narrow shuffles
/// followed by concatenation. Narrow vector ops may have better performance
/// than wide ops, and this can unlock further narrowing of other vector ops.
/// Targets can invert this transform later if it is not profitable.
static SDValue foldShuffleOfConcatUndefs(ShuffleVectorSDNode *Shuf,
SelectionDAG &DAG) {
SDValue N0 = Shuf->getOperand(0), N1 = Shuf->getOperand(1);
if (N0.getOpcode() != ISD::CONCAT_VECTORS || N0.getNumOperands() != 2 ||
N1.getOpcode() != ISD::CONCAT_VECTORS || N1.getNumOperands() != 2 ||
!N0.getOperand(1).isUndef() || !N1.getOperand(1).isUndef())
return SDValue();
// Split the wide shuffle mask into halves. Any mask element that is accessing
// operand 1 is offset down to account for narrowing of the vectors.
ArrayRef<int> Mask = Shuf->getMask();
EVT VT = Shuf->getValueType(0);
unsigned NumElts = VT.getVectorNumElements();
unsigned HalfNumElts = NumElts / 2;
SmallVector<int, 16> Mask0(HalfNumElts, -1);
SmallVector<int, 16> Mask1(HalfNumElts, -1);
for (unsigned i = 0; i != NumElts; ++i) {
if (Mask[i] == -1)
continue;
int M = Mask[i] < (int)NumElts ? Mask[i] : Mask[i] - (int)HalfNumElts;
if (i < HalfNumElts)
Mask0[i] = M;
else
Mask1[i - HalfNumElts] = M;
}
// Ask the target if this is a valid transform.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(),
HalfNumElts);
if (!TLI.isShuffleMaskLegal(Mask0, HalfVT) ||
!TLI.isShuffleMaskLegal(Mask1, HalfVT))
return SDValue();
// shuffle (concat X, undef), (concat Y, undef), Mask -->
// concat (shuffle X, Y, Mask0), (shuffle X, Y, Mask1)
SDValue X = N0.getOperand(0), Y = N1.getOperand(0);
SDLoc DL(Shuf);
SDValue Shuf0 = DAG.getVectorShuffle(HalfVT, DL, X, Y, Mask0);
SDValue Shuf1 = DAG.getVectorShuffle(HalfVT, DL, X, Y, Mask1);
return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Shuf0, Shuf1);
}
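// For instance, an interleave of the two low halves (v4i32 wide, v2i32 narrow):
//   shuffle (concat X, undef), (concat Y, undef), <0,4,1,5>
//   --> concat (shuffle X, Y, <0,2>), (shuffle X, Y, <1,3>)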
// Tries to turn a shuffle of two CONCAT_VECTORS into a single concat,
// or turn a shuffle of a single concat into a simpler shuffle followed by a concat.
static SDValue partitionShuffleOfConcats(SDNode *N, SelectionDAG &DAG) {
EVT VT = N->getValueType(0);
unsigned NumElts = VT.getVectorNumElements();
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
ArrayRef<int> Mask = SVN->getMask();
SmallVector<SDValue, 4> Ops;
EVT ConcatVT = N0.getOperand(0).getValueType();
unsigned NumElemsPerConcat = ConcatVT.getVectorNumElements();
unsigned NumConcats = NumElts / NumElemsPerConcat;
auto IsUndefMaskElt = [](int i) { return i == -1; };
// Special case: shuffle(concat(A,B)) can be more efficiently represented
// as concat(shuffle(A,B),UNDEF) if the shuffle doesn't set any of the high
// half vector elements.
if (NumElemsPerConcat * 2 == NumElts && N1.isUndef() &&
llvm::all_of(Mask.slice(NumElemsPerConcat, NumElemsPerConcat),
IsUndefMaskElt)) {
N0 = DAG.getVectorShuffle(ConcatVT, SDLoc(N), N0.getOperand(0),
N0.getOperand(1),
Mask.slice(0, NumElemsPerConcat));
N1 = DAG.getUNDEF(ConcatVT);
return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, N0, N1);
}
// Look at every vector that's inserted. We're looking for exact
// subvector-sized copies from a concatenated vector.
for (unsigned I = 0; I != NumConcats; ++I) {
unsigned Begin = I * NumElemsPerConcat;
ArrayRef<int> SubMask = Mask.slice(Begin, NumElemsPerConcat);
// Make sure we're dealing with a copy.
if (llvm::all_of(SubMask, IsUndefMaskElt)) {
Ops.push_back(DAG.getUNDEF(ConcatVT));
continue;
}
int OpIdx = -1;
for (int i = 0; i != (int)NumElemsPerConcat; ++i) {
if (IsUndefMaskElt(SubMask[i]))
continue;
if ((SubMask[i] % (int)NumElemsPerConcat) != i)
return SDValue();
int EltOpIdx = SubMask[i] / NumElemsPerConcat;
if (0 <= OpIdx && EltOpIdx != OpIdx)
return SDValue();
OpIdx = EltOpIdx;
}
assert(0 <= OpIdx && "Unknown concat_vectors op");
if (OpIdx < (int)N0.getNumOperands())
Ops.push_back(N0.getOperand(OpIdx));
else
Ops.push_back(N1.getOperand(OpIdx - N0.getNumOperands()));
}
return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Ops);
}
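// For instance, with N0 = concat(A, B) and N1 = concat(C, D), all v2 pieces:
//   shuffle (concat A, B), (concat C, D), <2,3,4,5> --> concat B, C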
// Attempt to combine a shuffle of 2 inputs of 'scalar sources' -
// BUILD_VECTOR or SCALAR_TO_VECTOR into a single BUILD_VECTOR.
//
// SHUFFLE(BUILD_VECTOR(), BUILD_VECTOR()) -> BUILD_VECTOR() is always
// a simplification in some sense, but it isn't appropriate in general: some
// BUILD_VECTORs are substantially cheaper than others. The general case
// of a BUILD_VECTOR requires inserting each element individually (or
// performing the equivalent in a temporary stack variable). A BUILD_VECTOR of
// all constants is a single constant pool load. A BUILD_VECTOR where each
// element is identical is a splat. A BUILD_VECTOR where most of the operands
// are undef lowers to a small number of element insertions.
//
// To deal with this, we currently use a bunch of mostly arbitrary heuristics.
// We don't fold shuffles where one side is a non-zero constant, and we don't
// fold shuffles if the resulting (non-splat) BUILD_VECTOR would have duplicate
// non-constant operands. This seems to work out reasonably well in practice.
static SDValue combineShuffleOfScalars(ShuffleVectorSDNode *SVN,
SelectionDAG &DAG,
const TargetLowering &TLI) {
EVT VT = SVN->getValueType(0);
unsigned NumElts = VT.getVectorNumElements();
SDValue N0 = SVN->getOperand(0);
SDValue N1 = SVN->getOperand(1);
if (!N0->hasOneUse())
return SDValue();
// If only one of N0,N1 is constant, bail out if it is not ALL_ZEROS, as
// discussed above.
if (!N1.isUndef()) {
if (!N1->hasOneUse())
return SDValue();
bool N0AnyConst = isAnyConstantBuildVector(N0);
bool N1AnyConst = isAnyConstantBuildVector(N1);
if (N0AnyConst && !N1AnyConst && !ISD::isBuildVectorAllZeros(N0.getNode()))
return SDValue();
if (!N0AnyConst && N1AnyConst && !ISD::isBuildVectorAllZeros(N1.getNode()))
return SDValue();
}
// If both inputs are splats of the same value then we can safely merge this
// to a single BUILD_VECTOR with undef elements based on the shuffle mask.
bool IsSplat = false;
auto *BV0 = dyn_cast<BuildVectorSDNode>(N0);
auto *BV1 = dyn_cast<BuildVectorSDNode>(N1);
if (BV0 && BV1)
if (SDValue Splat0 = BV0->getSplatValue())
IsSplat = (Splat0 == BV1->getSplatValue());
SmallVector<SDValue, 8> Ops;
SmallSet<SDValue, 16> DuplicateOps;
for (int M : SVN->getMask()) {
SDValue Op = DAG.getUNDEF(VT.getScalarType());
if (M >= 0) {
int Idx = M < (int)NumElts ? M : M - NumElts;
SDValue &S = (M < (int)NumElts ? N0 : N1);
if (S.getOpcode() == ISD::BUILD_VECTOR) {
Op = S.getOperand(Idx);
} else if (S.getOpcode() == ISD::SCALAR_TO_VECTOR) {
SDValue Op0 = S.getOperand(0);
Op = Idx == 0 ? Op0 : DAG.getUNDEF(Op0.getValueType());
} else {
// Operand can't be combined - bail out.
return SDValue();
}
}
// Don't duplicate a non-constant BUILD_VECTOR operand unless we're
// generating a splat; semantically, this is fine, but it's likely to
// generate low-quality code if the target can't reconstruct an appropriate
// shuffle.
if (!Op.isUndef() && !isa<ConstantSDNode>(Op) && !isa<ConstantFPSDNode>(Op))
if (!IsSplat && !DuplicateOps.insert(Op).second)
return SDValue();
Ops.push_back(Op);
}
// BUILD_VECTOR requires all inputs to be of the same type, find the
// maximum type and extend them all.
EVT SVT = VT.getScalarType();
if (SVT.isInteger())
for (SDValue &Op : Ops)
SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
if (SVT != VT.getScalarType())
for (SDValue &Op : Ops)
Op = TLI.isZExtFree(Op.getValueType(), SVT)
? DAG.getZExtOrTrunc(Op, SDLoc(SVN), SVT)
: DAG.getSExtOrTrunc(Op, SDLoc(SVN), SVT);
return DAG.getBuildVector(VT, SDLoc(SVN), Ops);
}
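// For instance, with scalar operands a..h:
//   shuffle (build_vector a, b, c, d), (build_vector e, f, g, h), <0,5,2,7>
//   --> build_vector a, f, c, h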
// Match shuffles that can be converted to any_vector_extend_in_reg.
// This is often generated during legalization.
// e.g. v4i32 <0,u,1,u> -> (v2i64 any_vector_extend_in_reg(v4i32 src))
// TODO Add support for ZERO_EXTEND_VECTOR_INREG when we have a test case.
static SDValue combineShuffleToVectorExtend(ShuffleVectorSDNode *SVN,
SelectionDAG &DAG,
const TargetLowering &TLI,
bool LegalOperations) {
EVT VT = SVN->getValueType(0);
bool IsBigEndian = DAG.getDataLayout().isBigEndian();
// TODO Add support for big-endian when we have a test case.
if (!VT.isInteger() || IsBigEndian)
return SDValue();
unsigned NumElts = VT.getVectorNumElements();
unsigned EltSizeInBits = VT.getScalarSizeInBits();
ArrayRef<int> Mask = SVN->getMask();
SDValue N0 = SVN->getOperand(0);
// shuffle<0,-1,1,-1> == (v2i64 anyextend_vector_inreg(v4i32))
auto isAnyExtend = [&Mask, &NumElts](unsigned Scale) {
for (unsigned i = 0; i != NumElts; ++i) {
if (Mask[i] < 0)
continue;
if ((i % Scale) == 0 && Mask[i] == (int)(i / Scale))
continue;
return false;
}
return true;
};
// Attempt to match a '*_extend_vector_inreg' shuffle; we just search for
// power-of-2 extensions, as they are the most likely.
for (unsigned Scale = 2; Scale < NumElts; Scale *= 2) {
// Check for non-power-of-2 vector sizes.
if (NumElts % Scale != 0)
continue;
if (!isAnyExtend(Scale))
continue;
EVT OutSVT = EVT::getIntegerVT(*DAG.getContext(), EltSizeInBits * Scale);
EVT OutVT = EVT::getVectorVT(*DAG.getContext(), OutSVT, NumElts / Scale);
// Never create an illegal type. Only create unsupported operations if we
// are pre-legalization.
if (TLI.isTypeLegal(OutVT))
if (!LegalOperations ||
TLI.isOperationLegalOrCustom(ISD::ANY_EXTEND_VECTOR_INREG, OutVT))
return DAG.getBitcast(VT,
DAG.getNode(ISD::ANY_EXTEND_VECTOR_INREG,
SDLoc(SVN), OutVT, N0));
}
return SDValue();
}
// Detect 'truncate_vector_inreg' style shuffles that pack the lower parts of
// each source element of a large type into the lowest elements of a smaller
// destination type. This is often generated during legalization.
// If the source node itself was a '*_extend_vector_inreg' node then we should
// then be able to remove it.
static SDValue combineTruncationShuffle(ShuffleVectorSDNode *SVN,
SelectionDAG &DAG) {
EVT VT = SVN->getValueType(0);
bool IsBigEndian = DAG.getDataLayout().isBigEndian();
// TODO Add support for big-endian when we have a test case.
if (!VT.isInteger() || IsBigEndian)
return SDValue();
SDValue N0 = peekThroughBitcasts(SVN->getOperand(0));
unsigned Opcode = N0.getOpcode();
if (Opcode != ISD::ANY_EXTEND_VECTOR_INREG &&
Opcode != ISD::SIGN_EXTEND_VECTOR_INREG &&
Opcode != ISD::ZERO_EXTEND_VECTOR_INREG)
return SDValue();
SDValue N00 = N0.getOperand(0);
ArrayRef<int> Mask = SVN->getMask();
unsigned NumElts = VT.getVectorNumElements();
unsigned EltSizeInBits = VT.getScalarSizeInBits();
unsigned ExtSrcSizeInBits = N00.getScalarValueSizeInBits();
unsigned ExtDstSizeInBits = N0.getScalarValueSizeInBits();
if (ExtDstSizeInBits % ExtSrcSizeInBits != 0)
return SDValue();
unsigned ExtScale = ExtDstSizeInBits / ExtSrcSizeInBits;
// (v4i32 truncate_vector_inreg(v2i64)) == shuffle<0,2,-1,-1>
// (v8i16 truncate_vector_inreg(v4i32)) == shuffle<0,2,4,6,-1,-1,-1,-1>
// (v8i16 truncate_vector_inreg(v2i64)) == shuffle<0,4,-1,-1,-1,-1,-1,-1>
auto isTruncate = [&Mask, &NumElts](unsigned Scale) {
for (unsigned i = 0; i != NumElts; ++i) {
if (Mask[i] < 0)
continue;
if ((i * Scale) < NumElts && Mask[i] == (int)(i * Scale))
continue;
return false;
}
return true;
};
// At the moment we just handle the case where we've truncated back to the
// same size as before the extension.
// TODO: handle more extension/truncation cases as cases arise.
if (EltSizeInBits != ExtSrcSizeInBits)
return SDValue();
// We can remove *extend_vector_inreg only if the truncation happens at
// the same scale as the extension.
if (isTruncate(ExtScale))
return DAG.getBitcast(VT, N00);
return SDValue();
}
// Combine shuffles of splat-shuffles of the form:
// shuffle (shuffle V, undef, splat-mask), undef, M
// If splat-mask contains undef elements, we need to be careful about
// introducing undef's in the folded mask which are not the result of composing
// the masks of the shuffles.
static SDValue combineShuffleOfSplatVal(ShuffleVectorSDNode *Shuf,
SelectionDAG &DAG) {
if (!Shuf->getOperand(1).isUndef())
return SDValue();
auto *Splat = dyn_cast<ShuffleVectorSDNode>(Shuf->getOperand(0));
if (!Splat || !Splat->isSplat())
return SDValue();
ArrayRef<int> ShufMask = Shuf->getMask();
ArrayRef<int> SplatMask = Splat->getMask();
assert(ShufMask.size() == SplatMask.size() && "Mask length mismatch");
// Prefer simplifying to the splat-shuffle, if possible. This is legal if
// every undef mask element in the splat-shuffle has a corresponding undef
// element in the user-shuffle's mask or if the composition of mask elements
// would result in undef.
// Examples for (shuffle (shuffle v, undef, SplatMask), undef, UserMask):
// * UserMask=[0,2,u,u], SplatMask=[2,u,2,u] -> [2,2,u,u]
// In this case it is not legal to simplify to the splat-shuffle because we
// may be exposing to the users of the shuffle an undef element at index 1
// which was not there before the combine.
// * UserMask=[0,u,2,u], SplatMask=[2,u,2,u] -> [2,u,2,u]
// In this case the composition of masks yields SplatMask, so it's ok to
// simplify to the splat-shuffle.
// * UserMask=[3,u,2,u], SplatMask=[2,u,2,u] -> [u,u,2,u]
// In this case the composed mask includes all undef elements of SplatMask
// and in addition sets element zero to undef. It is safe to simplify to
// the splat-shuffle.
auto CanSimplifyToExistingSplat = [](ArrayRef<int> UserMask,
ArrayRef<int> SplatMask) {
for (unsigned i = 0, e = UserMask.size(); i != e; ++i)
if (UserMask[i] != -1 && SplatMask[i] == -1 &&
SplatMask[UserMask[i]] != -1)
return false;
return true;
};
if (CanSimplifyToExistingSplat(ShufMask, SplatMask))
return Shuf->getOperand(0);
// Create a new shuffle with a mask that is composed of the two shuffles'
// masks.
SmallVector<int, 32> NewMask;
for (int Idx : ShufMask)
NewMask.push_back(Idx == -1 ? -1 : SplatMask[Idx]);
return DAG.getVectorShuffle(Splat->getValueType(0), SDLoc(Splat),
Splat->getOperand(0), Splat->getOperand(1),
NewMask);
}
/// If the shuffle mask is taking exactly one element from the first vector
/// operand and passing through all other elements from the second vector
/// operand, return the index of the mask element that is choosing an element
/// from the first operand. Otherwise, return -1.
static int getShuffleMaskIndexOfOneElementFromOp0IntoOp1(ArrayRef<int> Mask) {
int MaskSize = Mask.size();
int EltFromOp0 = -1;
// TODO: This does not match if there are undef elements in the shuffle mask.
// Should we ignore undefs in the shuffle mask instead? The trade-off is
// removing an instruction (a shuffle), but losing the knowledge that some
// vector lanes are not needed.
for (int i = 0; i != MaskSize; ++i) {
if (Mask[i] >= 0 && Mask[i] < MaskSize) {
// We're looking for a shuffle of exactly one element from operand 0.
if (EltFromOp0 != -1)
return -1;
EltFromOp0 = i;
} else if (Mask[i] != i + MaskSize) {
// Nothing from operand 1 can change lanes.
return -1;
}
}
return EltFromOp0;
}
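// For instance, with 4-element operands, Mask = <4,5,0,7> returns 2: only
// mask element 2 chooses from operand 0 (its element 0), and every other
// lane passes the corresponding element of operand 1 through unchanged.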
/// If a shuffle inserts exactly one element from a source vector operand into
/// another vector operand and we can access the specified element as a scalar,
/// then we can eliminate the shuffle.
static SDValue replaceShuffleOfInsert(ShuffleVectorSDNode *Shuf,
SelectionDAG &DAG) {
// First, check if we are taking one element of a vector and shuffling that
// element into another vector.
ArrayRef<int> Mask = Shuf->getMask();
SmallVector<int, 16> CommutedMask(Mask.begin(), Mask.end());
SDValue Op0 = Shuf->getOperand(0);
SDValue Op1 = Shuf->getOperand(1);
int ShufOp0Index = getShuffleMaskIndexOfOneElementFromOp0IntoOp1(Mask);
if (ShufOp0Index == -1) {
// Commute mask and check again.
ShuffleVectorSDNode::commuteMask(CommutedMask);
ShufOp0Index = getShuffleMaskIndexOfOneElementFromOp0IntoOp1(CommutedMask);
if (ShufOp0Index == -1)
return SDValue();
// Commute operands to match the commuted shuffle mask.
std::swap(Op0, Op1);
Mask = CommutedMask;
}
// The shuffle inserts exactly one element from operand 0 into operand 1.
// Now see if we can access that element as a scalar via a real insert element
// instruction.
// TODO: We can try harder to locate the element as a scalar. Examples: it
// could be an operand of SCALAR_TO_VECTOR, BUILD_VECTOR, or a constant.
assert(Mask[ShufOp0Index] >= 0 && Mask[ShufOp0Index] < (int)Mask.size() &&
"Shuffle mask value must be from operand 0");
if (Op0.getOpcode() != ISD::INSERT_VECTOR_ELT)
return SDValue();
auto *InsIndexC = dyn_cast<ConstantSDNode>(Op0.getOperand(2));
if (!InsIndexC || InsIndexC->getSExtValue() != Mask[ShufOp0Index])
return SDValue();
// There's an existing insertelement with constant insertion index, so we
// don't need to check the legality/profitability of a replacement operation
// that differs at most in the constant value. The target should be able to
// lower any of those in a similar way. If not, legalization will expand this
// to a scalar-to-vector plus shuffle.
//
// Note that the shuffle may move the scalar from the position that the insert
// element used. Therefore, our new insert element occurs at the shuffle's
// mask index value, not the insert's index value.
// shuffle (insertelt v1, x, C), v2, mask --> insertelt v2, x, C'
SDValue NewInsIndex = DAG.getConstant(ShufOp0Index, SDLoc(Shuf),
Op0.getOperand(2).getValueType());
return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(Shuf), Op0.getValueType(),
Op1, Op0.getOperand(1), NewInsIndex);
}
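// For instance, continuing the <4,5,0,7> mask example above:
//   shuffle (insertelt V1, X, 0), V2, <4,5,0,7> --> insertelt V2, X, 2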
/// If we have a unary shuffle of a shuffle, see if it can be folded away
/// completely. This has the potential to lose undef knowledge because the first
/// shuffle may not have an undef mask element where the second one does. So
/// only call this after doing simplifications based on demanded elements.
static SDValue simplifyShuffleOfShuffle(ShuffleVectorSDNode *Shuf) {
// shuf (shuf0 X, Y, Mask0), undef, Mask
auto *Shuf0 = dyn_cast<ShuffleVectorSDNode>(Shuf->getOperand(0));
if (!Shuf0 || !Shuf->getOperand(1).isUndef())
return SDValue();
ArrayRef<int> Mask = Shuf->getMask();
ArrayRef<int> Mask0 = Shuf0->getMask();
for (int i = 0, e = (int)Mask.size(); i != e; ++i) {
// Ignore undef elements.
if (Mask[i] == -1)
continue;
assert(Mask[i] >= 0 && Mask[i] < e && "Unexpected shuffle mask value");
// Is the element of the shuffle operand chosen by this shuffle the same as
// the element chosen by the shuffle operand itself?
if (Mask0[Mask[i]] != Mask0[i])
return SDValue();
}
// Every element of this shuffle is identical to the result of the previous
// shuffle, so we can replace this value.
return Shuf->getOperand(0);
}
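// For instance, with Mask0 = <0,0,2,2>:
//   shuf (shuf0 X, Y, <0,0,2,2>), undef, <1,1,3,3> --> shuf0 X, Y, <0,0,2,2>
// because composing the two masks selects the same element in every lane.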
SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
EVT VT = N->getValueType(0);
unsigned NumElts = VT.getVectorNumElements();
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
assert(N0.getValueType() == VT && "Vector shuffle must be normalized in DAG");
// Canonicalize shuffle undef, undef -> undef
if (N0.isUndef() && N1.isUndef())
return DAG.getUNDEF(VT);
ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
// Canonicalize shuffle v, v -> v, undef
if (N0 == N1) {
SmallVector<int, 8> NewMask;
for (unsigned i = 0; i != NumElts; ++i) {
int Idx = SVN->getMaskElt(i);
if (Idx >= (int)NumElts) Idx -= NumElts;
NewMask.push_back(Idx);
}
return DAG.getVectorShuffle(VT, SDLoc(N), N0, DAG.getUNDEF(VT), NewMask);
}
// Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
if (N0.isUndef())
return DAG.getCommutedVectorShuffle(*SVN);
// Remove references to rhs if it is undef
if (N1.isUndef()) {
bool Changed = false;
SmallVector<int, 8> NewMask;
for (unsigned i = 0; i != NumElts; ++i) {
int Idx = SVN->getMaskElt(i);
if (Idx >= (int)NumElts) {
Idx = -1;
Changed = true;
}
NewMask.push_back(Idx);
}
if (Changed)
return DAG.getVectorShuffle(VT, SDLoc(N), N0, N1, NewMask);
}
if (SDValue InsElt = replaceShuffleOfInsert(SVN, DAG))
return InsElt;
// A shuffle of a single vector that is a splatted value can always be folded.
if (SDValue V = combineShuffleOfSplatVal(SVN, DAG))
return V;
// If it is a splat, check if the argument vector is another splat or a
// build_vector.
if (SVN->isSplat() && SVN->getSplatIndex() < (int)NumElts) {
int SplatIndex = SVN->getSplatIndex();
- if (TLI.isExtractVecEltCheap(VT, SplatIndex) &&
+ if (N0.hasOneUse() && TLI.isExtractVecEltCheap(VT, SplatIndex) &&
TLI.isBinOp(N0.getOpcode()) && N0.getNode()->getNumValues() == 1) {
// splat (vector_bo L, R), Index -->
// splat (scalar_bo (extelt L, Index), (extelt R, Index))
SDValue L = N0.getOperand(0), R = N0.getOperand(1);
SDLoc DL(N);
EVT EltVT = VT.getScalarType();
SDValue Index = DAG.getIntPtrConstant(SplatIndex, DL);
SDValue ExtL = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, L, Index);
SDValue ExtR = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, R, Index);
SDValue NewBO = DAG.getNode(N0.getOpcode(), DL, EltVT, ExtL, ExtR,
N0.getNode()->getFlags());
SDValue Insert = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, NewBO);
SmallVector<int, 16> ZeroMask(VT.getVectorNumElements(), 0);
return DAG.getVectorShuffle(VT, DL, Insert, DAG.getUNDEF(VT), ZeroMask);
}
// If this is a bit convert that changes the element type of the vector but
// not the number of vector elements, look through it. Be careful not to
// look through conversions that change things like v4f32 to v2f64.
SDNode *V = N0.getNode();
if (V->getOpcode() == ISD::BITCAST) {
SDValue ConvInput = V->getOperand(0);
if (ConvInput.getValueType().isVector() &&
ConvInput.getValueType().getVectorNumElements() == NumElts)
V = ConvInput.getNode();
}
if (V->getOpcode() == ISD::BUILD_VECTOR) {
assert(V->getNumOperands() == NumElts &&
"BUILD_VECTOR has wrong number of operands");
SDValue Base;
bool AllSame = true;
for (unsigned i = 0; i != NumElts; ++i) {
if (!V->getOperand(i).isUndef()) {
Base = V->getOperand(i);
break;
}
}
// Splat of <u, u, u, u>, return <u, u, u, u>
if (!Base.getNode())
return N0;
for (unsigned i = 0; i != NumElts; ++i) {
if (V->getOperand(i) != Base) {
AllSame = false;
break;
}
}
// Splat of <x, x, x, x>, return <x, x, x, x>
if (AllSame)
return N0;
// Canonicalize any other splat as a build_vector.
SDValue Splatted = V->getOperand(SplatIndex);
SmallVector<SDValue, 8> Ops(NumElts, Splatted);
SDValue NewBV = DAG.getBuildVector(V->getValueType(0), SDLoc(N), Ops);
// We may have jumped through bitcasts, so the type of the
// BUILD_VECTOR may not match the type of the shuffle.
if (V->getValueType(0) != VT)
NewBV = DAG.getBitcast(VT, NewBV);
return NewBV;
}
}
// Simplify source operands based on shuffle mask.
if (SimplifyDemandedVectorElts(SDValue(N, 0)))
return SDValue(N, 0);
// This is intentionally placed after demanded elements simplification because
// it could eliminate knowledge of undef elements created by this shuffle.
if (SDValue ShufOp = simplifyShuffleOfShuffle(SVN))
return ShufOp;
// Match shuffles that can be converted to any_vector_extend_in_reg.
if (SDValue V = combineShuffleToVectorExtend(SVN, DAG, TLI, LegalOperations))
return V;
// Combine "truncate_vector_in_reg" style shuffles.
if (SDValue V = combineTruncationShuffle(SVN, DAG))
return V;
if (N0.getOpcode() == ISD::CONCAT_VECTORS &&
Level < AfterLegalizeVectorOps &&
(N1.isUndef() ||
(N1.getOpcode() == ISD::CONCAT_VECTORS &&
N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()))) {
if (SDValue V = partitionShuffleOfConcats(N, DAG))
return V;
}
// Attempt to combine a shuffle of 2 inputs of 'scalar sources' -
// BUILD_VECTOR or SCALAR_TO_VECTOR into a single BUILD_VECTOR.
if (Level < AfterLegalizeDAG && TLI.isTypeLegal(VT))
if (SDValue Res = combineShuffleOfScalars(SVN, DAG, TLI))
return Res;
// If this shuffle only has a single input that is a bitcasted shuffle,
// attempt to merge the 2 shuffles and suitably bitcast the inputs/output
// back to their original types.
if (N0.getOpcode() == ISD::BITCAST && N0.hasOneUse() &&
N1.isUndef() && Level < AfterLegalizeVectorOps &&
TLI.isTypeLegal(VT)) {
auto ScaleShuffleMask = [](ArrayRef<int> Mask, int Scale) {
if (Scale == 1)
return SmallVector<int, 8>(Mask.begin(), Mask.end());
SmallVector<int, 8> NewMask;
for (int M : Mask)
for (int s = 0; s != Scale; ++s)
NewMask.push_back(M < 0 ? -1 : Scale * M + s);
return NewMask;
};
SDValue BC0 = peekThroughOneUseBitcasts(N0);
if (BC0.getOpcode() == ISD::VECTOR_SHUFFLE && BC0.hasOneUse()) {
EVT SVT = VT.getScalarType();
EVT InnerVT = BC0->getValueType(0);
EVT InnerSVT = InnerVT.getScalarType();
// Determine which shuffle works with the smaller scalar type.
EVT ScaleVT = SVT.bitsLT(InnerSVT) ? VT : InnerVT;
EVT ScaleSVT = ScaleVT.getScalarType();
if (TLI.isTypeLegal(ScaleVT) &&
0 == (InnerSVT.getSizeInBits() % ScaleSVT.getSizeInBits()) &&
0 == (SVT.getSizeInBits() % ScaleSVT.getSizeInBits())) {
int InnerScale = InnerSVT.getSizeInBits() / ScaleSVT.getSizeInBits();
int OuterScale = SVT.getSizeInBits() / ScaleSVT.getSizeInBits();
// Scale the shuffle masks to the smaller scalar type.
ShuffleVectorSDNode *InnerSVN = cast<ShuffleVectorSDNode>(BC0);
SmallVector<int, 8> InnerMask =
ScaleShuffleMask(InnerSVN->getMask(), InnerScale);
SmallVector<int, 8> OuterMask =
ScaleShuffleMask(SVN->getMask(), OuterScale);
// Merge the shuffle masks.
SmallVector<int, 8> NewMask;
for (int M : OuterMask)
NewMask.push_back(M < 0 ? -1 : InnerMask[M]);
// Test for shuffle mask legality over both commutations.
SDValue SV0 = BC0->getOperand(0);
SDValue SV1 = BC0->getOperand(1);
bool LegalMask = TLI.isShuffleMaskLegal(NewMask, ScaleVT);
if (!LegalMask) {
std::swap(SV0, SV1);
ShuffleVectorSDNode::commuteMask(NewMask);
LegalMask = TLI.isShuffleMaskLegal(NewMask, ScaleVT);
}
if (LegalMask) {
SV0 = DAG.getBitcast(ScaleVT, SV0);
SV1 = DAG.getBitcast(ScaleVT, SV1);
return DAG.getBitcast(
VT, DAG.getVectorShuffle(ScaleVT, SDLoc(N), SV0, SV1, NewMask));
}
}
}
}
// Canonicalize shuffles according to rules:
// shuffle(A, shuffle(A, B)) -> shuffle(shuffle(A,B), A)
// shuffle(B, shuffle(A, B)) -> shuffle(shuffle(A,B), B)
// shuffle(B, shuffle(A, Undef)) -> shuffle(shuffle(A, Undef), B)
if (N1.getOpcode() == ISD::VECTOR_SHUFFLE &&
N0.getOpcode() != ISD::VECTOR_SHUFFLE && Level < AfterLegalizeDAG &&
TLI.isTypeLegal(VT)) {
// The incoming shuffle must be of the same type as the result of the
// current shuffle.
assert(N1->getOperand(0).getValueType() == VT &&
"Shuffle types don't match");
SDValue SV0 = N1->getOperand(0);
SDValue SV1 = N1->getOperand(1);
bool HasSameOp0 = N0 == SV0;
bool IsSV1Undef = SV1.isUndef();
if (HasSameOp0 || IsSV1Undef || N0 == SV1)
// Commute the operands of this shuffle so that the next rule
// will trigger.
return DAG.getCommutedVectorShuffle(*SVN);
}
// Try to fold according to rules:
// shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, B, M2)
// shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, C, M2)
// shuffle(shuffle(A, B, M0), C, M1) -> shuffle(B, C, M2)
// Don't try to fold shuffles with illegal type.
// Only fold if this shuffle is the only user of the other shuffle.
if (N0.getOpcode() == ISD::VECTOR_SHUFFLE && N->isOnlyUserOf(N0.getNode()) &&
Level < AfterLegalizeDAG && TLI.isTypeLegal(VT)) {
ShuffleVectorSDNode *OtherSV = cast<ShuffleVectorSDNode>(N0);
// Don't try to fold splats; they're likely to simplify somehow, or they
// might be free.
if (OtherSV->isSplat())
return SDValue();
// The incoming shuffle must be of the same type as the result of the
// current shuffle.
assert(OtherSV->getOperand(0).getValueType() == VT &&
"Shuffle types don't match");
SDValue SV0, SV1;
SmallVector<int, 4> Mask;
// Compute the combined shuffle mask for a shuffle with SV0 as the first
// operand, and SV1 as the second operand.
for (unsigned i = 0; i != NumElts; ++i) {
int Idx = SVN->getMaskElt(i);
if (Idx < 0) {
// Propagate Undef.
Mask.push_back(Idx);
continue;
}
SDValue CurrentVec;
if (Idx < (int)NumElts) {
// This shuffle index refers to the inner shuffle N0. Lookup the inner
// shuffle mask to identify which vector is actually referenced.
Idx = OtherSV->getMaskElt(Idx);
if (Idx < 0) {
// Propagate Undef.
Mask.push_back(Idx);
continue;
}
CurrentVec = (Idx < (int) NumElts) ? OtherSV->getOperand(0)
: OtherSV->getOperand(1);
} else {
// This shuffle index references an element within N1.
CurrentVec = N1;
}
// Simple case where 'CurrentVec' is UNDEF.
if (CurrentVec.isUndef()) {
Mask.push_back(-1);
continue;
}
// Canonicalize the shuffle index. We don't know yet if CurrentVec
// will be the first or second operand of the combined shuffle.
Idx = Idx % NumElts;
if (!SV0.getNode() || SV0 == CurrentVec) {
// Ok. CurrentVec is the left hand side.
// Update the mask accordingly.
SV0 = CurrentVec;
Mask.push_back(Idx);
continue;
}
// Bail out if we cannot convert the shuffle pair into a single shuffle.
if (SV1.getNode() && SV1 != CurrentVec)
return SDValue();
// Ok. CurrentVec is the right hand side.
// Update the mask accordingly.
SV1 = CurrentVec;
Mask.push_back(Idx + NumElts);
}
// Check if all indices in Mask are Undef. If so, propagate Undef.
bool isUndefMask = true;
for (unsigned i = 0; i != NumElts && isUndefMask; ++i)
isUndefMask &= Mask[i] < 0;
if (isUndefMask)
return DAG.getUNDEF(VT);
if (!SV0.getNode())
SV0 = DAG.getUNDEF(VT);
if (!SV1.getNode())
SV1 = DAG.getUNDEF(VT);
// Avoid introducing shuffles with illegal mask.
if (!TLI.isShuffleMaskLegal(Mask, VT)) {
ShuffleVectorSDNode::commuteMask(Mask);
if (!TLI.isShuffleMaskLegal(Mask, VT))
return SDValue();
// shuffle(shuffle(A, B, M0), C, M1) -> shuffle(B, A, M2)
// shuffle(shuffle(A, B, M0), C, M1) -> shuffle(C, A, M2)
// shuffle(shuffle(A, B, M0), C, M1) -> shuffle(C, B, M2)
std::swap(SV0, SV1);
}
// shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, B, M2)
// shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, C, M2)
// shuffle(shuffle(A, B, M0), C, M1) -> shuffle(B, C, M2)
return DAG.getVectorShuffle(VT, SDLoc(N), SV0, SV1, Mask);
}
if (SDValue V = foldShuffleOfConcatUndefs(SVN, DAG))
return V;
return SDValue();
}
SDValue DAGCombiner::visitSCALAR_TO_VECTOR(SDNode *N) {
SDValue InVal = N->getOperand(0);
EVT VT = N->getValueType(0);
// Replace a SCALAR_TO_VECTOR(EXTRACT_VECTOR_ELT(V,C0)) pattern
// with a VECTOR_SHUFFLE and possible truncate.
if (InVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
SDValue InVec = InVal->getOperand(0);
SDValue EltNo = InVal->getOperand(1);
auto InVecT = InVec.getValueType();
if (ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(EltNo)) {
SmallVector<int, 8> NewMask(InVecT.getVectorNumElements(), -1);
int Elt = C0->getZExtValue();
NewMask[0] = Elt;
SDValue Val;
// If we have an implicit truncate, do the truncate here as long as it's
// legal; if it's not legal, fall through to the shuffle lowering below.
if (VT.getScalarType() != InVal.getValueType() &&
InVal.getValueType().isScalarInteger() &&
isTypeLegal(VT.getScalarType())) {
Val =
DAG.getNode(ISD::TRUNCATE, SDLoc(InVal), VT.getScalarType(), InVal);
return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), VT, Val);
}
if (VT.getScalarType() == InVecT.getScalarType() &&
VT.getVectorNumElements() <= InVecT.getVectorNumElements() &&
TLI.isShuffleMaskLegal(NewMask, VT)) {
Val = DAG.getVectorShuffle(InVecT, SDLoc(N), InVec,
DAG.getUNDEF(InVecT), NewMask);
// If the initial vector is the correct size this shuffle is a
// valid result.
if (VT == InVecT)
return Val;
// If not we must truncate the vector.
if (VT.getVectorNumElements() != InVecT.getVectorNumElements()) {
MVT IdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
SDValue ZeroIdx = DAG.getConstant(0, SDLoc(N), IdxTy);
EVT SubVT =
EVT::getVectorVT(*DAG.getContext(), InVecT.getVectorElementType(),
VT.getVectorNumElements());
Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), SubVT, Val,
ZeroIdx);
return Val;
}
}
}
}
return SDValue();
}
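// For instance, when the source and result vector types already match:
//   scalar_to_vector (extract_vector_elt V, 2)
//   --> vector_shuffle V, undef, <2,-1,-1,-1>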
SDValue DAGCombiner::visitINSERT_SUBVECTOR(SDNode *N) {
EVT VT = N->getValueType(0);
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
SDValue N2 = N->getOperand(2);
// If inserting an UNDEF, just return the original vector.
if (N1.isUndef())
return N0;
// If this is an insert of an extracted vector into an undef vector, we can
// just use the input to the extract.
if (N0.isUndef() && N1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
N1.getOperand(1) == N2 && N1.getOperand(0).getValueType() == VT)
return N1.getOperand(0);
// If we are inserting a bitcast value into an undef, with the same
// number of elements, just use the bitcast input of the extract.
// i.e. INSERT_SUBVECTOR UNDEF (BITCAST N1) N2 ->
// BITCAST (INSERT_SUBVECTOR UNDEF N1 N2)
if (N0.isUndef() && N1.getOpcode() == ISD::BITCAST &&
N1.getOperand(0).getOpcode() == ISD::EXTRACT_SUBVECTOR &&
N1.getOperand(0).getOperand(1) == N2 &&
N1.getOperand(0).getOperand(0).getValueType().getVectorNumElements() ==
VT.getVectorNumElements() &&
N1.getOperand(0).getOperand(0).getValueType().getSizeInBits() ==
VT.getSizeInBits()) {
return DAG.getBitcast(VT, N1.getOperand(0).getOperand(0));
}
// If both N0 and N1 are bitcast values on which insert_subvector
// would make sense, pull the bitcast through.
// i.e. INSERT_SUBVECTOR (BITCAST N0) (BITCAST N1) N2 ->
// BITCAST (INSERT_SUBVECTOR N0 N1 N2)
if (N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST) {
SDValue CN0 = N0.getOperand(0);
SDValue CN1 = N1.getOperand(0);
EVT CN0VT = CN0.getValueType();
EVT CN1VT = CN1.getValueType();
if (CN0VT.isVector() && CN1VT.isVector() &&
CN0VT.getVectorElementType() == CN1VT.getVectorElementType() &&
CN0VT.getVectorNumElements() == VT.getVectorNumElements()) {
SDValue NewINSERT = DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N),
CN0.getValueType(), CN0, CN1, N2);
return DAG.getBitcast(VT, NewINSERT);
}
}
// Combine INSERT_SUBVECTORs where we are inserting to the same index.
// INSERT_SUBVECTOR( INSERT_SUBVECTOR( Vec, SubOld, Idx ), SubNew, Idx )
// --> INSERT_SUBVECTOR( Vec, SubNew, Idx )
if (N0.getOpcode() == ISD::INSERT_SUBVECTOR &&
N0.getOperand(1).getValueType() == N1.getValueType() &&
N0.getOperand(2) == N2)
return DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), VT, N0.getOperand(0),
N1, N2);
// Eliminate an intermediate insert into an undef vector:
// insert_subvector undef, (insert_subvector undef, X, 0), N2 -->
// insert_subvector undef, X, N2
if (N0.isUndef() && N1.getOpcode() == ISD::INSERT_SUBVECTOR &&
N1.getOperand(0).isUndef() && isNullConstant(N1.getOperand(2)))
return DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), VT, N0,
N1.getOperand(1), N2);
if (!isa<ConstantSDNode>(N2))
return SDValue();
uint64_t InsIdx = cast<ConstantSDNode>(N2)->getZExtValue();
// Push subvector bitcasts to the output, adjusting the index as we go.
// insert_subvector(bitcast(v), bitcast(s), c1)
// -> bitcast(insert_subvector(v, s, c2))
if ((N0.isUndef() || N0.getOpcode() == ISD::BITCAST) &&
N1.getOpcode() == ISD::BITCAST) {
SDValue N0Src = peekThroughBitcasts(N0);
SDValue N1Src = peekThroughBitcasts(N1);
EVT N0SrcSVT = N0Src.getValueType().getScalarType();
EVT N1SrcSVT = N1Src.getValueType().getScalarType();
if ((N0.isUndef() || N0SrcSVT == N1SrcSVT) &&
N0Src.getValueType().isVector() && N1Src.getValueType().isVector()) {
EVT NewVT;
SDLoc DL(N);
SDValue NewIdx;
MVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
LLVMContext &Ctx = *DAG.getContext();
unsigned NumElts = VT.getVectorNumElements();
unsigned EltSizeInBits = VT.getScalarSizeInBits();
if ((EltSizeInBits % N1SrcSVT.getSizeInBits()) == 0) {
unsigned Scale = EltSizeInBits / N1SrcSVT.getSizeInBits();
NewVT = EVT::getVectorVT(Ctx, N1SrcSVT, NumElts * Scale);
NewIdx = DAG.getConstant(InsIdx * Scale, DL, IdxVT);
} else if ((N1SrcSVT.getSizeInBits() % EltSizeInBits) == 0) {
unsigned Scale = N1SrcSVT.getSizeInBits() / EltSizeInBits;
if ((NumElts % Scale) == 0 && (InsIdx % Scale) == 0) {
NewVT = EVT::getVectorVT(Ctx, N1SrcSVT, NumElts / Scale);
NewIdx = DAG.getConstant(InsIdx / Scale, DL, IdxVT);
}
}
if (NewIdx && hasOperation(ISD::INSERT_SUBVECTOR, NewVT)) {
SDValue Res = DAG.getBitcast(NewVT, N0Src);
Res = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, NewVT, Res, N1Src, NewIdx);
return DAG.getBitcast(VT, Res);
}
}
}
// Canonicalize insert_subvector dag nodes.
// Example:
// (insert_subvector (insert_subvector A, B, Idx0), C, Idx1)
// -> (insert_subvector (insert_subvector A, C, Idx1), B, Idx0)
if (N0.getOpcode() == ISD::INSERT_SUBVECTOR && N0.hasOneUse() &&
N1.getValueType() == N0.getOperand(1).getValueType() &&
isa<ConstantSDNode>(N0.getOperand(2))) {
unsigned OtherIdx = N0.getConstantOperandVal(2);
if (InsIdx < OtherIdx) {
// Swap nodes.
SDValue NewOp = DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), VT,
N0.getOperand(0), N1, N2);
AddToWorklist(NewOp.getNode());
return DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N0.getNode()),
VT, NewOp, N0.getOperand(1), N0.getOperand(2));
}
}
// If the input vector is a concatenation, and the insert replaces
// one of the pieces, we can optimize into a single concat_vectors.
if (N0.getOpcode() == ISD::CONCAT_VECTORS && N0.hasOneUse() &&
N0.getOperand(0).getValueType() == N1.getValueType()) {
unsigned Factor = N1.getValueType().getVectorNumElements();
SmallVector<SDValue, 8> Ops(N0->op_begin(), N0->op_end());
Ops[cast<ConstantSDNode>(N2)->getZExtValue() / Factor] = N1;
return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Ops);
}
// Simplify source operands based on insertion.
if (SimplifyDemandedVectorElts(SDValue(N, 0)))
return SDValue(N, 0);
return SDValue();
}
SDValue DAGCombiner::visitFP_TO_FP16(SDNode *N) {
SDValue N0 = N->getOperand(0);
// fold (fp_to_fp16 (fp16_to_fp op)) -> op
if (N0->getOpcode() == ISD::FP16_TO_FP)
return N0->getOperand(0);
return SDValue();
}
SDValue DAGCombiner::visitFP16_TO_FP(SDNode *N) {
SDValue N0 = N->getOperand(0);
// fold fp16_to_fp(op & 0xffff) -> fp16_to_fp(op)
if (N0->getOpcode() == ISD::AND) {
ConstantSDNode *AndConst = getAsNonOpaqueConstant(N0.getOperand(1));
if (AndConst && AndConst->getAPIntValue() == 0xffff) {
return DAG.getNode(ISD::FP16_TO_FP, SDLoc(N), N->getValueType(0),
N0.getOperand(0));
}
}
return SDValue();
}
SDValue DAGCombiner::visitVECREDUCE(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N0.getValueType();
unsigned Opcode = N->getOpcode();
// VECREDUCE over 1-element vector is just an extract.
if (VT.getVectorNumElements() == 1) {
SDLoc dl(N);
SDValue Res = DAG.getNode(
ISD::EXTRACT_VECTOR_ELT, dl, VT.getVectorElementType(), N0,
DAG.getConstant(0, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
if (Res.getValueType() != N->getValueType(0))
Res = DAG.getNode(ISD::ANY_EXTEND, dl, N->getValueType(0), Res);
return Res;
}
// On a boolean vector, an and/or reduction is the same as a umin/umax
// reduction. Convert them if the latter is legal while the former isn't.
if (Opcode == ISD::VECREDUCE_AND || Opcode == ISD::VECREDUCE_OR) {
unsigned NewOpcode = Opcode == ISD::VECREDUCE_AND
? ISD::VECREDUCE_UMIN : ISD::VECREDUCE_UMAX;
if (!TLI.isOperationLegalOrCustom(Opcode, VT) &&
TLI.isOperationLegalOrCustom(NewOpcode, VT) &&
DAG.ComputeNumSignBits(N0) == VT.getScalarSizeInBits())
return DAG.getNode(NewOpcode, SDLoc(N), N->getValueType(0), N0);
}
return SDValue();
}
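// For instance, on a v4i1 vector every lane is 0 or 1, so:
//   vecreduce_and x == vecreduce_umin x, vecreduce_or x == vecreduce_umax x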
/// Returns a vector_shuffle if it is able to transform an AND to a
/// vector_shuffle with the destination vector and a zero vector.
/// e.g. AND V, <0xffffffff, 0, 0xffffffff, 0> ==>
/// vector_shuffle V, Zero, <0, 4, 2, 4>
SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) {
assert(N->getOpcode() == ISD::AND && "Unexpected opcode!");
EVT VT = N->getValueType(0);
SDValue LHS = N->getOperand(0);
SDValue RHS = peekThroughBitcasts(N->getOperand(1));
SDLoc DL(N);
// Make sure we're not running after operation legalization where it
// may have custom lowered the vector shuffles.
if (LegalOperations)
return SDValue();
if (RHS.getOpcode() != ISD::BUILD_VECTOR)
return SDValue();
EVT RVT = RHS.getValueType();
unsigned NumElts = RHS.getNumOperands();
// Attempt to create a valid clear mask, splitting the mask into
// sub-elements and checking to see if each is all zeros or all ones -
// suitable for shuffle masking.
auto BuildClearMask = [&](int Split) {
int NumSubElts = NumElts * Split;
int NumSubBits = RVT.getScalarSizeInBits() / Split;
SmallVector<int, 8> Indices;
for (int i = 0; i != NumSubElts; ++i) {
int EltIdx = i / Split;
int SubIdx = i % Split;
SDValue Elt = RHS.getOperand(EltIdx);
if (Elt.isUndef()) {
Indices.push_back(-1);
continue;
}
APInt Bits;
if (isa<ConstantSDNode>(Elt))
Bits = cast<ConstantSDNode>(Elt)->getAPIntValue();
else if (isa<ConstantFPSDNode>(Elt))
Bits = cast<ConstantFPSDNode>(Elt)->getValueAPF().bitcastToAPInt();
else
return SDValue();
// Extract the sub element from the constant bit mask.
if (DAG.getDataLayout().isBigEndian()) {
Bits.lshrInPlace((Split - SubIdx - 1) * NumSubBits);
} else {
Bits.lshrInPlace(SubIdx * NumSubBits);
}
if (Split > 1)
Bits = Bits.trunc(NumSubBits);
if (Bits.isAllOnesValue())
Indices.push_back(i);
else if (Bits == 0)
Indices.push_back(i + NumSubElts);
else
return SDValue();
}
// Let's see if the target supports this vector_shuffle.
EVT ClearSVT = EVT::getIntegerVT(*DAG.getContext(), NumSubBits);
EVT ClearVT = EVT::getVectorVT(*DAG.getContext(), ClearSVT, NumSubElts);
if (!TLI.isVectorClearMaskLegal(Indices, ClearVT))
return SDValue();
SDValue Zero = DAG.getConstant(0, DL, ClearVT);
return DAG.getBitcast(VT, DAG.getVectorShuffle(ClearVT, DL,
DAG.getBitcast(ClearVT, LHS),
Zero, Indices));
};
// Determine maximum split level (byte level masking).
int MaxSplit = 1;
if (RVT.getScalarSizeInBits() % 8 == 0)
MaxSplit = RVT.getScalarSizeInBits() / 8;
for (int Split = 1; Split <= MaxSplit; ++Split)
if (RVT.getScalarSizeInBits() % Split == 0)
if (SDValue S = BuildClearMask(Split))
return S;
return SDValue();
}
/// If a vector binop is performed on splat values, it may be profitable to
/// extract, scalarize, and insert/splat.
static SDValue scalarizeBinOpOfSplats(SDNode *N, SelectionDAG &DAG) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
unsigned Opcode = N->getOpcode();
EVT VT = N->getValueType(0);
EVT EltVT = VT.getVectorElementType();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
// TODO: Remove/replace the extract cost check? If the elements are available
// as scalars, then there may be no extract cost. Should we ask if
// inserting a scalar back into a vector is cheap instead?
int Index0, Index1;
SDValue Src0 = DAG.getSplatSourceVector(N0, Index0);
SDValue Src1 = DAG.getSplatSourceVector(N1, Index1);
if (!Src0 || !Src1 || Index0 != Index1 ||
Src0.getValueType().getVectorElementType() != EltVT ||
Src1.getValueType().getVectorElementType() != EltVT ||
!TLI.isExtractVecEltCheap(VT, Index0) ||
!TLI.isOperationLegalOrCustom(Opcode, EltVT))
return SDValue();
SDLoc DL(N);
SDValue IndexC =
DAG.getConstant(Index0, DL, TLI.getVectorIdxTy(DAG.getDataLayout()));
SDValue X = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, N0, IndexC);
SDValue Y = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, N1, IndexC);
SDValue ScalarBO = DAG.getNode(Opcode, DL, EltVT, X, Y, N->getFlags());
// If all lanes but 1 are undefined, no need to splat the scalar result.
// TODO: Keep track of undefs and use that info in the general case.
if (N0.getOpcode() == ISD::BUILD_VECTOR && N0.getOpcode() == N1.getOpcode() &&
count_if(N0->ops(), [](SDValue V) { return !V.isUndef(); }) == 1 &&
count_if(N1->ops(), [](SDValue V) { return !V.isUndef(); }) == 1) {
// bo (build_vec ..undef, X, undef...), (build_vec ..undef, Y, undef...) -->
// build_vec ..undef, (bo X, Y), undef...
SmallVector<SDValue, 8> Ops(VT.getVectorNumElements(), DAG.getUNDEF(EltVT));
Ops[Index0] = ScalarBO;
return DAG.getBuildVector(VT, DL, Ops);
}
// bo (splat X, Index), (splat Y, Index) --> splat (bo X, Y), Index
SmallVector<SDValue, 8> Ops(VT.getVectorNumElements(), ScalarBO);
return DAG.getBuildVector(VT, DL, Ops);
}
/// Visit a binary vector operation, like ADD.
SDValue DAGCombiner::SimplifyVBinOp(SDNode *N) {
assert(N->getValueType(0).isVector() &&
"SimplifyVBinOp only works on vectors!");
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
SDValue Ops[] = {LHS, RHS};
EVT VT = N->getValueType(0);
unsigned Opcode = N->getOpcode();
// See if we can constant fold the vector operation.
if (SDValue Fold = DAG.FoldConstantVectorArithmetic(
Opcode, SDLoc(LHS), LHS.getValueType(), Ops, N->getFlags()))
return Fold;
// Move unary shuffles with identical masks after a vector binop:
// VBinOp (shuffle A, Undef, Mask), (shuffle B, Undef, Mask))
// --> shuffle (VBinOp A, B), Undef, Mask
// This does not require type legality checks because we are creating the
// same types of operations that are in the original sequence. We do have to
// restrict ops like integer div that have immediate UB (eg, div-by-zero)
// though. This code is adapted from the identical transform in instcombine.
if (Opcode != ISD::UDIV && Opcode != ISD::SDIV &&
Opcode != ISD::UREM && Opcode != ISD::SREM &&
Opcode != ISD::UDIVREM && Opcode != ISD::SDIVREM) {
auto *Shuf0 = dyn_cast<ShuffleVectorSDNode>(LHS);
auto *Shuf1 = dyn_cast<ShuffleVectorSDNode>(RHS);
if (Shuf0 && Shuf1 && Shuf0->getMask().equals(Shuf1->getMask()) &&
LHS.getOperand(1).isUndef() && RHS.getOperand(1).isUndef() &&
(LHS.hasOneUse() || RHS.hasOneUse() || LHS == RHS)) {
SDLoc DL(N);
SDValue NewBinOp = DAG.getNode(Opcode, DL, VT, LHS.getOperand(0),
RHS.getOperand(0), N->getFlags());
SDValue UndefV = LHS.getOperand(1);
return DAG.getVectorShuffle(VT, DL, NewBinOp, UndefV, Shuf0->getMask());
}
}
// The following pattern is likely to emerge with vector reduction ops. Moving
// the binary operation ahead of insertion may allow using a narrower vector
// instruction that has better performance than the wide version of the op:
// VBinOp (ins undef, X, Z), (ins undef, Y, Z) --> ins VecC, (VBinOp X, Y), Z
if (LHS.getOpcode() == ISD::INSERT_SUBVECTOR && LHS.getOperand(0).isUndef() &&
RHS.getOpcode() == ISD::INSERT_SUBVECTOR && RHS.getOperand(0).isUndef() &&
LHS.getOperand(2) == RHS.getOperand(2) &&
(LHS.hasOneUse() || RHS.hasOneUse())) {
SDValue X = LHS.getOperand(1);
SDValue Y = RHS.getOperand(1);
SDValue Z = LHS.getOperand(2);
EVT NarrowVT = X.getValueType();
if (NarrowVT == Y.getValueType() &&
TLI.isOperationLegalOrCustomOrPromote(Opcode, NarrowVT)) {
// (binop undef, undef) may not return undef, so compute that result.
SDLoc DL(N);
SDValue VecC =
DAG.getNode(Opcode, DL, VT, DAG.getUNDEF(VT), DAG.getUNDEF(VT));
SDValue NarrowBO = DAG.getNode(Opcode, DL, NarrowVT, X, Y);
return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, VecC, NarrowBO, Z);
}
}
if (SDValue V = scalarizeBinOpOfSplats(N, DAG))
return V;
return SDValue();
}
SDValue DAGCombiner::SimplifySelect(const SDLoc &DL, SDValue N0, SDValue N1,
SDValue N2) {
assert(N0.getOpcode() ==ISD::SETCC && "First argument must be a SetCC node!");
SDValue SCC = SimplifySelectCC(DL, N0.getOperand(0), N0.getOperand(1), N1, N2,
cast<CondCodeSDNode>(N0.getOperand(2))->get());
// If we got a simplified select_cc node back from SimplifySelectCC, then
// break it down into a new SETCC node, and a new SELECT node, and then return
// the SELECT node, since we were called with a SELECT node.
if (SCC.getNode()) {
// Check to see if we got a select_cc back (to turn into setcc/select).
// Otherwise, just return whatever node we got back, like fabs.
if (SCC.getOpcode() == ISD::SELECT_CC) {
const SDNodeFlags Flags = N0.getNode()->getFlags();
SDValue SETCC = DAG.getNode(ISD::SETCC, SDLoc(N0),
N0.getValueType(),
SCC.getOperand(0), SCC.getOperand(1),
SCC.getOperand(4), Flags);
AddToWorklist(SETCC.getNode());
SDValue SelectNode = DAG.getSelect(SDLoc(SCC), SCC.getValueType(), SETCC,
SCC.getOperand(2), SCC.getOperand(3));
SelectNode->setFlags(Flags);
return SelectNode;
}
return SCC;
}
return SDValue();
}
/// Given a SELECT or a SELECT_CC node, where LHS and RHS are the two values
/// being selected between, see if we can simplify the select. Callers of this
/// should assume that TheSelect is deleted if this returns true. As such, they
/// should return the appropriate thing (e.g. the node) back to the top-level of
/// the DAG combiner loop to avoid it being looked at.
bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS,
SDValue RHS) {
// fold (select (setcc x, [+-]0.0, *lt), NaN, (fsqrt x))
// The select + setcc is redundant, because fsqrt returns NaN for X < 0.
if (const ConstantFPSDNode *NaN = isConstOrConstSplatFP(LHS)) {
if (NaN->isNaN() && RHS.getOpcode() == ISD::FSQRT) {
// We have: (select (setcc ?, ?, ?), NaN, (fsqrt ?))
SDValue Sqrt = RHS;
ISD::CondCode CC;
SDValue CmpLHS;
const ConstantFPSDNode *Zero = nullptr;
if (TheSelect->getOpcode() == ISD::SELECT_CC) {
CC = cast<CondCodeSDNode>(TheSelect->getOperand(4))->get();
CmpLHS = TheSelect->getOperand(0);
Zero = isConstOrConstSplatFP(TheSelect->getOperand(1));
} else {
// SELECT or VSELECT
SDValue Cmp = TheSelect->getOperand(0);
if (Cmp.getOpcode() == ISD::SETCC) {
CC = cast<CondCodeSDNode>(Cmp.getOperand(2))->get();
CmpLHS = Cmp.getOperand(0);
Zero = isConstOrConstSplatFP(Cmp.getOperand(1));
}
}
if (Zero && Zero->isZero() &&
Sqrt.getOperand(0) == CmpLHS && (CC == ISD::SETOLT ||
CC == ISD::SETULT || CC == ISD::SETLT)) {
// We have: (select (setcc x, [+-]0.0, *lt), NaN, (fsqrt x))
CombineTo(TheSelect, Sqrt);
return true;
}
}
}
// Cannot simplify select with vector condition
if (TheSelect->getOperand(0).getValueType().isVector()) return false;
// If this is a select from two identical things, try to pull the operation
// through the select.
if (LHS.getOpcode() != RHS.getOpcode() ||
!LHS.hasOneUse() || !RHS.hasOneUse())
return false;
// If this is a load and the token chain is identical, replace the select
// of two loads with a load through a select of the address to load from.
// This triggers in things like "select bool X, 10.0, 123.0" after the FP
// constants have been dropped into the constant pool.
if (LHS.getOpcode() == ISD::LOAD) {
LoadSDNode *LLD = cast<LoadSDNode>(LHS);
LoadSDNode *RLD = cast<LoadSDNode>(RHS);
// Token chains must be identical.
if (LHS.getOperand(0) != RHS.getOperand(0) ||
// Do not let this transformation reduce the number of volatile loads.
LLD->isVolatile() || RLD->isVolatile() ||
// FIXME: If either is a pre/post inc/dec load,
// we'd need to split out the address adjustment.
LLD->isIndexed() || RLD->isIndexed() ||
// If this is an EXTLOAD, the VT's must match.
LLD->getMemoryVT() != RLD->getMemoryVT() ||
// If this is an EXTLOAD, the kind of extension must match.
(LLD->getExtensionType() != RLD->getExtensionType() &&
// The only exception is if one of the extensions is anyext.
LLD->getExtensionType() != ISD::EXTLOAD &&
RLD->getExtensionType() != ISD::EXTLOAD) ||
// FIXME: this discards src value information. This is
// over-conservative. It would be beneficial to be able to remember
// both potential memory locations. Since we are discarding
// src value info, don't do the transformation if the memory
// locations are not in the default address space.
LLD->getPointerInfo().getAddrSpace() != 0 ||
RLD->getPointerInfo().getAddrSpace() != 0 ||
// We can't produce a CMOV of a TargetFrameIndex since we won't
// generate the address generation required.
LLD->getBasePtr().getOpcode() == ISD::TargetFrameIndex ||
RLD->getBasePtr().getOpcode() == ISD::TargetFrameIndex ||
!TLI.isOperationLegalOrCustom(TheSelect->getOpcode(),
LLD->getBasePtr().getValueType()))
return false;
// The loads must not depend on one another.
if (LLD->isPredecessorOf(RLD) || RLD->isPredecessorOf(LLD))
return false;
// Check that the select condition doesn't reach either load. If so,
// folding this will induce a cycle into the DAG. If not, this is safe to
// xform, so create a select of the addresses.
SmallPtrSet<const SDNode *, 32> Visited;
SmallVector<const SDNode *, 16> Worklist;
// Always fail if LLD and RLD are not independent. TheSelect is a
// predecessor to all Nodes in question so we need not search past it.
Visited.insert(TheSelect);
Worklist.push_back(LLD);
Worklist.push_back(RLD);
if (SDNode::hasPredecessorHelper(LLD, Visited, Worklist) ||
SDNode::hasPredecessorHelper(RLD, Visited, Worklist))
return false;
SDValue Addr;
if (TheSelect->getOpcode() == ISD::SELECT) {
// We cannot do this optimization if any pair of {RLD, LLD} is a
// predecessor to {RLD, LLD, CondNode}. As we've already compared the
// Loads, we only need to check if CondNode is a successor to one of the
// loads. We can further avoid this if there's no use of their chain
// value.
SDNode *CondNode = TheSelect->getOperand(0).getNode();
Worklist.push_back(CondNode);
if ((LLD->hasAnyUseOfValue(1) &&
SDNode::hasPredecessorHelper(LLD, Visited, Worklist)) ||
(RLD->hasAnyUseOfValue(1) &&
SDNode::hasPredecessorHelper(RLD, Visited, Worklist)))
return false;
Addr = DAG.getSelect(SDLoc(TheSelect),
LLD->getBasePtr().getValueType(),
TheSelect->getOperand(0), LLD->getBasePtr(),
RLD->getBasePtr());
} else { // Otherwise SELECT_CC
// We cannot do this optimization if any pair of {RLD, LLD} is a
// predecessor to {RLD, LLD, CondLHS, CondRHS}. As we've already compared
// the Loads, we only need to check if CondLHS/CondRHS is a successor to
// one of the loads. We can further avoid this if there's no use of their
// chain value.
SDNode *CondLHS = TheSelect->getOperand(0).getNode();
SDNode *CondRHS = TheSelect->getOperand(1).getNode();
Worklist.push_back(CondLHS);
Worklist.push_back(CondRHS);
if ((LLD->hasAnyUseOfValue(1) &&
SDNode::hasPredecessorHelper(LLD, Visited, Worklist)) ||
(RLD->hasAnyUseOfValue(1) &&
SDNode::hasPredecessorHelper(RLD, Visited, Worklist)))
return false;
Addr = DAG.getNode(ISD::SELECT_CC, SDLoc(TheSelect),
LLD->getBasePtr().getValueType(),
TheSelect->getOperand(0),
TheSelect->getOperand(1),
LLD->getBasePtr(), RLD->getBasePtr(),
TheSelect->getOperand(4));
}
SDValue Load;
// It is safe to replace the two loads if they have different alignments,
// but the new load must be the minimum (most restrictive) alignment of the
// inputs.
unsigned Alignment = std::min(LLD->getAlignment(), RLD->getAlignment());
MachineMemOperand::Flags MMOFlags = LLD->getMemOperand()->getFlags();
if (!RLD->isInvariant())
MMOFlags &= ~MachineMemOperand::MOInvariant;
if (!RLD->isDereferenceable())
MMOFlags &= ~MachineMemOperand::MODereferenceable;
if (LLD->getExtensionType() == ISD::NON_EXTLOAD) {
// FIXME: Discards pointer and AA info.
Load = DAG.getLoad(TheSelect->getValueType(0), SDLoc(TheSelect),
LLD->getChain(), Addr, MachinePointerInfo(), Alignment,
MMOFlags);
} else {
// FIXME: Discards pointer and AA info.
Load = DAG.getExtLoad(
LLD->getExtensionType() == ISD::EXTLOAD ? RLD->getExtensionType()
: LLD->getExtensionType(),
SDLoc(TheSelect), TheSelect->getValueType(0), LLD->getChain(), Addr,
MachinePointerInfo(), LLD->getMemoryVT(), Alignment, MMOFlags);
}
// Users of the select now use the result of the load.
CombineTo(TheSelect, Load);
// Users of the old loads now use the new load's chain. We know the
// old-load value is dead now.
CombineTo(LHS.getNode(), Load.getValue(0), Load.getValue(1));
CombineTo(RHS.getNode(), Load.getValue(0), Load.getValue(1));
return true;
}
return false;
}
/// Try to fold an expression of the form (N0 cond N1) ? N2 : N3 to a shift and
/// bitwise 'and'.
SDValue DAGCombiner::foldSelectCCToShiftAnd(const SDLoc &DL, SDValue N0,
SDValue N1, SDValue N2, SDValue N3,
ISD::CondCode CC) {
// If this is a select where the false operand is zero and the compare is a
// check of the sign bit, see if we can perform the "gzip trick":
// select_cc setlt X, 0, A, 0 -> and (sra X, size(X)-1), A
// select_cc setgt X, 0, A, 0 -> and (not (sra X, size(X)-1)), A
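// Worked example (illustrative, assuming 32-bit values): for
//   select_cc setlt X, 0, A, 0
// the shift (sra X, 31) produces all-ones when X is negative and zero
// otherwise, so "and (sra X, 31), A" yields A exactly when X < 0.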
EVT XType = N0.getValueType();
EVT AType = N2.getValueType();
if (!isNullConstant(N3) || !XType.bitsGE(AType))
return SDValue();
// If the comparison is testing for a positive value, we have to invert
// the sign bit mask, so only do that transform if the target has a bitwise
// 'and not' instruction (the invert is free).
if (CC == ISD::SETGT && TLI.hasAndNot(N2)) {
// (X > -1) ? A : 0
// (X > 0) ? X : 0 <-- This is canonical signed max.
if (!(isAllOnesConstant(N1) || (isNullConstant(N1) && N0 == N2)))
return SDValue();
} else if (CC == ISD::SETLT) {
// (X < 0) ? A : 0
// (X < 1) ? X : 0 <-- This is un-canonicalized signed min.
if (!(isNullConstant(N1) || (isOneConstant(N1) && N0 == N2)))
return SDValue();
} else {
return SDValue();
}
// and (sra X, size(X)-1), A -> "and (srl X, C2), A" iff A is a single-bit
// constant.
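// Worked example (illustrative, i32 with A = 8): ShCt = 32 - log2(8) - 1 = 28,
// so (srl X, 28) moves the sign bit to bit 3 and the AND with 8 extracts it
// directly, without materializing the full all-ones mask.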
EVT ShiftAmtTy = getShiftAmountTy(N0.getValueType());
auto *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
if (N2C && ((N2C->getAPIntValue() & (N2C->getAPIntValue() - 1)) == 0)) {
unsigned ShCt = XType.getSizeInBits() - N2C->getAPIntValue().logBase2() - 1;
SDValue ShiftAmt = DAG.getConstant(ShCt, DL, ShiftAmtTy);
SDValue Shift = DAG.getNode(ISD::SRL, DL, XType, N0, ShiftAmt);
AddToWorklist(Shift.getNode());
if (XType.bitsGT(AType)) {
Shift = DAG.getNode(ISD::TRUNCATE, DL, AType, Shift);
AddToWorklist(Shift.getNode());
}
if (CC == ISD::SETGT)
Shift = DAG.getNOT(DL, Shift, AType);
return DAG.getNode(ISD::AND, DL, AType, Shift, N2);
}
SDValue ShiftAmt = DAG.getConstant(XType.getSizeInBits() - 1, DL, ShiftAmtTy);
SDValue Shift = DAG.getNode(ISD::SRA, DL, XType, N0, ShiftAmt);
AddToWorklist(Shift.getNode());
if (XType.bitsGT(AType)) {
Shift = DAG.getNode(ISD::TRUNCATE, DL, AType, Shift);
AddToWorklist(Shift.getNode());
}
if (CC == ISD::SETGT)
Shift = DAG.getNOT(DL, Shift, AType);
return DAG.getNode(ISD::AND, DL, AType, Shift, N2);
}
/// Turn "(a cond b) ? 1.0f : 2.0f" into "load (tmp + ((a cond b) ? 0 : 4)"
/// where "tmp" is a constant pool entry containing an array with 1.0 and 2.0
/// in it. This may be a win when the constant is not otherwise available
/// because it replaces two constant pool loads with one.
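/// A sketch of the layout used below: the pool array is {FV, TV}, so the
/// selected byte offset is ((a cond b) ? EltSize : 0), e.g. 4 for f32.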
SDValue DAGCombiner::convertSelectOfFPConstantsToLoadOffset(
const SDLoc &DL, SDValue N0, SDValue N1, SDValue N2, SDValue N3,
ISD::CondCode CC) {
if (!TLI.reduceSelectOfFPConstantLoads(N0.getValueType().isFloatingPoint()))
return SDValue();
// If we are before legalize types, we want the other legalization to happen
// first (for example, to avoid messing with soft float).
auto *TV = dyn_cast<ConstantFPSDNode>(N2);
auto *FV = dyn_cast<ConstantFPSDNode>(N3);
EVT VT = N2.getValueType();
if (!TV || !FV || !TLI.isTypeLegal(VT))
return SDValue();
// If a constant can be materialized without loads, this does not make sense.
if (TLI.getOperationAction(ISD::ConstantFP, VT) == TargetLowering::Legal ||
TLI.isFPImmLegal(TV->getValueAPF(), TV->getValueType(0), ForCodeSize) ||
TLI.isFPImmLegal(FV->getValueAPF(), FV->getValueType(0), ForCodeSize))
return SDValue();
// If both constants have multiple uses, then we won't need to do an extra
// load. The values are likely around in registers for other users.
if (!TV->hasOneUse() && !FV->hasOneUse())
return SDValue();
Constant *Elts[] = { const_cast<ConstantFP*>(FV->getConstantFPValue()),
const_cast<ConstantFP*>(TV->getConstantFPValue()) };
Type *FPTy = Elts[0]->getType();
const DataLayout &TD = DAG.getDataLayout();
// Create a ConstantArray of the two constants.
Constant *CA = ConstantArray::get(ArrayType::get(FPTy, 2), Elts);
SDValue CPIdx = DAG.getConstantPool(CA, TLI.getPointerTy(DAG.getDataLayout()),
TD.getPrefTypeAlignment(FPTy));
unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
// Get offsets to the 0 and 1 elements of the array, so we can select between
// them.
SDValue Zero = DAG.getIntPtrConstant(0, DL);
unsigned EltSize = (unsigned)TD.getTypeAllocSize(Elts[0]->getType());
SDValue One = DAG.getIntPtrConstant(EltSize, SDLoc(FV));
SDValue Cond =
DAG.getSetCC(DL, getSetCCResultType(N0.getValueType()), N0, N1, CC);
AddToWorklist(Cond.getNode());
SDValue CstOffset = DAG.getSelect(DL, Zero.getValueType(), Cond, One, Zero);
AddToWorklist(CstOffset.getNode());
CPIdx = DAG.getNode(ISD::ADD, DL, CPIdx.getValueType(), CPIdx, CstOffset);
AddToWorklist(CPIdx.getNode());
return DAG.getLoad(TV->getValueType(0), DL, DAG.getEntryNode(), CPIdx,
MachinePointerInfo::getConstantPool(
DAG.getMachineFunction()), Alignment);
}
/// Simplify an expression of the form (N0 cond N1) ? N2 : N3
/// where 'cond' is the comparison specified by CC.
SDValue DAGCombiner::SimplifySelectCC(const SDLoc &DL, SDValue N0, SDValue N1,
SDValue N2, SDValue N3, ISD::CondCode CC,
bool NotExtCompare) {
// (x ? y : y) -> y.
if (N2 == N3) return N2;
EVT CmpOpVT = N0.getValueType();
EVT CmpResVT = getSetCCResultType(CmpOpVT);
EVT VT = N2.getValueType();
auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
auto *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
auto *N3C = dyn_cast<ConstantSDNode>(N3.getNode());
// Determine if the condition we're dealing with is constant.
if (SDValue SCC = DAG.FoldSetCC(CmpResVT, N0, N1, CC, DL)) {
AddToWorklist(SCC.getNode());
if (auto *SCCC = dyn_cast<ConstantSDNode>(SCC)) {
// fold select_cc true, x, y -> x
// fold select_cc false, x, y -> y
return !(SCCC->isNullValue()) ? N2 : N3;
}
}
if (SDValue V =
convertSelectOfFPConstantsToLoadOffset(DL, N0, N1, N2, N3, CC))
return V;
if (SDValue V = foldSelectCCToShiftAnd(DL, N0, N1, N2, N3, CC))
return V;
// fold (select_cc seteq (and x, y), 0, 0, A) -> (and (shr (shl x)) A)
// where y has a single bit set.
// In plain terms, we can turn the SELECT_CC into an AND
// when the condition can be materialized as an all-ones register. Any
// single bit-test can be materialized as an all-ones register with
// shift-left and shift-right-arith.
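// Worked example (illustrative, i32 with y = 0x8): shifting left by
// countLeadingZeros(0x8) = 28 moves the tested bit into the sign position,
// and the arithmetic shift right by 31 then smears it into all-ones (bit was
// set) or zero, ready for the final AND with A.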
if (CC == ISD::SETEQ && N0->getOpcode() == ISD::AND &&
N0->getValueType(0) == VT && isNullConstant(N1) && isNullConstant(N2)) {
SDValue AndLHS = N0->getOperand(0);
auto *ConstAndRHS = dyn_cast<ConstantSDNode>(N0->getOperand(1));
if (ConstAndRHS && ConstAndRHS->getAPIntValue().countPopulation() == 1) {
// Shift the tested bit over the sign bit.
const APInt &AndMask = ConstAndRHS->getAPIntValue();
SDValue ShlAmt =
DAG.getConstant(AndMask.countLeadingZeros(), SDLoc(AndLHS),
getShiftAmountTy(AndLHS.getValueType()));
SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(N0), VT, AndLHS, ShlAmt);
// Now arithmetic right shift it all the way over, so the result is either
// all-ones or zero.
SDValue ShrAmt =
DAG.getConstant(AndMask.getBitWidth() - 1, SDLoc(Shl),
getShiftAmountTy(Shl.getValueType()));
SDValue Shr = DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl, ShrAmt);
return DAG.getNode(ISD::AND, DL, VT, Shr, N3);
}
}
// fold select C, 16, 0 -> shl C, 4
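// With ZeroOrOneBooleanContent the setcc result is exactly 0 or 1, so the
// multiply by a power of two becomes a shift, e.g. C ? 16 : 0 == zext(C) << 4.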
bool Fold = N2C && isNullConstant(N3) && N2C->getAPIntValue().isPowerOf2();
bool Swap = N3C && isNullConstant(N2) && N3C->getAPIntValue().isPowerOf2();
if ((Fold || Swap) &&
TLI.getBooleanContents(CmpOpVT) ==
TargetLowering::ZeroOrOneBooleanContent &&
(!LegalOperations || TLI.isOperationLegal(ISD::SETCC, CmpOpVT))) {
if (Swap) {
CC = ISD::getSetCCInverse(CC, CmpOpVT.isInteger());
std::swap(N2C, N3C);
}
// If the caller doesn't want us to simplify this into a zext of a compare,
// don't do it.
if (NotExtCompare && N2C->isOne())
return SDValue();
SDValue Temp, SCC;
// zext (setcc n0, n1)
if (LegalTypes) {
SCC = DAG.getSetCC(DL, CmpResVT, N0, N1, CC);
if (VT.bitsLT(SCC.getValueType()))
Temp = DAG.getZeroExtendInReg(SCC, SDLoc(N2), VT);
else
Temp = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N2), VT, SCC);
} else {
SCC = DAG.getSetCC(SDLoc(N0), MVT::i1, N0, N1, CC);
Temp = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N2), VT, SCC);
}
AddToWorklist(SCC.getNode());
AddToWorklist(Temp.getNode());
if (N2C->isOne())
return Temp;
// shl setcc result by log2 n2c
return DAG.getNode(ISD::SHL, DL, N2.getValueType(), Temp,
DAG.getConstant(N2C->getAPIntValue().logBase2(),
SDLoc(Temp),
getShiftAmountTy(Temp.getValueType())));
}
// select_cc seteq X, 0, sizeof(X), ctlz(X) -> ctlz(X)
// select_cc seteq X, 0, sizeof(X), ctlz_zero_undef(X) -> ctlz(X)
// select_cc seteq X, 0, sizeof(X), cttz(X) -> cttz(X)
// select_cc seteq X, 0, sizeof(X), cttz_zero_undef(X) -> cttz(X)
// select_cc setne X, 0, ctlz(X), sizeof(X) -> ctlz(X)
// select_cc setne X, 0, ctlz_zero_undef(X), sizeof(X) -> ctlz(X)
// select_cc setne X, 0, cttz(X), sizeof(X) -> cttz(X)
// select_cc setne X, 0, cttz_zero_undef(X), sizeof(X) -> cttz(X)
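// Note (illustrative): these folds rely on ISD::CTLZ/ISD::CTTZ being defined
// to return the operand's bit width for a zero input, so e.g. for i32,
// (X == 0 ? 32 : cttz(X)) is exactly cttz(X) because cttz(0) == 32.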
if (N1C && N1C->isNullValue() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
SDValue ValueOnZero = N2;
SDValue Count = N3;
// If the condition is NE instead of EQ, swap the operands.
if (CC == ISD::SETNE)
std::swap(ValueOnZero, Count);
// Check if the value on zero is a constant equal to the bits in the type.
if (auto *ValueOnZeroC = dyn_cast<ConstantSDNode>(ValueOnZero)) {
if (ValueOnZeroC->getAPIntValue() == VT.getSizeInBits()) {
// If the other operand is cttz/cttz_zero_undef of N0, and cttz is
// legal, combine to just cttz.
if ((Count.getOpcode() == ISD::CTTZ ||
Count.getOpcode() == ISD::CTTZ_ZERO_UNDEF) &&
N0 == Count.getOperand(0) &&
(!LegalOperations || TLI.isOperationLegal(ISD::CTTZ, VT)))
return DAG.getNode(ISD::CTTZ, DL, VT, N0);
// If the other operand is ctlz/ctlz_zero_undef of N0, and ctlz is
// legal, combine to just ctlz.
if ((Count.getOpcode() == ISD::CTLZ ||
Count.getOpcode() == ISD::CTLZ_ZERO_UNDEF) &&
N0 == Count.getOperand(0) &&
(!LegalOperations || TLI.isOperationLegal(ISD::CTLZ, VT)))
return DAG.getNode(ISD::CTLZ, DL, VT, N0);
}
}
}
return SDValue();
}
/// This is a stub for TargetLowering::SimplifySetCC.
SDValue DAGCombiner::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
ISD::CondCode Cond, const SDLoc &DL,
bool foldBooleans) {
TargetLowering::DAGCombinerInfo
DagCombineInfo(DAG, Level, false, this);
return TLI.SimplifySetCC(VT, N0, N1, Cond, foldBooleans, DagCombineInfo, DL);
}
/// Given an ISD::SDIV node expressing a divide by constant, return
/// a DAG expression to select that will generate the same value by multiplying
/// by a magic number.
/// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
SDValue DAGCombiner::BuildSDIV(SDNode *N) {
// When optimizing for minimum size, we don't want to expand a div to a mul
// and a shift.
if (DAG.getMachineFunction().getFunction().hasMinSize())
return SDValue();
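// Worked example (illustrative, per the Hacker's Delight reference above):
// a signed i32 divide by 3 uses magic number 0x55555556;
//   q = hi32((int64_t)0x55555556 * n) + ((unsigned)n >> 31)
// computes n / 3, e.g. n = 7 gives hi32 = 2 and 7 / 3 == 2.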
SmallVector<SDNode *, 8> Built;
if (SDValue S = TLI.BuildSDIV(N, DAG, LegalOperations, Built)) {
for (SDNode *N : Built)
AddToWorklist(N);
return S;
}
return SDValue();
}
/// Given an ISD::SDIV node expressing a divide by constant power of 2, return a
/// DAG expression that will generate the same value by right shifting.
SDValue DAGCombiner::BuildSDIVPow2(SDNode *N) {
ConstantSDNode *C = isConstOrConstSplat(N->getOperand(1));
if (!C)
return SDValue();
// Avoid division by zero.
if (C->isNullValue())
return SDValue();
SmallVector<SDNode *, 8> Built;
if (SDValue S = TLI.BuildSDIVPow2(N, C->getAPIntValue(), DAG, Built)) {
for (SDNode *N : Built)
AddToWorklist(N);
return S;
}
return SDValue();
}
/// Given an ISD::UDIV node expressing a divide by constant, return a DAG
/// expression that will generate the same value by multiplying by a magic
/// number.
/// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
SDValue DAGCombiner::BuildUDIV(SDNode *N) {
// When optimizing for minimum size, we don't want to expand a div to a mul
// and a shift.
if (DAG.getMachineFunction().getFunction().hasMinSize())
return SDValue();
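// Worked example (illustrative): an unsigned i32 divide by 3 uses magic
// number 0xAAAAAAAB; q = hi32((uint64_t)0xAAAAAAAB * n) >> 1 computes
// n / 3, e.g. n = 9 gives hi32 = 6 and 6 >> 1 == 3.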
SmallVector<SDNode *, 8> Built;
if (SDValue S = TLI.BuildUDIV(N, DAG, LegalOperations, Built)) {
for (SDNode *N : Built)
AddToWorklist(N);
return S;
}
return SDValue();
}
/// Determines the LogBase2 value for a non-null input value using the
/// transform: LogBase2(V) = (EltBits - 1) - ctlz(V).
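/// For example, for i32 V = 16: ctlz(16) == 27 and (32 - 1) - 27 == 4,
/// which is log2(16). (V is assumed non-null, as noted above.)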
SDValue DAGCombiner::BuildLogBase2(SDValue V, const SDLoc &DL) {
EVT VT = V.getValueType();
unsigned EltBits = VT.getScalarSizeInBits();
SDValue Ctlz = DAG.getNode(ISD::CTLZ, DL, VT, V);
SDValue Base = DAG.getConstant(EltBits - 1, DL, VT);
SDValue LogBase2 = DAG.getNode(ISD::SUB, DL, VT, Base, Ctlz);
return LogBase2;
}
/// Newton iteration for a function: F(X) is X_{i+1} = X_i - F(X_i)/F'(X_i)
/// For the reciprocal, we need to find the zero of the function:
/// F(X) = A X - 1 [which has a zero at X = 1/A]
/// =>
/// X_{i+1} = X_i (2 - A X_i) = X_i + X_i (1 - A X_i) [this second form
/// does not require additional intermediate precision]
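/// For example (illustrative), with A = 4.0 and initial estimate X_0 = 0.2:
///   X_1 = 0.2 * (2 - 4.0 * 0.2)   = 0.24
///   X_2 = 0.24 * (2 - 4.0 * 0.24) = 0.2496
/// converging quadratically toward 1/A = 0.25.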
SDValue DAGCombiner::BuildReciprocalEstimate(SDValue Op, SDNodeFlags Flags) {
if (Level >= AfterLegalizeDAG)
return SDValue();
// TODO: Handle half and/or extended types?
EVT VT = Op.getValueType();
if (VT.getScalarType() != MVT::f32 && VT.getScalarType() != MVT::f64)
return SDValue();
// If estimates are explicitly disabled for this function, we're done.
MachineFunction &MF = DAG.getMachineFunction();
int Enabled = TLI.getRecipEstimateDivEnabled(VT, MF);
if (Enabled == TLI.ReciprocalEstimate::Disabled)
return SDValue();
// Estimates may be explicitly enabled for this type with a custom number of
// refinement steps.
int Iterations = TLI.getDivRefinementSteps(VT, MF);
if (SDValue Est = TLI.getRecipEstimate(Op, DAG, Enabled, Iterations)) {
AddToWorklist(Est.getNode());
if (Iterations) {
SDLoc DL(Op);
SDValue FPOne = DAG.getConstantFP(1.0, DL, VT);
// Newton iterations: Est = Est + Est (1 - Arg * Est)
for (int i = 0; i < Iterations; ++i) {
SDValue NewEst = DAG.getNode(ISD::FMUL, DL, VT, Op, Est, Flags);
AddToWorklist(NewEst.getNode());
NewEst = DAG.getNode(ISD::FSUB, DL, VT, FPOne, NewEst, Flags);
AddToWorklist(NewEst.getNode());
NewEst = DAG.getNode(ISD::FMUL, DL, VT, Est, NewEst, Flags);
AddToWorklist(NewEst.getNode());
Est = DAG.getNode(ISD::FADD, DL, VT, Est, NewEst, Flags);
AddToWorklist(Est.getNode());
}
}
return Est;
}
return SDValue();
}
/// Newton iteration for a function: F(X) is X_{i+1} = X_i - F(X_i)/F'(X_i)
/// For the reciprocal sqrt, we need to find the zero of the function:
/// F(X) = 1/X^2 - A [which has a zero at X = 1/sqrt(A)]
/// =>
/// X_{i+1} = X_i (1.5 - A X_i^2 / 2)
/// As a result, we precompute A/2 prior to the iteration loop.
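/// For example (illustrative), with A = 4.0 and initial estimate X_0 = 0.6:
///   X_1 = 0.6 * (1.5 - (4.0 / 2) * 0.6^2) = 0.6 * 0.78 = 0.468
/// approaching 1/sqrt(4.0) = 0.5.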
SDValue DAGCombiner::buildSqrtNROneConst(SDValue Arg, SDValue Est,
unsigned Iterations,
SDNodeFlags Flags, bool Reciprocal) {
EVT VT = Arg.getValueType();
SDLoc DL(Arg);
SDValue ThreeHalves = DAG.getConstantFP(1.5, DL, VT);
// We now need 0.5 * Arg which we can write as (1.5 * Arg - Arg) so that
// this entire sequence requires only one FP constant.
SDValue HalfArg = DAG.getNode(ISD::FMUL, DL, VT, ThreeHalves, Arg, Flags);
AddToWorklist(HalfArg.getNode());
HalfArg = DAG.getNode(ISD::FSUB, DL, VT, HalfArg, Arg, Flags);
AddToWorklist(HalfArg.getNode());
// Newton iterations: Est = Est * (1.5 - HalfArg * Est * Est)
for (unsigned i = 0; i < Iterations; ++i) {
SDValue NewEst = DAG.getNode(ISD::FMUL, DL, VT, Est, Est, Flags);
AddToWorklist(NewEst.getNode());
NewEst = DAG.getNode(ISD::FMUL, DL, VT, HalfArg, NewEst, Flags);
AddToWorklist(NewEst.getNode());
NewEst = DAG.getNode(ISD::FSUB, DL, VT, ThreeHalves, NewEst, Flags);
AddToWorklist(NewEst.getNode());
Est = DAG.getNode(ISD::FMUL, DL, VT, Est, NewEst, Flags);
AddToWorklist(Est.getNode());
}
// If non-reciprocal square root is requested, multiply the result by Arg.
if (!Reciprocal) {
Est = DAG.getNode(ISD::FMUL, DL, VT, Est, Arg, Flags);
AddToWorklist(Est.getNode());
}
return Est;
}
/// Newton iteration for a function: F(X) is X_{i+1} = X_i - F(X_i)/F'(X_i)
/// For the reciprocal sqrt, we need to find the zero of the function:
/// F(X) = 1/X^2 - A [which has a zero at X = 1/sqrt(A)]
/// =>
/// X_{i+1} = (-0.5 * X_i) * (A * X_i * X_i + (-3.0))
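/// Algebraically this is the same update as the one-constant form, since
/// (-0.5 * X_i) * (A * X_i^2 - 3.0) == X_i * (1.5 - (A / 2) * X_i^2); e.g.
/// A = 4.0, X_0 = 0.6 gives (-0.3) * (1.44 - 3.0) = 0.468.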
SDValue DAGCombiner::buildSqrtNRTwoConst(SDValue Arg, SDValue Est,
unsigned Iterations,
SDNodeFlags Flags, bool Reciprocal) {
EVT VT = Arg.getValueType();
SDLoc DL(Arg);
SDValue MinusThree = DAG.getConstantFP(-3.0, DL, VT);
SDValue MinusHalf = DAG.getConstantFP(-0.5, DL, VT);
// This routine must enter the loop below to work correctly
// when (Reciprocal == false).
assert(Iterations > 0);
// Newton iterations for reciprocal square root:
// E = (E * -0.5) * ((A * E) * E + -3.0)
for (unsigned i = 0; i < Iterations; ++i) {
SDValue AE = DAG.getNode(ISD::FMUL, DL, VT, Arg, Est, Flags);
AddToWorklist(AE.getNode());
SDValue AEE = DAG.getNode(ISD::FMUL, DL, VT, AE, Est, Flags);
AddToWorklist(AEE.getNode());
SDValue RHS = DAG.getNode(ISD::FADD, DL, VT, AEE, MinusThree, Flags);
AddToWorklist(RHS.getNode());
// When calculating a square root at the last iteration build:
// S = ((A * E) * -0.5) * ((A * E) * E + -3.0)
// (notice a common subexpression)
SDValue LHS;
if (Reciprocal || (i + 1) < Iterations) {
// RSQRT: LHS = (E * -0.5)
LHS = DAG.getNode(ISD::FMUL, DL, VT, Est, MinusHalf, Flags);
} else {
// SQRT: LHS = (A * E) * -0.5
LHS = DAG.getNode(ISD::FMUL, DL, VT, AE, MinusHalf, Flags);
}
AddToWorklist(LHS.getNode());
Est = DAG.getNode(ISD::FMUL, DL, VT, LHS, RHS, Flags);
AddToWorklist(Est.getNode());
}
return Est;
}
/// Build code to calculate either rsqrt(Op) or sqrt(Op). In the latter case
/// Op*rsqrt(Op) is actually computed, so additional postprocessing is needed if
/// Op can be zero.
SDValue DAGCombiner::buildSqrtEstimateImpl(SDValue Op, SDNodeFlags Flags,
bool Reciprocal) {
if (Level >= AfterLegalizeDAG)
return SDValue();
// TODO: Handle half and/or extended types?
EVT VT = Op.getValueType();
if (VT.getScalarType() != MVT::f32 && VT.getScalarType() != MVT::f64)
return SDValue();
// If estimates are explicitly disabled for this function, we're done.
MachineFunction &MF = DAG.getMachineFunction();
int Enabled = TLI.getRecipEstimateSqrtEnabled(VT, MF);
if (Enabled == TLI.ReciprocalEstimate::Disabled)
return SDValue();
// Estimates may be explicitly enabled for this type with a custom number of
// refinement steps.
int Iterations = TLI.getSqrtRefinementSteps(VT, MF);
bool UseOneConstNR = false;
if (SDValue Est =
TLI.getSqrtEstimate(Op, DAG, Enabled, Iterations, UseOneConstNR,
Reciprocal)) {
AddToWorklist(Est.getNode());
if (Iterations) {
Est = UseOneConstNR
? buildSqrtNROneConst(Op, Est, Iterations, Flags, Reciprocal)
: buildSqrtNRTwoConst(Op, Est, Iterations, Flags, Reciprocal);
if (!Reciprocal) {
// The estimate is now completely wrong if the input was exactly 0.0 or
// possibly a denormal. Force the answer to 0.0 for those cases.
SDLoc DL(Op);
EVT CCVT = getSetCCResultType(VT);
ISD::NodeType SelOpcode = VT.isVector() ? ISD::VSELECT : ISD::SELECT;
const Function &F = DAG.getMachineFunction().getFunction();
Attribute Denorms = F.getFnAttribute("denormal-fp-math");
if (Denorms.getValueAsString().equals("ieee")) {
// fabs(X) < SmallestNormal ? 0.0 : Est
const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
APFloat SmallestNorm = APFloat::getSmallestNormalized(FltSem);
SDValue NormC = DAG.getConstantFP(SmallestNorm, DL, VT);
SDValue FPZero = DAG.getConstantFP(0.0, DL, VT);
SDValue Fabs = DAG.getNode(ISD::FABS, DL, VT, Op);
SDValue IsDenorm = DAG.getSetCC(DL, CCVT, Fabs, NormC, ISD::SETLT);
Est = DAG.getNode(SelOpcode, DL, VT, IsDenorm, FPZero, Est);
AddToWorklist(Fabs.getNode());
AddToWorklist(IsDenorm.getNode());
AddToWorklist(Est.getNode());
} else {
// X == 0.0 ? 0.0 : Est
SDValue FPZero = DAG.getConstantFP(0.0, DL, VT);
SDValue IsZero = DAG.getSetCC(DL, CCVT, Op, FPZero, ISD::SETEQ);
Est = DAG.getNode(SelOpcode, DL, VT, IsZero, FPZero, Est);
AddToWorklist(IsZero.getNode());
AddToWorklist(Est.getNode());
}
}
}
return Est;
}
return SDValue();
}
SDValue DAGCombiner::buildRsqrtEstimate(SDValue Op, SDNodeFlags Flags) {
return buildSqrtEstimateImpl(Op, Flags, true);
}
SDValue DAGCombiner::buildSqrtEstimate(SDValue Op, SDNodeFlags Flags) {
return buildSqrtEstimateImpl(Op, Flags, false);
}
/// Return true if there is any possibility that the two addresses overlap.
bool DAGCombiner::isAlias(SDNode *Op0, SDNode *Op1) const {
struct MemUseCharacteristics {
bool IsVolatile;
SDValue BasePtr;
int64_t Offset;
Optional<int64_t> NumBytes;
MachineMemOperand *MMO;
};
auto getCharacteristics = [](SDNode *N) -> MemUseCharacteristics {
if (const auto *LSN = dyn_cast<LSBaseSDNode>(N)) {
int64_t Offset = 0;
if (auto *C = dyn_cast<ConstantSDNode>(LSN->getOffset()))
Offset = (LSN->getAddressingMode() == ISD::PRE_INC)
? C->getSExtValue()
: (LSN->getAddressingMode() == ISD::PRE_DEC)
? -1 * C->getSExtValue()
: 0;
return {LSN->isVolatile(), LSN->getBasePtr(), Offset /*base offset*/,
Optional<int64_t>(LSN->getMemoryVT().getStoreSize()),
LSN->getMemOperand()};
}
if (const auto *LN = cast<LifetimeSDNode>(N))
return {false /*isVolatile*/, LN->getOperand(1),
(LN->hasOffset()) ? LN->getOffset() : 0,
(LN->hasOffset()) ? Optional<int64_t>(LN->getSize())
: Optional<int64_t>(),
(MachineMemOperand *)nullptr};
// Default.
return {false /*isvolatile*/, SDValue(), (int64_t)0 /*offset*/,
Optional<int64_t>() /*size*/, (MachineMemOperand *)nullptr};
};
MemUseCharacteristics MUC0 = getCharacteristics(Op0),
MUC1 = getCharacteristics(Op1);
// If they are to the same address, then they must be aliases.
if (MUC0.BasePtr.getNode() && MUC0.BasePtr == MUC1.BasePtr &&
MUC0.Offset == MUC1.Offset)
return true;
// If they are both volatile then they cannot be reordered.
if (MUC0.IsVolatile && MUC1.IsVolatile)
return true;
if (MUC0.MMO && MUC1.MMO) {
if ((MUC0.MMO->isInvariant() && MUC1.MMO->isStore()) ||
(MUC1.MMO->isInvariant() && MUC0.MMO->isStore()))
return false;
}
// Try to prove that there is aliasing, or that there is no aliasing. Either
// way, we can return now. If nothing can be proved, proceed with more tests.
bool IsAlias;
if (BaseIndexOffset::computeAliasing(Op0, MUC0.NumBytes, Op1, MUC1.NumBytes,
DAG, IsAlias))
return IsAlias;
// The following all rely on MMO0 and MMO1 being valid. Fail conservatively if
// either is not known.
if (!MUC0.MMO || !MUC1.MMO)
return true;
// If one operation reads from invariant memory, and the other may store, they
// cannot alias. These should really be checking the equivalent of mayWrite,
// but it only matters for memory nodes other than load/store.
if ((MUC0.MMO->isInvariant() && MUC1.MMO->isStore()) ||
(MUC1.MMO->isInvariant() && MUC0.MMO->isStore()))
return false;
// If we know required SrcValue1 and SrcValue2 have relatively large
// alignment compared to the size and offset of the access, we may be able
// to prove they do not alias. This check is conservative for now to catch
// cases created by splitting vector types.
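// Worked example (illustrative): two 4-byte accesses whose bases share
// alignment 16, at source offsets 0 and 8, give OffAlign0 = 0 and
// OffAlign1 = 8; since 0 + 4 <= 8 the accesses cannot overlap.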
int64_t SrcValOffset0 = MUC0.MMO->getOffset();
int64_t SrcValOffset1 = MUC1.MMO->getOffset();
unsigned OrigAlignment0 = MUC0.MMO->getBaseAlignment();
unsigned OrigAlignment1 = MUC1.MMO->getBaseAlignment();
if (OrigAlignment0 == OrigAlignment1 && SrcValOffset0 != SrcValOffset1 &&
MUC0.NumBytes.hasValue() && MUC1.NumBytes.hasValue() &&
*MUC0.NumBytes == *MUC1.NumBytes && OrigAlignment0 > *MUC0.NumBytes) {
int64_t OffAlign0 = SrcValOffset0 % OrigAlignment0;
int64_t OffAlign1 = SrcValOffset1 % OrigAlignment1;
// There is no overlap between these relatively aligned accesses of
// similar size. Return no alias.
if ((OffAlign0 + *MUC0.NumBytes) <= OffAlign1 ||
(OffAlign1 + *MUC1.NumBytes) <= OffAlign0)
return false;
}
bool UseAA = CombinerGlobalAA.getNumOccurrences() > 0
? CombinerGlobalAA
: DAG.getSubtarget().useAA();
#ifndef NDEBUG
if (CombinerAAOnlyFunc.getNumOccurrences() &&
CombinerAAOnlyFunc != DAG.getMachineFunction().getName())
UseAA = false;
#endif
if (UseAA && AA && MUC0.MMO->getValue() && MUC1.MMO->getValue()) {
// Use alias analysis information.
int64_t MinOffset = std::min(SrcValOffset0, SrcValOffset1);
int64_t Overlap0 = *MUC0.NumBytes + SrcValOffset0 - MinOffset;
int64_t Overlap1 = *MUC1.NumBytes + SrcValOffset1 - MinOffset;
AliasResult AAResult = AA->alias(
MemoryLocation(MUC0.MMO->getValue(), Overlap0,
UseTBAA ? MUC0.MMO->getAAInfo() : AAMDNodes()),
MemoryLocation(MUC1.MMO->getValue(), Overlap1,
UseTBAA ? MUC1.MMO->getAAInfo() : AAMDNodes()));
if (AAResult == NoAlias)
return false;
}
// Otherwise we have to assume they alias.
return true;
}
/// Walk up chain skipping non-aliasing memory nodes,
/// looking for aliasing nodes and adding them to the Aliases vector.
void DAGCombiner::GatherAllAliases(SDNode *N, SDValue OriginalChain,
SmallVectorImpl<SDValue> &Aliases) {
SmallVector<SDValue, 8> Chains; // List of chains to visit.
SmallPtrSet<SDNode *, 16> Visited; // Visited node set.
// Get alias information for node.
const bool IsLoad = isa<LoadSDNode>(N) && !cast<LoadSDNode>(N)->isVolatile();
// Starting off.
Chains.push_back(OriginalChain);
unsigned Depth = 0;
// Attempt to improve chain by a single step
std::function<bool(SDValue &)> ImproveChain = [&](SDValue &C) -> bool {
switch (C.getOpcode()) {
case ISD::EntryToken:
// No need to mark EntryToken.
C = SDValue();
return true;
case ISD::LOAD:
case ISD::STORE: {
// Get alias information for C.
bool IsOpLoad = isa<LoadSDNode>(C.getNode()) &&
!cast<LSBaseSDNode>(C.getNode())->isVolatile();
if ((IsLoad && IsOpLoad) || !isAlias(N, C.getNode())) {
// Look further up the chain.
C = C.getOperand(0);
return true;
}
// Alias, so stop here.
return false;
}
case ISD::CopyFromReg:
// Always forward past CopyFromReg.
C = C.getOperand(0);
return true;
case ISD::LIFETIME_START:
case ISD::LIFETIME_END: {
// We can forward past any lifetime start/end that can be proven not to
// alias the memory access.
if (!isAlias(N, C.getNode())) {
// Look further up the chain.
C = C.getOperand(0);
return true;
}
return false;
}
default:
return false;
}
};
// Look at each chain and determine if it is an alias. If so, add it to the
// aliases list. If not, then continue up the chain looking for the next
// candidate.
while (!Chains.empty()) {
SDValue Chain = Chains.pop_back_val();
// Don't bother if we've seen Chain before.
if (!Visited.insert(Chain.getNode()).second)
continue;
// For TokenFactor nodes, look at each operand and only continue up the
// chain until we reach the depth limit.
//
// FIXME: The depth check could be made to return the last non-aliasing
// chain we found before we hit a tokenfactor rather than the original
// chain.
if (Depth > TLI.getGatherAllAliasesMaxDepth()) {
Aliases.clear();
Aliases.push_back(OriginalChain);
return;
}
if (Chain.getOpcode() == ISD::TokenFactor) {
// We have to check each of the operands of the token factor for "small"
// token factors, so we queue them up. Adding the operands to the queue
// (stack) in reverse order maintains the original order and increases the
// likelihood that getNode will find a matching token factor (CSE).
if (Chain.getNumOperands() > 16) {
Aliases.push_back(Chain);
continue;
}
for (unsigned n = Chain.getNumOperands(); n;)
Chains.push_back(Chain.getOperand(--n));
++Depth;
continue;
}
// Everything else
if (ImproveChain(Chain)) {
// Updated chain found; consider the new chain if one exists.
if (Chain.getNode())
Chains.push_back(Chain);
++Depth;
continue;
}
// No improved chain possible, so treat it as an alias.
Aliases.push_back(Chain);
}
}
/// Walk up chain skipping non-aliasing memory nodes, looking for a better chain
/// (aliasing node).
SDValue DAGCombiner::FindBetterChain(SDNode *N, SDValue OldChain) {
if (OptLevel == CodeGenOpt::None)
return OldChain;
// Ops for replacing token factor.
SmallVector<SDValue, 8> Aliases;
// Accumulate all the aliases to this node.
GatherAllAliases(N, OldChain, Aliases);
// If no operands then chain to entry token.
if (Aliases.size() == 0)
return DAG.getEntryNode();
// If a single operand then chain to it. We don't need to revisit it.
if (Aliases.size() == 1)
return Aliases[0];
// Construct a custom tailored token factor.
return DAG.getTokenFactor(SDLoc(N), Aliases);
}
namespace {
// TODO: Replace with std::monostate when we move to C++17.
struct UnitT { } Unit;
bool operator==(const UnitT &, const UnitT &) { return true; }
bool operator!=(const UnitT &, const UnitT &) { return false; }
} // namespace
// This function tries to collect a bunch of potentially interesting
// nodes to improve the chains of, all at once. This might seem
// redundant, as this function gets called when visiting every store
// node, so why not let the work be done on each store as it's visited?
//
// I believe this is mainly important because MergeConsecutiveStores
// is unable to deal with merging stores of different sizes, so unless
// we improve the chains of all the potential candidates up-front
// before running MergeConsecutiveStores, it might only see some of
// the nodes that will eventually be candidates, and then not be able
// to go from a partially-merged state to the desired final
// fully-merged state.
bool DAGCombiner::parallelizeChainedStores(StoreSDNode *St) {
SmallVector<StoreSDNode *, 8> ChainedStores;
StoreSDNode *STChain = St;
// Intervals records which offsets from BaseIndex have been covered. In
// the common case, every store writes to the address immediately after the
// previous one and is thus merged with the previous interval at insertion
// time.
using IMap =
llvm::IntervalMap<int64_t, UnitT, 8, IntervalMapHalfOpenInfo<int64_t>>;
IMap::Allocator A;
IMap Intervals(A);
// This holds the base pointer, index, and the offset in bytes from the base
// pointer.
const BaseIndexOffset BasePtr = BaseIndexOffset::match(St, DAG);
// We must have a base and an offset.
if (!BasePtr.getBase().getNode())
return false;
// Do not handle stores to undef base pointers.
if (BasePtr.getBase().isUndef())
return false;
// Add ST's interval.
Intervals.insert(0, (St->getMemoryVT().getSizeInBits() + 7) / 8, Unit);
while (StoreSDNode *Chain = dyn_cast<StoreSDNode>(STChain->getChain())) {
// If the chain has more than one use, then we can't reorder the mem ops.
if (!SDValue(Chain, 0)->hasOneUse())
break;
if (Chain->isVolatile() || Chain->isIndexed())
break;
// Find the base pointer and offset for this memory node.
const BaseIndexOffset Ptr = BaseIndexOffset::match(Chain, DAG);
// Check that the base pointer is the same as the original one.
int64_t Offset;
if (!BasePtr.equalBaseIndex(Ptr, DAG, Offset))
break;
int64_t Length = (Chain->getMemoryVT().getSizeInBits() + 7) / 8;
// Make sure we don't overlap with other intervals by checking the ones to
// the left or right before inserting.
auto I = Intervals.find(Offset);
// If there's a next interval, we should end before it.
if (I != Intervals.end() && I.start() < (Offset + Length))
break;
// If there's a previous interval, we should start after it.
if (I != Intervals.begin() && (--I).stop() <= Offset)
break;
Intervals.insert(Offset, Offset + Length, Unit);
ChainedStores.push_back(Chain);
STChain = Chain;
}
// If we didn't find a chained store, exit.
if (ChainedStores.size() == 0)
return false;
// Improve all chained stores (St and ChainedStores members) starting from
// where the store chain ended and return single TokenFactor.
SDValue NewChain = STChain->getChain();
SmallVector<SDValue, 8> TFOps;
for (unsigned I = ChainedStores.size(); I;) {
StoreSDNode *S = ChainedStores[--I];
SDValue BetterChain = FindBetterChain(S, NewChain);
S = cast<StoreSDNode>(DAG.UpdateNodeOperands(
S, BetterChain, S->getOperand(1), S->getOperand(2), S->getOperand(3)));
TFOps.push_back(SDValue(S, 0));
ChainedStores[I] = S;
}
// Improve St's chain. Use a new node to avoid creating a loop from CombineTo.
SDValue BetterChain = FindBetterChain(St, NewChain);
SDValue NewST;
if (St->isTruncatingStore())
NewST = DAG.getTruncStore(BetterChain, SDLoc(St), St->getValue(),
St->getBasePtr(), St->getMemoryVT(),
St->getMemOperand());
else
NewST = DAG.getStore(BetterChain, SDLoc(St), St->getValue(),
St->getBasePtr(), St->getMemOperand());
TFOps.push_back(NewST);
// If we improved every element of TFOps, then we've lost the dependence on
// NewChain to successors of St and we need to add it back to TFOps. Do so at
// the beginning to keep relative order consistent with FindBetterChains.
auto hasImprovedChain = [&](SDValue ST) -> bool {
return ST->getOperand(0) != NewChain;
};
bool AddNewChain = llvm::all_of(TFOps, hasImprovedChain);
if (AddNewChain)
TFOps.insert(TFOps.begin(), NewChain);
SDValue TF = DAG.getTokenFactor(SDLoc(STChain), TFOps);
CombineTo(St, TF);
AddToWorklist(STChain);
// Add TF operands to the worklist in reverse order.
for (auto I = TF->getNumOperands(); I;)
AddToWorklist(TF->getOperand(--I).getNode());
AddToWorklist(TF.getNode());
return true;
}
bool DAGCombiner::findBetterNeighborChains(StoreSDNode *St) {
if (OptLevel == CodeGenOpt::None)
return false;
const BaseIndexOffset BasePtr = BaseIndexOffset::match(St, DAG);
// We must have a base and an offset.
if (!BasePtr.getBase().getNode())
return false;
// Do not handle stores to undef base pointers.
if (BasePtr.getBase().isUndef())
return false;
// Directly improve a chain of disjoint stores starting at St.
if (parallelizeChainedStores(St))
return true;
// Improve St's chain.
SDValue BetterChain = FindBetterChain(St, St->getChain());
if (St->getChain() != BetterChain) {
replaceStoreChain(St, BetterChain);
return true;
}
return false;
}
/// This is the entry point for the file.
void SelectionDAG::Combine(CombineLevel Level, AliasAnalysis *AA,
CodeGenOpt::Level OptLevel) {
/// This is the main entry point to this class.
DAGCombiner(*this, AA, OptLevel).Run(Level);
}
Index: stable/12/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
===================================================================
--- stable/12/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp (revision 356465)
@@ -1,2485 +1,2484 @@
//===- FastISel.cpp - Implementation of the FastISel class ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/FastISel.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>
using namespace llvm;
using namespace PatternMatch;
#define DEBUG_TYPE "isel"
// FIXME: Remove this after the feature has proven reliable.
static cl::opt<bool> SinkLocalValues("fast-isel-sink-local-values",
cl::init(true), cl::Hidden,
cl::desc("Sink local values in FastISel"));
STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
"target-independent selector");
STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
"target-specific selector");
STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");
/// Set the current block to which generated machine instructions will be
/// appended.
void FastISel::startNewBlock() {
assert(LocalValueMap.empty() &&
"local values should be cleared after finishing a BB");
// Instructions are appended to FuncInfo.MBB. If the basic block already
// contains labels or copies, use the last instruction as the last local
// value.
EmitStartPt = nullptr;
if (!FuncInfo.MBB->empty())
EmitStartPt = &FuncInfo.MBB->back();
LastLocalValue = EmitStartPt;
}
/// Flush the local CSE map and sink anything we can.
void FastISel::finishBasicBlock() { flushLocalValueMap(); }
bool FastISel::lowerArguments() {
if (!FuncInfo.CanLowerReturn)
// Fallback to SDISel argument lowering code to deal with sret pointer
// parameter.
return false;
if (!fastLowerArguments())
return false;
// Enter arguments into ValueMap for uses in non-entry BBs.
for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
E = FuncInfo.Fn->arg_end();
I != E; ++I) {
DenseMap<const Value *, unsigned>::iterator VI = LocalValueMap.find(&*I);
assert(VI != LocalValueMap.end() && "Missed an argument?");
FuncInfo.ValueMap[&*I] = VI->second;
}
return true;
}
/// Return the defined register if this instruction defines exactly one
/// virtual register and uses no other virtual registers. Otherwise return 0.
static unsigned findSinkableLocalRegDef(MachineInstr &MI) {
unsigned RegDef = 0;
for (const MachineOperand &MO : MI.operands()) {
if (!MO.isReg())
continue;
if (MO.isDef()) {
if (RegDef)
return 0;
RegDef = MO.getReg();
} else if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
// This is another use of a vreg. Don't try to sink it.
return 0;
}
}
return RegDef;
}
void FastISel::flushLocalValueMap() {
// Try to sink local values down to their first use so that we can give them a
// better debug location. This has the side effect of shrinking local value
// live ranges, which helps out fast regalloc.
if (SinkLocalValues && LastLocalValue != EmitStartPt) {
// Sink local value materialization instructions between EmitStartPt and
// LastLocalValue. Visit them bottom-up, starting from LastLocalValue, to
// avoid inserting into the range that we're iterating over.
MachineBasicBlock::reverse_iterator RE =
EmitStartPt ? MachineBasicBlock::reverse_iterator(EmitStartPt)
: FuncInfo.MBB->rend();
MachineBasicBlock::reverse_iterator RI(LastLocalValue);
InstOrderMap OrderMap;
for (; RI != RE;) {
MachineInstr &LocalMI = *RI;
++RI;
bool Store = true;
if (!LocalMI.isSafeToMove(nullptr, Store))
continue;
unsigned DefReg = findSinkableLocalRegDef(LocalMI);
if (DefReg == 0)
continue;
sinkLocalValueMaterialization(LocalMI, DefReg, OrderMap);
}
}
LocalValueMap.clear();
LastLocalValue = EmitStartPt;
recomputeInsertPt();
SavedInsertPt = FuncInfo.InsertPt;
LastFlushPoint = FuncInfo.InsertPt;
}
static bool isRegUsedByPhiNodes(unsigned DefReg,
FunctionLoweringInfo &FuncInfo) {
for (auto &P : FuncInfo.PHINodesToUpdate)
if (P.second == DefReg)
return true;
return false;
}
/// Build a map of instruction orders. Return the first terminator and its
/// order. Consider EH_LABEL instructions to be terminators as well, since local
/// values for phis after invokes must be materialized before the call.
void FastISel::InstOrderMap::initialize(
MachineBasicBlock *MBB, MachineBasicBlock::iterator LastFlushPoint) {
unsigned Order = 0;
for (MachineInstr &I : *MBB) {
if (!FirstTerminator &&
(I.isTerminator() || (I.isEHLabel() && &I != &MBB->front()))) {
FirstTerminator = &I;
FirstTerminatorOrder = Order;
}
Orders[&I] = Order++;
// We don't need to order instructions past the last flush point.
if (I.getIterator() == LastFlushPoint)
break;
}
}
void FastISel::sinkLocalValueMaterialization(MachineInstr &LocalMI,
unsigned DefReg,
InstOrderMap &OrderMap) {
// If this register is used by a register fixup, MRI will not contain all
// the uses until after register fixups, so don't attempt to sink or DCE
// this instruction. Register fixups typically come from no-op cast
// instructions, which replace the cast instruction vreg with the local
// value vreg.
if (FuncInfo.RegsWithFixups.count(DefReg))
return;
// We can DCE this instruction if there are no uses and it wasn't
// materialized for a successor PHI node.
bool UsedByPHI = isRegUsedByPhiNodes(DefReg, FuncInfo);
if (!UsedByPHI && MRI.use_nodbg_empty(DefReg)) {
if (EmitStartPt == &LocalMI)
EmitStartPt = EmitStartPt->getPrevNode();
LLVM_DEBUG(dbgs() << "removing dead local value materialization "
<< LocalMI);
OrderMap.Orders.erase(&LocalMI);
LocalMI.eraseFromParent();
return;
}
// Number the instructions if we haven't yet so we can efficiently find the
// earliest use.
if (OrderMap.Orders.empty())
OrderMap.initialize(FuncInfo.MBB, LastFlushPoint);
// Find the first user in the BB.
MachineInstr *FirstUser = nullptr;
unsigned FirstOrder = std::numeric_limits<unsigned>::max();
for (MachineInstr &UseInst : MRI.use_nodbg_instructions(DefReg)) {
auto I = OrderMap.Orders.find(&UseInst);
assert(I != OrderMap.Orders.end() &&
"local value used by instruction outside local region");
unsigned UseOrder = I->second;
if (UseOrder < FirstOrder) {
FirstOrder = UseOrder;
FirstUser = &UseInst;
}
}
// The insertion point will be the first terminator or the first user,
// whichever came first. If there was no terminator, this must be a
// fallthrough block and the insertion point is the end of the block.
MachineBasicBlock::instr_iterator SinkPos;
if (UsedByPHI && OrderMap.FirstTerminatorOrder < FirstOrder) {
FirstOrder = OrderMap.FirstTerminatorOrder;
SinkPos = OrderMap.FirstTerminator->getIterator();
} else if (FirstUser) {
SinkPos = FirstUser->getIterator();
} else {
assert(UsedByPHI && "must be users if not used by a phi");
SinkPos = FuncInfo.MBB->instr_end();
}
// Collect all DBG_VALUEs before the new insertion position so that we can
// sink them.
SmallVector<MachineInstr *, 1> DbgValues;
for (MachineInstr &DbgVal : MRI.use_instructions(DefReg)) {
if (!DbgVal.isDebugValue())
continue;
unsigned UseOrder = OrderMap.Orders[&DbgVal];
if (UseOrder < FirstOrder)
DbgValues.push_back(&DbgVal);
}
// Sink LocalMI before SinkPos and assign it the same DebugLoc.
LLVM_DEBUG(dbgs() << "sinking local value to first use " << LocalMI);
FuncInfo.MBB->remove(&LocalMI);
FuncInfo.MBB->insert(SinkPos, &LocalMI);
if (SinkPos != FuncInfo.MBB->end())
LocalMI.setDebugLoc(SinkPos->getDebugLoc());
// Sink any debug values that we've collected.
for (MachineInstr *DI : DbgValues) {
FuncInfo.MBB->remove(DI);
FuncInfo.MBB->insert(SinkPos, DI);
}
}
bool FastISel::hasTrivialKill(const Value *V) {
// Don't consider constants or arguments to have trivial kills.
const Instruction *I = dyn_cast<Instruction>(V);
if (!I)
return false;
// No-op casts are trivially coalesced by fast-isel.
if (const auto *Cast = dyn_cast<CastInst>(I))
if (Cast->isNoopCast(DL) && !hasTrivialKill(Cast->getOperand(0)))
return false;
// Even if the value has only one use in the LLVM IR, it is possible that
// FastISel might fold the use into another instruction and now there is more
// than one use at the Machine Instruction level.
unsigned Reg = lookUpRegForValue(V);
if (Reg && !MRI.use_empty(Reg))
return false;
// GEPs with all zero indices are trivially coalesced by fast-isel.
if (const auto *GEP = dyn_cast<GetElementPtrInst>(I))
if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
return false;
// Only instructions with a single use in the same basic block are considered
// to have trivial kills.
return I->hasOneUse() &&
!(I->getOpcode() == Instruction::BitCast ||
I->getOpcode() == Instruction::PtrToInt ||
I->getOpcode() == Instruction::IntToPtr) &&
cast<Instruction>(*I->user_begin())->getParent() == I->getParent();
}
unsigned FastISel::getRegForValue(const Value *V) {
EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
// Don't handle non-simple values in FastISel.
if (!RealVT.isSimple())
return 0;
// Ignore illegal types. We must do this before looking up the value
// in ValueMap because Arguments are given virtual registers regardless
// of whether FastISel can handle them.
MVT VT = RealVT.getSimpleVT();
if (!TLI.isTypeLegal(VT)) {
// Handle integer promotions, though, because they're common and easy.
if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
else
return 0;
}
// Look up the value to see if we already have a register for it.
unsigned Reg = lookUpRegForValue(V);
if (Reg)
return Reg;
// In bottom-up mode, just create the virtual register which will be used
// to hold the value. It will be materialized later.
if (isa<Instruction>(V) &&
(!isa<AllocaInst>(V) ||
!FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
return FuncInfo.InitializeRegForValue(V);
SavePoint SaveInsertPt = enterLocalValueArea();
// Materialize the value in a register. Emit any instructions in the
// local value area.
Reg = materializeRegForValue(V, VT);
leaveLocalValueArea(SaveInsertPt);
return Reg;
}
unsigned FastISel::materializeConstant(const Value *V, MVT VT) {
unsigned Reg = 0;
if (const auto *CI = dyn_cast<ConstantInt>(V)) {
if (CI->getValue().getActiveBits() <= 64)
Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
} else if (isa<AllocaInst>(V))
Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
else if (isa<ConstantPointerNull>(V))
// Translate this as an integer zero so that it can be
// local-CSE'd with actual integer zeros.
Reg = getRegForValue(
Constant::getNullValue(DL.getIntPtrType(V->getContext())));
else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
if (CF->isNullValue())
Reg = fastMaterializeFloatZero(CF);
else
// Try to emit the constant directly.
Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);
if (!Reg) {
// Try to emit the constant by using an integer constant with a cast.
const APFloat &Flt = CF->getValueAPF();
EVT IntVT = TLI.getPointerTy(DL);
uint32_t IntBitWidth = IntVT.getSizeInBits();
APSInt SIntVal(IntBitWidth, /*isUnsigned=*/false);
bool isExact;
(void)Flt.convertToInteger(SIntVal, APFloat::rmTowardZero, &isExact);
if (isExact) {
unsigned IntegerReg =
getRegForValue(ConstantInt::get(V->getContext(), SIntVal));
if (IntegerReg != 0)
Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg,
/*Kill=*/false);
}
}
} else if (const auto *Op = dyn_cast<Operator>(V)) {
if (!selectOperator(Op, Op->getOpcode()))
if (!isa<Instruction>(Op) ||
!fastSelectInstruction(cast<Instruction>(Op)))
return 0;
Reg = lookUpRegForValue(Op);
} else if (isa<UndefValue>(V)) {
Reg = createResultReg(TLI.getRegClassFor(VT));
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
}
return Reg;
}
/// Helper for getRegForValue. This function is called when the value isn't
/// already available in a register and must be materialized with new
/// instructions.
unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
unsigned Reg = 0;
// Give the target-specific code a try first.
if (isa<Constant>(V))
Reg = fastMaterializeConstant(cast<Constant>(V));
// If target-specific code couldn't or didn't want to handle the value, then
// give target-independent code a try.
if (!Reg)
Reg = materializeConstant(V, VT);
// Don't cache constant materializations in the general ValueMap.
// To do so would require tracking what uses they dominate.
if (Reg) {
LocalValueMap[V] = Reg;
LastLocalValue = MRI.getVRegDef(Reg);
}
return Reg;
}
unsigned FastISel::lookUpRegForValue(const Value *V) {
// Look up the value to see if we already have a register for it. We
// cache values defined by Instructions across blocks, and other values
// only locally. This is because Instructions already have the SSA
// def-dominates-use requirement enforced.
DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
if (I != FuncInfo.ValueMap.end())
return I->second;
return LocalValueMap[V];
}
void FastISel::updateValueMap(const Value *I, unsigned Reg, unsigned NumRegs) {
if (!isa<Instruction>(I)) {
LocalValueMap[I] = Reg;
return;
}
unsigned &AssignedReg = FuncInfo.ValueMap[I];
if (AssignedReg == 0)
// Use the new register.
AssignedReg = Reg;
else if (Reg != AssignedReg) {
// Arrange for uses of AssignedReg to be replaced by uses of Reg.
for (unsigned i = 0; i < NumRegs; i++) {
FuncInfo.RegFixups[AssignedReg + i] = Reg + i;
FuncInfo.RegsWithFixups.insert(Reg + i);
}
AssignedReg = Reg;
}
}
std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
unsigned IdxN = getRegForValue(Idx);
if (IdxN == 0)
// Unhandled operand. Halt "fast" selection and bail.
return std::pair<unsigned, bool>(0, false);
bool IdxNIsKill = hasTrivialKill(Idx);
// If the index is smaller or larger than intptr_t, truncate or extend it.
MVT PtrVT = TLI.getPointerTy(DL);
EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
if (IdxVT.bitsLT(PtrVT)) {
IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN,
IdxNIsKill);
IdxNIsKill = true;
} else if (IdxVT.bitsGT(PtrVT)) {
IdxN =
fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN, IdxNIsKill);
IdxNIsKill = true;
}
return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
}
void FastISel::recomputeInsertPt() {
if (getLastLocalValue()) {
FuncInfo.InsertPt = getLastLocalValue();
FuncInfo.MBB = FuncInfo.InsertPt->getParent();
++FuncInfo.InsertPt;
} else
FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();
// Now skip past any EH_LABELs, which must remain at the beginning.
while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
++FuncInfo.InsertPt;
}
void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
MachineBasicBlock::iterator E) {
assert(I.isValid() && E.isValid() && std::distance(I, E) > 0 &&
"Invalid iterator!");
while (I != E) {
if (LastFlushPoint == I)
LastFlushPoint = E;
if (SavedInsertPt == I)
SavedInsertPt = E;
if (EmitStartPt == I)
EmitStartPt = E.isValid() ? &*E : nullptr;
if (LastLocalValue == I)
LastLocalValue = E.isValid() ? &*E : nullptr;
MachineInstr *Dead = &*I;
++I;
Dead->eraseFromParent();
++NumFastIselDead;
}
recomputeInsertPt();
}
FastISel::SavePoint FastISel::enterLocalValueArea() {
MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
DebugLoc OldDL = DbgLoc;
recomputeInsertPt();
DbgLoc = DebugLoc();
SavePoint SP = {OldInsertPt, OldDL};
return SP;
}
void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
LastLocalValue = &*std::prev(FuncInfo.InsertPt);
// Restore the previous insert position.
FuncInfo.InsertPt = OldInsertPt.InsertPt;
DbgLoc = OldInsertPt.DL;
}
bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
if (VT == MVT::Other || !VT.isSimple())
// Unhandled type. Halt "fast" selection and bail.
return false;
// We only handle legal types. For example, on x86-32 the instruction
// selector contains all of the 64-bit instructions from x86-64,
// under the assumption that i64 won't be used if the target doesn't
// support it.
if (!TLI.isTypeLegal(VT)) {
// MVT::i1 is special. Allow AND, OR, or XOR because they
// don't require additional zeroing, which makes them easy.
if (VT == MVT::i1 && (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
ISDOpcode == ISD::XOR))
VT = TLI.getTypeToTransformTo(I->getContext(), VT);
else
return false;
}
// Check if the first operand is a constant, and handle it as "ri". At -O0,
// we don't have anything that canonicalizes operand order.
if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
unsigned Op1 = getRegForValue(I->getOperand(1));
if (!Op1)
return false;
bool Op1IsKill = hasTrivialKill(I->getOperand(1));
unsigned ResultReg =
fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, Op1IsKill,
CI->getZExtValue(), VT.getSimpleVT());
if (!ResultReg)
return false;
// We successfully emitted code for the given LLVM Instruction.
updateValueMap(I, ResultReg);
return true;
}
unsigned Op0 = getRegForValue(I->getOperand(0));
if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
return false;
bool Op0IsKill = hasTrivialKill(I->getOperand(0));
// Check if the second operand is a constant and handle it appropriately.
if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
uint64_t Imm = CI->getSExtValue();
// Transform "sdiv exact X, 8" -> "sra X, 3".
if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
Imm = Log2_64(Imm);
ISDOpcode = ISD::SRA;
}
// Transform "urem x, pow2" -> "and x, pow2-1".
if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
isPowerOf2_64(Imm)) {
--Imm;
ISDOpcode = ISD::AND;
}
unsigned ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
Op0IsKill, Imm, VT.getSimpleVT());
if (!ResultReg)
return false;
// We successfully emitted code for the given LLVM Instruction.
updateValueMap(I, ResultReg);
return true;
}
unsigned Op1 = getRegForValue(I->getOperand(1));
if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
return false;
bool Op1IsKill = hasTrivialKill(I->getOperand(1));
// Now we have both operands in registers. Emit the instruction.
unsigned ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
ISDOpcode, Op0, Op0IsKill, Op1, Op1IsKill);
if (!ResultReg)
// Target-specific code wasn't able to find a machine opcode for
// the given ISD opcode and type. Halt "fast" selection and bail.
return false;
// We successfully emitted code for the given LLVM Instruction.
updateValueMap(I, ResultReg);
return true;
}
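// Illustrative sketch for selectBinaryOp above (editorial note, not part
// of the upstream code): for IR such as
//   %r = add i32 7, %x        ; constant first, commutative
// the constant-first path emits a single "ri" instruction with the
// operands swapped, while
//   %r = urem i32 %x, 8
// is rewritten to "and %x, 7" before emission. The exact machine opcodes
// depend on the target's fastEmit_* hooks.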
bool FastISel::selectGetElementPtr(const User *I) {
unsigned N = getRegForValue(I->getOperand(0));
if (!N) // Unhandled operand. Halt "fast" selection and bail.
return false;
bool NIsKill = hasTrivialKill(I->getOperand(0));
// Keep a running tab of the total offset to coalesce multiple N = N + Offset
// into a single N = N + TotalOffset.
uint64_t TotalOffs = 0;
// FIXME: What's a good SWAG number for MaxOffs?
uint64_t MaxOffs = 2048;
MVT VT = TLI.getPointerTy(DL);
for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
GTI != E; ++GTI) {
const Value *Idx = GTI.getOperand();
if (StructType *StTy = GTI.getStructTypeOrNull()) {
uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
if (Field) {
// N = N + Offset
TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
if (TotalOffs >= MaxOffs) {
N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
if (!N) // Unhandled operand. Halt "fast" selection and bail.
return false;
NIsKill = true;
TotalOffs = 0;
}
}
} else {
Type *Ty = GTI.getIndexedType();
// If this is a constant subscript, handle it quickly.
if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
if (CI->isZero())
continue;
// N = N + Offset
uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
TotalOffs += DL.getTypeAllocSize(Ty) * IdxN;
if (TotalOffs >= MaxOffs) {
N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
if (!N) // Unhandled operand. Halt "fast" selection and bail.
return false;
NIsKill = true;
TotalOffs = 0;
}
continue;
}
if (TotalOffs) {
N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
if (!N) // Unhandled operand. Halt "fast" selection and bail.
return false;
NIsKill = true;
TotalOffs = 0;
}
// N = N + Idx * ElementSize;
uint64_t ElementSize = DL.getTypeAllocSize(Ty);
std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
unsigned IdxN = Pair.first;
bool IdxNIsKill = Pair.second;
if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
return false;
if (ElementSize != 1) {
IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
return false;
IdxNIsKill = true;
}
N = fastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
if (!N) // Unhandled operand. Halt "fast" selection and bail.
return false;
}
}
if (TotalOffs) {
N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
if (!N) // Unhandled operand. Halt "fast" selection and bail.
return false;
}
// We successfully emitted code for the given LLVM Instruction.
updateValueMap(I, N);
return true;
}
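// Worked example for selectGetElementPtr above (editorial sketch,
// assuming a typical 64-bit struct layout): for
//   %p2 = getelementptr {i32, i32, i64}, {i32, i32, i64}* %p, i32 0, i32 2
// the zero index is skipped, the struct path accumulates TotalOffs = 8,
// and since 8 < MaxOffs a single ADD of 8 is emitted at the end. Offsets
// are only flushed early once they reach MaxOffs (2048).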
bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
const CallInst *CI, unsigned StartIdx) {
for (unsigned i = StartIdx, e = CI->getNumArgOperands(); i != e; ++i) {
Value *Val = CI->getArgOperand(i);
// Check for constants and encode them with a StackMaps::ConstantOp prefix.
if (const auto *C = dyn_cast<ConstantInt>(Val)) {
Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
} else if (isa<ConstantPointerNull>(Val)) {
Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
Ops.push_back(MachineOperand::CreateImm(0));
} else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
// Values coming from a stack location also require a special encoding,
// but that is added later on by the target specific frame index
// elimination implementation.
auto SI = FuncInfo.StaticAllocaMap.find(AI);
if (SI != FuncInfo.StaticAllocaMap.end())
Ops.push_back(MachineOperand::CreateFI(SI->second));
else
return false;
} else {
unsigned Reg = getRegForValue(Val);
if (!Reg)
return false;
Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
}
}
return true;
}
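// Encoding sketch for addStackMapLiveVars above (editorial note): a
// constant live value such as i64 42 becomes the operand pair
// [StackMaps::ConstantOp, 42], a null pointer becomes
// [StackMaps::ConstantOp, 0], a static alloca becomes a frame-index
// operand, and anything else is passed as a virtual register use.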
bool FastISel::selectStackmap(const CallInst *I) {
// void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
// [live variables...])
assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
"Stackmap cannot return a value.");
// The stackmap intrinsic only records the live variables (the arguments
// passed to it) and emits NOPs (if requested). Unlike the patchpoint
// intrinsic, this won't be lowered to a function call. This means we don't
// have to worry about calling conventions and target-specific lowering code.
// Instead we perform the call lowering right here.
//
// CALLSEQ_START(0, 0...)
// STACKMAP(id, nbytes, ...)
// CALLSEQ_END(0, 0)
//
SmallVector<MachineOperand, 32> Ops;
// Add the <id> and <numBytes> constants.
assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
"Expected a constant integer.");
const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
"Expected a constant integer.");
const auto *NumBytes =
cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
// Push live variables for the stack map (skipping the first two arguments
// <id> and <numBytes>).
if (!addStackMapLiveVars(Ops, I, 2))
return false;
// We are not adding any register mask info here, because the stackmap doesn't
// clobber anything.
// Add scratch registers as implicit def and early clobber.
CallingConv::ID CC = I->getCallingConv();
const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
for (unsigned i = 0; ScratchRegs[i]; ++i)
Ops.push_back(MachineOperand::CreateReg(
ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
/*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));
// Issue CALLSEQ_START
unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
auto Builder =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown));
const MCInstrDesc &MCID = Builder.getInstr()->getDesc();
for (unsigned I = 0, E = MCID.getNumOperands(); I < E; ++I)
Builder.addImm(0);
// Issue STACKMAP.
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::STACKMAP));
for (auto const &MO : Ops)
MIB.add(MO);
// Issue CALLSEQ_END
unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
.addImm(0)
.addImm(0);
// Inform the Frame Information that we have a stackmap in this function.
FuncInfo.MF->getFrameInfo().setHasStackMap();
return true;
}
/// Lower an argument list according to the target calling convention.
///
/// This is a helper for lowering intrinsics that follow a target calling
/// convention or require stack pointer adjustment. Only a subset of the
/// intrinsic's operands need to participate in the calling convention.
bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
unsigned NumArgs, const Value *Callee,
bool ForceRetVoidTy, CallLoweringInfo &CLI) {
ArgListTy Args;
Args.reserve(NumArgs);
// Populate the argument list.
ImmutableCallSite CS(CI);
for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs; ArgI != ArgE; ++ArgI) {
Value *V = CI->getOperand(ArgI);
assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
ArgListEntry Entry;
Entry.Val = V;
Entry.Ty = V->getType();
Entry.setAttributes(&CS, ArgI);
Args.push_back(Entry);
}
Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
: CI->getType();
CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);
return lowerCallTo(CLI);
}
FastISel::CallLoweringInfo &FastISel::CallLoweringInfo::setCallee(
const DataLayout &DL, MCContext &Ctx, CallingConv::ID CC, Type *ResultTy,
StringRef Target, ArgListTy &&ArgsList, unsigned FixedArgs) {
SmallString<32> MangledName;
Mangler::getNameWithPrefix(MangledName, Target, DL);
MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
return setCallee(CC, ResultTy, Sym, std::move(ArgsList), FixedArgs);
}
bool FastISel::selectPatchpoint(const CallInst *I) {
// void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
// i32 <numBytes>,
// i8* <target>,
// i32 <numArgs>,
// [Args...],
// [live variables...])
CallingConv::ID CC = I->getCallingConv();
bool IsAnyRegCC = CC == CallingConv::AnyReg;
bool HasDef = !I->getType()->isVoidTy();
Value *Callee = I->getOperand(PatchPointOpers::TargetPos)->stripPointerCasts();
// Get the real number of arguments participating in the call <numArgs>
assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
"Expected a constant integer.");
const auto *NumArgsVal =
cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
unsigned NumArgs = NumArgsVal->getZExtValue();
// Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
// This includes all meta-operands up to but not including CC.
unsigned NumMetaOpers = PatchPointOpers::CCPos;
assert(I->getNumArgOperands() >= NumMetaOpers + NumArgs &&
"Not enough arguments provided to the patchpoint intrinsic");
// For AnyRegCC the arguments are lowered later on manually.
unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
CallLoweringInfo CLI;
CLI.setIsPatchPoint();
if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
return false;
assert(CLI.Call && "No call instruction specified.");
SmallVector<MachineOperand, 32> Ops;
// Add an explicit result reg if we use the anyreg calling convention.
if (IsAnyRegCC && HasDef) {
assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
CLI.ResultReg = createResultReg(TLI.getRegClassFor(MVT::i64));
CLI.NumResultRegs = 1;
Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*isDef=*/true));
}
// Add the <id> and <numBytes> constants.
assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
"Expected a constant integer.");
const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
"Expected a constant integer.");
const auto *NumBytes =
cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
// Add the call target.
if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
uint64_t CalleeConstAddr =
cast<ConstantInt>(C->getOperand(0))->getZExtValue();
Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
} else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
if (C->getOpcode() == Instruction::IntToPtr) {
uint64_t CalleeConstAddr =
cast<ConstantInt>(C->getOperand(0))->getZExtValue();
Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
} else
llvm_unreachable("Unsupported ConstantExpr.");
} else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
Ops.push_back(MachineOperand::CreateGA(GV, 0));
} else if (isa<ConstantPointerNull>(Callee))
Ops.push_back(MachineOperand::CreateImm(0));
else
llvm_unreachable("Unsupported callee address.");
// Adjust <numArgs> to account for any arguments that have been passed on
// the stack instead.
unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));
// Add the calling convention
Ops.push_back(MachineOperand::CreateImm((unsigned)CC));
// Add the arguments we omitted previously. The register allocator should
// place these in any free register.
if (IsAnyRegCC) {
for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
unsigned Reg = getRegForValue(I->getArgOperand(i));
if (!Reg)
return false;
Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
}
}
// Push the arguments from the call instruction.
for (auto Reg : CLI.OutRegs)
Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
// Push live variables for the stack map.
if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
return false;
// Push the register mask info.
Ops.push_back(MachineOperand::CreateRegMask(
TRI.getCallPreservedMask(*FuncInfo.MF, CC)));
// Add scratch registers as implicit def and early clobber.
const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
for (unsigned i = 0; ScratchRegs[i]; ++i)
Ops.push_back(MachineOperand::CreateReg(
ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
/*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));
// Add implicit defs (return values).
for (auto Reg : CLI.InRegs)
Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/true,
/*isImp=*/true));
// Insert the patchpoint instruction before the call generated by the target.
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, DbgLoc,
TII.get(TargetOpcode::PATCHPOINT));
for (auto &MO : Ops)
MIB.add(MO);
MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);
// Delete the original call instruction.
CLI.Call->eraseFromParent();
// Inform the Frame Information that we have a patchpoint in this function.
FuncInfo.MF->getFrameInfo().setHasPatchPoint();
if (CLI.NumResultRegs)
updateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
return true;
}
bool FastISel::selectXRayCustomEvent(const CallInst *I) {
const auto &Triple = TM.getTargetTriple();
if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
return true; // don't do anything to this instruction.
SmallVector<MachineOperand, 8> Ops;
Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
/*isDef=*/false));
Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
/*isDef=*/false));
MachineInstrBuilder MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::PATCHABLE_EVENT_CALL));
for (auto &MO : Ops)
MIB.add(MO);
// Insert the Patchable Event Call instruction, which gets lowered properly.
return true;
}
bool FastISel::selectXRayTypedEvent(const CallInst *I) {
const auto &Triple = TM.getTargetTriple();
if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
return true; // don't do anything to this instruction.
SmallVector<MachineOperand, 8> Ops;
Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
/*isDef=*/false));
Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
/*isDef=*/false));
Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(2)),
/*isDef=*/false));
MachineInstrBuilder MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::PATCHABLE_TYPED_EVENT_CALL));
for (auto &MO : Ops)
MIB.add(MO);
// Insert the Patchable Typed Event Call instruction, which gets lowered properly.
return true;
}
/// Returns an AttributeList representing the attributes applied to the return
/// value of the given call.
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
SmallVector<Attribute::AttrKind, 2> Attrs;
if (CLI.RetSExt)
Attrs.push_back(Attribute::SExt);
if (CLI.RetZExt)
Attrs.push_back(Attribute::ZExt);
if (CLI.IsInReg)
Attrs.push_back(Attribute::InReg);
return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
Attrs);
}
bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
unsigned NumArgs) {
MCContext &Ctx = MF->getContext();
SmallString<32> MangledName;
Mangler::getNameWithPrefix(MangledName, SymName, DL);
MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
return lowerCallTo(CI, Sym, NumArgs);
}
bool FastISel::lowerCallTo(const CallInst *CI, MCSymbol *Symbol,
unsigned NumArgs) {
ImmutableCallSite CS(CI);
FunctionType *FTy = CS.getFunctionType();
Type *RetTy = CS.getType();
ArgListTy Args;
Args.reserve(NumArgs);
// Populate the argument list.
// Attributes for args start at offset 1, after the return attribute.
for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
Value *V = CI->getOperand(ArgI);
assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
ArgListEntry Entry;
Entry.Val = V;
Entry.Ty = V->getType();
Entry.setAttributes(&CS, ArgI);
Args.push_back(Entry);
}
TLI.markLibCallAttributes(MF, CS.getCallingConv(), Args);
CallLoweringInfo CLI;
CLI.setCallee(RetTy, FTy, Symbol, std::move(Args), CS, NumArgs);
return lowerCallTo(CLI);
}
bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
// Handle the incoming return values from the call.
CLI.clearIns();
SmallVector<EVT, 4> RetTys;
ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);
SmallVector<ISD::OutputArg, 4> Outs;
GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);
bool CanLowerReturn = TLI.CanLowerReturn(
CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());
// FIXME: sret demotion isn't supported yet - bail out.
if (!CanLowerReturn)
return false;
for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
EVT VT = RetTys[I];
MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
for (unsigned i = 0; i != NumRegs; ++i) {
ISD::InputArg MyFlags;
MyFlags.VT = RegisterVT;
MyFlags.ArgVT = VT;
MyFlags.Used = CLI.IsReturnValueUsed;
if (CLI.RetSExt)
MyFlags.Flags.setSExt();
if (CLI.RetZExt)
MyFlags.Flags.setZExt();
if (CLI.IsInReg)
MyFlags.Flags.setInReg();
CLI.Ins.push_back(MyFlags);
}
}
// Handle all of the outgoing arguments.
CLI.clearOuts();
for (auto &Arg : CLI.getArgs()) {
Type *FinalType = Arg.Ty;
if (Arg.IsByVal)
FinalType = cast<PointerType>(Arg.Ty)->getElementType();
bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
FinalType, CLI.CallConv, CLI.IsVarArg);
ISD::ArgFlagsTy Flags;
if (Arg.IsZExt)
Flags.setZExt();
if (Arg.IsSExt)
Flags.setSExt();
if (Arg.IsInReg)
Flags.setInReg();
if (Arg.IsSRet)
Flags.setSRet();
if (Arg.IsSwiftSelf)
Flags.setSwiftSelf();
if (Arg.IsSwiftError)
Flags.setSwiftError();
if (Arg.IsByVal)
Flags.setByVal();
if (Arg.IsInAlloca) {
Flags.setInAlloca();
// Set the byval flag for CCAssignFn callbacks that don't know about
// inalloca. This way we can know how many bytes we should've allocated
// and how many bytes a callee cleanup function will pop. If we port
// inalloca to more targets, we'll have to add custom inalloca handling in
// the various CC lowering callbacks.
Flags.setByVal();
}
if (Arg.IsByVal || Arg.IsInAlloca) {
PointerType *Ty = cast<PointerType>(Arg.Ty);
Type *ElementTy = Ty->getElementType();
unsigned FrameSize =
DL.getTypeAllocSize(Arg.ByValType ? Arg.ByValType : ElementTy);
// For ByVal, alignment should come from the front end. The back end will
// guess if this info is not there, but there are cases it cannot get right.
unsigned FrameAlign = Arg.Alignment;
if (!FrameAlign)
FrameAlign = TLI.getByValTypeAlignment(ElementTy, DL);
Flags.setByValSize(FrameSize);
Flags.setByValAlign(FrameAlign);
}
if (Arg.IsNest)
Flags.setNest();
if (NeedsRegBlock)
Flags.setInConsecutiveRegs();
unsigned OriginalAlignment = DL.getABITypeAlignment(Arg.Ty);
Flags.setOrigAlign(OriginalAlignment);
CLI.OutVals.push_back(Arg.Val);
CLI.OutFlags.push_back(Flags);
}
if (!fastLowerCall(CLI))
return false;
// Set all unused physreg defs as dead.
assert(CLI.Call && "No call instruction specified.");
CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);
if (CLI.NumResultRegs && CLI.CS)
updateValueMap(CLI.CS->getInstruction(), CLI.ResultReg, CLI.NumResultRegs);
// Set labels for heapallocsite call.
- if (CLI.CS && CLI.CS->getInstruction()->getMetadata("heapallocsite")) {
- MDNode *MD = CLI.CS->getInstruction()->getMetadata("heapallocsite");
- MF->addCodeViewHeapAllocSite(CLI.Call, MD);
- }
+ if (CLI.CS)
+ if (MDNode *MD = CLI.CS->getInstruction()->getMetadata("heapallocsite"))
+ CLI.Call->setHeapAllocMarker(*MF, MD);
return true;
}
bool FastISel::lowerCall(const CallInst *CI) {
ImmutableCallSite CS(CI);
FunctionType *FuncTy = CS.getFunctionType();
Type *RetTy = CS.getType();
ArgListTy Args;
ArgListEntry Entry;
Args.reserve(CS.arg_size());
for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
i != e; ++i) {
Value *V = *i;
// Skip empty types
if (V->getType()->isEmptyTy())
continue;
Entry.Val = V;
Entry.Ty = V->getType();
// Skip the first return-type Attribute to get to params.
Entry.setAttributes(&CS, i - CS.arg_begin());
Args.push_back(Entry);
}
// Check if target-independent constraints permit a tail call here.
// Target-dependent constraints are checked within fastLowerCall.
bool IsTailCall = CI->isTailCall();
if (IsTailCall && !isInTailCallPosition(CS, TM))
IsTailCall = false;
CallLoweringInfo CLI;
CLI.setCallee(RetTy, FuncTy, CI->getCalledValue(), std::move(Args), CS)
.setTailCall(IsTailCall);
return lowerCallTo(CLI);
}
bool FastISel::selectCall(const User *I) {
const CallInst *Call = cast<CallInst>(I);
// Handle simple inline asms.
if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) {
// If the inline asm has side effects, then make sure that no local value
// lives across by flushing the local value map.
if (IA->hasSideEffects())
flushLocalValueMap();
// Don't attempt to handle constraints.
if (!IA->getConstraintString().empty())
return false;
unsigned ExtraInfo = 0;
if (IA->hasSideEffects())
ExtraInfo |= InlineAsm::Extra_HasSideEffects;
if (IA->isAlignStack())
ExtraInfo |= InlineAsm::Extra_IsAlignStack;
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::INLINEASM))
.addExternalSymbol(IA->getAsmString().c_str())
.addImm(ExtraInfo);
return true;
}
// Handle intrinsic function calls.
if (const auto *II = dyn_cast<IntrinsicInst>(Call))
return selectIntrinsicCall(II);
// Usually, it does not make sense to initialize a value,
// make an unrelated function call, and then use the value, because
// the value tends to be spilled on the stack. So we move the pointer
// to the last local value to the beginning of the block, so that
// all the values which have already been materialized appear after
// the call. It also makes sense to skip intrinsics, since they tend
// to be inlined.
flushLocalValueMap();
return lowerCall(Call);
}
bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
switch (II->getIntrinsicID()) {
default:
break;
// At -O0 we don't care about the lifetime intrinsics.
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
// The donothing intrinsic does, well, nothing.
case Intrinsic::donothing:
// Neither does the sideeffect intrinsic.
case Intrinsic::sideeffect:
// Neither does the assume intrinsic; it's also OK not to codegen its operand.
case Intrinsic::assume:
return true;
case Intrinsic::dbg_declare: {
const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
assert(DI->getVariable() && "Missing variable");
if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
return true;
}
const Value *Address = DI->getAddress();
if (!Address || isa<UndefValue>(Address)) {
LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
return true;
}
// Byval arguments with frame indices were already handled after argument
// lowering and before isel.
const auto *Arg =
dyn_cast<Argument>(Address->stripInBoundsConstantOffsets());
if (Arg && FuncInfo.getArgumentFrameIndex(Arg) != INT_MAX)
return true;
Optional<MachineOperand> Op;
if (unsigned Reg = lookUpRegForValue(Address))
Op = MachineOperand::CreateReg(Reg, false);
// If we have a VLA that has a "use" in a metadata node that's then used
// here but it has no other uses, then we have a problem. E.g.,
//
// int foo (const int *x) {
// char a[*x];
// return 0;
// }
//
// If we assign 'a' a vreg and fast isel later on has to use the selection
// DAG isel, it will want to copy the value to the vreg. However, there are
// no uses, which goes counter to what selection DAG isel expects.
if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
(!isa<AllocaInst>(Address) ||
!FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address),
false);
if (Op) {
assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
"Expected inlined-at fields to agree");
// A dbg.declare describes the address of a source variable, so lower it
// into an indirect DBG_VALUE.
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::DBG_VALUE), /*IsIndirect*/ true,
*Op, DI->getVariable(), DI->getExpression());
} else {
// We can't yet handle anything else here because it would require
// generating code, thus altering codegen because of debug info.
LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
}
return true;
}
case Intrinsic::dbg_value: {
// This form of DBG_VALUE is target-independent.
const DbgValueInst *DI = cast<DbgValueInst>(II);
const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
const Value *V = DI->getValue();
assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
"Expected inlined-at fields to agree");
if (!V) {
// Currently the optimizer can produce this; insert an undef to
// help debugging. Probably the optimizer should not do this.
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, false, 0U,
DI->getVariable(), DI->getExpression());
} else if (const auto *CI = dyn_cast<ConstantInt>(V)) {
if (CI->getBitWidth() > 64)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addCImm(CI)
.addImm(0U)
.addMetadata(DI->getVariable())
.addMetadata(DI->getExpression());
else
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addImm(CI->getZExtValue())
.addImm(0U)
.addMetadata(DI->getVariable())
.addMetadata(DI->getExpression());
} else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addFPImm(CF)
.addImm(0U)
.addMetadata(DI->getVariable())
.addMetadata(DI->getExpression());
} else if (unsigned Reg = lookUpRegForValue(V)) {
// FIXME: This does not handle register-indirect values at offset 0.
bool IsIndirect = false;
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect, Reg,
DI->getVariable(), DI->getExpression());
} else {
// We can't yet handle anything else here because it would require
// generating code, thus altering codegen because of debug info.
LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
}
return true;
}
case Intrinsic::dbg_label: {
const DbgLabelInst *DI = cast<DbgLabelInst>(II);
assert(DI->getLabel() && "Missing label");
if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
return true;
}
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::DBG_LABEL)).addMetadata(DI->getLabel());
return true;
}
case Intrinsic::objectsize: {
ConstantInt *CI = cast<ConstantInt>(II->getArgOperand(1));
unsigned long long Res = CI->isZero() ? -1ULL : 0;
Constant *ResCI = ConstantInt::get(II->getType(), Res);
unsigned ResultReg = getRegForValue(ResCI);
if (!ResultReg)
return false;
updateValueMap(II, ResultReg);
return true;
}
case Intrinsic::is_constant: {
Constant *ResCI = ConstantInt::get(II->getType(), 0);
unsigned ResultReg = getRegForValue(ResCI);
if (!ResultReg)
return false;
updateValueMap(II, ResultReg);
return true;
}
case Intrinsic::launder_invariant_group:
case Intrinsic::strip_invariant_group:
case Intrinsic::expect: {
unsigned ResultReg = getRegForValue(II->getArgOperand(0));
if (!ResultReg)
return false;
updateValueMap(II, ResultReg);
return true;
}
case Intrinsic::experimental_stackmap:
return selectStackmap(II);
case Intrinsic::experimental_patchpoint_void:
case Intrinsic::experimental_patchpoint_i64:
return selectPatchpoint(II);
case Intrinsic::xray_customevent:
return selectXRayCustomEvent(II);
case Intrinsic::xray_typedevent:
return selectXRayTypedEvent(II);
}
return fastLowerIntrinsicCall(II);
}
bool FastISel::selectCast(const User *I, unsigned Opcode) {
EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
EVT DstVT = TLI.getValueType(DL, I->getType());
if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
!DstVT.isSimple())
// Unhandled type. Halt "fast" selection and bail.
return false;
// Check if the destination type is legal.
if (!TLI.isTypeLegal(DstVT))
return false;
// Check if the source operand is legal.
if (!TLI.isTypeLegal(SrcVT))
return false;
unsigned InputReg = getRegForValue(I->getOperand(0));
if (!InputReg)
// Unhandled operand. Halt "fast" selection and bail.
return false;
bool InputRegIsKill = hasTrivialKill(I->getOperand(0));
unsigned ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
Opcode, InputReg, InputRegIsKill);
if (!ResultReg)
return false;
updateValueMap(I, ResultReg);
return true;
}
bool FastISel::selectBitCast(const User *I) {
// If the bitcast doesn't change the type, just use the operand value.
if (I->getType() == I->getOperand(0)->getType()) {
unsigned Reg = getRegForValue(I->getOperand(0));
if (!Reg)
return false;
updateValueMap(I, Reg);
return true;
}
// Bitcasts of other values become reg-reg copies or BITCAST operators.
EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
EVT DstEVT = TLI.getValueType(DL, I->getType());
if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
!TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
// Unhandled type. Halt "fast" selection and bail.
return false;
MVT SrcVT = SrcEVT.getSimpleVT();
MVT DstVT = DstEVT.getSimpleVT();
unsigned Op0 = getRegForValue(I->getOperand(0));
if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
return false;
bool Op0IsKill = hasTrivialKill(I->getOperand(0));
// First, try to perform the bitcast by inserting a reg-reg copy.
unsigned ResultReg = 0;
if (SrcVT == DstVT) {
const TargetRegisterClass *SrcClass = TLI.getRegClassFor(SrcVT);
const TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT);
// Don't attempt a cross-class copy. It will likely fail.
if (SrcClass == DstClass) {
ResultReg = createResultReg(DstClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(Op0);
}
}
// If the reg-reg copy failed, select a BITCAST opcode.
if (!ResultReg)
ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill);
if (!ResultReg)
return false;
updateValueMap(I, ResultReg);
return true;
}
// Remove local value instructions starting from the instruction after
// SavedLastLocalValue to the current function insert point.
void FastISel::removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue) {
MachineInstr *CurLastLocalValue = getLastLocalValue();
if (CurLastLocalValue != SavedLastLocalValue) {
// Find the first local value instruction to be deleted.
// This is the instruction after SavedLastLocalValue if it is non-NULL.
// Otherwise it's the first instruction in the block.
MachineBasicBlock::iterator FirstDeadInst(SavedLastLocalValue);
if (SavedLastLocalValue)
++FirstDeadInst;
else
FirstDeadInst = FuncInfo.MBB->getFirstNonPHI();
setLastLocalValue(SavedLastLocalValue);
removeDeadCode(FirstDeadInst, FuncInfo.InsertPt);
}
}
bool FastISel::selectInstruction(const Instruction *I) {
MachineInstr *SavedLastLocalValue = getLastLocalValue();
// Just before the terminator instruction, insert instructions to
// feed PHI nodes in successor blocks.
if (I->isTerminator()) {
if (!handlePHINodesInSuccessorBlocks(I->getParent())) {
// PHI node handling may have generated local value instructions,
// even though it failed to handle all PHI nodes.
// We remove these instructions because SelectionDAGISel will generate
// them again.
removeDeadLocalValueCode(SavedLastLocalValue);
return false;
}
}
// FastISel does not handle any operand bundles except OB_funclet.
if (ImmutableCallSite CS = ImmutableCallSite(I))
for (unsigned i = 0, e = CS.getNumOperandBundles(); i != e; ++i)
if (CS.getOperandBundleAt(i).getTagID() != LLVMContext::OB_funclet)
return false;
DbgLoc = I->getDebugLoc();
SavedInsertPt = FuncInfo.InsertPt;
if (const auto *Call = dyn_cast<CallInst>(I)) {
const Function *F = Call->getCalledFunction();
LibFunc Func;
// As a special case, don't handle calls to builtin library functions that
// may be translated directly to target instructions.
if (F && !F->hasLocalLinkage() && F->hasName() &&
LibInfo->getLibFunc(F->getName(), Func) &&
LibInfo->hasOptimizedCodeGen(Func))
return false;
// Don't handle Intrinsic::trap if a trap function is specified.
if (F && F->getIntrinsicID() == Intrinsic::trap &&
Call->hasFnAttr("trap-func-name"))
return false;
}
// First, try doing target-independent selection.
if (!SkipTargetIndependentISel) {
if (selectOperator(I, I->getOpcode())) {
++NumFastIselSuccessIndependent;
DbgLoc = DebugLoc();
return true;
}
// Remove dead code.
recomputeInsertPt();
if (SavedInsertPt != FuncInfo.InsertPt)
removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
SavedInsertPt = FuncInfo.InsertPt;
}
// Next, try calling the target to attempt to handle the instruction.
if (fastSelectInstruction(I)) {
++NumFastIselSuccessTarget;
DbgLoc = DebugLoc();
return true;
}
// Remove dead code.
recomputeInsertPt();
if (SavedInsertPt != FuncInfo.InsertPt)
removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
DbgLoc = DebugLoc();
// Undo phi node updates, because they will be added again by SelectionDAG.
if (I->isTerminator()) {
// PHI node handling may have generated local value instructions.
// We remove them because SelectionDAGISel will generate them again.
removeDeadLocalValueCode(SavedLastLocalValue);
FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
}
return false;
}
/// Emit an unconditional branch to the given block, unless it is the immediate
/// (fall-through) successor, and update the CFG.
void FastISel::fastEmitBranch(MachineBasicBlock *MSucc,
const DebugLoc &DbgLoc) {
if (FuncInfo.MBB->getBasicBlock()->size() > 1 &&
FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
// For more accurate line information, if the branch is the only
// instruction in the block then emit it; otherwise we have the
// unconditional fall-through case, which needs no instructions.
} else {
// The unconditional branch case.
TII.insertBranch(*FuncInfo.MBB, MSucc, nullptr,
SmallVector<MachineOperand, 0>(), DbgLoc);
}
if (FuncInfo.BPI) {
auto BranchProbability = FuncInfo.BPI->getEdgeProbability(
FuncInfo.MBB->getBasicBlock(), MSucc->getBasicBlock());
FuncInfo.MBB->addSuccessor(MSucc, BranchProbability);
} else
FuncInfo.MBB->addSuccessorWithoutProb(MSucc);
}
void FastISel::finishCondBranch(const BasicBlock *BranchBB,
MachineBasicBlock *TrueMBB,
MachineBasicBlock *FalseMBB) {
// Add TrueMBB as a successor unless it is equal to FalseMBB: this can
// happen in degenerate IR, and MachineIR forbids having a block twice in
// the successor/predecessor lists.
if (TrueMBB != FalseMBB) {
if (FuncInfo.BPI) {
auto BranchProbability =
FuncInfo.BPI->getEdgeProbability(BranchBB, TrueMBB->getBasicBlock());
FuncInfo.MBB->addSuccessor(TrueMBB, BranchProbability);
} else
FuncInfo.MBB->addSuccessorWithoutProb(TrueMBB);
}
fastEmitBranch(FalseMBB, DbgLoc);
}
/// Emit an FNeg operation.
bool FastISel::selectFNeg(const User *I, const Value *In) {
unsigned OpReg = getRegForValue(In);
if (!OpReg)
return false;
bool OpRegIsKill = hasTrivialKill(In);
// If the target has ISD::FNEG, use it.
EVT VT = TLI.getValueType(DL, I->getType());
unsigned ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
OpReg, OpRegIsKill);
if (ResultReg) {
updateValueMap(I, ResultReg);
return true;
}
// Bitcast the value to integer, twiddle the sign bit with xor,
// and then bitcast it back to floating-point.
if (VT.getSizeInBits() > 64)
return false;
EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
if (!TLI.isTypeLegal(IntVT))
return false;
unsigned IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
ISD::BITCAST, OpReg, OpRegIsKill);
if (!IntReg)
return false;
unsigned IntResultReg = fastEmit_ri_(
IntVT.getSimpleVT(), ISD::XOR, IntReg, /*IsKill=*/true,
UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
if (!IntResultReg)
return false;
ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
IntResultReg, /*IsKill=*/true);
if (!ResultReg)
return false;
updateValueMap(I, ResultReg);
return true;
}
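// Example of the integer fallback in selectFNeg above (illustrative): an
// f32 input is bitcast to i32, XORed with the sign mask computed as
// UINT64_C(1) << 31 (0x80000000), and bitcast back to f32; for f64 the
// mask is 1 << 63. Types wider than 64 bits bail out before this point.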
bool FastISel::selectExtractValue(const User *U) {
const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
if (!EVI)
return false;
// Make sure we only try to handle extracts with a legal result. But also
// allow i1 because it's easy.
EVT RealVT = TLI.getValueType(DL, EVI->getType(), /*AllowUnknown=*/true);
if (!RealVT.isSimple())
return false;
MVT VT = RealVT.getSimpleVT();
if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
return false;
const Value *Op0 = EVI->getOperand(0);
Type *AggTy = Op0->getType();
// Get the base result register.
unsigned ResultReg;
DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(Op0);
if (I != FuncInfo.ValueMap.end())
ResultReg = I->second;
else if (isa<Instruction>(Op0))
ResultReg = FuncInfo.InitializeRegForValue(Op0);
else
return false; // fast-isel can't handle aggregate constants at the moment
// Get the actual result register, which is an offset from the base register.
unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());
SmallVector<EVT, 4> AggValueVTs;
ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);
for (unsigned i = 0; i < VTIndex; i++)
ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);
updateValueMap(EVI, ResultReg);
return true;
}
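// Register-indexing example for selectExtractValue above (editorial): for
//   %v = extractvalue {i32, i64} %agg, 1
// ComputeLinearIndex yields VTIndex = 1, so ResultReg is advanced past
// however many registers the target uses for the leading i32 (typically
// one), landing on the first register that holds the i64 member.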
bool FastISel::selectOperator(const User *I, unsigned Opcode) {
switch (Opcode) {
case Instruction::Add:
return selectBinaryOp(I, ISD::ADD);
case Instruction::FAdd:
return selectBinaryOp(I, ISD::FADD);
case Instruction::Sub:
return selectBinaryOp(I, ISD::SUB);
case Instruction::FSub: {
// FNeg is currently represented in LLVM IR as a special case of FSub.
Value *X;
if (match(I, m_FNeg(m_Value(X))))
return selectFNeg(I, X);
return selectBinaryOp(I, ISD::FSUB);
}
case Instruction::Mul:
return selectBinaryOp(I, ISD::MUL);
case Instruction::FMul:
return selectBinaryOp(I, ISD::FMUL);
case Instruction::SDiv:
return selectBinaryOp(I, ISD::SDIV);
case Instruction::UDiv:
return selectBinaryOp(I, ISD::UDIV);
case Instruction::FDiv:
return selectBinaryOp(I, ISD::FDIV);
case Instruction::SRem:
return selectBinaryOp(I, ISD::SREM);
case Instruction::URem:
return selectBinaryOp(I, ISD::UREM);
case Instruction::FRem:
return selectBinaryOp(I, ISD::FREM);
case Instruction::Shl:
return selectBinaryOp(I, ISD::SHL);
case Instruction::LShr:
return selectBinaryOp(I, ISD::SRL);
case Instruction::AShr:
return selectBinaryOp(I, ISD::SRA);
case Instruction::And:
return selectBinaryOp(I, ISD::AND);
case Instruction::Or:
return selectBinaryOp(I, ISD::OR);
case Instruction::Xor:
return selectBinaryOp(I, ISD::XOR);
case Instruction::FNeg:
return selectFNeg(I, I->getOperand(0));
case Instruction::GetElementPtr:
return selectGetElementPtr(I);
case Instruction::Br: {
const BranchInst *BI = cast<BranchInst>(I);
if (BI->isUnconditional()) {
const BasicBlock *LLVMSucc = BI->getSuccessor(0);
MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
fastEmitBranch(MSucc, BI->getDebugLoc());
return true;
}
// Conditional branches are not handled yet.
// Halt "fast" selection and bail.
return false;
}
case Instruction::Unreachable:
if (TM.Options.TrapUnreachable)
return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
else
return true;
case Instruction::Alloca:
// FunctionLowering has the static-sized case covered.
if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
return true;
// Dynamic-sized alloca is not handled yet.
return false;
case Instruction::Call:
// On AIX, call lowering uses the DAG-ISEL path currently so that the
// callee of the direct function call instruction will be mapped to the
// symbol for the function's entry point, which is distinct from the
// function descriptor symbol. The latter is the symbol whose XCOFF symbol
// name is the C-linkage name of the source level function.
if (TM.getTargetTriple().isOSAIX())
return false;
return selectCall(I);
case Instruction::BitCast:
return selectBitCast(I);
case Instruction::FPToSI:
return selectCast(I, ISD::FP_TO_SINT);
case Instruction::ZExt:
return selectCast(I, ISD::ZERO_EXTEND);
case Instruction::SExt:
return selectCast(I, ISD::SIGN_EXTEND);
case Instruction::Trunc:
return selectCast(I, ISD::TRUNCATE);
case Instruction::SIToFP:
return selectCast(I, ISD::SINT_TO_FP);
case Instruction::IntToPtr: // Deliberate fall-through.
case Instruction::PtrToInt: {
EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
EVT DstVT = TLI.getValueType(DL, I->getType());
if (DstVT.bitsGT(SrcVT))
return selectCast(I, ISD::ZERO_EXTEND);
if (DstVT.bitsLT(SrcVT))
return selectCast(I, ISD::TRUNCATE);
unsigned Reg = getRegForValue(I->getOperand(0));
if (!Reg)
return false;
updateValueMap(I, Reg);
return true;
}
case Instruction::ExtractValue:
return selectExtractValue(I);
case Instruction::PHI:
llvm_unreachable("FastISel shouldn't visit PHI nodes!");
default:
// Unhandled instruction. Halt "fast" selection and bail.
return false;
}
}
FastISel::FastISel(FunctionLoweringInfo &FuncInfo,
const TargetLibraryInfo *LibInfo,
bool SkipTargetIndependentISel)
: FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
MFI(FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()),
TII(*MF->getSubtarget().getInstrInfo()),
TLI(*MF->getSubtarget().getTargetLowering()),
TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
SkipTargetIndependentISel(SkipTargetIndependentISel) {}
FastISel::~FastISel() = default;
bool FastISel::fastLowerArguments() { return false; }
bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }
bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
return false;
}
unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }
unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/,
bool /*Op0IsKill*/) {
return 0;
}
unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
bool /*Op0IsKill*/, unsigned /*Op1*/,
bool /*Op1IsKill*/) {
return 0;
}
unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
return 0;
}
unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
const ConstantFP * /*FPImm*/) {
return 0;
}
unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
bool /*Op0IsKill*/, uint64_t /*Imm*/) {
return 0;
}
/// This method is a wrapper around fastEmit_ri. It first tries to emit an
/// instruction with an immediate operand using fastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// fastEmit_rr instead.
unsigned FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
bool Op0IsKill, uint64_t Imm, MVT ImmType) {
// If this is a multiply by a power of two, emit this as a shift left.
if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
Opcode = ISD::SHL;
Imm = Log2_64(Imm);
} else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
// udiv x, 8 -> srl x, 3
Opcode = ISD::SRL;
Imm = Log2_64(Imm);
}
// Horrible hack (to be removed): check to make sure shift amounts are
// in range.
if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
Imm >= VT.getSizeInBits())
return 0;
// First check if immediate type is legal. If not, we can't use the ri form.
unsigned ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
if (ResultReg)
return ResultReg;
unsigned MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
bool IsImmKill = true;
if (!MaterialReg) {
// This is a bit ugly/slow, but failing here means falling out of
// fast-isel, which would be very slow.
IntegerType *ITy =
IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits());
MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
if (!MaterialReg)
return 0;
// FIXME: If the materialized register here has no uses yet then this
// will be the first use and we should be able to mark it as killed.
// However, the local value area for materializing constant expressions
// grows down, not up, which means that any constant expressions we generate
// later which also use 'Imm' could be after this instruction and therefore
// after this kill.
IsImmKill = false;
}
return fastEmit_rr(VT, VT, Opcode, Op0, Op0IsKill, MaterialReg, IsImmKill);
}
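// Illustrative behavior of fastEmit_ri_ above (editorial note): a call
// with Opcode == ISD::MUL and Imm == 16 is first rewritten to a left
// shift by 4; if the target rejects the "ri" form, the immediate is
// materialized into a register via fastEmit_i (falling back to
// getRegForValue on a ConstantInt) and the "rr" form is tried instead.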
unsigned FastISel::createResultReg(const TargetRegisterClass *RC) {
return MRI.createVirtualRegister(RC);
}
unsigned FastISel::constrainOperandRegClass(const MCInstrDesc &II, unsigned Op,
unsigned OpNum) {
if (TargetRegisterInfo::isVirtualRegister(Op)) {
const TargetRegisterClass *RegClass =
TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
if (!MRI.constrainRegClass(Op, RegClass)) {
// If it's not legal to COPY between the register classes, something
// has gone very wrong before we got here.
unsigned NewOp = createResultReg(RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
return NewOp;
}
}
return Op;
}
unsigned FastISel::fastEmitInst_(unsigned MachineInstOpcode,
const TargetRegisterClass *RC) {
unsigned ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg);
return ResultReg;
}
unsigned FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill) {
const MCInstrDesc &II = TII.get(MachineInstOpcode);
unsigned ResultReg = createResultReg(RC);
Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, getKillRegState(Op0IsKill));
else {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, getKillRegState(Op0IsKill));
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
unsigned FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, unsigned Op1,
bool Op1IsKill) {
const MCInstrDesc &II = TII.get(MachineInstOpcode);
unsigned ResultReg = createResultReg(RC);
Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, getKillRegState(Op0IsKill))
.addReg(Op1, getKillRegState(Op1IsKill));
else {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, getKillRegState(Op0IsKill))
.addReg(Op1, getKillRegState(Op1IsKill));
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
unsigned FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, unsigned Op1,
bool Op1IsKill, unsigned Op2,
bool Op2IsKill) {
const MCInstrDesc &II = TII.get(MachineInstOpcode);
unsigned ResultReg = createResultReg(RC);
Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);
if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, getKillRegState(Op0IsKill))
.addReg(Op1, getKillRegState(Op1IsKill))
.addReg(Op2, getKillRegState(Op2IsKill));
else {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, getKillRegState(Op0IsKill))
.addReg(Op1, getKillRegState(Op1IsKill))
.addReg(Op2, getKillRegState(Op2IsKill));
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
unsigned FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, uint64_t Imm) {
const MCInstrDesc &II = TII.get(MachineInstOpcode);
unsigned ResultReg = createResultReg(RC);
Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, getKillRegState(Op0IsKill))
.addImm(Imm);
else {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, getKillRegState(Op0IsKill))
.addImm(Imm);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
unsigned FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, uint64_t Imm1,
uint64_t Imm2) {
const MCInstrDesc &II = TII.get(MachineInstOpcode);
unsigned ResultReg = createResultReg(RC);
Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, getKillRegState(Op0IsKill))
.addImm(Imm1)
.addImm(Imm2);
else {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, getKillRegState(Op0IsKill))
.addImm(Imm1)
.addImm(Imm2);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
unsigned FastISel::fastEmitInst_f(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
const ConstantFP *FPImm) {
const MCInstrDesc &II = TII.get(MachineInstOpcode);
unsigned ResultReg = createResultReg(RC);
if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addFPImm(FPImm);
else {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addFPImm(FPImm);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
unsigned FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, unsigned Op1,
bool Op1IsKill, uint64_t Imm) {
const MCInstrDesc &II = TII.get(MachineInstOpcode);
unsigned ResultReg = createResultReg(RC);
Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, getKillRegState(Op0IsKill))
.addReg(Op1, getKillRegState(Op1IsKill))
.addImm(Imm);
else {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, getKillRegState(Op0IsKill))
.addReg(Op1, getKillRegState(Op1IsKill))
.addImm(Imm);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
unsigned FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, uint64_t Imm) {
unsigned ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addImm(Imm);
else {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
unsigned FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
bool Op0IsKill, uint32_t Idx) {
unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
"Cannot yet extract from physregs");
const TargetRegisterClass *RC = MRI.getRegClass(Op0);
MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
ResultReg).addReg(Op0, getKillRegState(Op0IsKill), Idx);
return ResultReg;
}
/// Emit MachineInstrs to compute the value of Op with all but the least
/// significant bit set to zero.
unsigned FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
return fastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
}
/// Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input. We cannot just directly add them, because expansion
/// might result in multiple MBB's for one BB. As such, the start of the
/// BB might correspond to a different MBB than the end.
bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
const Instruction *TI = LLVMBB->getTerminator();
SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();
// Check successor nodes' PHI nodes that expect a constant to be available
// from this block.
for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
const BasicBlock *SuccBB = TI->getSuccessor(succ);
if (!isa<PHINode>(SuccBB->begin()))
continue;
MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
// If this terminator has multiple identical successors (common for
// switches), only handle each succ once.
if (!SuccsHandled.insert(SuccMBB).second)
continue;
MachineBasicBlock::iterator MBBI = SuccMBB->begin();
// At this point we know that there is a 1-1 correspondence between LLVM PHI
// nodes and Machine PHI nodes, but the incoming operands have not been
// emitted yet.
for (const PHINode &PN : SuccBB->phis()) {
// Ignore dead PHIs.
if (PN.use_empty())
continue;
// Only handle legal types. Two interesting things to note here. First,
// by bailing out early, we may leave behind some dead instructions,
// since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
// own moves. Second, this check is necessary because FastISel doesn't
// use CreateRegs to create registers, so it always creates
// exactly one register for each non-void instruction.
EVT VT = TLI.getValueType(DL, PN.getType(), /*AllowUnknown=*/true);
if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
// Handle integer promotions, though, because they're common and easy.
if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
return false;
}
}
const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
// Set the DebugLoc for the copy. Prefer the location of the operand
// if there is one; use the location of the PHI otherwise.
DbgLoc = PN.getDebugLoc();
if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
DbgLoc = Inst->getDebugLoc();
unsigned Reg = getRegForValue(PHIOp);
if (!Reg) {
FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
return false;
}
FuncInfo.PHINodesToUpdate.push_back(std::make_pair(&*MBBI++, Reg));
DbgLoc = DebugLoc();
}
}
return true;
}
bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
assert(LI->hasOneUse() &&
"tryToFoldLoad expected a LoadInst with a single use");
// We know that the load has a single use, but don't know what it is. If it
// isn't one of the folded instructions, then we can't succeed here. Handle
// this by scanning the single-use users of the load until we get to FoldInst.
unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.
const Instruction *TheUser = LI->user_back();
while (TheUser != FoldInst && // Scan up until we find FoldInst.
// Stay in the right block.
TheUser->getParent() == FoldInst->getParent() &&
--MaxUsers) { // Don't scan too far.
// If there are multiple or no uses of this instruction, then bail out.
if (!TheUser->hasOneUse())
return false;
TheUser = TheUser->user_back();
}
// If we didn't find the fold instruction, then we failed to collapse the
// sequence.
if (TheUser != FoldInst)
return false;
// Don't try to fold volatile loads. The target has to deal with alignment
// constraints.
if (LI->isVolatile())
return false;
// Figure out which vreg this is going into. If there is no assigned vreg yet
// then there actually was no reference to it. Perhaps the load is referenced
// by a dead instruction.
unsigned LoadReg = getRegForValue(LI);
if (!LoadReg)
return false;
// We can't fold if this vreg has no uses or more than one use. Multiple uses
// may mean that the instruction got lowered to multiple MIs, or the use of
// the loaded value ended up being multiple operands of the result.
if (!MRI.hasOneUse(LoadReg))
return false;
MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
MachineInstr *User = RI->getParent();
// Set the insertion point properly. Folding the load can cause generation of
// other random instructions (like sign extends) for addressing modes; make
// sure they get inserted in a logical place before the new instruction.
FuncInfo.InsertPt = User;
FuncInfo.MBB = User->getParent();
// Ask the target to try folding the load.
return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
}
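// Illustrative pattern (not from the source; names are made up): the checks
// below let FastISel treat
//   %sum = add i64 %idx, 16
//   %gep = getelementptr i8, i8* %base, i64 %sum
// as one addressing computation, provided the add's second operand is a
// constant and both values are in the same block.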
bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
// Must be an add.
if (!isa<AddOperator>(Add))
return false;
// Type size needs to match.
if (DL.getTypeSizeInBits(GEP->getType()) !=
DL.getTypeSizeInBits(Add->getType()))
return false;
// Must be in the same basic block.
if (isa<Instruction>(Add) &&
FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
return false;
// Must have a constant operand.
return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
}
MachineMemOperand *
FastISel::createMachineMemOperandFor(const Instruction *I) const {
const Value *Ptr;
Type *ValTy;
unsigned Alignment;
MachineMemOperand::Flags Flags;
bool IsVolatile;
if (const auto *LI = dyn_cast<LoadInst>(I)) {
Alignment = LI->getAlignment();
IsVolatile = LI->isVolatile();
Flags = MachineMemOperand::MOLoad;
Ptr = LI->getPointerOperand();
ValTy = LI->getType();
} else if (const auto *SI = dyn_cast<StoreInst>(I)) {
Alignment = SI->getAlignment();
IsVolatile = SI->isVolatile();
Flags = MachineMemOperand::MOStore;
Ptr = SI->getPointerOperand();
ValTy = SI->getValueOperand()->getType();
} else
return nullptr;
bool IsNonTemporal = I->getMetadata(LLVMContext::MD_nontemporal) != nullptr;
bool IsInvariant = I->getMetadata(LLVMContext::MD_invariant_load) != nullptr;
bool IsDereferenceable =
I->getMetadata(LLVMContext::MD_dereferenceable) != nullptr;
const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);
AAMDNodes AAInfo;
I->getAAMetadata(AAInfo);
if (Alignment == 0) // Ensure that codegen never sees alignment 0.
Alignment = DL.getABITypeAlignment(ValTy);
unsigned Size = DL.getTypeStoreSize(ValTy);
if (IsVolatile)
Flags |= MachineMemOperand::MOVolatile;
if (IsNonTemporal)
Flags |= MachineMemOperand::MONonTemporal;
if (IsDereferenceable)
Flags |= MachineMemOperand::MODereferenceable;
if (IsInvariant)
Flags |= MachineMemOperand::MOInvariant;
return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size,
Alignment, AAInfo, Ranges);
}
CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const {
// If both operands are the same, then try to optimize or fold the cmp.
CmpInst::Predicate Predicate = CI->getPredicate();
if (CI->getOperand(0) != CI->getOperand(1))
return Predicate;
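// With identical operands, every ordered predicate collapses to an "ordered"
// check (true iff neither operand is NaN) or to constant false, and every
// unordered predicate to an "unordered" check or constant true. For example,
// fcmp oeq %x, %x is equivalent to fcmp ord %x, %x.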
switch (Predicate) {
default: llvm_unreachable("Invalid predicate!");
case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
case CmpInst::FCMP_OEQ: Predicate = CmpInst::FCMP_ORD; break;
case CmpInst::FCMP_OGT: Predicate = CmpInst::FCMP_FALSE; break;
case CmpInst::FCMP_OGE: Predicate = CmpInst::FCMP_ORD; break;
case CmpInst::FCMP_OLT: Predicate = CmpInst::FCMP_FALSE; break;
case CmpInst::FCMP_OLE: Predicate = CmpInst::FCMP_ORD; break;
case CmpInst::FCMP_ONE: Predicate = CmpInst::FCMP_FALSE; break;
case CmpInst::FCMP_ORD: Predicate = CmpInst::FCMP_ORD; break;
case CmpInst::FCMP_UNO: Predicate = CmpInst::FCMP_UNO; break;
case CmpInst::FCMP_UEQ: Predicate = CmpInst::FCMP_TRUE; break;
case CmpInst::FCMP_UGT: Predicate = CmpInst::FCMP_UNO; break;
case CmpInst::FCMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
case CmpInst::FCMP_ULT: Predicate = CmpInst::FCMP_UNO; break;
case CmpInst::FCMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
case CmpInst::FCMP_UNE: Predicate = CmpInst::FCMP_UNO; break;
case CmpInst::FCMP_TRUE: Predicate = CmpInst::FCMP_TRUE; break;
case CmpInst::ICMP_EQ: Predicate = CmpInst::FCMP_TRUE; break;
case CmpInst::ICMP_NE: Predicate = CmpInst::FCMP_FALSE; break;
case CmpInst::ICMP_UGT: Predicate = CmpInst::FCMP_FALSE; break;
case CmpInst::ICMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
case CmpInst::ICMP_ULT: Predicate = CmpInst::FCMP_FALSE; break;
case CmpInst::ICMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
case CmpInst::ICMP_SGT: Predicate = CmpInst::FCMP_FALSE; break;
case CmpInst::ICMP_SGE: Predicate = CmpInst::FCMP_TRUE; break;
case CmpInst::ICMP_SLT: Predicate = CmpInst::FCMP_FALSE; break;
case CmpInst::ICMP_SLE: Predicate = CmpInst::FCMP_TRUE; break;
}
return Predicate;
}
Index: stable/12/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
===================================================================
--- stable/12/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp (revision 356465)
@@ -1,1033 +1,1033 @@
//===--- ScheduleDAGSDNodes.cpp - Implement the ScheduleDAGSDNodes class --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAG class, which is a base class used by
// scheduling implementation classes.
//
//===----------------------------------------------------------------------===//
#include "ScheduleDAGSDNodes.h"
#include "InstrEmitter.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
#define DEBUG_TYPE "pre-RA-sched"
STATISTIC(LoadsClustered, "Number of loads clustered together");
// This allows the latency-based scheduler to notice high latency instructions
// without a target itinerary. The choice of number here has more to do with
// balancing scheduler heuristics than with the actual machine latency.
static cl::opt<int> HighLatencyCycles(
"sched-high-latency-cycles", cl::Hidden, cl::init(10),
cl::desc("Roughly estimate the number of cycles that 'long latency'"
"instructions take for targets with no itinerary"));
ScheduleDAGSDNodes::ScheduleDAGSDNodes(MachineFunction &mf)
: ScheduleDAG(mf), BB(nullptr), DAG(nullptr),
InstrItins(mf.getSubtarget().getInstrItineraryData()) {}
/// Run - perform scheduling.
///
void ScheduleDAGSDNodes::Run(SelectionDAG *dag, MachineBasicBlock *bb) {
BB = bb;
DAG = dag;
// Clear the scheduler's SUnit DAG.
ScheduleDAG::clearDAG();
Sequence.clear();
// Invoke the target's selection of scheduler.
Schedule();
}
/// newSUnit - Creates a new SUnit and returns a pointer to it.
///
SUnit *ScheduleDAGSDNodes::newSUnit(SDNode *N) {
#ifndef NDEBUG
const SUnit *Addr = nullptr;
if (!SUnits.empty())
Addr = &SUnits[0];
#endif
SUnits.emplace_back(N, (unsigned)SUnits.size());
assert((Addr == nullptr || Addr == &SUnits[0]) &&
"SUnits std::vector reallocated on the fly!");
SUnits.back().OrigNode = &SUnits.back();
SUnit *SU = &SUnits.back();
const TargetLowering &TLI = DAG->getTargetLoweringInfo();
if (!N ||
(N->isMachineOpcode() &&
N->getMachineOpcode() == TargetOpcode::IMPLICIT_DEF))
SU->SchedulingPref = Sched::None;
else
SU->SchedulingPref = TLI.getSchedulingPreference(N);
return SU;
}
SUnit *ScheduleDAGSDNodes::Clone(SUnit *Old) {
SUnit *SU = newSUnit(Old->getNode());
SU->OrigNode = Old->OrigNode;
SU->Latency = Old->Latency;
SU->isVRegCycle = Old->isVRegCycle;
SU->isCall = Old->isCall;
SU->isCallOp = Old->isCallOp;
SU->isTwoAddress = Old->isTwoAddress;
SU->isCommutable = Old->isCommutable;
SU->hasPhysRegDefs = Old->hasPhysRegDefs;
SU->hasPhysRegClobbers = Old->hasPhysRegClobbers;
SU->isScheduleHigh = Old->isScheduleHigh;
SU->isScheduleLow = Old->isScheduleLow;
SU->SchedulingPref = Old->SchedulingPref;
Old->isCloned = true;
return SU;
}
/// CheckForPhysRegDependency - Check if the dependency between def and use of
/// a specified operand is a physical register dependency. If so, returns the
/// register and the cost of copying the register.
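// Illustrative case (not from the source): when the def is
//   t1: i32 = CopyFromReg $eax
// and the use is
//   CopyToReg $eax, t1
// the value flows through the physical register $eax, so PhysReg is set and
// Cost is the copy cost of its minimal register class.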
static void CheckForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op,
const TargetRegisterInfo *TRI,
const TargetInstrInfo *TII,
unsigned &PhysReg, int &Cost) {
if (Op != 2 || User->getOpcode() != ISD::CopyToReg)
return;
unsigned Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg))
return;
unsigned ResNo = User->getOperand(2).getResNo();
if (Def->getOpcode() == ISD::CopyFromReg &&
cast<RegisterSDNode>(Def->getOperand(1))->getReg() == Reg) {
PhysReg = Reg;
} else if (Def->isMachineOpcode()) {
const MCInstrDesc &II = TII->get(Def->getMachineOpcode());
if (ResNo >= II.getNumDefs() &&
II.ImplicitDefs[ResNo - II.getNumDefs()] == Reg)
PhysReg = Reg;
}
if (PhysReg != 0) {
const TargetRegisterClass *RC =
TRI->getMinimalPhysRegClass(Reg, Def->getSimpleValueType(ResNo));
Cost = RC->getCopyCost();
}
}
// Helper for AddGlue to clone node operands.
static void CloneNodeWithValues(SDNode *N, SelectionDAG *DAG, ArrayRef<EVT> VTs,
SDValue ExtraOper = SDValue()) {
SmallVector<SDValue, 8> Ops(N->op_begin(), N->op_end());
if (ExtraOper.getNode())
Ops.push_back(ExtraOper);
SDVTList VTList = DAG->getVTList(VTs);
MachineSDNode *MN = dyn_cast<MachineSDNode>(N);
// Store memory references.
SmallVector<MachineMemOperand *, 2> MMOs;
if (MN)
MMOs.assign(MN->memoperands_begin(), MN->memoperands_end());
DAG->MorphNodeTo(N, N->getOpcode(), VTList, Ops);
// Reset the memory references
if (MN)
DAG->setNodeMemRefs(MN, MMOs);
}
static bool AddGlue(SDNode *N, SDValue Glue, bool AddGlue, SelectionDAG *DAG) {
SDNode *GlueDestNode = Glue.getNode();
// Don't add glue from a node to itself.
if (GlueDestNode == N) return false;
// Don't add a glue operand to something that already uses glue.
if (GlueDestNode &&
N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Glue) {
return false;
}
// Don't add glue to something that already has a glue value.
if (N->getValueType(N->getNumValues() - 1) == MVT::Glue) return false;
SmallVector<EVT, 4> VTs(N->value_begin(), N->value_end());
if (AddGlue)
VTs.push_back(MVT::Glue);
CloneNodeWithValues(N, DAG, VTs, Glue);
return true;
}
// Clean up after an unsuccessful AddGlue. Use the standard method of morphing the
// node even though simply shrinking the value list is sufficient.
static void RemoveUnusedGlue(SDNode *N, SelectionDAG *DAG) {
assert((N->getValueType(N->getNumValues() - 1) == MVT::Glue &&
!N->hasAnyUseOfValue(N->getNumValues() - 1)) &&
"expected an unused glue value");
CloneNodeWithValues(N, DAG,
makeArrayRef(N->value_begin(), N->getNumValues() - 1));
}
/// ClusterNeighboringLoads - Force nearby loads together by "gluing" them.
/// This function finds loads of the same base and different offsets. If the
/// offsets are not far apart (target specific), it adds MVT::Glue inputs and
/// outputs to ensure they are scheduled together and in order. This
/// optimization may benefit some targets by improving cache locality.
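/// Illustrative effect (not from the source): two loads from [%base] and
/// [%base + 8] that share a chain get glued together, so they are emitted
/// adjacently and in increasing-address order.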
void ScheduleDAGSDNodes::ClusterNeighboringLoads(SDNode *Node) {
SDNode *Chain = nullptr;
unsigned NumOps = Node->getNumOperands();
if (Node->getOperand(NumOps-1).getValueType() == MVT::Other)
Chain = Node->getOperand(NumOps-1).getNode();
if (!Chain)
return;
// Skip any load instruction that has a tied input. There may be an additional
// dependency requiring a different order than by increasing offsets, and the
// added glue may introduce a cycle.
auto hasTiedInput = [this](const SDNode *N) {
const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
for (unsigned I = 0; I != MCID.getNumOperands(); ++I) {
if (MCID.getOperandConstraint(I, MCOI::TIED_TO) != -1)
return true;
}
return false;
};
// Look for other loads of the same chain. Find loads that are loading from
// the same base pointer and different offsets.
SmallPtrSet<SDNode*, 16> Visited;
SmallVector<int64_t, 4> Offsets;
DenseMap<long long, SDNode*> O2SMap; // Map from offset to SDNode.
bool Cluster = false;
SDNode *Base = Node;
if (hasTiedInput(Base))
return;
// This algorithm requires a reasonably low use count before finding a match
// to avoid uselessly blowing up compile time in large blocks.
unsigned UseCount = 0;
for (SDNode::use_iterator I = Chain->use_begin(), E = Chain->use_end();
I != E && UseCount < 100; ++I, ++UseCount) {
SDNode *User = *I;
if (User == Node || !Visited.insert(User).second)
continue;
int64_t Offset1, Offset2;
if (!TII->areLoadsFromSameBasePtr(Base, User, Offset1, Offset2) ||
Offset1 == Offset2 ||
hasTiedInput(User)) {
// FIXME: Should be ok if their addresses are identical. But earlier
// optimizations really should have eliminated one of the loads.
continue;
}
if (O2SMap.insert(std::make_pair(Offset1, Base)).second)
Offsets.push_back(Offset1);
O2SMap.insert(std::make_pair(Offset2, User));
Offsets.push_back(Offset2);
if (Offset2 < Offset1)
Base = User;
Cluster = true;
// Reset UseCount to allow more matches.
UseCount = 0;
}
if (!Cluster)
return;
// Sort them in increasing order.
llvm::sort(Offsets);
// Check if the loads are close enough.
SmallVector<SDNode*, 4> Loads;
unsigned NumLoads = 0;
int64_t BaseOff = Offsets[0];
SDNode *BaseLoad = O2SMap[BaseOff];
Loads.push_back(BaseLoad);
for (unsigned i = 1, e = Offsets.size(); i != e; ++i) {
int64_t Offset = Offsets[i];
SDNode *Load = O2SMap[Offset];
if (!TII->shouldScheduleLoadsNear(BaseLoad, Load, BaseOff, Offset,NumLoads))
break; // Stop right here. Ignore loads that are further away.
Loads.push_back(Load);
++NumLoads;
}
if (NumLoads == 0)
return;
// Cluster loads by adding MVT::Glue outputs and inputs. This also
// ensures they are scheduled in order of increasing addresses.
SDNode *Lead = Loads[0];
SDValue InGlue = SDValue(nullptr, 0);
if (AddGlue(Lead, InGlue, true, DAG))
InGlue = SDValue(Lead, Lead->getNumValues() - 1);
for (unsigned I = 1, E = Loads.size(); I != E; ++I) {
bool OutGlue = I < E - 1;
SDNode *Load = Loads[I];
// If AddGlue fails, we could leave an unused glue value. This should not
// cause any problems; the unused glue is removed below via RemoveUnusedGlue.
if (AddGlue(Load, InGlue, OutGlue, DAG)) {
if (OutGlue)
InGlue = SDValue(Load, Load->getNumValues() - 1);
++LoadsClustered;
}
else if (!OutGlue && InGlue.getNode())
RemoveUnusedGlue(InGlue.getNode(), DAG);
}
}
/// ClusterNodes - Cluster certain nodes which should be scheduled together.
///
void ScheduleDAGSDNodes::ClusterNodes() {
for (SDNode &NI : DAG->allnodes()) {
SDNode *Node = &NI;
if (!Node || !Node->isMachineOpcode())
continue;
unsigned Opc = Node->getMachineOpcode();
const MCInstrDesc &MCID = TII->get(Opc);
if (MCID.mayLoad())
// Cluster loads from "near" addresses into combined SUnits.
ClusterNeighboringLoads(Node);
}
}
void ScheduleDAGSDNodes::BuildSchedUnits() {
// During scheduling, the NodeId field of SDNode is used to map SDNodes
// to their associated SUnits by holding SUnits table indices. A value
// of -1 means the SDNode does not yet have an associated SUnit.
unsigned NumNodes = 0;
for (SDNode &NI : DAG->allnodes()) {
NI.setNodeId(-1);
++NumNodes;
}
// Reserve entries in the vector for each of the SUnits we are creating. This
// ensures that reallocation of the vector won't happen, so SUnit*'s won't get
// invalidated.
// FIXME: Multiply by 2 because we may clone nodes during scheduling.
// This is a temporary workaround.
SUnits.reserve(NumNodes * 2);
// Add all nodes in depth first order.
SmallVector<SDNode*, 64> Worklist;
SmallPtrSet<SDNode*, 32> Visited;
Worklist.push_back(DAG->getRoot().getNode());
Visited.insert(DAG->getRoot().getNode());
SmallVector<SUnit*, 8> CallSUnits;
while (!Worklist.empty()) {
SDNode *NI = Worklist.pop_back_val();
// Add all operands to the worklist unless they've already been added.
for (const SDValue &Op : NI->op_values())
if (Visited.insert(Op.getNode()).second)
Worklist.push_back(Op.getNode());
if (isPassiveNode(NI)) // Leaf node, e.g. a TargetImmediate.
continue;
// If this node has already been processed, stop now.
if (NI->getNodeId() != -1) continue;
SUnit *NodeSUnit = newSUnit(NI);
// See if anything is glued to this node; if so, add them to glued
// nodes. Nodes can have at most one glue input and one glue output. Glue
// is required to be the last operand and result of a node.
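// Illustrative shape (not from the source): in a glued sequence such as
//   CopyToReg ..., glue -> CALL ..., glue -> CopyFromReg ...
// all three nodes collapse into one SUnit; the loop below follows glue
// operands upward, and the next loop follows glue results downward.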
// Scan up to find glued preds.
SDNode *N = NI;
while (N->getNumOperands() &&
N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Glue) {
N = N->getOperand(N->getNumOperands()-1).getNode();
assert(N->getNodeId() == -1 && "Node already inserted!");
N->setNodeId(NodeSUnit->NodeNum);
if (N->isMachineOpcode() && TII->get(N->getMachineOpcode()).isCall())
NodeSUnit->isCall = true;
}
// Scan down to find any glued succs.
N = NI;
while (N->getValueType(N->getNumValues()-1) == MVT::Glue) {
SDValue GlueVal(N, N->getNumValues()-1);
// There are either zero or one users of the Glue result.
bool HasGlueUse = false;
for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
UI != E; ++UI)
if (GlueVal.isOperandOf(*UI)) {
HasGlueUse = true;
assert(N->getNodeId() == -1 && "Node already inserted!");
N->setNodeId(NodeSUnit->NodeNum);
N = *UI;
if (N->isMachineOpcode() && TII->get(N->getMachineOpcode()).isCall())
NodeSUnit->isCall = true;
break;
}
if (!HasGlueUse) break;
}
if (NodeSUnit->isCall)
CallSUnits.push_back(NodeSUnit);
// Schedule zero-latency TokenFactor below any nodes that may increase the
// schedule height. Otherwise, ancestors of the TokenFactor may appear to
// have false stalls.
if (NI->getOpcode() == ISD::TokenFactor)
NodeSUnit->isScheduleLow = true;
// If there are glue operands involved, N is now the bottom-most node
// of the sequence of nodes that are glued together.
// Update the SUnit.
NodeSUnit->setNode(N);
assert(N->getNodeId() == -1 && "Node already inserted!");
N->setNodeId(NodeSUnit->NodeNum);
// Compute NumRegDefsLeft. This must be done before AddSchedEdges.
InitNumRegDefsLeft(NodeSUnit);
// Assign the Latency field of NodeSUnit using target-provided information.
computeLatency(NodeSUnit);
}
// Find all call operands.
while (!CallSUnits.empty()) {
SUnit *SU = CallSUnits.pop_back_val();
for (const SDNode *SUNode = SU->getNode(); SUNode;
SUNode = SUNode->getGluedNode()) {
if (SUNode->getOpcode() != ISD::CopyToReg)
continue;
SDNode *SrcN = SUNode->getOperand(2).getNode();
if (isPassiveNode(SrcN)) continue; // Not scheduled.
SUnit *SrcSU = &SUnits[SrcN->getNodeId()];
SrcSU->isCallOp = true;
}
}
}
void ScheduleDAGSDNodes::AddSchedEdges() {
const TargetSubtargetInfo &ST = MF.getSubtarget();
// Check to see if the scheduler cares about latencies.
bool UnitLatencies = forceUnitLatencies();
// Pass 2: add the preds, succs, etc.
for (unsigned su = 0, e = SUnits.size(); su != e; ++su) {
SUnit *SU = &SUnits[su];
SDNode *MainNode = SU->getNode();
if (MainNode->isMachineOpcode()) {
unsigned Opc = MainNode->getMachineOpcode();
const MCInstrDesc &MCID = TII->get(Opc);
for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
SU->isTwoAddress = true;
break;
}
}
if (MCID.isCommutable())
SU->isCommutable = true;
}
// Find all predecessors and successors of the group.
for (SDNode *N = SU->getNode(); N; N = N->getGluedNode()) {
if (N->isMachineOpcode() &&
TII->get(N->getMachineOpcode()).getImplicitDefs()) {
SU->hasPhysRegClobbers = true;
unsigned NumUsed = InstrEmitter::CountResults(N);
while (NumUsed != 0 && !N->hasAnyUseOfValue(NumUsed - 1))
--NumUsed; // Skip over unused values at the end.
if (NumUsed > TII->get(N->getMachineOpcode()).getNumDefs())
SU->hasPhysRegDefs = true;
}
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
SDNode *OpN = N->getOperand(i).getNode();
if (isPassiveNode(OpN)) continue; // Not scheduled.
SUnit *OpSU = &SUnits[OpN->getNodeId()];
assert(OpSU && "Node has no SUnit!");
if (OpSU == SU) continue; // In the same group.
EVT OpVT = N->getOperand(i).getValueType();
assert(OpVT != MVT::Glue && "Glued nodes should be in same sunit!");
bool isChain = OpVT == MVT::Other;
unsigned PhysReg = 0;
int Cost = 1;
// Determine if this is a physical register dependency.
CheckForPhysRegDependency(OpN, N, i, TRI, TII, PhysReg, Cost);
assert((PhysReg == 0 || !isChain) &&
"Chain dependence via physreg data?");
// FIXME: See ScheduleDAGSDNodes::EmitCopyFromReg. For now, scheduler
// emits a copy from the physical register to a virtual register unless
// it requires a cross class copy (cost < 0). That means we are only
// treating "expensive to copy" register dependency as physical register
// dependency. This may change in the future though.
if (Cost >= 0 && !StressSched)
PhysReg = 0;
// If this is a ctrl dep, latency is 1.
unsigned OpLatency = isChain ? 1 : OpSU->Latency;
// Special-case TokenFactor chains as zero-latency.
if (isChain && OpN->getOpcode() == ISD::TokenFactor)
OpLatency = 0;
SDep Dep = isChain ? SDep(OpSU, SDep::Barrier)
: SDep(OpSU, SDep::Data, PhysReg);
Dep.setLatency(OpLatency);
if (!isChain && !UnitLatencies) {
computeOperandLatency(OpN, N, i, Dep);
ST.adjustSchedDependency(OpSU, SU, Dep);
}
if (!SU->addPred(Dep) && !Dep.isCtrl() && OpSU->NumRegDefsLeft > 1) {
// Multiple register uses are combined in the same SUnit. For example,
// we could have a set of glued nodes with all their defs consumed by
// another set of glued nodes. Register pressure tracking sees this as
// a single use, so to keep pressure balanced we reduce the defs.
//
// We can't tell (without more book-keeping) if this results from
// glued nodes or duplicate operands. As long as we don't reduce
// NumRegDefsLeft to zero, we handle the common cases well.
--OpSU->NumRegDefsLeft;
}
}
}
}
}
/// BuildSchedGraph - Build the SUnit graph from the selection dag that we
/// are given as input. This SUnit graph is similar to the SelectionDAG, but
/// excludes nodes that aren't interesting to scheduling, and represents
/// glued together nodes with a single SUnit.
void ScheduleDAGSDNodes::BuildSchedGraph(AliasAnalysis *AA) {
// Cluster certain nodes which should be scheduled together.
ClusterNodes();
// Populate the SUnits array.
BuildSchedUnits();
// Compute all the scheduling dependencies between nodes.
AddSchedEdges();
}
// Initialize NumNodeDefs for the current Node's opcode.
void ScheduleDAGSDNodes::RegDefIter::InitNodeNumDefs() {
// Check for phys reg copy.
if (!Node)
return;
if (!Node->isMachineOpcode()) {
if (Node->getOpcode() == ISD::CopyFromReg)
NodeNumDefs = 1;
else
NodeNumDefs = 0;
return;
}
unsigned POpc = Node->getMachineOpcode();
if (POpc == TargetOpcode::IMPLICIT_DEF) {
// No register need be allocated for this.
NodeNumDefs = 0;
return;
}
if (POpc == TargetOpcode::PATCHPOINT &&
Node->getValueType(0) == MVT::Other) {
// PATCHPOINT is defined to have one result, but it might really have none
// if we're not using CallingConv::AnyReg. Don't mistake the chain for a
// real definition.
NodeNumDefs = 0;
return;
}
unsigned NRegDefs = SchedDAG->TII->get(Node->getMachineOpcode()).getNumDefs();
// Some instructions define regs that are not represented in the selection DAG
// (e.g. unused flags). See tMOVi8. Make sure we don't access past NumValues.
NodeNumDefs = std::min(Node->getNumValues(), NRegDefs);
DefIdx = 0;
}
// Construct a RegDefIter for this SUnit and find the first valid value.
ScheduleDAGSDNodes::RegDefIter::RegDefIter(const SUnit *SU,
const ScheduleDAGSDNodes *SD)
: SchedDAG(SD), Node(SU->getNode()), DefIdx(0), NodeNumDefs(0) {
InitNodeNumDefs();
Advance();
}
// Advance to the next valid value defined by the SUnit.
void ScheduleDAGSDNodes::RegDefIter::Advance() {
for (; Node;) { // Visit all glued nodes.
for (;DefIdx < NodeNumDefs; ++DefIdx) {
if (!Node->hasAnyUseOfValue(DefIdx))
continue;
ValueType = Node->getSimpleValueType(DefIdx);
++DefIdx;
return; // Found a normal regdef.
}
Node = Node->getGluedNode();
if (!Node) {
return; // No values left to visit.
}
InitNodeNumDefs();
}
}
void ScheduleDAGSDNodes::InitNumRegDefsLeft(SUnit *SU) {
assert(SU->NumRegDefsLeft == 0 && "expect a new node");
for (RegDefIter I(SU, this); I.IsValid(); I.Advance()) {
assert(SU->NumRegDefsLeft < USHRT_MAX && "overflow is ok but unexpected");
++SU->NumRegDefsLeft;
}
}
void ScheduleDAGSDNodes::computeLatency(SUnit *SU) {
SDNode *N = SU->getNode();
// TokenFactor operands are considered zero latency, and some schedulers
// (e.g. Top-Down list) may rely on the fact that operand latency is nonzero
// whenever node latency is nonzero.
if (N && N->getOpcode() == ISD::TokenFactor) {
SU->Latency = 0;
return;
}
// Check to see if the scheduler cares about latencies.
if (forceUnitLatencies()) {
SU->Latency = 1;
return;
}
if (!InstrItins || InstrItins->isEmpty()) {
if (N && N->isMachineOpcode() &&
TII->isHighLatencyDef(N->getMachineOpcode()))
SU->Latency = HighLatencyCycles;
else
SU->Latency = 1;
return;
}
// Compute the latency for the node. We use the sum of the latencies for
// all nodes glued together into this SUnit.
SU->Latency = 0;
for (SDNode *N = SU->getNode(); N; N = N->getGluedNode())
if (N->isMachineOpcode())
SU->Latency += TII->getInstrLatency(InstrItins, N);
}
void ScheduleDAGSDNodes::computeOperandLatency(SDNode *Def, SDNode *Use,
unsigned OpIdx, SDep& dep) const{
// Check to see if the scheduler cares about latencies.
if (forceUnitLatencies())
return;
if (dep.getKind() != SDep::Data)
return;
unsigned DefIdx = Use->getOperand(OpIdx).getResNo();
if (Use->isMachineOpcode())
// Adjust the use operand index by num of defs.
OpIdx += TII->get(Use->getMachineOpcode()).getNumDefs();
int Latency = TII->getOperandLatency(InstrItins, Def, DefIdx, Use, OpIdx);
if (Latency > 1 && Use->getOpcode() == ISD::CopyToReg &&
!BB->succ_empty()) {
unsigned Reg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg))
// This copy is a liveout value. It is likely coalesced, so reduce the
// latency so as not to penalize the def.
// FIXME: need target specific adjustment here?
Latency = (Latency > 1) ? Latency - 1 : 1;
}
if (Latency >= 0)
dep.setLatency(Latency);
}
void ScheduleDAGSDNodes::dumpNode(const SUnit &SU) const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
dumpNodeName(SU);
dbgs() << ": ";
if (!SU.getNode()) {
dbgs() << "PHYS REG COPY\n";
return;
}
SU.getNode()->dump(DAG);
dbgs() << "\n";
SmallVector<SDNode *, 4> GluedNodes;
for (SDNode *N = SU.getNode()->getGluedNode(); N; N = N->getGluedNode())
GluedNodes.push_back(N);
while (!GluedNodes.empty()) {
dbgs() << " ";
GluedNodes.back()->dump(DAG);
dbgs() << "\n";
GluedNodes.pop_back();
}
#endif
}
void ScheduleDAGSDNodes::dump() const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
if (EntrySU.getNode() != nullptr)
dumpNodeAll(EntrySU);
for (const SUnit &SU : SUnits)
dumpNodeAll(SU);
if (ExitSU.getNode() != nullptr)
dumpNodeAll(ExitSU);
#endif
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ScheduleDAGSDNodes::dumpSchedule() const {
for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
if (SUnit *SU = Sequence[i])
dumpNode(*SU);
else
dbgs() << "**** NOOP ****\n";
}
}
#endif
#ifndef NDEBUG
/// VerifyScheduledSequence - Verify that all SUnits were scheduled and that
/// their state is consistent with the nodes listed in Sequence.
///
void ScheduleDAGSDNodes::VerifyScheduledSequence(bool isBottomUp) {
unsigned ScheduledNodes = ScheduleDAG::VerifyScheduledDAG(isBottomUp);
unsigned Noops = 0;
for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
if (!Sequence[i])
++Noops;
assert(Sequence.size() - Noops == ScheduledNodes &&
"The number of nodes scheduled doesn't match the expected number!");
}
#endif // NDEBUG
/// ProcessSDDbgValues - Process SDDbgValues associated with this node.
static void
ProcessSDDbgValues(SDNode *N, SelectionDAG *DAG, InstrEmitter &Emitter,
SmallVectorImpl<std::pair<unsigned, MachineInstr*> > &Orders,
DenseMap<SDValue, unsigned> &VRBaseMap, unsigned Order) {
if (!N->getHasDebugValue())
return;
// Opportunistically insert immediate dbg_value uses, i.e. those with the same
// source order number as N.
MachineBasicBlock *BB = Emitter.getBlock();
MachineBasicBlock::iterator InsertPos = Emitter.getInsertPos();
for (auto DV : DAG->GetDbgValues(N)) {
if (DV->isEmitted())
continue;
unsigned DVOrder = DV->getOrder();
if (!Order || DVOrder == Order) {
MachineInstr *DbgMI = Emitter.EmitDbgValue(DV, VRBaseMap);
if (DbgMI) {
Orders.push_back({DVOrder, DbgMI});
BB->insert(InsertPos, DbgMI);
}
}
}
}
// ProcessSourceNode - Process nodes with source order numbers. These are added
// to a vector which EmitSchedule uses to determine how to insert dbg_value
// instructions in the right order.
static void
ProcessSourceNode(SDNode *N, SelectionDAG *DAG, InstrEmitter &Emitter,
DenseMap<SDValue, unsigned> &VRBaseMap,
SmallVectorImpl<std::pair<unsigned, MachineInstr *>> &Orders,
SmallSet<unsigned, 8> &Seen, MachineInstr *NewInsn) {
unsigned Order = N->getIROrder();
if (!Order || Seen.count(Order)) {
// Process any valid SDDbgValues even if the node does not have an order
// assigned.
ProcessSDDbgValues(N, DAG, Emitter, Orders, VRBaseMap, 0);
return;
}
// If a new instruction was generated for this Order number, record it.
// Otherwise, leave this order number unseen: we will either find later
// instructions for it, or leave it unseen if there were no instructions at
// all.
if (NewInsn) {
Seen.insert(Order);
Orders.push_back({Order, NewInsn});
}
// Even if no instruction was generated, a Value may have become defined via
// earlier nodes. Try to process them now.
ProcessSDDbgValues(N, DAG, Emitter, Orders, VRBaseMap, Order);
}
void ScheduleDAGSDNodes::
EmitPhysRegCopy(SUnit *SU, DenseMap<SUnit*, unsigned> &VRBaseMap,
MachineBasicBlock::iterator InsertPos) {
for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
if (I->isCtrl()) continue; // ignore chain preds
if (I->getSUnit()->CopyDstRC) {
// Copy to physical register.
DenseMap<SUnit*, unsigned>::iterator VRI = VRBaseMap.find(I->getSUnit());
assert(VRI != VRBaseMap.end() && "Node emitted out of order - late");
// Find the destination physical register.
unsigned Reg = 0;
for (SUnit::const_succ_iterator II = SU->Succs.begin(),
EE = SU->Succs.end(); II != EE; ++II) {
if (II->isCtrl()) continue; // ignore chain preds
if (II->getReg()) {
Reg = II->getReg();
break;
}
}
BuildMI(*BB, InsertPos, DebugLoc(), TII->get(TargetOpcode::COPY), Reg)
.addReg(VRI->second);
} else {
// Copy from physical register.
assert(I->getReg() && "Unknown physical register!");
unsigned VRBase = MRI.createVirtualRegister(SU->CopyDstRC);
bool isNew = VRBaseMap.insert(std::make_pair(SU, VRBase)).second;
(void)isNew; // Silence compiler warning.
assert(isNew && "Node emitted out of order - early");
BuildMI(*BB, InsertPos, DebugLoc(), TII->get(TargetOpcode::COPY), VRBase)
.addReg(I->getReg());
}
break;
}
}
/// EmitSchedule - Emit the machine code in scheduled order. Return the new
/// InsertPos and MachineBasicBlock that contains this insertion
/// point. ScheduleDAGSDNodes holds a BB pointer for convenience, but this does
/// not necessarily refer to the returned BB. The emitter may split blocks.
MachineBasicBlock *ScheduleDAGSDNodes::
EmitSchedule(MachineBasicBlock::iterator &InsertPos) {
InstrEmitter Emitter(BB, InsertPos);
DenseMap<SDValue, unsigned> VRBaseMap;
DenseMap<SUnit*, unsigned> CopyVRBaseMap;
SmallVector<std::pair<unsigned, MachineInstr*>, 32> Orders;
SmallSet<unsigned, 8> Seen;
bool HasDbg = DAG->hasDebugValues();
// Emit a node, and determine where its first instruction is for debuginfo.
// Zero, one, or multiple instructions can be created when emitting a node.
auto EmitNode =
[&](SDNode *Node, bool IsClone, bool IsCloned,
DenseMap<SDValue, unsigned> &VRBaseMap) -> MachineInstr * {
// Fetch instruction prior to this, or end() if nonexistent.
auto GetPrevInsn = [&](MachineBasicBlock::iterator I) {
if (I == BB->begin())
return BB->end();
else
return std::prev(Emitter.getInsertPos());
};
MachineBasicBlock::iterator Before = GetPrevInsn(Emitter.getInsertPos());
Emitter.EmitNode(Node, IsClone, IsCloned, VRBaseMap);
MachineBasicBlock::iterator After = GetPrevInsn(Emitter.getInsertPos());
// If the iterator did not change, no instructions were inserted.
if (Before == After)
return nullptr;
MachineInstr *MI;
if (Before == BB->end()) {
// There were no prior instructions; the new ones must start at the
// beginning of the block.
MI = &Emitter.getBlock()->instr_front();
} else {
// Return first instruction after the pre-existing instructions.
MI = &*std::next(Before);
}
if (MI->isCall() && DAG->getTarget().Options.EnableDebugEntryValues)
MF.addCallArgsForwardingRegs(MI, DAG->getSDCallSiteInfo(Node));
return MI;
};
// If this is the first BB, emit byval parameter dbg_value's.
if (HasDbg && BB->getParent()->begin() == MachineFunction::iterator(BB)) {
SDDbgInfo::DbgIterator PDI = DAG->ByvalParmDbgBegin();
SDDbgInfo::DbgIterator PDE = DAG->ByvalParmDbgEnd();
for (; PDI != PDE; ++PDI) {
MachineInstr *DbgMI = Emitter.EmitDbgValue(*PDI, VRBaseMap);
if (DbgMI) {
BB->insert(InsertPos, DbgMI);
// We re-emit the dbg_value closer to its use, too, after instructions
// are emitted to the BB.
(*PDI)->clearIsEmitted();
}
}
}
for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
SUnit *SU = Sequence[i];
if (!SU) {
// Null SUnit* is a noop.
TII->insertNoop(*Emitter.getBlock(), InsertPos);
continue;
}
// For pre-regalloc scheduling, create instructions corresponding to the
// SDNode and any glued SDNodes and append them to the block.
if (!SU->getNode()) {
// Emit a copy.
EmitPhysRegCopy(SU, CopyVRBaseMap, InsertPos);
continue;
}
SmallVector<SDNode *, 4> GluedNodes;
for (SDNode *N = SU->getNode()->getGluedNode(); N; N = N->getGluedNode())
GluedNodes.push_back(N);
while (!GluedNodes.empty()) {
SDNode *N = GluedNodes.back();
auto NewInsn = EmitNode(N, SU->OrigNode != SU, SU->isCloned, VRBaseMap);
// Remember the source order of the inserted instruction.
if (HasDbg)
ProcessSourceNode(N, DAG, Emitter, VRBaseMap, Orders, Seen, NewInsn);
- if (MDNode *MD = DAG->getHeapAllocSite(N)) {
+ if (MDNode *MD = DAG->getHeapAllocSite(N))
if (NewInsn && NewInsn->isCall())
- MF.addCodeViewHeapAllocSite(NewInsn, MD);
- }
+ NewInsn->setHeapAllocMarker(MF, MD);
GluedNodes.pop_back();
}
auto NewInsn =
EmitNode(SU->getNode(), SU->OrigNode != SU, SU->isCloned, VRBaseMap);
// Remember the source order of the inserted instruction.
if (HasDbg)
ProcessSourceNode(SU->getNode(), DAG, Emitter, VRBaseMap, Orders, Seen,
NewInsn);
+
if (MDNode *MD = DAG->getHeapAllocSite(SU->getNode())) {
if (NewInsn && NewInsn->isCall())
- MF.addCodeViewHeapAllocSite(NewInsn, MD);
+ NewInsn->setHeapAllocMarker(MF, MD);
}
}
// Insert all the dbg_values which have not already been inserted in source
// order sequence.
if (HasDbg) {
MachineBasicBlock::iterator BBBegin = BB->getFirstNonPHI();
// Sort the source order instructions and use the order to insert debug
// values. Use stable_sort so that DBG_VALUEs are inserted in the same order
// regardless of the host's implementation of std::sort.
llvm::stable_sort(Orders, less_first());
std::stable_sort(DAG->DbgBegin(), DAG->DbgEnd(),
[](const SDDbgValue *LHS, const SDDbgValue *RHS) {
return LHS->getOrder() < RHS->getOrder();
});
SDDbgInfo::DbgIterator DI = DAG->DbgBegin();
SDDbgInfo::DbgIterator DE = DAG->DbgEnd();
// Now emit the rest according to source order.
unsigned LastOrder = 0;
for (unsigned i = 0, e = Orders.size(); i != e && DI != DE; ++i) {
unsigned Order = Orders[i].first;
MachineInstr *MI = Orders[i].second;
// Insert all SDDbgValue's whose order(s) are before "Order".
assert(MI);
for (; DI != DE; ++DI) {
if ((*DI)->getOrder() < LastOrder || (*DI)->getOrder() >= Order)
break;
if ((*DI)->isEmitted())
continue;
MachineInstr *DbgMI = Emitter.EmitDbgValue(*DI, VRBaseMap);
if (DbgMI) {
if (!LastOrder)
// Insert to start of the BB (after PHIs).
BB->insert(BBBegin, DbgMI);
else {
// Insert at the instruction, which may be in a different
// block, if the block was split by a custom inserter.
MachineBasicBlock::iterator Pos = MI;
MI->getParent()->insert(Pos, DbgMI);
}
}
}
LastOrder = Order;
}
// Add trailing DbgValue's before the terminator. FIXME: May want to add
// some of them before one or more conditional branches?
SmallVector<MachineInstr*, 8> DbgMIs;
for (; DI != DE; ++DI) {
if ((*DI)->isEmitted())
continue;
assert((*DI)->getOrder() >= LastOrder &&
"emitting DBG_VALUE out of order");
if (MachineInstr *DbgMI = Emitter.EmitDbgValue(*DI, VRBaseMap))
DbgMIs.push_back(DbgMI);
}
MachineBasicBlock *InsertBB = Emitter.getBlock();
MachineBasicBlock::iterator Pos = InsertBB->getFirstTerminator();
InsertBB->insert(Pos, DbgMIs.begin(), DbgMIs.end());
SDDbgInfo::DbgLabelIterator DLI = DAG->DbgLabelBegin();
SDDbgInfo::DbgLabelIterator DLE = DAG->DbgLabelEnd();
// Now emit the rest according to source order.
LastOrder = 0;
for (const auto &InstrOrder : Orders) {
unsigned Order = InstrOrder.first;
MachineInstr *MI = InstrOrder.second;
if (!MI)
continue;
// Insert all SDDbgLabel's whose order(s) are before "Order".
for (; DLI != DLE &&
(*DLI)->getOrder() >= LastOrder && (*DLI)->getOrder() < Order;
++DLI) {
MachineInstr *DbgMI = Emitter.EmitDbgLabel(*DLI);
if (DbgMI) {
if (!LastOrder)
// Insert to start of the BB (after PHIs).
BB->insert(BBBegin, DbgMI);
else {
// Insert at the instruction, which may be in a different
// block, if the block was split by a custom inserter.
MachineBasicBlock::iterator Pos = MI;
MI->getParent()->insert(Pos, DbgMI);
}
}
}
if (DLI == DLE)
break;
LastOrder = Order;
}
}
InsertPos = Emitter.getInsertPos();
return Emitter.getBlock();
}
/// Return the basic block label.
std::string ScheduleDAGSDNodes::getDAGName() const {
return "sunit-dag." + BB->getFullName();
}
Index: stable/12/contrib/llvm-project/llvm/lib/CodeGen/StackProtector.cpp
===================================================================
--- stable/12/contrib/llvm-project/llvm/lib/CodeGen/StackProtector.cpp (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/lib/CodeGen/StackProtector.cpp (revision 356465)
@@ -1,507 +1,573 @@
//===- StackProtector.cpp - Stack Protector Insertion ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass inserts stack protectors into functions which need them. A variable
// with a random value in it is stored onto the stack before the local variables
// are allocated. Upon exiting the block, the stored value is checked. If it's
// changed, then there was some sort of violation and the program aborts.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
-#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <utility>
using namespace llvm;
#define DEBUG_TYPE "stack-protector"
STATISTIC(NumFunProtected, "Number of functions protected");
STATISTIC(NumAddrTaken, "Number of local variables that have their address"
" taken.");
static cl::opt<bool> EnableSelectionDAGSP("enable-selectiondag-sp",
cl::init(true), cl::Hidden);
char StackProtector::ID = 0;
INITIALIZE_PASS_BEGIN(StackProtector, DEBUG_TYPE,
"Insert stack protectors", false, true)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(StackProtector, DEBUG_TYPE,
"Insert stack protectors", false, true)
FunctionPass *llvm::createStackProtectorPass() { return new StackProtector(); }
void StackProtector::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<TargetPassConfig>();
AU.addPreserved<DominatorTreeWrapperPass>();
}
bool StackProtector::runOnFunction(Function &Fn) {
F = &Fn;
M = F->getParent();
DominatorTreeWrapperPass *DTWP =
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
DT = DTWP ? &DTWP->getDomTree() : nullptr;
TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
Trip = TM->getTargetTriple();
TLI = TM->getSubtargetImpl(Fn)->getTargetLowering();
HasPrologue = false;
HasIRCheck = false;
Attribute Attr = Fn.getFnAttribute("stack-protector-buffer-size");
if (Attr.isStringAttribute() &&
Attr.getValueAsString().getAsInteger(10, SSPBufferSize))
return false; // Invalid integer string
if (!RequiresStackProtector())
return false;
// TODO(etienneb): Functions with funclets are not correctly supported now.
// Do nothing if this is funclet-based personality.
if (Fn.hasPersonalityFn()) {
EHPersonality Personality = classifyEHPersonality(Fn.getPersonalityFn());
if (isFuncletEHPersonality(Personality))
return false;
}
++NumFunProtected;
return InsertStackProtectors();
}
/// \param [out] IsLarge is set to true if a protectable array is found and
/// it is "large" ( >= ssp-buffer-size). In the case of a structure with
/// multiple arrays, this gets set if any of them is large.
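/// Illustrative example (not from the source): with ssp-buffer-size=8, a
/// local of type struct { int n; char buf[64]; } contains a protectable
/// 64-byte character array, so this returns true and sets IsLarge.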
bool StackProtector::ContainsProtectableArray(Type *Ty, bool &IsLarge,
bool Strong,
bool InStruct) const {
if (!Ty)
return false;
if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
if (!AT->getElementType()->isIntegerTy(8)) {
// If we're on a non-Darwin platform or we're inside of a structure, don't
// add stack protectors unless the array is a character array.
// However, in strong mode any array, regardless of type and size,
// triggers a protector.
if (!Strong && (InStruct || !Trip.isOSDarwin()))
return false;
}
// If an array has at least SSPBufferSize bytes of allocated space, then we
// emit stack protectors.
if (SSPBufferSize <= M->getDataLayout().getTypeAllocSize(AT)) {
IsLarge = true;
return true;
}
if (Strong)
// Require a protector for all arrays in strong mode
return true;
}
const StructType *ST = dyn_cast<StructType>(Ty);
if (!ST)
return false;
bool NeedsProtector = false;
for (StructType::element_iterator I = ST->element_begin(),
E = ST->element_end();
I != E; ++I)
if (ContainsProtectableArray(*I, IsLarge, Strong, true)) {
// If the element is a protectable array and is large (>= SSPBufferSize)
// then we are done. If the protectable array is not large, then
// keep looking in case a subsequent element is a large array.
if (IsLarge)
return true;
NeedsProtector = true;
}
return NeedsProtector;
}
+bool StackProtector::HasAddressTaken(const Instruction *AI,
+ SmallPtrSetImpl<const PHINode *> &VisitedPHIs) {
+ for (const User *U : AI->users()) {
+ const auto *I = cast<Instruction>(U);
+ switch (I->getOpcode()) {
+ case Instruction::Store:
+ if (AI == cast<StoreInst>(I)->getValueOperand())
+ return true;
+ break;
+ case Instruction::AtomicCmpXchg:
+ // cmpxchg conceptually includes both a load and store from the same
+ // location. So, like store, the value being stored is what matters.
+ if (AI == cast<AtomicCmpXchgInst>(I)->getNewValOperand())
+ return true;
+ break;
+ case Instruction::PtrToInt:
+ if (AI == cast<PtrToIntInst>(I)->getOperand(0))
+ return true;
+ break;
+ case Instruction::Call: {
+ // Ignore intrinsics that do not become real instructions.
+ // TODO: Narrow this to intrinsics that have store-like effects.
+ const auto *CI = cast<CallInst>(I);
+ if (!isa<DbgInfoIntrinsic>(CI) && !CI->isLifetimeStartOrEnd())
+ return true;
+ break;
+ }
+ case Instruction::Invoke:
+ return true;
+ case Instruction::BitCast:
+ case Instruction::GetElementPtr:
+ case Instruction::Select:
+ case Instruction::AddrSpaceCast:
+ if (HasAddressTaken(I, VisitedPHIs))
+ return true;
+ break;
+ case Instruction::PHI: {
+ // Keep track of what PHI nodes we have already visited to ensure
+ // they are only visited once.
+ const auto *PN = cast<PHINode>(I);
+ if (VisitedPHIs.insert(PN).second)
+ if (HasAddressTaken(PN, VisitedPHIs))
+ return true;
+ break;
+ }
+ case Instruction::Load:
+ case Instruction::AtomicRMW:
+ case Instruction::Ret:
+ // These instructions take an address operand, but have load-like or
+ // other innocuous behavior that should not trigger a stack protector.
+ // atomicrmw conceptually has both load and store semantics, but the
+ // value being stored must be integer; so if a pointer is being stored,
+ // we'll catch it in the PtrToInt case above.
+ break;
+ default:
+ // Conservatively return true for any instruction that takes an address
+ // operand, but is not handled above.
+ return true;
+ }
+ }
+ return false;
+}
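+// Illustrative example (not from the source): for
+//   %a = alloca i32
+//   store i32* %a, i32** %p
+// the alloca is the store's value operand, so HasAddressTaken returns true;
+// a plain load through %a would not, by itself, trigger it.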
+
/// Search for the first call to the llvm.stackprotector intrinsic and return it
/// if present.
static const CallInst *findStackProtectorIntrinsic(Function &F) {
for (const BasicBlock &BB : F)
for (const Instruction &I : BB)
if (const CallInst *CI = dyn_cast<CallInst>(&I))
if (CI->getCalledFunction() ==
Intrinsic::getDeclaration(F.getParent(), Intrinsic::stackprotector))
return CI;
return nullptr;
}
/// Check whether or not this function needs a stack protector based
/// upon the stack protector level.
///
/// We use two heuristics: a standard (ssp) and strong (sspstrong).
/// The standard heuristic will add a guard variable to functions that
/// call alloca with either a variable size or a size >= SSPBufferSize,
/// functions with character buffers larger than SSPBufferSize, and functions
/// with aggregates containing character buffers larger than SSPBufferSize. The
/// strong heuristic will add a guard variable to functions that call alloca
/// regardless of size, functions with any buffer regardless of type and size,
/// functions with aggregates that contain any buffer regardless of type and
/// size, and functions that contain stack-based variables that have had their
/// address taken.
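/// Illustrative example (not from the source): with the default
/// ssp-buffer-size of 8, a function containing "char buf[16];" gets a guard
/// under ssp because the character buffer meets the size threshold; under
/// sspstrong, any alloca'd buffer qualifies regardless of type and size.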
bool StackProtector::RequiresStackProtector() {
bool Strong = false;
bool NeedsProtector = false;
HasPrologue = findStackProtectorIntrinsic(*F);
if (F->hasFnAttribute(Attribute::SafeStack))
return false;
// We are constructing the OptimizationRemarkEmitter on the fly rather than
// using the analysis pass to avoid building DominatorTree and LoopInfo which
// are not available this late in the IR pipeline.
OptimizationRemarkEmitter ORE(F);
if (F->hasFnAttribute(Attribute::StackProtectReq)) {
ORE.emit([&]() {
return OptimizationRemark(DEBUG_TYPE, "StackProtectorRequested", F)
<< "Stack protection applied to function "
<< ore::NV("Function", F)
<< " due to a function attribute or command-line switch";
});
NeedsProtector = true;
Strong = true; // Use the same heuristic as strong to determine SSPLayout
} else if (F->hasFnAttribute(Attribute::StackProtectStrong))
Strong = true;
else if (HasPrologue)
NeedsProtector = true;
else if (!F->hasFnAttribute(Attribute::StackProtect))
return false;
+ /// VisitedPHIs - The set of PHI nodes visited when determining
+ /// if a variable's address has been taken. This set
+ /// is maintained to ensure we don't visit the same PHI node multiple
+ /// times.
+ SmallPtrSet<const PHINode *, 16> VisitedPHIs;
+
for (const BasicBlock &BB : *F) {
for (const Instruction &I : BB) {
if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
if (AI->isArrayAllocation()) {
auto RemarkBuilder = [&]() {
return OptimizationRemark(DEBUG_TYPE, "StackProtectorAllocaOrArray",
&I)
<< "Stack protection applied to function "
<< ore::NV("Function", F)
<< " due to a call to alloca or use of a variable length "
"array";
};
if (const auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
if (CI->getLimitedValue(SSPBufferSize) >= SSPBufferSize) {
// A call to alloca with size >= SSPBufferSize requires
// stack protectors.
Layout.insert(std::make_pair(AI,
MachineFrameInfo::SSPLK_LargeArray));
ORE.emit(RemarkBuilder);
NeedsProtector = true;
} else if (Strong) {
// Require protectors for all alloca calls in strong mode.
Layout.insert(std::make_pair(AI,
MachineFrameInfo::SSPLK_SmallArray));
ORE.emit(RemarkBuilder);
NeedsProtector = true;
}
} else {
// A call to alloca with a variable size requires protectors.
Layout.insert(std::make_pair(AI,
MachineFrameInfo::SSPLK_LargeArray));
ORE.emit(RemarkBuilder);
NeedsProtector = true;
}
continue;
}
bool IsLarge = false;
if (ContainsProtectableArray(AI->getAllocatedType(), IsLarge, Strong)) {
Layout.insert(std::make_pair(AI, IsLarge
? MachineFrameInfo::SSPLK_LargeArray
: MachineFrameInfo::SSPLK_SmallArray));
ORE.emit([&]() {
return OptimizationRemark(DEBUG_TYPE, "StackProtectorBuffer", &I)
<< "Stack protection applied to function "
<< ore::NV("Function", F)
<< " due to a stack allocated buffer or struct containing a "
"buffer";
});
NeedsProtector = true;
continue;
}
- if (Strong && PointerMayBeCaptured(AI,
- /* ReturnCaptures */ false,
- /* StoreCaptures */ true)) {
+ if (Strong && HasAddressTaken(AI, VisitedPHIs)) {
++NumAddrTaken;
Layout.insert(std::make_pair(AI, MachineFrameInfo::SSPLK_AddrOf));
ORE.emit([&]() {
return OptimizationRemark(DEBUG_TYPE, "StackProtectorAddressTaken",
&I)
<< "Stack protection applied to function "
<< ore::NV("Function", F)
<< " due to the address of a local variable being taken";
});
NeedsProtector = true;
}
}
}
}
return NeedsProtector;
}
/// Create a stack guard load and record whether SelectionDAG SSP is
/// supported.
static Value *getStackGuard(const TargetLoweringBase *TLI, Module *M,
IRBuilder<> &B,
bool *SupportsSelectionDAGSP = nullptr) {
if (Value *Guard = TLI->getIRStackGuard(B))
return B.CreateLoad(B.getInt8PtrTy(), Guard, true, "StackGuard");
// Use SelectionDAG SSP handling, since there isn't an IR guard.
//
// This is somewhat awkward, since we optionally report whether a
// SelectionDAG SP should be performed here. The reason is that it's strictly
// defined as !TLI->getIRStackGuard(B), where getIRStackGuard is also
// mutating. There is no way to get this bit without mutating the IR, so
// computing it has to happen at exactly this point.
//
// We could have defined a new function TLI::supportsSelectionDAGSP(), but that
// would put more burden on the backends' overriding work, especially when it
// actually conveys the same information getIRStackGuard() already gives.
if (SupportsSelectionDAGSP)
*SupportsSelectionDAGSP = true;
TLI->insertSSPDeclarations(*M);
return B.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stackguard));
}
/// Insert code into the entry block that stores the stack guard
/// variable onto the stack:
///
/// entry:
/// StackGuardSlot = alloca i8*
/// StackGuard = <stack guard>
/// call void @llvm.stackprotector(StackGuard, StackGuardSlot)
///
/// Returns true if the platform/triple supports the stackprotectorcreate pseudo
/// node.
static bool CreatePrologue(Function *F, Module *M, ReturnInst *RI,
const TargetLoweringBase *TLI, AllocaInst *&AI) {
bool SupportsSelectionDAGSP = false;
IRBuilder<> B(&F->getEntryBlock().front());
PointerType *PtrTy = Type::getInt8PtrTy(RI->getContext());
AI = B.CreateAlloca(PtrTy, nullptr, "StackGuardSlot");
Value *GuardSlot = getStackGuard(TLI, M, B, &SupportsSelectionDAGSP);
B.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stackprotector),
{GuardSlot, AI});
return SupportsSelectionDAGSP;
}
/// InsertStackProtectors - Insert code into the prologue and epilogue of the
/// function.
///
/// - The prologue code loads and stores the stack guard onto the stack.
/// - The epilogue checks the value stored in the prologue against the original
/// value. It calls __stack_chk_fail if they differ.
bool StackProtector::InsertStackProtectors() {
// If the target wants to XOR the frame pointer into the guard value, it's
// impossible to emit the check in IR, so the target *must* support stack
// protection in SDAG.
bool SupportsSelectionDAGSP =
TLI->useStackGuardXorFP() ||
(EnableSelectionDAGSP && !TM->Options.EnableFastISel &&
!TM->Options.EnableGlobalISel);
AllocaInst *AI = nullptr; // Place on stack that stores the stack guard.
for (Function::iterator I = F->begin(), E = F->end(); I != E;) {
BasicBlock *BB = &*I++;
ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator());
if (!RI)
continue;
// Generate prologue instrumentation if not already generated.
if (!HasPrologue) {
HasPrologue = true;
SupportsSelectionDAGSP &= CreatePrologue(F, M, RI, TLI, AI);
}
// SelectionDAG based code generation. Nothing else needs to be done here.
// The epilogue instrumentation is postponed to SelectionDAG.
if (SupportsSelectionDAGSP)
break;
// Find the stack guard slot if the prologue was not created by this pass
// itself via a previous call to CreatePrologue().
if (!AI) {
const CallInst *SPCall = findStackProtectorIntrinsic(*F);
assert(SPCall && "Call to llvm.stackprotector is missing");
AI = cast<AllocaInst>(SPCall->getArgOperand(1));
}
// Set HasIRCheck to true, so that SelectionDAG will not generate its own
// version. SelectionDAG calls 'shouldEmitSDCheck' to check whether the
// instrumentation has already been generated.
HasIRCheck = true;
// Generate epilogue instrumentation. The epilogue instrumentation can be
// function-based or inlined depending on which mechanism the target is
// providing.
if (Function *GuardCheck = TLI->getSSPStackGuardCheck(*M)) {
// Generate the function-based epilogue instrumentation.
// The target provides a guard check function, generate a call to it.
IRBuilder<> B(RI);
LoadInst *Guard = B.CreateLoad(B.getInt8PtrTy(), AI, true, "Guard");
CallInst *Call = B.CreateCall(GuardCheck, {Guard});
Call->setAttributes(GuardCheck->getAttributes());
Call->setCallingConv(GuardCheck->getCallingConv());
} else {
// Generate the epilogue with inline instrumentation.
// If we do not support SelectionDAG based tail calls, generate IR level
// tail calls.
//
// For each block with a return instruction, convert this:
//
// return:
// ...
// ret ...
//
// into this:
//
// return:
// ...
// %1 = <stack guard>
// %2 = load StackGuardSlot
// %3 = cmp i1 %1, %2
// br i1 %3, label %SP_return, label %CallStackCheckFailBlk
//
// SP_return:
// ret ...
//
// CallStackCheckFailBlk:
// call void @__stack_chk_fail()
// unreachable
// Create the FailBB. We duplicate the BB every time since the MI tail
// merge pass will merge together all of the various BBs into one,
// including the fail BB generated by the stack protector pseudo instruction.
BasicBlock *FailBB = CreateFailBB();
// Split the basic block before the return instruction.
BasicBlock *NewBB = BB->splitBasicBlock(RI->getIterator(), "SP_return");
// Update the dominator tree if we need to.
if (DT && DT->isReachableFromEntry(BB)) {
DT->addNewBlock(NewBB, BB);
DT->addNewBlock(FailBB, BB);
}
// Remove default branch instruction to the new BB.
BB->getTerminator()->eraseFromParent();
// Move the newly created basic block to the point right after the old
// basic block so that it's in the "fall through" position.
NewBB->moveAfter(BB);
// Generate the stack protector instructions in the old basic block.
IRBuilder<> B(BB);
Value *Guard = getStackGuard(TLI, M, B);
LoadInst *LI2 = B.CreateLoad(B.getInt8PtrTy(), AI, true);
Value *Cmp = B.CreateICmpEQ(Guard, LI2);
auto SuccessProb =
BranchProbabilityInfo::getBranchProbStackProtector(true);
auto FailureProb =
BranchProbabilityInfo::getBranchProbStackProtector(false);
MDNode *Weights = MDBuilder(F->getContext())
.createBranchWeights(SuccessProb.getNumerator(),
FailureProb.getNumerator());
B.CreateCondBr(Cmp, NewBB, FailBB, Weights);
}
}
// Return if we didn't modify any basic blocks, i.e., there are no return
// statements in the function.
return HasPrologue;
}
/// CreateFailBB - Create a basic block to jump to when the stack protector
/// check fails.
BasicBlock *StackProtector::CreateFailBB() {
LLVMContext &Context = F->getContext();
BasicBlock *FailBB = BasicBlock::Create(Context, "CallStackCheckFailBlk", F);
IRBuilder<> B(FailBB);
B.SetCurrentDebugLocation(DebugLoc::get(0, 0, F->getSubprogram()));
if (Trip.isOSOpenBSD()) {
FunctionCallee StackChkFail = M->getOrInsertFunction(
"__stack_smash_handler", Type::getVoidTy(Context),
Type::getInt8PtrTy(Context));
B.CreateCall(StackChkFail, B.CreateGlobalStringPtr(F->getName(), "SSH"));
} else {
FunctionCallee StackChkFail =
M->getOrInsertFunction("__stack_chk_fail", Type::getVoidTy(Context));
B.CreateCall(StackChkFail, {});
}
B.CreateUnreachable();
return FailBB;
}
bool StackProtector::shouldEmitSDCheck(const BasicBlock &BB) const {
return HasPrologue && !HasIRCheck && isa<ReturnInst>(BB.getTerminator());
}
void StackProtector::copyToMachineFrameInfo(MachineFrameInfo &MFI) const {
if (Layout.empty())
return;
for (int I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) {
if (MFI.isDeadObjectIndex(I))
continue;
const AllocaInst *AI = MFI.getObjectAllocation(I);
if (!AI)
continue;
SSPLayoutMap::const_iterator LI = Layout.find(AI);
if (LI == Layout.end())
continue;
MFI.setObjectSSPLayout(I, LI->second);
}
}
Index: stable/12/contrib/llvm-project/llvm/lib/Object/ELFObjectFile.cpp
===================================================================
--- stable/12/contrib/llvm-project/llvm/lib/Object/ELFObjectFile.cpp (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/lib/Object/ELFObjectFile.cpp (revision 356465)
@@ -1,422 +1,422 @@
//===- ELFObjectFile.cpp - ELF object file implementation -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the ELFObjectFile class implementation.
//
//===----------------------------------------------------------------------===//
#include "llvm/Object/ELFObjectFile.h"
#include "llvm/ADT/Triple.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCInstrAnalysis.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Object/ELF.h"
#include "llvm/Object/ELFTypes.h"
#include "llvm/Object/Error.h"
#include "llvm/Support/ARMAttributeParser.h"
#include "llvm/Support/ARMBuildAttributes.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TargetRegistry.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <system_error>
#include <utility>
using namespace llvm;
using namespace object;
const EnumEntry<unsigned> llvm::object::ElfSymbolTypes[NumElfSymbolTypes] = {
{"None", "NOTYPE", ELF::STT_NOTYPE},
{"Object", "OBJECT", ELF::STT_OBJECT},
{"Function", "FUNC", ELF::STT_FUNC},
{"Section", "SECTION", ELF::STT_SECTION},
{"File", "FILE", ELF::STT_FILE},
{"Common", "COMMON", ELF::STT_COMMON},
{"TLS", "TLS", ELF::STT_TLS},
{"GNU_IFunc", "IFUNC", ELF::STT_GNU_IFUNC}};
ELFObjectFileBase::ELFObjectFileBase(unsigned int Type, MemoryBufferRef Source)
: ObjectFile(Type, Source) {}
template <class ELFT>
static Expected<std::unique_ptr<ELFObjectFile<ELFT>>>
createPtr(MemoryBufferRef Object) {
auto Ret = ELFObjectFile<ELFT>::create(Object);
if (Error E = Ret.takeError())
return std::move(E);
return make_unique<ELFObjectFile<ELFT>>(std::move(*Ret));
}
Expected<std::unique_ptr<ObjectFile>>
ObjectFile::createELFObjectFile(MemoryBufferRef Obj) {
std::pair<unsigned char, unsigned char> Ident =
getElfArchType(Obj.getBuffer());
std::size_t MaxAlignment =
1ULL << countTrailingZeros(uintptr_t(Obj.getBufferStart()));
if (MaxAlignment < 2)
return createError("Insufficient alignment");
if (Ident.first == ELF::ELFCLASS32) {
if (Ident.second == ELF::ELFDATA2LSB)
return createPtr<ELF32LE>(Obj);
else if (Ident.second == ELF::ELFDATA2MSB)
return createPtr<ELF32BE>(Obj);
else
return createError("Invalid ELF data");
} else if (Ident.first == ELF::ELFCLASS64) {
if (Ident.second == ELF::ELFDATA2LSB)
return createPtr<ELF64LE>(Obj);
else if (Ident.second == ELF::ELFDATA2MSB)
return createPtr<ELF64BE>(Obj);
else
return createError("Invalid ELF data");
}
return createError("Invalid ELF class");
}
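// A minimal usage sketch for the dispatcher above: open a file and let
// createELFObjectFile() pick the matching ELFObjectFile<ELFT> instantiation.
// The helper name is hypothetical and llvm/Support/MemoryBuffer.h is assumed
// to be available.
static Expected<std::unique_ptr<ObjectFile>> openELFSketch(StringRef Path) {
  auto BufOrErr = MemoryBuffer::getFile(Path);
  if (std::error_code EC = BufOrErr.getError())
    return errorCodeToError(EC);
  // Note: the ObjectFile does not own the buffer; a real caller must keep
  // *BufOrErr alive for as long as the returned object is in use.
  return ObjectFile::createELFObjectFile((*BufOrErr)->getMemBufferRef());
}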
SubtargetFeatures ELFObjectFileBase::getMIPSFeatures() const {
SubtargetFeatures Features;
unsigned PlatformFlags = getPlatformFlags();
switch (PlatformFlags & ELF::EF_MIPS_ARCH) {
case ELF::EF_MIPS_ARCH_1:
break;
case ELF::EF_MIPS_ARCH_2:
Features.AddFeature("mips2");
break;
case ELF::EF_MIPS_ARCH_3:
Features.AddFeature("mips3");
break;
case ELF::EF_MIPS_ARCH_4:
Features.AddFeature("mips4");
break;
case ELF::EF_MIPS_ARCH_5:
Features.AddFeature("mips5");
break;
case ELF::EF_MIPS_ARCH_32:
Features.AddFeature("mips32");
break;
case ELF::EF_MIPS_ARCH_64:
Features.AddFeature("mips64");
break;
case ELF::EF_MIPS_ARCH_32R2:
Features.AddFeature("mips32r2");
break;
case ELF::EF_MIPS_ARCH_64R2:
Features.AddFeature("mips64r2");
break;
case ELF::EF_MIPS_ARCH_32R6:
Features.AddFeature("mips32r6");
break;
case ELF::EF_MIPS_ARCH_64R6:
Features.AddFeature("mips64r6");
break;
default:
llvm_unreachable("Unknown EF_MIPS_ARCH value");
}
switch (PlatformFlags & ELF::EF_MIPS_MACH) {
case ELF::EF_MIPS_MACH_NONE:
// No feature associated with this value.
break;
case ELF::EF_MIPS_MACH_OCTEON:
Features.AddFeature("cnmips");
break;
default:
llvm_unreachable("Unknown EF_MIPS_MACH value");
}
if (PlatformFlags & ELF::EF_MIPS_ARCH_ASE_M16)
Features.AddFeature("mips16");
if (PlatformFlags & ELF::EF_MIPS_MICROMIPS)
Features.AddFeature("micromips");
return Features;
}
SubtargetFeatures ELFObjectFileBase::getARMFeatures() const {
SubtargetFeatures Features;
ARMAttributeParser Attributes;
if (Error E = getBuildAttributes(Attributes))
return SubtargetFeatures();
// Both ARMv7-M and ARMv7-R have to support Thumb hardware divide.
bool isV7 = false;
if (Attributes.hasAttribute(ARMBuildAttrs::CPU_arch))
isV7 = Attributes.getAttributeValue(ARMBuildAttrs::CPU_arch)
== ARMBuildAttrs::v7;
if (Attributes.hasAttribute(ARMBuildAttrs::CPU_arch_profile)) {
switch(Attributes.getAttributeValue(ARMBuildAttrs::CPU_arch_profile)) {
case ARMBuildAttrs::ApplicationProfile:
Features.AddFeature("aclass");
break;
case ARMBuildAttrs::RealTimeProfile:
Features.AddFeature("rclass");
if (isV7)
Features.AddFeature("hwdiv");
break;
case ARMBuildAttrs::MicroControllerProfile:
Features.AddFeature("mclass");
if (isV7)
Features.AddFeature("hwdiv");
break;
}
}
if (Attributes.hasAttribute(ARMBuildAttrs::THUMB_ISA_use)) {
switch(Attributes.getAttributeValue(ARMBuildAttrs::THUMB_ISA_use)) {
default:
break;
case ARMBuildAttrs::Not_Allowed:
Features.AddFeature("thumb", false);
Features.AddFeature("thumb2", false);
break;
case ARMBuildAttrs::AllowThumb32:
Features.AddFeature("thumb2");
break;
}
}
if (Attributes.hasAttribute(ARMBuildAttrs::FP_arch)) {
switch(Attributes.getAttributeValue(ARMBuildAttrs::FP_arch)) {
default:
break;
case ARMBuildAttrs::Not_Allowed:
- Features.AddFeature("vfp2sp", false);
+ Features.AddFeature("vfp2d16sp", false);
Features.AddFeature("vfp3d16sp", false);
Features.AddFeature("vfp4d16sp", false);
break;
case ARMBuildAttrs::AllowFPv2:
Features.AddFeature("vfp2");
break;
case ARMBuildAttrs::AllowFPv3A:
case ARMBuildAttrs::AllowFPv3B:
Features.AddFeature("vfp3");
break;
case ARMBuildAttrs::AllowFPv4A:
case ARMBuildAttrs::AllowFPv4B:
Features.AddFeature("vfp4");
break;
}
}
if (Attributes.hasAttribute(ARMBuildAttrs::Advanced_SIMD_arch)) {
switch(Attributes.getAttributeValue(ARMBuildAttrs::Advanced_SIMD_arch)) {
default:
break;
case ARMBuildAttrs::Not_Allowed:
Features.AddFeature("neon", false);
Features.AddFeature("fp16", false);
break;
case ARMBuildAttrs::AllowNeon:
Features.AddFeature("neon");
break;
case ARMBuildAttrs::AllowNeon2:
Features.AddFeature("neon");
Features.AddFeature("fp16");
break;
}
}
if (Attributes.hasAttribute(ARMBuildAttrs::MVE_arch)) {
switch(Attributes.getAttributeValue(ARMBuildAttrs::MVE_arch)) {
default:
break;
case ARMBuildAttrs::Not_Allowed:
Features.AddFeature("mve", false);
Features.AddFeature("mve.fp", false);
break;
case ARMBuildAttrs::AllowMVEInteger:
Features.AddFeature("mve.fp", false);
Features.AddFeature("mve");
break;
case ARMBuildAttrs::AllowMVEIntegerAndFloat:
Features.AddFeature("mve.fp");
break;
}
}
if (Attributes.hasAttribute(ARMBuildAttrs::DIV_use)) {
switch(Attributes.getAttributeValue(ARMBuildAttrs::DIV_use)) {
default:
break;
case ARMBuildAttrs::DisallowDIV:
Features.AddFeature("hwdiv", false);
Features.AddFeature("hwdiv-arm", false);
break;
case ARMBuildAttrs::AllowDIVExt:
Features.AddFeature("hwdiv");
Features.AddFeature("hwdiv-arm");
break;
}
}
return Features;
}
SubtargetFeatures ELFObjectFileBase::getRISCVFeatures() const {
SubtargetFeatures Features;
unsigned PlatformFlags = getPlatformFlags();
if (PlatformFlags & ELF::EF_RISCV_RVC) {
Features.AddFeature("c");
}
return Features;
}
SubtargetFeatures ELFObjectFileBase::getFeatures() const {
switch (getEMachine()) {
case ELF::EM_MIPS:
return getMIPSFeatures();
case ELF::EM_ARM:
return getARMFeatures();
case ELF::EM_RISCV:
return getRISCVFeatures();
default:
return SubtargetFeatures();
}
}
// FIXME Encode from a tablegen description or target parser.
void ELFObjectFileBase::setARMSubArch(Triple &TheTriple) const {
if (TheTriple.getSubArch() != Triple::NoSubArch)
return;
ARMAttributeParser Attributes;
if (Error E = getBuildAttributes(Attributes))
return;
std::string Triple;
// Default to ARM, but use the triple if it's been set.
if (TheTriple.isThumb())
Triple = "thumb";
else
Triple = "arm";
if (Attributes.hasAttribute(ARMBuildAttrs::CPU_arch)) {
switch(Attributes.getAttributeValue(ARMBuildAttrs::CPU_arch)) {
case ARMBuildAttrs::v4:
Triple += "v4";
break;
case ARMBuildAttrs::v4T:
Triple += "v4t";
break;
case ARMBuildAttrs::v5T:
Triple += "v5t";
break;
case ARMBuildAttrs::v5TE:
Triple += "v5te";
break;
case ARMBuildAttrs::v5TEJ:
Triple += "v5tej";
break;
case ARMBuildAttrs::v6:
Triple += "v6";
break;
case ARMBuildAttrs::v6KZ:
Triple += "v6kz";
break;
case ARMBuildAttrs::v6T2:
Triple += "v6t2";
break;
case ARMBuildAttrs::v6K:
Triple += "v6k";
break;
case ARMBuildAttrs::v7:
Triple += "v7";
break;
case ARMBuildAttrs::v6_M:
Triple += "v6m";
break;
case ARMBuildAttrs::v6S_M:
Triple += "v6sm";
break;
case ARMBuildAttrs::v7E_M:
Triple += "v7em";
break;
}
}
if (!isLittleEndian())
Triple += "eb";
TheTriple.setArchName(Triple);
}
std::vector<std::pair<DataRefImpl, uint64_t>>
ELFObjectFileBase::getPltAddresses() const {
std::string Err;
const auto Triple = makeTriple();
const auto *T = TargetRegistry::lookupTarget(Triple.str(), Err);
if (!T)
return {};
uint64_t JumpSlotReloc = 0;
switch (Triple.getArch()) {
case Triple::x86:
JumpSlotReloc = ELF::R_386_JUMP_SLOT;
break;
case Triple::x86_64:
JumpSlotReloc = ELF::R_X86_64_JUMP_SLOT;
break;
case Triple::aarch64:
JumpSlotReloc = ELF::R_AARCH64_JUMP_SLOT;
break;
default:
return {};
}
std::unique_ptr<const MCInstrInfo> MII(T->createMCInstrInfo());
std::unique_ptr<const MCInstrAnalysis> MIA(
T->createMCInstrAnalysis(MII.get()));
if (!MIA)
return {};
Optional<SectionRef> Plt = None, RelaPlt = None, GotPlt = None;
for (const SectionRef &Section : sections()) {
StringRef Name;
if (Section.getName(Name))
continue;
if (Name == ".plt")
Plt = Section;
else if (Name == ".rela.plt" || Name == ".rel.plt")
RelaPlt = Section;
else if (Name == ".got.plt")
GotPlt = Section;
}
if (!Plt || !RelaPlt || !GotPlt)
return {};
Expected<StringRef> PltContents = Plt->getContents();
if (!PltContents) {
consumeError(PltContents.takeError());
return {};
}
auto PltEntries = MIA->findPltEntries(Plt->getAddress(),
arrayRefFromStringRef(*PltContents),
GotPlt->getAddress(), Triple);
// Build a map from GOT entry virtual address to PLT entry virtual address.
DenseMap<uint64_t, uint64_t> GotToPlt;
for (const auto &Entry : PltEntries)
GotToPlt.insert(std::make_pair(Entry.second, Entry.first));
// Find the relocations in the dynamic relocation table that point to
// locations in the GOT for which we know the corresponding PLT entry.
std::vector<std::pair<DataRefImpl, uint64_t>> Result;
for (const auto &Relocation : RelaPlt->relocations()) {
if (Relocation.getType() != JumpSlotReloc)
continue;
auto PltEntryIter = GotToPlt.find(Relocation.getOffset());
if (PltEntryIter != GotToPlt.end())
Result.push_back(std::make_pair(
Relocation.getSymbol()->getRawDataRefImpl(), PltEntryIter->second));
}
return Result;
}
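// A minimal sketch of consuming getPltAddresses(): each entry pairs a
// dynamic symbol's DataRefImpl with the virtual address of its PLT stub.
// The helper name is hypothetical; llvm/Support/Format.h is assumed for
// format_hex().
static void dumpPltEntriesSketch(const ELFObjectFileBase &Obj,
                                 raw_ostream &OS) {
  for (const auto &Entry : Obj.getPltAddresses()) {
    // Recover a SymbolRef (and thus the name) from the raw DataRefImpl.
    SymbolRef Sym(Entry.first, &Obj);
    if (Expected<StringRef> Name = Sym.getName())
      OS << format_hex(Entry.second, 18) << " " << *Name << "\n";
    else
      consumeError(Name.takeError());
  }
}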
Index: stable/12/contrib/llvm-project/llvm/lib/Support/ARMTargetParser.cpp
===================================================================
--- stable/12/contrib/llvm-project/llvm/lib/Support/ARMTargetParser.cpp (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/lib/Support/ARMTargetParser.cpp (revision 356465)
@@ -1,626 +1,628 @@
//===-- ARMTargetParser - Parser for ARM target features --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a target parser to recognise ARM hardware features
// such as FPU/CPU/ARCH/extensions and specific support such as HWDIV.
//
//===----------------------------------------------------------------------===//
#include "llvm/Support/ARMTargetParser.h"
#include "llvm/ADT/StringSwitch.h"
#include <cctype>
using namespace llvm;
static StringRef getHWDivSynonym(StringRef HWDiv) {
return StringSwitch<StringRef>(HWDiv)
.Case("thumb,arm", "arm,thumb")
.Default(HWDiv);
}
// Allows partial match, e.g. "v7a" matches "armv7a".
ARM::ArchKind ARM::parseArch(StringRef Arch) {
Arch = getCanonicalArchName(Arch);
StringRef Syn = getArchSynonym(Arch);
for (const auto A : ARCHNames) {
if (A.getName().endswith(Syn))
return A.ID;
}
return ArchKind::INVALID;
}
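// For illustration, the partial matching above means all of the following
// agree (assuming only the public ARMTargetParser API):
//   parseArch("v7a") == parseArch("armv7a") == parseArch("armv7-a")
//                    == ArchKind::ARMV7A
// while an unrecognised string such as "v99x" yields ArchKind::INVALID.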
// Version number (e.g. v7 = 7).
unsigned ARM::parseArchVersion(StringRef Arch) {
Arch = getCanonicalArchName(Arch);
switch (parseArch(Arch)) {
case ArchKind::ARMV2:
case ArchKind::ARMV2A:
return 2;
case ArchKind::ARMV3:
case ArchKind::ARMV3M:
return 3;
case ArchKind::ARMV4:
case ArchKind::ARMV4T:
return 4;
case ArchKind::ARMV5T:
case ArchKind::ARMV5TE:
case ArchKind::IWMMXT:
case ArchKind::IWMMXT2:
case ArchKind::XSCALE:
case ArchKind::ARMV5TEJ:
return 5;
case ArchKind::ARMV6:
case ArchKind::ARMV6K:
case ArchKind::ARMV6T2:
case ArchKind::ARMV6KZ:
case ArchKind::ARMV6M:
return 6;
case ArchKind::ARMV7A:
case ArchKind::ARMV7VE:
case ArchKind::ARMV7R:
case ArchKind::ARMV7M:
case ArchKind::ARMV7S:
case ArchKind::ARMV7EM:
case ArchKind::ARMV7K:
return 7;
case ArchKind::ARMV8A:
case ArchKind::ARMV8_1A:
case ArchKind::ARMV8_2A:
case ArchKind::ARMV8_3A:
case ArchKind::ARMV8_4A:
case ArchKind::ARMV8_5A:
case ArchKind::ARMV8R:
case ArchKind::ARMV8MBaseline:
case ArchKind::ARMV8MMainline:
case ArchKind::ARMV8_1MMainline:
return 8;
case ArchKind::INVALID:
return 0;
}
llvm_unreachable("Unhandled architecture");
}
// Profile A/R/M
ARM::ProfileKind ARM::parseArchProfile(StringRef Arch) {
Arch = getCanonicalArchName(Arch);
switch (parseArch(Arch)) {
case ArchKind::ARMV6M:
case ArchKind::ARMV7M:
case ArchKind::ARMV7EM:
case ArchKind::ARMV8MMainline:
case ArchKind::ARMV8MBaseline:
case ArchKind::ARMV8_1MMainline:
return ProfileKind::M;
case ArchKind::ARMV7R:
case ArchKind::ARMV8R:
return ProfileKind::R;
case ArchKind::ARMV7A:
case ArchKind::ARMV7VE:
case ArchKind::ARMV7K:
case ArchKind::ARMV8A:
case ArchKind::ARMV8_1A:
case ArchKind::ARMV8_2A:
case ArchKind::ARMV8_3A:
case ArchKind::ARMV8_4A:
case ArchKind::ARMV8_5A:
return ProfileKind::A;
case ArchKind::ARMV2:
case ArchKind::ARMV2A:
case ArchKind::ARMV3:
case ArchKind::ARMV3M:
case ArchKind::ARMV4:
case ArchKind::ARMV4T:
case ArchKind::ARMV5T:
case ArchKind::ARMV5TE:
case ArchKind::ARMV5TEJ:
case ArchKind::ARMV6:
case ArchKind::ARMV6K:
case ArchKind::ARMV6T2:
case ArchKind::ARMV6KZ:
case ArchKind::ARMV7S:
case ArchKind::IWMMXT:
case ArchKind::IWMMXT2:
case ArchKind::XSCALE:
case ArchKind::INVALID:
return ProfileKind::INVALID;
}
llvm_unreachable("Unhandled architecture");
}
StringRef ARM::getArchSynonym(StringRef Arch) {
return StringSwitch<StringRef>(Arch)
.Case("v5", "v5t")
.Case("v5e", "v5te")
.Case("v6j", "v6")
.Case("v6hl", "v6k")
.Cases("v6m", "v6sm", "v6s-m", "v6-m")
.Cases("v6z", "v6zk", "v6kz")
.Cases("v7", "v7a", "v7hl", "v7l", "v7-a")
.Case("v7r", "v7-r")
.Case("v7m", "v7-m")
.Case("v7em", "v7e-m")
.Cases("v8", "v8a", "v8l", "aarch64", "arm64", "v8-a")
.Case("v8.1a", "v8.1-a")
.Case("v8.2a", "v8.2-a")
.Case("v8.3a", "v8.3-a")
.Case("v8.4a", "v8.4-a")
.Case("v8.5a", "v8.5-a")
.Case("v8r", "v8-r")
.Case("v8m.base", "v8-m.base")
.Case("v8m.main", "v8-m.main")
.Case("v8.1m.main", "v8.1-m.main")
.Default(Arch);
}
bool ARM::getFPUFeatures(unsigned FPUKind, std::vector<StringRef> &Features) {
if (FPUKind >= FK_LAST || FPUKind == FK_INVALID)
return false;
static const struct FPUFeatureNameInfo {
const char *PlusName, *MinusName;
FPUVersion MinVersion;
FPURestriction MaxRestriction;
} FPUFeatureInfoList[] = {
// We have to specify the + and - versions of the name in full so
// that we can return them as static StringRefs.
//
// Also, the SubtargetFeatures ending in just "sp" are listed here
// under FPURestriction::None, which is the only FPURestriction in
// which they would be valid (since FPURestriction::SP doesn't
// exist).
{"+fpregs", "-fpregs", FPUVersion::VFPV2, FPURestriction::SP_D16},
{"+vfp2", "-vfp2", FPUVersion::VFPV2, FPURestriction::D16},
+ {"+vfp2d16", "-vfp2d16", FPUVersion::VFPV2, FPURestriction::D16},
+ {"+vfp2d16sp", "-vfp2d16sp", FPUVersion::VFPV2, FPURestriction::SP_D16},
{"+vfp2sp", "-vfp2sp", FPUVersion::VFPV2, FPURestriction::SP_D16},
{"+vfp3", "-vfp3", FPUVersion::VFPV3, FPURestriction::None},
{"+vfp3d16", "-vfp3d16", FPUVersion::VFPV3, FPURestriction::D16},
{"+vfp3d16sp", "-vfp3d16sp", FPUVersion::VFPV3, FPURestriction::SP_D16},
{"+vfp3sp", "-vfp3sp", FPUVersion::VFPV3, FPURestriction::None},
{"+fp16", "-fp16", FPUVersion::VFPV3_FP16, FPURestriction::SP_D16},
{"+vfp4", "-vfp4", FPUVersion::VFPV4, FPURestriction::None},
{"+vfp4d16", "-vfp4d16", FPUVersion::VFPV4, FPURestriction::D16},
{"+vfp4d16sp", "-vfp4d16sp", FPUVersion::VFPV4, FPURestriction::SP_D16},
{"+vfp4sp", "-vfp4sp", FPUVersion::VFPV4, FPURestriction::None},
{"+fp-armv8", "-fp-armv8", FPUVersion::VFPV5, FPURestriction::None},
{"+fp-armv8d16", "-fp-armv8d16", FPUVersion::VFPV5, FPURestriction::D16},
{"+fp-armv8d16sp", "-fp-armv8d16sp", FPUVersion::VFPV5, FPURestriction::SP_D16},
{"+fp-armv8sp", "-fp-armv8sp", FPUVersion::VFPV5, FPURestriction::None},
{"+fullfp16", "-fullfp16", FPUVersion::VFPV5_FULLFP16, FPURestriction::SP_D16},
{"+fp64", "-fp64", FPUVersion::VFPV2, FPURestriction::D16},
{"+d32", "-d32", FPUVersion::VFPV3, FPURestriction::None},
};
for (const auto &Info: FPUFeatureInfoList) {
if (FPUNames[FPUKind].FPUVer >= Info.MinVersion &&
FPUNames[FPUKind].Restriction <= Info.MaxRestriction)
Features.push_back(Info.PlusName);
else
Features.push_back(Info.MinusName);
}
static const struct NeonFeatureNameInfo {
const char *PlusName, *MinusName;
NeonSupportLevel MinSupportLevel;
} NeonFeatureInfoList[] = {
{"+neon", "-neon", NeonSupportLevel::Neon},
{"+crypto", "-crypto", NeonSupportLevel::Crypto},
};
for (const auto &Info: NeonFeatureInfoList) {
if (FPUNames[FPUKind].NeonSupport >= Info.MinSupportLevel)
Features.push_back(Info.PlusName);
else
Features.push_back(Info.MinusName);
}
return true;
}
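// A minimal usage sketch: callers typically parse an FPU name first, then
// collect the resulting feature list (join() assumes
// llvm/ADT/StringExtras.h; the strings here are illustrative only):
//   std::vector<StringRef> F;
//   if (ARM::getFPUFeatures(ARM::parseFPU("vfpv4-d16"), F))
//     errs() << join(F, ",") << "\n"; // e.g. "+fpregs,+vfp2,..."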
// Little/Big endian
ARM::EndianKind ARM::parseArchEndian(StringRef Arch) {
if (Arch.startswith("armeb") || Arch.startswith("thumbeb") ||
Arch.startswith("aarch64_be"))
return EndianKind::BIG;
if (Arch.startswith("arm") || Arch.startswith("thumb")) {
if (Arch.endswith("eb"))
return EndianKind::BIG;
else
return EndianKind::LITTLE;
}
if (Arch.startswith("aarch64") || Arch.startswith("aarch64_32"))
return EndianKind::LITTLE;
return EndianKind::INVALID;
}
// ARM, Thumb, AArch64
ARM::ISAKind ARM::parseArchISA(StringRef Arch) {
return StringSwitch<ISAKind>(Arch)
.StartsWith("aarch64", ISAKind::AARCH64)
.StartsWith("arm64", ISAKind::AARCH64)
.StartsWith("thumb", ISAKind::THUMB)
.StartsWith("arm", ISAKind::ARM)
.Default(ISAKind::INVALID);
}
unsigned ARM::parseFPU(StringRef FPU) {
StringRef Syn = getFPUSynonym(FPU);
for (const auto F : FPUNames) {
if (Syn == F.getName())
return F.ID;
}
return FK_INVALID;
}
ARM::NeonSupportLevel ARM::getFPUNeonSupportLevel(unsigned FPUKind) {
if (FPUKind >= FK_LAST)
return NeonSupportLevel::None;
return FPUNames[FPUKind].NeonSupport;
}
// MArch is expected to be of the form (arm|thumb)?(eb)?(v.+)?(eb)?, but
// (iwmmxt|xscale)(eb)? is also permitted. If the former, return
// "v.+", if the latter, return unmodified string, minus 'eb'.
// If invalid, return empty string.
StringRef ARM::getCanonicalArchName(StringRef Arch) {
size_t offset = StringRef::npos;
StringRef A = Arch;
StringRef Error = "";
// Begins with "arm" / "thumb", move past it.
if (A.startswith("arm64_32"))
offset = 8;
else if (A.startswith("arm64"))
offset = 5;
else if (A.startswith("aarch64_32"))
offset = 10;
else if (A.startswith("arm"))
offset = 3;
else if (A.startswith("thumb"))
offset = 5;
else if (A.startswith("aarch64")) {
offset = 7;
// AArch64 uses "_be", not "eb" suffix.
if (A.find("eb") != StringRef::npos)
return Error;
if (A.substr(offset, 3) == "_be")
offset += 3;
}
// Ex. "armebv7", move past the "eb".
if (offset != StringRef::npos && A.substr(offset, 2) == "eb")
offset += 2;
// Or, if it ends with eb ("armv7eb"), chop it off.
else if (A.endswith("eb"))
A = A.substr(0, A.size() - 2);
// Trim the head
if (offset != StringRef::npos)
A = A.substr(offset);
// Empty string means offset reached the end, which means it's valid.
if (A.empty())
return Arch;
// Only match non-marketing names
if (offset != StringRef::npos) {
// Must start with 'vN'.
if (A.size() >= 2 && (A[0] != 'v' || !std::isdigit(A[1])))
return Error;
// Can't have an extra 'eb'.
if (A.find("eb") != StringRef::npos)
return Error;
}
// Arch will either be a 'v' name (v7a) or a marketing name (xscale).
return A;
}
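// Worked examples of the canonicalization rules above (illustrative):
//   getCanonicalArchName("armv7a")    -> "v7a"    (head trimmed)
//   getCanonicalArchName("armebv7a")  -> "v7a"    ("eb" consumed)
//   getCanonicalArchName("xscale")    -> "xscale" (marketing name kept)
//   getCanonicalArchName("aarch64eb") -> ""       (AArch64 spells it "_be")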
StringRef ARM::getFPUSynonym(StringRef FPU) {
return StringSwitch<StringRef>(FPU)
.Cases("fpa", "fpe2", "fpe3", "maverick", "invalid") // Unsupported
.Case("vfp2", "vfpv2")
.Case("vfp3", "vfpv3")
.Case("vfp4", "vfpv4")
.Case("vfp3-d16", "vfpv3-d16")
.Case("vfp4-d16", "vfpv4-d16")
.Cases("fp4-sp-d16", "vfpv4-sp-d16", "fpv4-sp-d16")
.Cases("fp4-dp-d16", "fpv4-dp-d16", "vfpv4-d16")
.Case("fp5-sp-d16", "fpv5-sp-d16")
.Cases("fp5-dp-d16", "fpv5-dp-d16", "fpv5-d16")
// FIXME: Clang uses it, but it's bogus, since neon defaults to vfpv3.
.Case("neon-vfpv3", "neon")
.Default(FPU);
}
StringRef ARM::getFPUName(unsigned FPUKind) {
if (FPUKind >= FK_LAST)
return StringRef();
return FPUNames[FPUKind].getName();
}
ARM::FPUVersion ARM::getFPUVersion(unsigned FPUKind) {
if (FPUKind >= FK_LAST)
return FPUVersion::NONE;
return FPUNames[FPUKind].FPUVer;
}
ARM::FPURestriction ARM::getFPURestriction(unsigned FPUKind) {
if (FPUKind >= FK_LAST)
return FPURestriction::None;
return FPUNames[FPUKind].Restriction;
}
unsigned ARM::getDefaultFPU(StringRef CPU, ARM::ArchKind AK) {
if (CPU == "generic")
return ARM::ARCHNames[static_cast<unsigned>(AK)].DefaultFPU;
return StringSwitch<unsigned>(CPU)
#define ARM_CPU_NAME(NAME, ID, DEFAULT_FPU, IS_DEFAULT, DEFAULT_EXT) \
.Case(NAME, DEFAULT_FPU)
#include "llvm/Support/ARMTargetParser.def"
.Default(ARM::FK_INVALID);
}
unsigned ARM::getDefaultExtensions(StringRef CPU, ARM::ArchKind AK) {
if (CPU == "generic")
return ARM::ARCHNames[static_cast<unsigned>(AK)].ArchBaseExtensions;
return StringSwitch<unsigned>(CPU)
#define ARM_CPU_NAME(NAME, ID, DEFAULT_FPU, IS_DEFAULT, DEFAULT_EXT) \
.Case(NAME, \
ARCHNames[static_cast<unsigned>(ArchKind::ID)].ArchBaseExtensions | \
DEFAULT_EXT)
#include "llvm/Support/ARMTargetParser.def"
.Default(ARM::AEK_INVALID);
}
bool ARM::getHWDivFeatures(unsigned HWDivKind,
std::vector<StringRef> &Features) {
if (HWDivKind == AEK_INVALID)
return false;
if (HWDivKind & AEK_HWDIVARM)
Features.push_back("+hwdiv-arm");
else
Features.push_back("-hwdiv-arm");
if (HWDivKind & AEK_HWDIVTHUMB)
Features.push_back("+hwdiv");
else
Features.push_back("-hwdiv");
return true;
}
bool ARM::getExtensionFeatures(unsigned Extensions,
std::vector<StringRef> &Features) {
if (Extensions == AEK_INVALID)
return false;
for (const auto AE : ARCHExtNames) {
if ((Extensions & AE.ID) == AE.ID && AE.Feature)
Features.push_back(AE.Feature);
else if (AE.NegFeature)
Features.push_back(AE.NegFeature);
}
return getHWDivFeatures(Extensions, Features);
}
StringRef ARM::getArchName(ARM::ArchKind AK) {
return ARCHNames[static_cast<unsigned>(AK)].getName();
}
StringRef ARM::getCPUAttr(ARM::ArchKind AK) {
return ARCHNames[static_cast<unsigned>(AK)].getCPUAttr();
}
StringRef ARM::getSubArch(ARM::ArchKind AK) {
return ARCHNames[static_cast<unsigned>(AK)].getSubArch();
}
unsigned ARM::getArchAttr(ARM::ArchKind AK) {
return ARCHNames[static_cast<unsigned>(AK)].ArchAttr;
}
StringRef ARM::getArchExtName(unsigned ArchExtKind) {
for (const auto AE : ARCHExtNames) {
if (ArchExtKind == AE.ID)
return AE.getName();
}
return StringRef();
}
static bool stripNegationPrefix(StringRef &Name) {
if (Name.startswith("no")) {
Name = Name.substr(2);
return true;
}
return false;
}
StringRef ARM::getArchExtFeature(StringRef ArchExt) {
bool Negated = stripNegationPrefix(ArchExt);
for (const auto AE : ARCHExtNames) {
if (AE.Feature && ArchExt == AE.getName())
return StringRef(Negated ? AE.NegFeature : AE.Feature);
}
return StringRef();
}
static unsigned findDoublePrecisionFPU(unsigned InputFPUKind) {
const ARM::FPUName &InputFPU = ARM::FPUNames[InputFPUKind];
// If the input FPU already supports double-precision, then there
// isn't any different FPU we can return here.
//
// The current available FPURestriction values are None (no
// restriction), D16 (only 16 d-regs) and SP_D16 (16 d-regs
// and single precision only); there's no value representing
// SP restriction without D16. So this test just means 'is it
// SP only?'.
if (InputFPU.Restriction != ARM::FPURestriction::SP_D16)
return ARM::FK_INVALID;
// Otherwise, look for an FPU entry with all the same fields, except
// that SP_D16 has been replaced with just D16, representing adding
// double precision and not changing anything else.
for (const ARM::FPUName &CandidateFPU : ARM::FPUNames) {
if (CandidateFPU.FPUVer == InputFPU.FPUVer &&
CandidateFPU.NeonSupport == InputFPU.NeonSupport &&
CandidateFPU.Restriction == ARM::FPURestriction::D16) {
return CandidateFPU.ID;
}
}
// nothing found
return ARM::FK_INVALID;
}
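// For illustration: assuming the FPU table from ARMTargetParser.def, an
// input of FK_FPV5_SP_D16 (VFPv5, SP_D16) is expected to map to
// FK_FPV5_D16 (same version and Neon level, restriction relaxed to D16),
// while any FPU that already supports double precision yields FK_INVALID.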
static unsigned getAEKID(StringRef ArchExtName) {
for (const auto AE : ARM::ARCHExtNames)
if (AE.getName() == ArchExtName)
return AE.ID;
return ARM::AEK_INVALID;
}
bool ARM::appendArchExtFeatures(
StringRef CPU, ARM::ArchKind AK, StringRef ArchExt,
std::vector<StringRef> &Features) {
size_t StartingNumFeatures = Features.size();
const bool Negated = stripNegationPrefix(ArchExt);
unsigned ID = getAEKID(ArchExt);
if (ID == AEK_INVALID)
return false;
for (const auto AE : ARCHExtNames) {
if (Negated && (AE.ID & ID) == ID && AE.NegFeature)
Features.push_back(AE.NegFeature);
else if (AE.ID == ID && AE.Feature)
Features.push_back(AE.Feature);
}
if (CPU == "")
CPU = "generic";
if (ArchExt == "fp" || ArchExt == "fp.dp") {
unsigned FPUKind;
if (ArchExt == "fp.dp") {
if (Negated) {
Features.push_back("-fp64");
return true;
}
FPUKind = findDoublePrecisionFPU(getDefaultFPU(CPU, AK));
} else if (Negated) {
FPUKind = ARM::FK_NONE;
} else {
FPUKind = getDefaultFPU(CPU, AK);
}
return ARM::getFPUFeatures(FPUKind, Features);
}
return StartingNumFeatures != Features.size();
}
StringRef ARM::getHWDivName(unsigned HWDivKind) {
for (const auto D : HWDivNames) {
if (HWDivKind == D.ID)
return D.getName();
}
return StringRef();
}
StringRef ARM::getDefaultCPU(StringRef Arch) {
ArchKind AK = parseArch(Arch);
if (AK == ArchKind::INVALID)
return StringRef();
// Look through the CPU entries to find the one flagged as the default
// for this AK.
for (const auto CPU : CPUNames) {
if (CPU.ArchID == AK && CPU.Default)
return CPU.getName();
}
// If we can't find a default then target the architecture instead
return "generic";
}
unsigned ARM::parseHWDiv(StringRef HWDiv) {
StringRef Syn = getHWDivSynonym(HWDiv);
for (const auto D : HWDivNames) {
if (Syn == D.getName())
return D.ID;
}
return AEK_INVALID;
}
unsigned ARM::parseArchExt(StringRef ArchExt) {
for (const auto A : ARCHExtNames) {
if (ArchExt == A.getName())
return A.ID;
}
return AEK_INVALID;
}
ARM::ArchKind ARM::parseCPUArch(StringRef CPU) {
for (const auto C : CPUNames) {
if (CPU == C.getName())
return C.ArchID;
}
return ArchKind::INVALID;
}
void ARM::fillValidCPUArchList(SmallVectorImpl<StringRef> &Values) {
for (const CpuNames<ArchKind> &Arch : CPUNames) {
if (Arch.ArchID != ArchKind::INVALID)
Values.push_back(Arch.getName());
}
}
StringRef ARM::computeDefaultTargetABI(const Triple &TT, StringRef CPU) {
StringRef ArchName =
CPU.empty() ? TT.getArchName() : getArchName(parseCPUArch(CPU));
if (TT.isOSBinFormatMachO()) {
if (TT.getEnvironment() == Triple::EABI ||
TT.getOS() == Triple::UnknownOS ||
parseArchProfile(ArchName) == ProfileKind::M)
return "aapcs";
if (TT.isWatchABI())
return "aapcs16";
return "apcs-gnu";
} else if (TT.isOSWindows())
// FIXME: this is invalid for WindowsCE.
return "aapcs";
// Select the default based on the platform.
switch (TT.getEnvironment()) {
case Triple::Android:
case Triple::GNUEABI:
case Triple::GNUEABIHF:
case Triple::MuslEABI:
case Triple::MuslEABIHF:
return "aapcs-linux";
case Triple::EABIHF:
case Triple::EABI:
return "aapcs";
default:
if (TT.isOSNetBSD())
return "apcs-gnu";
if (TT.isOSOpenBSD())
return "aapcs-linux";
return "aapcs";
}
}
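// Worked examples (illustrative, assuming llvm/ADT/Triple.h):
//   computeDefaultTargetABI(Triple("armv7-unknown-linux-gnueabihf"), "")
//     -> "aapcs-linux" (GNUEABIHF environment)
//   computeDefaultTargetABI(Triple("thumbv7m-none-eabi"), "")
//     -> "aapcs"       (EABI environment)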
Index: stable/12/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp
===================================================================
--- stable/12/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp (revision 356465)
@@ -1,1587 +1,1588 @@
//==-- AArch64InstPrinter.cpp - Convert AArch64 MCInst to assembly syntax --==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This class prints an AArch64 MCInst to a .s file.
//
//===----------------------------------------------------------------------===//
#include "AArch64InstPrinter.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <string>
using namespace llvm;
#define DEBUG_TYPE "asm-printer"
#define GET_INSTRUCTION_NAME
#define PRINT_ALIAS_INSTR
#include "AArch64GenAsmWriter.inc"
#define GET_INSTRUCTION_NAME
#define PRINT_ALIAS_INSTR
#include "AArch64GenAsmWriter1.inc"
AArch64InstPrinter::AArch64InstPrinter(const MCAsmInfo &MAI,
const MCInstrInfo &MII,
const MCRegisterInfo &MRI)
: MCInstPrinter(MAI, MII, MRI) {}
AArch64AppleInstPrinter::AArch64AppleInstPrinter(const MCAsmInfo &MAI,
const MCInstrInfo &MII,
const MCRegisterInfo &MRI)
: AArch64InstPrinter(MAI, MII, MRI) {}
void AArch64InstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const {
// This is for .cfi directives.
OS << getRegisterName(RegNo);
}
void AArch64InstPrinter::printInst(const MCInst *MI, raw_ostream &O,
StringRef Annot,
const MCSubtargetInfo &STI) {
// Check for special encodings and print the canonical alias instead.
unsigned Opcode = MI->getOpcode();
if (Opcode == AArch64::SYSxt)
if (printSysAlias(MI, STI, O)) {
printAnnotation(O, Annot);
return;
}
// SBFM/UBFM should print to a nicer aliased form if possible.
if (Opcode == AArch64::SBFMXri || Opcode == AArch64::SBFMWri ||
Opcode == AArch64::UBFMXri || Opcode == AArch64::UBFMWri) {
const MCOperand &Op0 = MI->getOperand(0);
const MCOperand &Op1 = MI->getOperand(1);
const MCOperand &Op2 = MI->getOperand(2);
const MCOperand &Op3 = MI->getOperand(3);
bool IsSigned = (Opcode == AArch64::SBFMXri || Opcode == AArch64::SBFMWri);
bool Is64Bit = (Opcode == AArch64::SBFMXri || Opcode == AArch64::UBFMXri);
if (Op2.isImm() && Op2.getImm() == 0 && Op3.isImm()) {
const char *AsmMnemonic = nullptr;
switch (Op3.getImm()) {
default:
break;
case 7:
if (IsSigned)
AsmMnemonic = "sxtb";
else if (!Is64Bit)
AsmMnemonic = "uxtb";
break;
case 15:
if (IsSigned)
AsmMnemonic = "sxth";
else if (!Is64Bit)
AsmMnemonic = "uxth";
break;
case 31:
// *xtw is only valid for signed 64-bit operations.
if (Is64Bit && IsSigned)
AsmMnemonic = "sxtw";
break;
}
if (AsmMnemonic) {
O << '\t' << AsmMnemonic << '\t' << getRegisterName(Op0.getReg())
<< ", " << getRegisterName(getWRegFromXReg(Op1.getReg()));
printAnnotation(O, Annot);
return;
}
}
// All immediate shifts are aliases, implemented using the Bitfield
// instruction. In all cases the immediate shift amount must be in the
// range 0 to (reg.size - 1).
if (Op2.isImm() && Op3.isImm()) {
const char *AsmMnemonic = nullptr;
int shift = 0;
int64_t immr = Op2.getImm();
int64_t imms = Op3.getImm();
if (Opcode == AArch64::UBFMWri && imms != 0x1F && ((imms + 1) == immr)) {
AsmMnemonic = "lsl";
shift = 31 - imms;
} else if (Opcode == AArch64::UBFMXri && imms != 0x3f &&
((imms + 1 == immr))) {
AsmMnemonic = "lsl";
shift = 63 - imms;
} else if (Opcode == AArch64::UBFMWri && imms == 0x1f) {
AsmMnemonic = "lsr";
shift = immr;
} else if (Opcode == AArch64::UBFMXri && imms == 0x3f) {
AsmMnemonic = "lsr";
shift = immr;
} else if (Opcode == AArch64::SBFMWri && imms == 0x1f) {
AsmMnemonic = "asr";
shift = immr;
} else if (Opcode == AArch64::SBFMXri && imms == 0x3f) {
AsmMnemonic = "asr";
shift = immr;
}
if (AsmMnemonic) {
O << '\t' << AsmMnemonic << '\t' << getRegisterName(Op0.getReg())
<< ", " << getRegisterName(Op1.getReg()) << ", #" << shift;
printAnnotation(O, Annot);
return;
}
}
// SBFIZ/UBFIZ aliases
if (Op2.getImm() > Op3.getImm()) {
O << '\t' << (IsSigned ? "sbfiz" : "ubfiz") << '\t'
<< getRegisterName(Op0.getReg()) << ", " << getRegisterName(Op1.getReg())
<< ", #" << (Is64Bit ? 64 : 32) - Op2.getImm() << ", #" << Op3.getImm() + 1;
printAnnotation(O, Annot);
return;
}
// Otherwise SBFX/UBFX is the preferred form
O << '\t' << (IsSigned ? "sbfx" : "ubfx") << '\t'
<< getRegisterName(Op0.getReg()) << ", " << getRegisterName(Op1.getReg())
<< ", #" << Op2.getImm() << ", #" << Op3.getImm() - Op2.getImm() + 1;
printAnnotation(O, Annot);
return;
}
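// For illustration, the W-register alias selection above resolves, e.g.:
//   SBFMWri w0, w1, #0, #7   -> sxtb w0, w1
//   UBFMWri w0, w1, #27, #26 -> lsl  w0, w1, #5 (imms + 1 == immr)
//   UBFMWri w0, w1, #5, #31  -> lsr  w0, w1, #5 (imms == 0x1f)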
if (Opcode == AArch64::BFMXri || Opcode == AArch64::BFMWri) {
const MCOperand &Op0 = MI->getOperand(0); // Op1 == Op0
const MCOperand &Op2 = MI->getOperand(2);
int ImmR = MI->getOperand(3).getImm();
int ImmS = MI->getOperand(4).getImm();
if ((Op2.getReg() == AArch64::WZR || Op2.getReg() == AArch64::XZR) &&
- (ImmR == 0 || ImmS < ImmR)) {
+ (ImmR == 0 || ImmS < ImmR) &&
+ STI.getFeatureBits()[AArch64::HasV8_2aOps]) {
// BFC takes precedence over its entire range, slightly differently to BFI.
int BitWidth = Opcode == AArch64::BFMXri ? 64 : 32;
int LSB = (BitWidth - ImmR) % BitWidth;
int Width = ImmS + 1;
O << "\tbfc\t" << getRegisterName(Op0.getReg())
<< ", #" << LSB << ", #" << Width;
printAnnotation(O, Annot);
return;
} else if (ImmS < ImmR) {
// BFI alias
int BitWidth = Opcode == AArch64::BFMXri ? 64 : 32;
int LSB = (BitWidth - ImmR) % BitWidth;
int Width = ImmS + 1;
O << "\tbfi\t" << getRegisterName(Op0.getReg()) << ", "
<< getRegisterName(Op2.getReg()) << ", #" << LSB << ", #" << Width;
printAnnotation(O, Annot);
return;
}
int LSB = ImmR;
int Width = ImmS - ImmR + 1;
// Otherwise BFXIL is the preferred form
O << "\tbfxil\t"
<< getRegisterName(Op0.getReg()) << ", " << getRegisterName(Op2.getReg())
<< ", #" << LSB << ", #" << Width;
printAnnotation(O, Annot);
return;
}
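// For illustration, the BFM alias selection above resolves, e.g. (operands
// are dst, tied dst, src, ImmR, ImmS):
//   BFMWri w0, w0, wzr, #28, #3 -> bfc   w0, #4, #4     (requires v8.2a)
//   BFMWri w0, w0, w1,  #28, #3 -> bfi   w0, w1, #4, #4 (ImmS < ImmR)
//   BFMWri w0, w0, w1,  #4, #11 -> bfxil w0, w1, #4, #8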
// Symbolic operands for MOVZ, MOVN and MOVK already imply a shift
// (e.g. :gottprel_g1: is always going to be "lsl #16") so the shift should
// not be printed.
if ((Opcode == AArch64::MOVZXi || Opcode == AArch64::MOVZWi ||
Opcode == AArch64::MOVNXi || Opcode == AArch64::MOVNWi) &&
MI->getOperand(1).isExpr()) {
if (Opcode == AArch64::MOVZXi || Opcode == AArch64::MOVZWi)
O << "\tmovz\t";
else
O << "\tmovn\t";
O << getRegisterName(MI->getOperand(0).getReg()) << ", #";
MI->getOperand(1).getExpr()->print(O, &MAI);
return;
}
if ((Opcode == AArch64::MOVKXi || Opcode == AArch64::MOVKWi) &&
MI->getOperand(2).isExpr()) {
O << "\tmovk\t" << getRegisterName(MI->getOperand(0).getReg()) << ", #";
MI->getOperand(2).getExpr()->print(O, &MAI);
return;
}
// MOVZ, MOVN and "ORR wzr, #imm" instructions are aliases for MOV, but their
// domains overlap so they need to be prioritized. The chain is "MOVZ lsl #0 >
// MOVZ lsl #N > MOVN lsl #0 > MOVN lsl #N > ORR". The highest instruction
// that can represent the move is the MOV alias, and the rest get printed
// normally.
if ((Opcode == AArch64::MOVZXi || Opcode == AArch64::MOVZWi) &&
MI->getOperand(1).isImm() && MI->getOperand(2).isImm()) {
int RegWidth = Opcode == AArch64::MOVZXi ? 64 : 32;
int Shift = MI->getOperand(2).getImm();
uint64_t Value = (uint64_t)MI->getOperand(1).getImm() << Shift;
if (AArch64_AM::isMOVZMovAlias(Value, Shift,
Opcode == AArch64::MOVZXi ? 64 : 32)) {
O << "\tmov\t" << getRegisterName(MI->getOperand(0).getReg()) << ", #"
<< formatImm(SignExtend64(Value, RegWidth));
return;
}
}
if ((Opcode == AArch64::MOVNXi || Opcode == AArch64::MOVNWi) &&
MI->getOperand(1).isImm() && MI->getOperand(2).isImm()) {
int RegWidth = Opcode == AArch64::MOVNXi ? 64 : 32;
int Shift = MI->getOperand(2).getImm();
uint64_t Value = ~((uint64_t)MI->getOperand(1).getImm() << Shift);
if (RegWidth == 32)
Value = Value & 0xffffffff;
if (AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth)) {
O << "\tmov\t" << getRegisterName(MI->getOperand(0).getReg()) << ", #"
<< formatImm(SignExtend64(Value, RegWidth));
return;
}
}
if ((Opcode == AArch64::ORRXri || Opcode == AArch64::ORRWri) &&
(MI->getOperand(1).getReg() == AArch64::XZR ||
MI->getOperand(1).getReg() == AArch64::WZR) &&
MI->getOperand(2).isImm()) {
int RegWidth = Opcode == AArch64::ORRXri ? 64 : 32;
uint64_t Value = AArch64_AM::decodeLogicalImmediate(
MI->getOperand(2).getImm(), RegWidth);
if (!AArch64_AM::isAnyMOVWMovAlias(Value, RegWidth)) {
O << "\tmov\t" << getRegisterName(MI->getOperand(0).getReg()) << ", #"
<< formatImm(SignExtend64(Value, RegWidth));
return;
}
}
if (Opcode == AArch64::CompilerBarrier) {
O << '\t' << MAI.getCommentString() << " COMPILER BARRIER";
printAnnotation(O, Annot);
return;
}
// Instruction TSB is specified as a one-operand instruction, but 'csync' is
// not encoded, so for printing it is treated as a special case here:
if (Opcode == AArch64::TSB) {
O << "\ttsb\tcsync";
return;
}
if (!printAliasInstr(MI, STI, O))
printInstruction(MI, STI, O);
printAnnotation(O, Annot);
if (atomicBarrierDroppedOnZero(Opcode) &&
(MI->getOperand(0).getReg() == AArch64::XZR ||
MI->getOperand(0).getReg() == AArch64::WZR)) {
printAnnotation(O, "acquire semantics dropped since destination is zero");
}
}
static bool isTblTbxInstruction(unsigned Opcode, StringRef &Layout,
bool &IsTbx) {
switch (Opcode) {
case AArch64::TBXv8i8One:
case AArch64::TBXv8i8Two:
case AArch64::TBXv8i8Three:
case AArch64::TBXv8i8Four:
IsTbx = true;
Layout = ".8b";
return true;
case AArch64::TBLv8i8One:
case AArch64::TBLv8i8Two:
case AArch64::TBLv8i8Three:
case AArch64::TBLv8i8Four:
IsTbx = false;
Layout = ".8b";
return true;
case AArch64::TBXv16i8One:
case AArch64::TBXv16i8Two:
case AArch64::TBXv16i8Three:
case AArch64::TBXv16i8Four:
IsTbx = true;
Layout = ".16b";
return true;
case AArch64::TBLv16i8One:
case AArch64::TBLv16i8Two:
case AArch64::TBLv16i8Three:
case AArch64::TBLv16i8Four:
IsTbx = false;
Layout = ".16b";
return true;
default:
return false;
}
}
struct LdStNInstrDesc {
unsigned Opcode;
const char *Mnemonic;
const char *Layout;
int ListOperand;
bool HasLane;
int NaturalOffset;
};
static const LdStNInstrDesc LdStNInstInfo[] = {
{ AArch64::LD1i8, "ld1", ".b", 1, true, 0 },
{ AArch64::LD1i16, "ld1", ".h", 1, true, 0 },
{ AArch64::LD1i32, "ld1", ".s", 1, true, 0 },
{ AArch64::LD1i64, "ld1", ".d", 1, true, 0 },
{ AArch64::LD1i8_POST, "ld1", ".b", 2, true, 1 },
{ AArch64::LD1i16_POST, "ld1", ".h", 2, true, 2 },
{ AArch64::LD1i32_POST, "ld1", ".s", 2, true, 4 },
{ AArch64::LD1i64_POST, "ld1", ".d", 2, true, 8 },
{ AArch64::LD1Rv16b, "ld1r", ".16b", 0, false, 0 },
{ AArch64::LD1Rv8h, "ld1r", ".8h", 0, false, 0 },
{ AArch64::LD1Rv4s, "ld1r", ".4s", 0, false, 0 },
{ AArch64::LD1Rv2d, "ld1r", ".2d", 0, false, 0 },
{ AArch64::LD1Rv8b, "ld1r", ".8b", 0, false, 0 },
{ AArch64::LD1Rv4h, "ld1r", ".4h", 0, false, 0 },
{ AArch64::LD1Rv2s, "ld1r", ".2s", 0, false, 0 },
{ AArch64::LD1Rv1d, "ld1r", ".1d", 0, false, 0 },
{ AArch64::LD1Rv16b_POST, "ld1r", ".16b", 1, false, 1 },
{ AArch64::LD1Rv8h_POST, "ld1r", ".8h", 1, false, 2 },
{ AArch64::LD1Rv4s_POST, "ld1r", ".4s", 1, false, 4 },
{ AArch64::LD1Rv2d_POST, "ld1r", ".2d", 1, false, 8 },
{ AArch64::LD1Rv8b_POST, "ld1r", ".8b", 1, false, 1 },
{ AArch64::LD1Rv4h_POST, "ld1r", ".4h", 1, false, 2 },
{ AArch64::LD1Rv2s_POST, "ld1r", ".2s", 1, false, 4 },
{ AArch64::LD1Rv1d_POST, "ld1r", ".1d", 1, false, 8 },
{ AArch64::LD1Onev16b, "ld1", ".16b", 0, false, 0 },
{ AArch64::LD1Onev8h, "ld1", ".8h", 0, false, 0 },
{ AArch64::LD1Onev4s, "ld1", ".4s", 0, false, 0 },
{ AArch64::LD1Onev2d, "ld1", ".2d", 0, false, 0 },
{ AArch64::LD1Onev8b, "ld1", ".8b", 0, false, 0 },
{ AArch64::LD1Onev4h, "ld1", ".4h", 0, false, 0 },
{ AArch64::LD1Onev2s, "ld1", ".2s", 0, false, 0 },
{ AArch64::LD1Onev1d, "ld1", ".1d", 0, false, 0 },
{ AArch64::LD1Onev16b_POST, "ld1", ".16b", 1, false, 16 },
{ AArch64::LD1Onev8h_POST, "ld1", ".8h", 1, false, 16 },
{ AArch64::LD1Onev4s_POST, "ld1", ".4s", 1, false, 16 },
{ AArch64::LD1Onev2d_POST, "ld1", ".2d", 1, false, 16 },
{ AArch64::LD1Onev8b_POST, "ld1", ".8b", 1, false, 8 },
{ AArch64::LD1Onev4h_POST, "ld1", ".4h", 1, false, 8 },
{ AArch64::LD1Onev2s_POST, "ld1", ".2s", 1, false, 8 },
{ AArch64::LD1Onev1d_POST, "ld1", ".1d", 1, false, 8 },
{ AArch64::LD1Twov16b, "ld1", ".16b", 0, false, 0 },
{ AArch64::LD1Twov8h, "ld1", ".8h", 0, false, 0 },
{ AArch64::LD1Twov4s, "ld1", ".4s", 0, false, 0 },
{ AArch64::LD1Twov2d, "ld1", ".2d", 0, false, 0 },
{ AArch64::LD1Twov8b, "ld1", ".8b", 0, false, 0 },
{ AArch64::LD1Twov4h, "ld1", ".4h", 0, false, 0 },
{ AArch64::LD1Twov2s, "ld1", ".2s", 0, false, 0 },
{ AArch64::LD1Twov1d, "ld1", ".1d", 0, false, 0 },
{ AArch64::LD1Twov16b_POST, "ld1", ".16b", 1, false, 32 },
{ AArch64::LD1Twov8h_POST, "ld1", ".8h", 1, false, 32 },
{ AArch64::LD1Twov4s_POST, "ld1", ".4s", 1, false, 32 },
{ AArch64::LD1Twov2d_POST, "ld1", ".2d", 1, false, 32 },
{ AArch64::LD1Twov8b_POST, "ld1", ".8b", 1, false, 16 },
{ AArch64::LD1Twov4h_POST, "ld1", ".4h", 1, false, 16 },
{ AArch64::LD1Twov2s_POST, "ld1", ".2s", 1, false, 16 },
{ AArch64::LD1Twov1d_POST, "ld1", ".1d", 1, false, 16 },
{ AArch64::LD1Threev16b, "ld1", ".16b", 0, false, 0 },
{ AArch64::LD1Threev8h, "ld1", ".8h", 0, false, 0 },
{ AArch64::LD1Threev4s, "ld1", ".4s", 0, false, 0 },
{ AArch64::LD1Threev2d, "ld1", ".2d", 0, false, 0 },
{ AArch64::LD1Threev8b, "ld1", ".8b", 0, false, 0 },
{ AArch64::LD1Threev4h, "ld1", ".4h", 0, false, 0 },
{ AArch64::LD1Threev2s, "ld1", ".2s", 0, false, 0 },
{ AArch64::LD1Threev1d, "ld1", ".1d", 0, false, 0 },
{ AArch64::LD1Threev16b_POST, "ld1", ".16b", 1, false, 48 },
{ AArch64::LD1Threev8h_POST, "ld1", ".8h", 1, false, 48 },
{ AArch64::LD1Threev4s_POST, "ld1", ".4s", 1, false, 48 },
{ AArch64::LD1Threev2d_POST, "ld1", ".2d", 1, false, 48 },
{ AArch64::LD1Threev8b_POST, "ld1", ".8b", 1, false, 24 },
{ AArch64::LD1Threev4h_POST, "ld1", ".4h", 1, false, 24 },
{ AArch64::LD1Threev2s_POST, "ld1", ".2s", 1, false, 24 },
{ AArch64::LD1Threev1d_POST, "ld1", ".1d", 1, false, 24 },
{ AArch64::LD1Fourv16b, "ld1", ".16b", 0, false, 0 },
{ AArch64::LD1Fourv8h, "ld1", ".8h", 0, false, 0 },
{ AArch64::LD1Fourv4s, "ld1", ".4s", 0, false, 0 },
{ AArch64::LD1Fourv2d, "ld1", ".2d", 0, false, 0 },
{ AArch64::LD1Fourv8b, "ld1", ".8b", 0, false, 0 },
{ AArch64::LD1Fourv4h, "ld1", ".4h", 0, false, 0 },
{ AArch64::LD1Fourv2s, "ld1", ".2s", 0, false, 0 },
{ AArch64::LD1Fourv1d, "ld1", ".1d", 0, false, 0 },
{ AArch64::LD1Fourv16b_POST, "ld1", ".16b", 1, false, 64 },
{ AArch64::LD1Fourv8h_POST, "ld1", ".8h", 1, false, 64 },
{ AArch64::LD1Fourv4s_POST, "ld1", ".4s", 1, false, 64 },
{ AArch64::LD1Fourv2d_POST, "ld1", ".2d", 1, false, 64 },
{ AArch64::LD1Fourv8b_POST, "ld1", ".8b", 1, false, 32 },
{ AArch64::LD1Fourv4h_POST, "ld1", ".4h", 1, false, 32 },
{ AArch64::LD1Fourv2s_POST, "ld1", ".2s", 1, false, 32 },
{ AArch64::LD1Fourv1d_POST, "ld1", ".1d", 1, false, 32 },
{ AArch64::LD2i8, "ld2", ".b", 1, true, 0 },
{ AArch64::LD2i16, "ld2", ".h", 1, true, 0 },
{ AArch64::LD2i32, "ld2", ".s", 1, true, 0 },
{ AArch64::LD2i64, "ld2", ".d", 1, true, 0 },
{ AArch64::LD2i8_POST, "ld2", ".b", 2, true, 2 },
{ AArch64::LD2i16_POST, "ld2", ".h", 2, true, 4 },
{ AArch64::LD2i32_POST, "ld2", ".s", 2, true, 8 },
{ AArch64::LD2i64_POST, "ld2", ".d", 2, true, 16 },
{ AArch64::LD2Rv16b, "ld2r", ".16b", 0, false, 0 },
{ AArch64::LD2Rv8h, "ld2r", ".8h", 0, false, 0 },
{ AArch64::LD2Rv4s, "ld2r", ".4s", 0, false, 0 },
{ AArch64::LD2Rv2d, "ld2r", ".2d", 0, false, 0 },
{ AArch64::LD2Rv8b, "ld2r", ".8b", 0, false, 0 },
{ AArch64::LD2Rv4h, "ld2r", ".4h", 0, false, 0 },
{ AArch64::LD2Rv2s, "ld2r", ".2s", 0, false, 0 },
{ AArch64::LD2Rv1d, "ld2r", ".1d", 0, false, 0 },
{ AArch64::LD2Rv16b_POST, "ld2r", ".16b", 1, false, 2 },
{ AArch64::LD2Rv8h_POST, "ld2r", ".8h", 1, false, 4 },
{ AArch64::LD2Rv4s_POST, "ld2r", ".4s", 1, false, 8 },
{ AArch64::LD2Rv2d_POST, "ld2r", ".2d", 1, false, 16 },
{ AArch64::LD2Rv8b_POST, "ld2r", ".8b", 1, false, 2 },
{ AArch64::LD2Rv4h_POST, "ld2r", ".4h", 1, false, 4 },
{ AArch64::LD2Rv2s_POST, "ld2r", ".2s", 1, false, 8 },
{ AArch64::LD2Rv1d_POST, "ld2r", ".1d", 1, false, 16 },
{ AArch64::LD2Twov16b, "ld2", ".16b", 0, false, 0 },
{ AArch64::LD2Twov8h, "ld2", ".8h", 0, false, 0 },
{ AArch64::LD2Twov4s, "ld2", ".4s", 0, false, 0 },
{ AArch64::LD2Twov2d, "ld2", ".2d", 0, false, 0 },
{ AArch64::LD2Twov8b, "ld2", ".8b", 0, false, 0 },
{ AArch64::LD2Twov4h, "ld2", ".4h", 0, false, 0 },
{ AArch64::LD2Twov2s, "ld2", ".2s", 0, false, 0 },
{ AArch64::LD2Twov16b_POST, "ld2", ".16b", 1, false, 32 },
{ AArch64::LD2Twov8h_POST, "ld2", ".8h", 1, false, 32 },
{ AArch64::LD2Twov4s_POST, "ld2", ".4s", 1, false, 32 },
{ AArch64::LD2Twov2d_POST, "ld2", ".2d", 1, false, 32 },
{ AArch64::LD2Twov8b_POST, "ld2", ".8b", 1, false, 16 },
{ AArch64::LD2Twov4h_POST, "ld2", ".4h", 1, false, 16 },
{ AArch64::LD2Twov2s_POST, "ld2", ".2s", 1, false, 16 },
{ AArch64::LD3i8, "ld3", ".b", 1, true, 0 },
{ AArch64::LD3i16, "ld3", ".h", 1, true, 0 },
{ AArch64::LD3i32, "ld3", ".s", 1, true, 0 },
{ AArch64::LD3i64, "ld3", ".d", 1, true, 0 },
{ AArch64::LD3i8_POST, "ld3", ".b", 2, true, 3 },
{ AArch64::LD3i16_POST, "ld3", ".h", 2, true, 6 },
{ AArch64::LD3i32_POST, "ld3", ".s", 2, true, 12 },
{ AArch64::LD3i64_POST, "ld3", ".d", 2, true, 24 },
{ AArch64::LD3Rv16b, "ld3r", ".16b", 0, false, 0 },
{ AArch64::LD3Rv8h, "ld3r", ".8h", 0, false, 0 },
{ AArch64::LD3Rv4s, "ld3r", ".4s", 0, false, 0 },
{ AArch64::LD3Rv2d, "ld3r", ".2d", 0, false, 0 },
{ AArch64::LD3Rv8b, "ld3r", ".8b", 0, false, 0 },
{ AArch64::LD3Rv4h, "ld3r", ".4h", 0, false, 0 },
{ AArch64::LD3Rv2s, "ld3r", ".2s", 0, false, 0 },
{ AArch64::LD3Rv1d, "ld3r", ".1d", 0, false, 0 },
{ AArch64::LD3Rv16b_POST, "ld3r", ".16b", 1, false, 3 },
{ AArch64::LD3Rv8h_POST, "ld3r", ".8h", 1, false, 6 },
{ AArch64::LD3Rv4s_POST, "ld3r", ".4s", 1, false, 12 },
{ AArch64::LD3Rv2d_POST, "ld3r", ".2d", 1, false, 24 },
{ AArch64::LD3Rv8b_POST, "ld3r", ".8b", 1, false, 3 },
{ AArch64::LD3Rv4h_POST, "ld3r", ".4h", 1, false, 6 },
{ AArch64::LD3Rv2s_POST, "ld3r", ".2s", 1, false, 12 },
{ AArch64::LD3Rv1d_POST, "ld3r", ".1d", 1, false, 24 },
{ AArch64::LD3Threev16b, "ld3", ".16b", 0, false, 0 },
{ AArch64::LD3Threev8h, "ld3", ".8h", 0, false, 0 },
{ AArch64::LD3Threev4s, "ld3", ".4s", 0, false, 0 },
{ AArch64::LD3Threev2d, "ld3", ".2d", 0, false, 0 },
{ AArch64::LD3Threev8b, "ld3", ".8b", 0, false, 0 },
{ AArch64::LD3Threev4h, "ld3", ".4h", 0, false, 0 },
{ AArch64::LD3Threev2s, "ld3", ".2s", 0, false, 0 },
{ AArch64::LD3Threev16b_POST, "ld3", ".16b", 1, false, 48 },
{ AArch64::LD3Threev8h_POST, "ld3", ".8h", 1, false, 48 },
{ AArch64::LD3Threev4s_POST, "ld3", ".4s", 1, false, 48 },
{ AArch64::LD3Threev2d_POST, "ld3", ".2d", 1, false, 48 },
{ AArch64::LD3Threev8b_POST, "ld3", ".8b", 1, false, 24 },
{ AArch64::LD3Threev4h_POST, "ld3", ".4h", 1, false, 24 },
{ AArch64::LD3Threev2s_POST, "ld3", ".2s", 1, false, 24 },
{ AArch64::LD4i8, "ld4", ".b", 1, true, 0 },
{ AArch64::LD4i16, "ld4", ".h", 1, true, 0 },
{ AArch64::LD4i32, "ld4", ".s", 1, true, 0 },
{ AArch64::LD4i64, "ld4", ".d", 1, true, 0 },
{ AArch64::LD4i8_POST, "ld4", ".b", 2, true, 4 },
{ AArch64::LD4i16_POST, "ld4", ".h", 2, true, 8 },
{ AArch64::LD4i32_POST, "ld4", ".s", 2, true, 16 },
{ AArch64::LD4i64_POST, "ld4", ".d", 2, true, 32 },
{ AArch64::LD4Rv16b, "ld4r", ".16b", 0, false, 0 },
{ AArch64::LD4Rv8h, "ld4r", ".8h", 0, false, 0 },
{ AArch64::LD4Rv4s, "ld4r", ".4s", 0, false, 0 },
{ AArch64::LD4Rv2d, "ld4r", ".2d", 0, false, 0 },
{ AArch64::LD4Rv8b, "ld4r", ".8b", 0, false, 0 },
{ AArch64::LD4Rv4h, "ld4r", ".4h", 0, false, 0 },
{ AArch64::LD4Rv2s, "ld4r", ".2s", 0, false, 0 },
{ AArch64::LD4Rv1d, "ld4r", ".1d", 0, false, 0 },
{ AArch64::LD4Rv16b_POST, "ld4r", ".16b", 1, false, 4 },
{ AArch64::LD4Rv8h_POST, "ld4r", ".8h", 1, false, 8 },
{ AArch64::LD4Rv4s_POST, "ld4r", ".4s", 1, false, 16 },
{ AArch64::LD4Rv2d_POST, "ld4r", ".2d", 1, false, 32 },
{ AArch64::LD4Rv8b_POST, "ld4r", ".8b", 1, false, 4 },
{ AArch64::LD4Rv4h_POST, "ld4r", ".4h", 1, false, 8 },
{ AArch64::LD4Rv2s_POST, "ld4r", ".2s", 1, false, 16 },
{ AArch64::LD4Rv1d_POST, "ld4r", ".1d", 1, false, 32 },
{ AArch64::LD4Fourv16b, "ld4", ".16b", 0, false, 0 },
{ AArch64::LD4Fourv8h, "ld4", ".8h", 0, false, 0 },
{ AArch64::LD4Fourv4s, "ld4", ".4s", 0, false, 0 },
{ AArch64::LD4Fourv2d, "ld4", ".2d", 0, false, 0 },
{ AArch64::LD4Fourv8b, "ld4", ".8b", 0, false, 0 },
{ AArch64::LD4Fourv4h, "ld4", ".4h", 0, false, 0 },
{ AArch64::LD4Fourv2s, "ld4", ".2s", 0, false, 0 },
{ AArch64::LD4Fourv16b_POST, "ld4", ".16b", 1, false, 64 },
{ AArch64::LD4Fourv8h_POST, "ld4", ".8h", 1, false, 64 },
{ AArch64::LD4Fourv4s_POST, "ld4", ".4s", 1, false, 64 },
{ AArch64::LD4Fourv2d_POST, "ld4", ".2d", 1, false, 64 },
{ AArch64::LD4Fourv8b_POST, "ld4", ".8b", 1, false, 32 },
{ AArch64::LD4Fourv4h_POST, "ld4", ".4h", 1, false, 32 },
{ AArch64::LD4Fourv2s_POST, "ld4", ".2s", 1, false, 32 },
{ AArch64::ST1i8, "st1", ".b", 0, true, 0 },
{ AArch64::ST1i16, "st1", ".h", 0, true, 0 },
{ AArch64::ST1i32, "st1", ".s", 0, true, 0 },
{ AArch64::ST1i64, "st1", ".d", 0, true, 0 },
{ AArch64::ST1i8_POST, "st1", ".b", 1, true, 1 },
{ AArch64::ST1i16_POST, "st1", ".h", 1, true, 2 },
{ AArch64::ST1i32_POST, "st1", ".s", 1, true, 4 },
{ AArch64::ST1i64_POST, "st1", ".d", 1, true, 8 },
{ AArch64::ST1Onev16b, "st1", ".16b", 0, false, 0 },
{ AArch64::ST1Onev8h, "st1", ".8h", 0, false, 0 },
{ AArch64::ST1Onev4s, "st1", ".4s", 0, false, 0 },
{ AArch64::ST1Onev2d, "st1", ".2d", 0, false, 0 },
{ AArch64::ST1Onev8b, "st1", ".8b", 0, false, 0 },
{ AArch64::ST1Onev4h, "st1", ".4h", 0, false, 0 },
{ AArch64::ST1Onev2s, "st1", ".2s", 0, false, 0 },
{ AArch64::ST1Onev1d, "st1", ".1d", 0, false, 0 },
{ AArch64::ST1Onev16b_POST, "st1", ".16b", 1, false, 16 },
{ AArch64::ST1Onev8h_POST, "st1", ".8h", 1, false, 16 },
{ AArch64::ST1Onev4s_POST, "st1", ".4s", 1, false, 16 },
{ AArch64::ST1Onev2d_POST, "st1", ".2d", 1, false, 16 },
{ AArch64::ST1Onev8b_POST, "st1", ".8b", 1, false, 8 },
{ AArch64::ST1Onev4h_POST, "st1", ".4h", 1, false, 8 },
{ AArch64::ST1Onev2s_POST, "st1", ".2s", 1, false, 8 },
{ AArch64::ST1Onev1d_POST, "st1", ".1d", 1, false, 8 },
{ AArch64::ST1Twov16b, "st1", ".16b", 0, false, 0 },
{ AArch64::ST1Twov8h, "st1", ".8h", 0, false, 0 },
{ AArch64::ST1Twov4s, "st1", ".4s", 0, false, 0 },
{ AArch64::ST1Twov2d, "st1", ".2d", 0, false, 0 },
{ AArch64::ST1Twov8b, "st1", ".8b", 0, false, 0 },
{ AArch64::ST1Twov4h, "st1", ".4h", 0, false, 0 },
{ AArch64::ST1Twov2s, "st1", ".2s", 0, false, 0 },
{ AArch64::ST1Twov1d, "st1", ".1d", 0, false, 0 },
{ AArch64::ST1Twov16b_POST, "st1", ".16b", 1, false, 32 },
{ AArch64::ST1Twov8h_POST, "st1", ".8h", 1, false, 32 },
{ AArch64::ST1Twov4s_POST, "st1", ".4s", 1, false, 32 },
{ AArch64::ST1Twov2d_POST, "st1", ".2d", 1, false, 32 },
{ AArch64::ST1Twov8b_POST, "st1", ".8b", 1, false, 16 },
{ AArch64::ST1Twov4h_POST, "st1", ".4h", 1, false, 16 },
{ AArch64::ST1Twov2s_POST, "st1", ".2s", 1, false, 16 },
{ AArch64::ST1Twov1d_POST, "st1", ".1d", 1, false, 16 },
{ AArch64::ST1Threev16b, "st1", ".16b", 0, false, 0 },
{ AArch64::ST1Threev8h, "st1", ".8h", 0, false, 0 },
{ AArch64::ST1Threev4s, "st1", ".4s", 0, false, 0 },
{ AArch64::ST1Threev2d, "st1", ".2d", 0, false, 0 },
{ AArch64::ST1Threev8b, "st1", ".8b", 0, false, 0 },
{ AArch64::ST1Threev4h, "st1", ".4h", 0, false, 0 },
{ AArch64::ST1Threev2s, "st1", ".2s", 0, false, 0 },
{ AArch64::ST1Threev1d, "st1", ".1d", 0, false, 0 },
{ AArch64::ST1Threev16b_POST, "st1", ".16b", 1, false, 48 },
{ AArch64::ST1Threev8h_POST, "st1", ".8h", 1, false, 48 },
{ AArch64::ST1Threev4s_POST, "st1", ".4s", 1, false, 48 },
{ AArch64::ST1Threev2d_POST, "st1", ".2d", 1, false, 48 },
{ AArch64::ST1Threev8b_POST, "st1", ".8b", 1, false, 24 },
{ AArch64::ST1Threev4h_POST, "st1", ".4h", 1, false, 24 },
{ AArch64::ST1Threev2s_POST, "st1", ".2s", 1, false, 24 },
{ AArch64::ST1Threev1d_POST, "st1", ".1d", 1, false, 24 },
{ AArch64::ST1Fourv16b, "st1", ".16b", 0, false, 0 },
{ AArch64::ST1Fourv8h, "st1", ".8h", 0, false, 0 },
{ AArch64::ST1Fourv4s, "st1", ".4s", 0, false, 0 },
{ AArch64::ST1Fourv2d, "st1", ".2d", 0, false, 0 },
{ AArch64::ST1Fourv8b, "st1", ".8b", 0, false, 0 },
{ AArch64::ST1Fourv4h, "st1", ".4h", 0, false, 0 },
{ AArch64::ST1Fourv2s, "st1", ".2s", 0, false, 0 },
{ AArch64::ST1Fourv1d, "st1", ".1d", 0, false, 0 },
{ AArch64::ST1Fourv16b_POST, "st1", ".16b", 1, false, 64 },
{ AArch64::ST1Fourv8h_POST, "st1", ".8h", 1, false, 64 },
{ AArch64::ST1Fourv4s_POST, "st1", ".4s", 1, false, 64 },
{ AArch64::ST1Fourv2d_POST, "st1", ".2d", 1, false, 64 },
{ AArch64::ST1Fourv8b_POST, "st1", ".8b", 1, false, 32 },
{ AArch64::ST1Fourv4h_POST, "st1", ".4h", 1, false, 32 },
{ AArch64::ST1Fourv2s_POST, "st1", ".2s", 1, false, 32 },
{ AArch64::ST1Fourv1d_POST, "st1", ".1d", 1, false, 32 },
{ AArch64::ST2i8, "st2", ".b", 0, true, 0 },
{ AArch64::ST2i16, "st2", ".h", 0, true, 0 },
{ AArch64::ST2i32, "st2", ".s", 0, true, 0 },
{ AArch64::ST2i64, "st2", ".d", 0, true, 0 },
{ AArch64::ST2i8_POST, "st2", ".b", 1, true, 2 },
{ AArch64::ST2i16_POST, "st2", ".h", 1, true, 4 },
{ AArch64::ST2i32_POST, "st2", ".s", 1, true, 8 },
{ AArch64::ST2i64_POST, "st2", ".d", 1, true, 16 },
{ AArch64::ST2Twov16b, "st2", ".16b", 0, false, 0 },
{ AArch64::ST2Twov8h, "st2", ".8h", 0, false, 0 },
{ AArch64::ST2Twov4s, "st2", ".4s", 0, false, 0 },
{ AArch64::ST2Twov2d, "st2", ".2d", 0, false, 0 },
{ AArch64::ST2Twov8b, "st2", ".8b", 0, false, 0 },
{ AArch64::ST2Twov4h, "st2", ".4h", 0, false, 0 },
{ AArch64::ST2Twov2s, "st2", ".2s", 0, false, 0 },
{ AArch64::ST2Twov16b_POST, "st2", ".16b", 1, false, 32 },
{ AArch64::ST2Twov8h_POST, "st2", ".8h", 1, false, 32 },
{ AArch64::ST2Twov4s_POST, "st2", ".4s", 1, false, 32 },
{ AArch64::ST2Twov2d_POST, "st2", ".2d", 1, false, 32 },
{ AArch64::ST2Twov8b_POST, "st2", ".8b", 1, false, 16 },
{ AArch64::ST2Twov4h_POST, "st2", ".4h", 1, false, 16 },
{ AArch64::ST2Twov2s_POST, "st2", ".2s", 1, false, 16 },
{ AArch64::ST3i8, "st3", ".b", 0, true, 0 },
{ AArch64::ST3i16, "st3", ".h", 0, true, 0 },
{ AArch64::ST3i32, "st3", ".s", 0, true, 0 },
{ AArch64::ST3i64, "st3", ".d", 0, true, 0 },
{ AArch64::ST3i8_POST, "st3", ".b", 1, true, 3 },
{ AArch64::ST3i16_POST, "st3", ".h", 1, true, 6 },
{ AArch64::ST3i32_POST, "st3", ".s", 1, true, 12 },
{ AArch64::ST3i64_POST, "st3", ".d", 1, true, 24 },
{ AArch64::ST3Threev16b, "st3", ".16b", 0, false, 0 },
{ AArch64::ST3Threev8h, "st3", ".8h", 0, false, 0 },
{ AArch64::ST3Threev4s, "st3", ".4s", 0, false, 0 },
{ AArch64::ST3Threev2d, "st3", ".2d", 0, false, 0 },
{ AArch64::ST3Threev8b, "st3", ".8b", 0, false, 0 },
{ AArch64::ST3Threev4h, "st3", ".4h", 0, false, 0 },
{ AArch64::ST3Threev2s, "st3", ".2s", 0, false, 0 },
{ AArch64::ST3Threev16b_POST, "st3", ".16b", 1, false, 48 },
{ AArch64::ST3Threev8h_POST, "st3", ".8h", 1, false, 48 },
{ AArch64::ST3Threev4s_POST, "st3", ".4s", 1, false, 48 },
{ AArch64::ST3Threev2d_POST, "st3", ".2d", 1, false, 48 },
{ AArch64::ST3Threev8b_POST, "st3", ".8b", 1, false, 24 },
{ AArch64::ST3Threev4h_POST, "st3", ".4h", 1, false, 24 },
{ AArch64::ST3Threev2s_POST, "st3", ".2s", 1, false, 24 },
{ AArch64::ST4i8, "st4", ".b", 0, true, 0 },
{ AArch64::ST4i16, "st4", ".h", 0, true, 0 },
{ AArch64::ST4i32, "st4", ".s", 0, true, 0 },
{ AArch64::ST4i64, "st4", ".d", 0, true, 0 },
{ AArch64::ST4i8_POST, "st4", ".b", 1, true, 4 },
{ AArch64::ST4i16_POST, "st4", ".h", 1, true, 8 },
{ AArch64::ST4i32_POST, "st4", ".s", 1, true, 16 },
{ AArch64::ST4i64_POST, "st4", ".d", 1, true, 32 },
{ AArch64::ST4Fourv16b, "st4", ".16b", 0, false, 0 },
{ AArch64::ST4Fourv8h, "st4", ".8h", 0, false, 0 },
{ AArch64::ST4Fourv4s, "st4", ".4s", 0, false, 0 },
{ AArch64::ST4Fourv2d, "st4", ".2d", 0, false, 0 },
{ AArch64::ST4Fourv8b, "st4", ".8b", 0, false, 0 },
{ AArch64::ST4Fourv4h, "st4", ".4h", 0, false, 0 },
{ AArch64::ST4Fourv2s, "st4", ".2s", 0, false, 0 },
{ AArch64::ST4Fourv16b_POST, "st4", ".16b", 1, false, 64 },
{ AArch64::ST4Fourv8h_POST, "st4", ".8h", 1, false, 64 },
{ AArch64::ST4Fourv4s_POST, "st4", ".4s", 1, false, 64 },
{ AArch64::ST4Fourv2d_POST, "st4", ".2d", 1, false, 64 },
{ AArch64::ST4Fourv8b_POST, "st4", ".8b", 1, false, 32 },
{ AArch64::ST4Fourv4h_POST, "st4", ".4h", 1, false, 32 },
{ AArch64::ST4Fourv2s_POST, "st4", ".2s", 1, false, 32 },
};
static const LdStNInstrDesc *getLdStNInstrDesc(unsigned Opcode) {
unsigned Idx;
for (Idx = 0; Idx != array_lengthof(LdStNInstInfo); ++Idx)
if (LdStNInstInfo[Idx].Opcode == Opcode)
return &LdStNInstInfo[Idx];
return nullptr;
}
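// Illustrative lookup: AArch64::ST2i16_POST maps to { "st2", ".h", list
// operand 1, HasLane, NaturalOffset 4 }, which printInst below renders as
// something like "st2.h { v0, v1 }[1], [x0], #4" in Apple syntax (register
// numbers chosen arbitrarily here).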
void AArch64AppleInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
StringRef Annot,
const MCSubtargetInfo &STI) {
unsigned Opcode = MI->getOpcode();
StringRef Layout;
bool IsTbx;
if (isTblTbxInstruction(MI->getOpcode(), Layout, IsTbx)) {
O << "\t" << (IsTbx ? "tbx" : "tbl") << Layout << '\t'
<< getRegisterName(MI->getOperand(0).getReg(), AArch64::vreg) << ", ";
unsigned ListOpNum = IsTbx ? 2 : 1;
printVectorList(MI, ListOpNum, STI, O, "");
O << ", "
<< getRegisterName(MI->getOperand(ListOpNum + 1).getReg(), AArch64::vreg);
printAnnotation(O, Annot);
return;
}
if (const LdStNInstrDesc *LdStDesc = getLdStNInstrDesc(Opcode)) {
O << "\t" << LdStDesc->Mnemonic << LdStDesc->Layout << '\t';
// Now onto the operands: first a vector list with possible lane
// specifier. E.g. { v0 }[2]
int OpNum = LdStDesc->ListOperand;
printVectorList(MI, OpNum++, STI, O, "");
if (LdStDesc->HasLane)
O << '[' << MI->getOperand(OpNum++).getImm() << ']';
// Next the address: [xN]
unsigned AddrReg = MI->getOperand(OpNum++).getReg();
O << ", [" << getRegisterName(AddrReg) << ']';
// Finally, there might be a post-indexed offset.
if (LdStDesc->NaturalOffset != 0) {
unsigned Reg = MI->getOperand(OpNum++).getReg();
if (Reg != AArch64::XZR)
O << ", " << getRegisterName(Reg);
else {
assert(LdStDesc->NaturalOffset && "no offset on post-inc instruction?");
O << ", #" << LdStDesc->NaturalOffset;
}
}
printAnnotation(O, Annot);
return;
}
AArch64InstPrinter::printInst(MI, O, Annot, STI);
}
bool AArch64InstPrinter::printSysAlias(const MCInst *MI,
const MCSubtargetInfo &STI,
raw_ostream &O) {
#ifndef NDEBUG
unsigned Opcode = MI->getOpcode();
assert(Opcode == AArch64::SYSxt && "Invalid opcode for SYS alias!");
#endif
const MCOperand &Op1 = MI->getOperand(0);
const MCOperand &Cn = MI->getOperand(1);
const MCOperand &Cm = MI->getOperand(2);
const MCOperand &Op2 = MI->getOperand(3);
unsigned Op1Val = Op1.getImm();
unsigned CnVal = Cn.getImm();
unsigned CmVal = Cm.getImm();
unsigned Op2Val = Op2.getImm();
uint16_t Encoding = Op2Val;
Encoding |= CmVal << 3;
Encoding |= CnVal << 7;
Encoding |= Op1Val << 11;
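// The fields pack as op2 | Cm << 3 | Cn << 7 | op1 << 11. For example,
// "dc civac" is SYS #3, C7, C14, #1, giving an encoding of
// (3 << 11) | (7 << 7) | (14 << 3) | 1 = 0x1bf1.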
bool NeedsReg;
std::string Ins;
std::string Name;
if (CnVal == 7) {
switch (CmVal) {
default: return false;
// Maybe IC, maybe Prediction Restriction
case 1:
switch (Op1Val) {
default: return false;
case 0: goto Search_IC;
case 3: goto Search_PRCTX;
}
// Prediction Restriction aliases
case 3: {
Search_PRCTX:
const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByEncoding(Encoding >> 3);
if (!PRCTX || !PRCTX->haveFeatures(STI.getFeatureBits()))
return false;
NeedsReg = PRCTX->NeedsReg;
switch (Op2Val) {
default: return false;
case 4: Ins = "cfp\t"; break;
case 5: Ins = "dvp\t"; break;
case 7: Ins = "cpp\t"; break;
}
Name = std::string(PRCTX->Name);
}
break;
// IC aliases
case 5: {
Search_IC:
const AArch64IC::IC *IC = AArch64IC::lookupICByEncoding(Encoding);
if (!IC || !IC->haveFeatures(STI.getFeatureBits()))
return false;
NeedsReg = IC->NeedsReg;
Ins = "ic\t";
Name = std::string(IC->Name);
}
break;
// DC aliases
case 4: case 6: case 10: case 11: case 12: case 13: case 14:
{
const AArch64DC::DC *DC = AArch64DC::lookupDCByEncoding(Encoding);
if (!DC || !DC->haveFeatures(STI.getFeatureBits()))
return false;
NeedsReg = true;
Ins = "dc\t";
Name = std::string(DC->Name);
}
break;
// AT aliases
case 8: case 9: {
const AArch64AT::AT *AT = AArch64AT::lookupATByEncoding(Encoding);
if (!AT || !AT->haveFeatures(STI.getFeatureBits()))
return false;
NeedsReg = true;
Ins = "at\t";
Name = std::string(AT->Name);
}
break;
}
} else if (CnVal == 8) {
// TLBI aliases
const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByEncoding(Encoding);
if (!TLBI || !TLBI->haveFeatures(STI.getFeatureBits()))
return false;
NeedsReg = TLBI->NeedsReg;
Ins = "tlbi\t";
Name = std::string(TLBI->Name);
}
else
return false;
std::string Str = Ins + Name;
std::transform(Str.begin(), Str.end(), Str.begin(), ::tolower);
O << '\t' << Str;
if (NeedsReg)
O << ", " << getRegisterName(MI->getOperand(4).getReg());
return true;
}
void AArch64InstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
const MCSubtargetInfo &STI,
raw_ostream &O) {
const MCOperand &Op = MI->getOperand(OpNo);
if (Op.isReg()) {
unsigned Reg = Op.getReg();
O << getRegisterName(Reg);
} else if (Op.isImm()) {
printImm(MI, OpNo, STI, O);
} else {
assert(Op.isExpr() && "unknown operand kind in printOperand");
Op.getExpr()->print(O, &MAI);
}
}
void AArch64InstPrinter::printImm(const MCInst *MI, unsigned OpNo,
const MCSubtargetInfo &STI,
raw_ostream &O) {
const MCOperand &Op = MI->getOperand(OpNo);
O << "#" << formatImm(Op.getImm());
}
void AArch64InstPrinter::printImmHex(const MCInst *MI, unsigned OpNo,
const MCSubtargetInfo &STI,
raw_ostream &O) {
const MCOperand &Op = MI->getOperand(OpNo);
O << format("#%#llx", Op.getImm());
}
void AArch64InstPrinter::printPostIncOperand(const MCInst *MI, unsigned OpNo,
unsigned Imm, raw_ostream &O) {
const MCOperand &Op = MI->getOperand(OpNo);
if (Op.isReg()) {
unsigned Reg = Op.getReg();
if (Reg == AArch64::XZR)
O << "#" << Imm;
else
O << getRegisterName(Reg);
} else
llvm_unreachable("unknown operand kind in printPostIncOperand64");
}
void AArch64InstPrinter::printVRegOperand(const MCInst *MI, unsigned OpNo,
const MCSubtargetInfo &STI,
raw_ostream &O) {
const MCOperand &Op = MI->getOperand(OpNo);
assert(Op.isReg() && "Non-register vreg operand!");
unsigned Reg = Op.getReg();
O << getRegisterName(Reg, AArch64::vreg);
}
void AArch64InstPrinter::printSysCROperand(const MCInst *MI, unsigned OpNo,
const MCSubtargetInfo &STI,
raw_ostream &O) {
const MCOperand &Op = MI->getOperand(OpNo);
assert(Op.isImm() && "System instruction C[nm] operands must be immediates!");
O << "c" << Op.getImm();
}
void AArch64InstPrinter::printAddSubImm(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
const MCOperand &MO = MI->getOperand(OpNum);
if (MO.isImm()) {
unsigned Val = (MO.getImm() & 0xfff);
assert(Val == MO.getImm() && "Add/sub immediate out of range!");
unsigned Shift =
AArch64_AM::getShiftValue(MI->getOperand(OpNum + 1).getImm());
O << '#' << formatImm(Val);
if (Shift != 0)
printShifter(MI, OpNum + 1, STI, O);
if (CommentStream)
*CommentStream << '=' << formatImm(Val << Shift) << '\n';
} else {
assert(MO.isExpr() && "Unexpected operand type!");
MO.getExpr()->print(O, &MAI);
printShifter(MI, OpNum + 1, STI, O);
}
}
template <typename T>
void AArch64InstPrinter::printLogicalImm(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
uint64_t Val = MI->getOperand(OpNum).getImm();
O << "#0x";
O.write_hex(AArch64_AM::decodeLogicalImmediate(Val, 8 * sizeof(T)));
}
void AArch64InstPrinter::printShifter(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
unsigned Val = MI->getOperand(OpNum).getImm();
// LSL #0 should not be printed.
if (AArch64_AM::getShiftType(Val) == AArch64_AM::LSL &&
AArch64_AM::getShiftValue(Val) == 0)
return;
O << ", " << AArch64_AM::getShiftExtendName(AArch64_AM::getShiftType(Val))
<< " #" << AArch64_AM::getShiftValue(Val);
}
void AArch64InstPrinter::printShiftedRegister(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
O << getRegisterName(MI->getOperand(OpNum).getReg());
printShifter(MI, OpNum + 1, STI, O);
}
void AArch64InstPrinter::printExtendedRegister(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
O << getRegisterName(MI->getOperand(OpNum).getReg());
printArithExtend(MI, OpNum + 1, STI, O);
}
void AArch64InstPrinter::printArithExtend(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
unsigned Val = MI->getOperand(OpNum).getImm();
AArch64_AM::ShiftExtendType ExtType = AArch64_AM::getArithExtendType(Val);
unsigned ShiftVal = AArch64_AM::getArithShiftValue(Val);
// If the destination or first source register operand is [W]SP, print
// UXTW/UXTX as LSL, and if the shift amount is also zero, print nothing at
// all.
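// For example, "add x0, sp, x1, uxtx" is printed as plain "add x0, sp, x1",
// while "add x0, sp, x1, uxtx #2" is printed as "add x0, sp, x1, lsl #2".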
if (ExtType == AArch64_AM::UXTW || ExtType == AArch64_AM::UXTX) {
unsigned Dest = MI->getOperand(0).getReg();
unsigned Src1 = MI->getOperand(1).getReg();
if ( ((Dest == AArch64::SP || Src1 == AArch64::SP) &&
ExtType == AArch64_AM::UXTX) ||
((Dest == AArch64::WSP || Src1 == AArch64::WSP) &&
ExtType == AArch64_AM::UXTW) ) {
if (ShiftVal != 0)
O << ", lsl #" << ShiftVal;
return;
}
}
O << ", " << AArch64_AM::getShiftExtendName(ExtType);
if (ShiftVal != 0)
O << " #" << ShiftVal;
}
static void printMemExtendImpl(bool SignExtend, bool DoShift,
unsigned Width, char SrcRegKind,
raw_ostream &O) {
// sxtw, sxtx, uxtw or lsl (== uxtx)
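// E.g. SignExtend with a 'w' source register and a 32-bit access width
// prints "sxtw #2" when DoShift is set (the amount is log2 of the byte width).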
bool IsLSL = !SignExtend && SrcRegKind == 'x';
if (IsLSL)
O << "lsl";
else
O << (SignExtend ? 's' : 'u') << "xt" << SrcRegKind;
if (DoShift || IsLSL)
O << " #" << Log2_32(Width / 8);
}
void AArch64InstPrinter::printMemExtend(const MCInst *MI, unsigned OpNum,
raw_ostream &O, char SrcRegKind,
unsigned Width) {
bool SignExtend = MI->getOperand(OpNum).getImm();
bool DoShift = MI->getOperand(OpNum + 1).getImm();
printMemExtendImpl(SignExtend, DoShift, Width, SrcRegKind, O);
}
template <bool SignExtend, int ExtWidth, char SrcRegKind, char Suffix>
void AArch64InstPrinter::printRegWithShiftExtend(const MCInst *MI,
unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
printOperand(MI, OpNum, STI, O);
if (Suffix == 's' || Suffix == 'd')
O << '.' << Suffix;
else
assert(Suffix == 0 && "Unsupported suffix size");
bool DoShift = ExtWidth != 8;
if (SignExtend || DoShift || SrcRegKind == 'w') {
O << ", ";
printMemExtendImpl(SignExtend, DoShift, ExtWidth, SrcRegKind, O);
}
}
void AArch64InstPrinter::printCondCode(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
AArch64CC::CondCode CC = (AArch64CC::CondCode)MI->getOperand(OpNum).getImm();
O << AArch64CC::getCondCodeName(CC);
}
void AArch64InstPrinter::printInverseCondCode(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
AArch64CC::CondCode CC = (AArch64CC::CondCode)MI->getOperand(OpNum).getImm();
O << AArch64CC::getCondCodeName(AArch64CC::getInvertedCondCode(CC));
}
void AArch64InstPrinter::printAMNoIndex(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
O << '[' << getRegisterName(MI->getOperand(OpNum).getReg()) << ']';
}
template<int Scale>
void AArch64InstPrinter::printImmScale(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
O << '#' << formatImm(Scale * MI->getOperand(OpNum).getImm());
}
void AArch64InstPrinter::printUImm12Offset(const MCInst *MI, unsigned OpNum,
unsigned Scale, raw_ostream &O) {
const MCOperand MO = MI->getOperand(OpNum);
if (MO.isImm()) {
O << "#" << formatImm(MO.getImm() * Scale);
} else {
assert(MO.isExpr() && "Unexpected operand type!");
MO.getExpr()->print(O, &MAI);
}
}
void AArch64InstPrinter::printAMIndexedWB(const MCInst *MI, unsigned OpNum,
unsigned Scale, raw_ostream &O) {
const MCOperand MO1 = MI->getOperand(OpNum + 1);
O << '[' << getRegisterName(MI->getOperand(OpNum).getReg());
if (MO1.isImm()) {
O << ", #" << formatImm(MO1.getImm() * Scale);
} else {
assert(MO1.isExpr() && "Unexpected operand type!");
O << ", ";
MO1.getExpr()->print(O, &MAI);
}
O << ']';
}
template <bool IsSVEPrefetch>
void AArch64InstPrinter::printPrefetchOp(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
unsigned prfop = MI->getOperand(OpNum).getImm();
if (IsSVEPrefetch) {
if (auto PRFM = AArch64SVEPRFM::lookupSVEPRFMByEncoding(prfop)) {
O << PRFM->Name;
return;
}
} else if (auto PRFM = AArch64PRFM::lookupPRFMByEncoding(prfop)) {
O << PRFM->Name;
return;
}
O << '#' << formatImm(prfop);
}
void AArch64InstPrinter::printPSBHintOp(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
unsigned psbhintop = MI->getOperand(OpNum).getImm();
auto PSB = AArch64PSBHint::lookupPSBByEncoding(psbhintop);
if (PSB)
O << PSB->Name;
else
O << '#' << formatImm(psbhintop);
}
void AArch64InstPrinter::printBTIHintOp(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
unsigned btihintop = (MI->getOperand(OpNum).getImm() ^ 32) >> 1;
auto BTI = AArch64BTIHint::lookupBTIByEncoding(btihintop);
if (BTI)
O << BTI->Name;
else
O << '#' << formatImm(btihintop);
}
void AArch64InstPrinter::printFPImmOperand(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
const MCOperand &MO = MI->getOperand(OpNum);
float FPImm =
MO.isFPImm() ? MO.getFPImm() : AArch64_AM::getFPImmFloat(MO.getImm());
// 8 decimal places are enough to perfectly represent permitted floats.
O << format("#%.8f", FPImm);
}
static unsigned getNextVectorRegister(unsigned Reg, unsigned Stride = 1) {
while (Stride--) {
switch (Reg) {
default:
llvm_unreachable("Vector register expected!");
case AArch64::Q0: Reg = AArch64::Q1; break;
case AArch64::Q1: Reg = AArch64::Q2; break;
case AArch64::Q2: Reg = AArch64::Q3; break;
case AArch64::Q3: Reg = AArch64::Q4; break;
case AArch64::Q4: Reg = AArch64::Q5; break;
case AArch64::Q5: Reg = AArch64::Q6; break;
case AArch64::Q6: Reg = AArch64::Q7; break;
case AArch64::Q7: Reg = AArch64::Q8; break;
case AArch64::Q8: Reg = AArch64::Q9; break;
case AArch64::Q9: Reg = AArch64::Q10; break;
case AArch64::Q10: Reg = AArch64::Q11; break;
case AArch64::Q11: Reg = AArch64::Q12; break;
case AArch64::Q12: Reg = AArch64::Q13; break;
case AArch64::Q13: Reg = AArch64::Q14; break;
case AArch64::Q14: Reg = AArch64::Q15; break;
case AArch64::Q15: Reg = AArch64::Q16; break;
case AArch64::Q16: Reg = AArch64::Q17; break;
case AArch64::Q17: Reg = AArch64::Q18; break;
case AArch64::Q18: Reg = AArch64::Q19; break;
case AArch64::Q19: Reg = AArch64::Q20; break;
case AArch64::Q20: Reg = AArch64::Q21; break;
case AArch64::Q21: Reg = AArch64::Q22; break;
case AArch64::Q22: Reg = AArch64::Q23; break;
case AArch64::Q23: Reg = AArch64::Q24; break;
case AArch64::Q24: Reg = AArch64::Q25; break;
case AArch64::Q25: Reg = AArch64::Q26; break;
case AArch64::Q26: Reg = AArch64::Q27; break;
case AArch64::Q27: Reg = AArch64::Q28; break;
case AArch64::Q28: Reg = AArch64::Q29; break;
case AArch64::Q29: Reg = AArch64::Q30; break;
case AArch64::Q30: Reg = AArch64::Q31; break;
// Vector lists can wrap around.
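// e.g. a two-register list starting at v31 prints as "{ v31.16b, v0.16b }".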
case AArch64::Q31:
Reg = AArch64::Q0;
break;
case AArch64::Z0: Reg = AArch64::Z1; break;
case AArch64::Z1: Reg = AArch64::Z2; break;
case AArch64::Z2: Reg = AArch64::Z3; break;
case AArch64::Z3: Reg = AArch64::Z4; break;
case AArch64::Z4: Reg = AArch64::Z5; break;
case AArch64::Z5: Reg = AArch64::Z6; break;
case AArch64::Z6: Reg = AArch64::Z7; break;
case AArch64::Z7: Reg = AArch64::Z8; break;
case AArch64::Z8: Reg = AArch64::Z9; break;
case AArch64::Z9: Reg = AArch64::Z10; break;
case AArch64::Z10: Reg = AArch64::Z11; break;
case AArch64::Z11: Reg = AArch64::Z12; break;
case AArch64::Z12: Reg = AArch64::Z13; break;
case AArch64::Z13: Reg = AArch64::Z14; break;
case AArch64::Z14: Reg = AArch64::Z15; break;
case AArch64::Z15: Reg = AArch64::Z16; break;
case AArch64::Z16: Reg = AArch64::Z17; break;
case AArch64::Z17: Reg = AArch64::Z18; break;
case AArch64::Z18: Reg = AArch64::Z19; break;
case AArch64::Z19: Reg = AArch64::Z20; break;
case AArch64::Z20: Reg = AArch64::Z21; break;
case AArch64::Z21: Reg = AArch64::Z22; break;
case AArch64::Z22: Reg = AArch64::Z23; break;
case AArch64::Z23: Reg = AArch64::Z24; break;
case AArch64::Z24: Reg = AArch64::Z25; break;
case AArch64::Z25: Reg = AArch64::Z26; break;
case AArch64::Z26: Reg = AArch64::Z27; break;
case AArch64::Z27: Reg = AArch64::Z28; break;
case AArch64::Z28: Reg = AArch64::Z29; break;
case AArch64::Z29: Reg = AArch64::Z30; break;
case AArch64::Z30: Reg = AArch64::Z31; break;
// Vector lists can wrap around.
case AArch64::Z31:
Reg = AArch64::Z0;
break;
}
}
return Reg;
}
template<unsigned size>
void AArch64InstPrinter::printGPRSeqPairsClassOperand(const MCInst *MI,
unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
static_assert(size == 64 || size == 32,
"Template parameter must be either 32 or 64");
unsigned Reg = MI->getOperand(OpNum).getReg();
unsigned Sube = (size == 32) ? AArch64::sube32 : AArch64::sube64;
unsigned Subo = (size == 32) ? AArch64::subo32 : AArch64::subo64;
unsigned Even = MRI.getSubReg(Reg, Sube);
unsigned Odd = MRI.getSubReg(Reg, Subo);
O << getRegisterName(Even) << ", " << getRegisterName(Odd);
}
void AArch64InstPrinter::printVectorList(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O,
StringRef LayoutSuffix) {
unsigned Reg = MI->getOperand(OpNum).getReg();
O << "{ ";
// Work out how many registers there are in the list (if there is an actual
// list).
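// For example, a QQ pair prints as "{ v0.16b, v1.16b }" (the ".16b" coming
// from LayoutSuffix) and a ZPR2 pair as "{ z0.d, z1.d }".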
unsigned NumRegs = 1;
if (MRI.getRegClass(AArch64::DDRegClassID).contains(Reg) ||
MRI.getRegClass(AArch64::ZPR2RegClassID).contains(Reg) ||
MRI.getRegClass(AArch64::QQRegClassID).contains(Reg))
NumRegs = 2;
else if (MRI.getRegClass(AArch64::DDDRegClassID).contains(Reg) ||
MRI.getRegClass(AArch64::ZPR3RegClassID).contains(Reg) ||
MRI.getRegClass(AArch64::QQQRegClassID).contains(Reg))
NumRegs = 3;
else if (MRI.getRegClass(AArch64::DDDDRegClassID).contains(Reg) ||
MRI.getRegClass(AArch64::ZPR4RegClassID).contains(Reg) ||
MRI.getRegClass(AArch64::QQQQRegClassID).contains(Reg))
NumRegs = 4;
// Now forget about the list and find out what the first register is.
if (unsigned FirstReg = MRI.getSubReg(Reg, AArch64::dsub0))
Reg = FirstReg;
else if (unsigned FirstReg = MRI.getSubReg(Reg, AArch64::qsub0))
Reg = FirstReg;
else if (unsigned FirstReg = MRI.getSubReg(Reg, AArch64::zsub0))
Reg = FirstReg;
// If it's a D-reg, we need to promote it to the equivalent Q-reg before
// printing (otherwise getRegisterName fails).
if (MRI.getRegClass(AArch64::FPR64RegClassID).contains(Reg)) {
const MCRegisterClass &FPR128RC =
MRI.getRegClass(AArch64::FPR128RegClassID);
Reg = MRI.getMatchingSuperReg(Reg, AArch64::dsub, &FPR128RC);
}
for (unsigned i = 0; i < NumRegs; ++i, Reg = getNextVectorRegister(Reg)) {
if (MRI.getRegClass(AArch64::ZPRRegClassID).contains(Reg))
O << getRegisterName(Reg) << LayoutSuffix;
else
O << getRegisterName(Reg, AArch64::vreg) << LayoutSuffix;
if (i + 1 != NumRegs)
O << ", ";
}
O << " }";
}
void
AArch64InstPrinter::printImplicitlyTypedVectorList(const MCInst *MI,
unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
printVectorList(MI, OpNum, STI, O, "");
}
template <unsigned NumLanes, char LaneKind>
void AArch64InstPrinter::printTypedVectorList(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
std::string Suffix(".");
if (NumLanes)
Suffix += itostr(NumLanes) + LaneKind;
else
Suffix += LaneKind;
printVectorList(MI, OpNum, STI, O, Suffix);
}
void AArch64InstPrinter::printVectorIndex(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
O << "[" << MI->getOperand(OpNum).getImm() << "]";
}
void AArch64InstPrinter::printAlignedLabel(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
const MCOperand &Op = MI->getOperand(OpNum);
// If the label has already been resolved to an immediate offset (say, when
// we're running the disassembler), just print the immediate.
if (Op.isImm()) {
O << "#" << formatImm(Op.getImm() * 4);
return;
}
// If the branch target is simply an address then print it in hex.
const MCConstantExpr *BranchTarget =
dyn_cast<MCConstantExpr>(MI->getOperand(OpNum).getExpr());
int64_t Address;
if (BranchTarget && BranchTarget->evaluateAsAbsolute(Address)) {
O << "0x";
O.write_hex(Address);
} else {
// Otherwise, just print the expression.
MI->getOperand(OpNum).getExpr()->print(O, &MAI);
}
}
void AArch64InstPrinter::printAdrpLabel(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
const MCOperand &Op = MI->getOperand(OpNum);
// If the label has already been resolved to an immediate offset (say, when
// we're running the disassembler), just print the immediate.
if (Op.isImm()) {
O << "#" << formatImm(Op.getImm() * (1 << 12));
return;
}
// Otherwise, just print the expression.
MI->getOperand(OpNum).getExpr()->print(O, &MAI);
}
void AArch64InstPrinter::printBarrierOption(const MCInst *MI, unsigned OpNo,
const MCSubtargetInfo &STI,
raw_ostream &O) {
unsigned Val = MI->getOperand(OpNo).getImm();
unsigned Opcode = MI->getOpcode();
StringRef Name;
if (Opcode == AArch64::ISB) {
auto ISB = AArch64ISB::lookupISBByEncoding(Val);
Name = ISB ? ISB->Name : "";
} else if (Opcode == AArch64::TSB) {
auto TSB = AArch64TSB::lookupTSBByEncoding(Val);
Name = TSB ? TSB->Name : "";
} else {
auto DB = AArch64DB::lookupDBByEncoding(Val);
Name = DB ? DB->Name : "";
}
if (!Name.empty())
O << Name;
else
O << "#" << Val;
}
void AArch64InstPrinter::printMRSSystemRegister(const MCInst *MI, unsigned OpNo,
const MCSubtargetInfo &STI,
raw_ostream &O) {
unsigned Val = MI->getOperand(OpNo).getImm();
// Horrible hack for the one register that has identical encodings but
// different names in MSR and MRS. Because of this, one of MRS and MSR is
// going to get the wrong entry.
if (Val == AArch64SysReg::DBGDTRRX_EL0) {
O << "DBGDTRRX_EL0";
return;
}
const AArch64SysReg::SysReg *Reg = AArch64SysReg::lookupSysRegByEncoding(Val);
if (Reg && Reg->Readable && Reg->haveFeatures(STI.getFeatureBits()))
O << Reg->Name;
else
O << AArch64SysReg::genericRegisterString(Val);
}
void AArch64InstPrinter::printMSRSystemRegister(const MCInst *MI, unsigned OpNo,
const MCSubtargetInfo &STI,
raw_ostream &O) {
unsigned Val = MI->getOperand(OpNo).getImm();
// Horrible hack for the one register that has identical encodings but
// different names in MSR and MRS. Because of this, one of MRS and MSR is
// going to get the wrong entry.
if (Val == AArch64SysReg::DBGDTRTX_EL0) {
O << "DBGDTRTX_EL0";
return;
}
const AArch64SysReg::SysReg *Reg = AArch64SysReg::lookupSysRegByEncoding(Val);
if (Reg && Reg->Writeable && Reg->haveFeatures(STI.getFeatureBits()))
O << Reg->Name;
else
O << AArch64SysReg::genericRegisterString(Val);
}
void AArch64InstPrinter::printSystemPStateField(const MCInst *MI, unsigned OpNo,
const MCSubtargetInfo &STI,
raw_ostream &O) {
unsigned Val = MI->getOperand(OpNo).getImm();
auto PState = AArch64PState::lookupPStateByEncoding(Val);
if (PState && PState->haveFeatures(STI.getFeatureBits()))
O << PState->Name;
else
O << "#" << formatImm(Val);
}
void AArch64InstPrinter::printSIMDType10Operand(const MCInst *MI, unsigned OpNo,
const MCSubtargetInfo &STI,
raw_ostream &O) {
unsigned RawVal = MI->getOperand(OpNo).getImm();
uint64_t Val = AArch64_AM::decodeAdvSIMDModImmType10(RawVal);
O << format("#%#016llx", Val);
}
template<int64_t Angle, int64_t Remainder>
void AArch64InstPrinter::printComplexRotationOp(const MCInst *MI, unsigned OpNo,
const MCSubtargetInfo &STI,
raw_ostream &O) {
unsigned Val = MI->getOperand(OpNo).getImm();
O << "#" << (Val * Angle) + Remainder;
}
void AArch64InstPrinter::printSVEPattern(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
unsigned Val = MI->getOperand(OpNum).getImm();
if (auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByEncoding(Val))
O << Pat->Name;
else
O << '#' << formatImm(Val);
}
template <char suffix>
void AArch64InstPrinter::printSVERegOp(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
switch (suffix) {
case 0:
case 'b':
case 'h':
case 's':
case 'd':
case 'q':
break;
default: llvm_unreachable("Invalid kind specifier.");
}
unsigned Reg = MI->getOperand(OpNum).getReg();
O << getRegisterName(Reg);
if (suffix != 0)
O << '.' << suffix;
}
template <typename T>
void AArch64InstPrinter::printImmSVE(T Value, raw_ostream &O) {
typename std::make_unsigned<T>::type HexValue = Value;
if (getPrintImmHex())
O << '#' << formatHex((uint64_t)HexValue);
else
O << '#' << formatDec(Value);
if (CommentStream) {
// Emit the opposite representation to the one used for the operand itself.
if (getPrintImmHex())
*CommentStream << '=' << formatDec(HexValue) << '\n';
else
*CommentStream << '=' << formatHex((uint64_t)Value) << '\n';
}
}
template <typename T>
void AArch64InstPrinter::printImm8OptLsl(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
unsigned UnscaledVal = MI->getOperand(OpNum).getImm();
unsigned Shift = MI->getOperand(OpNum + 1).getImm();
assert(AArch64_AM::getShiftType(Shift) == AArch64_AM::LSL &&
"Unexepected shift type!");
// #0 lsl #8 is never pretty-printed.
if ((UnscaledVal == 0) && (AArch64_AM::getShiftValue(Shift) != 0)) {
O << '#' << formatImm(UnscaledVal);
printShifter(MI, OpNum + 1, STI, O);
return;
}
T Val;
if (std::is_signed<T>())
Val = (int8_t)UnscaledVal * (1 << AArch64_AM::getShiftValue(Shift));
else
Val = (uint8_t)UnscaledVal * (1 << AArch64_AM::getShiftValue(Shift));
printImmSVE(Val, O);
}
template <typename T>
void AArch64InstPrinter::printSVELogicalImm(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
typedef typename std::make_signed<T>::type SignedT;
typedef typename std::make_unsigned<T>::type UnsignedT;
uint64_t Val = MI->getOperand(OpNum).getImm();
UnsignedT PrintVal = AArch64_AM::decodeLogicalImmediate(Val, 64);
// Prefer the default format for 16-bit values, hex otherwise.
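// E.g. a mask decoding to 0xffffffffffffff00 prints as #-256, while
// 0x00ff00ff00ff00ff does not fit 16 bits either way and prints as raw hex.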
if ((int16_t)PrintVal == (SignedT)PrintVal)
printImmSVE((T)PrintVal, O);
else if ((uint16_t)PrintVal == PrintVal)
printImmSVE(PrintVal, O);
else
O << '#' << formatHex((uint64_t)PrintVal);
}
template <int Width>
void AArch64InstPrinter::printZPRasFPR(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
unsigned Base;
switch (Width) {
case 8: Base = AArch64::B0; break;
case 16: Base = AArch64::H0; break;
case 32: Base = AArch64::S0; break;
case 64: Base = AArch64::D0; break;
case 128: Base = AArch64::Q0; break;
default:
llvm_unreachable("Unsupported width");
}
unsigned Reg = MI->getOperand(OpNum).getReg();
O << getRegisterName(Reg - AArch64::Z0 + Base);
}
template <unsigned ImmIs0, unsigned ImmIs1>
void AArch64InstPrinter::printExactFPImm(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
auto *Imm0Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmIs0);
auto *Imm1Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmIs1);
unsigned Val = MI->getOperand(OpNum).getImm();
O << "#" << (Val ? Imm1Desc->Repr : Imm0Desc->Repr);
}
void AArch64InstPrinter::printGPR64as32(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
unsigned Reg = MI->getOperand(OpNum).getReg();
O << getRegisterName(getWRegFromXReg(Reg));
}
Index: stable/12/contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
===================================================================
--- stable/12/contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp (revision 356465)
@@ -1,1408 +1,1409 @@
//===-- GCNHazardRecognizers.cpp - GCN Hazard Recognizer Impls ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements hazard recognizers for scheduling on GCN processors.
//
//===----------------------------------------------------------------------===//
#include "GCNHazardRecognizer.h"
#include "AMDGPUSubtarget.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <limits>
#include <set>
#include <vector>
using namespace llvm;
//===----------------------------------------------------------------------===//
// Hazard Recognizer Implementation
//===----------------------------------------------------------------------===//
GCNHazardRecognizer::GCNHazardRecognizer(const MachineFunction &MF) :
IsHazardRecognizerMode(false),
CurrCycleInstr(nullptr),
MF(MF),
ST(MF.getSubtarget<GCNSubtarget>()),
TII(*ST.getInstrInfo()),
TRI(TII.getRegisterInfo()),
ClauseUses(TRI.getNumRegUnits()),
ClauseDefs(TRI.getNumRegUnits()) {
MaxLookAhead = MF.getRegInfo().isPhysRegUsed(AMDGPU::AGPR0) ? 18 : 5;
TSchedModel.init(&ST);
}
void GCNHazardRecognizer::EmitInstruction(SUnit *SU) {
EmitInstruction(SU->getInstr());
}
void GCNHazardRecognizer::EmitInstruction(MachineInstr *MI) {
CurrCycleInstr = MI;
}
static bool isDivFMas(unsigned Opcode) {
return Opcode == AMDGPU::V_DIV_FMAS_F32 || Opcode == AMDGPU::V_DIV_FMAS_F64;
}
static bool isSGetReg(unsigned Opcode) {
return Opcode == AMDGPU::S_GETREG_B32;
}
static bool isSSetReg(unsigned Opcode) {
return Opcode == AMDGPU::S_SETREG_B32 || Opcode == AMDGPU::S_SETREG_IMM32_B32;
}
static bool isRWLane(unsigned Opcode) {
return Opcode == AMDGPU::V_READLANE_B32 || Opcode == AMDGPU::V_WRITELANE_B32;
}
static bool isRFE(unsigned Opcode) {
return Opcode == AMDGPU::S_RFE_B64;
}
static bool isSMovRel(unsigned Opcode) {
switch (Opcode) {
case AMDGPU::S_MOVRELS_B32:
case AMDGPU::S_MOVRELS_B64:
case AMDGPU::S_MOVRELD_B32:
case AMDGPU::S_MOVRELD_B64:
return true;
default:
return false;
}
}
static bool isSendMsgTraceDataOrGDS(const SIInstrInfo &TII,
const MachineInstr &MI) {
if (TII.isAlwaysGDS(MI.getOpcode()))
return true;
switch (MI.getOpcode()) {
case AMDGPU::S_SENDMSG:
case AMDGPU::S_SENDMSGHALT:
case AMDGPU::S_TTRACEDATA:
return true;
// These DS opcodes don't support GDS.
case AMDGPU::DS_NOP:
case AMDGPU::DS_PERMUTE_B32:
case AMDGPU::DS_BPERMUTE_B32:
return false;
default:
if (TII.isDS(MI.getOpcode())) {
int GDS = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
AMDGPU::OpName::gds);
if (MI.getOperand(GDS).getImm())
return true;
}
return false;
}
}
static bool isPermlane(const MachineInstr &MI) {
unsigned Opcode = MI.getOpcode();
return Opcode == AMDGPU::V_PERMLANE16_B32 ||
Opcode == AMDGPU::V_PERMLANEX16_B32;
}
static unsigned getHWReg(const SIInstrInfo *TII, const MachineInstr &RegInstr) {
const MachineOperand *RegOp = TII->getNamedOperand(RegInstr,
AMDGPU::OpName::simm16);
return RegOp->getImm() & AMDGPU::Hwreg::ID_MASK_;
}
ScheduleHazardRecognizer::HazardType
GCNHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
MachineInstr *MI = SU->getInstr();
if (MI->isBundle())
return NoHazard;
if (SIInstrInfo::isSMRD(*MI) && checkSMRDHazards(MI) > 0)
return NoopHazard;
// FIXME: Should flat be considered vmem?
if ((SIInstrInfo::isVMEM(*MI) ||
SIInstrInfo::isFLAT(*MI))
&& checkVMEMHazards(MI) > 0)
return NoopHazard;
if (ST.hasNSAtoVMEMBug() && checkNSAtoVMEMHazard(MI) > 0)
return NoopHazard;
if (checkFPAtomicToDenormModeHazard(MI) > 0)
return NoopHazard;
if (ST.hasNoDataDepHazard())
return NoHazard;
if (SIInstrInfo::isVALU(*MI) && checkVALUHazards(MI) > 0)
return NoopHazard;
if (SIInstrInfo::isDPP(*MI) && checkDPPHazards(MI) > 0)
return NoopHazard;
if (isDivFMas(MI->getOpcode()) && checkDivFMasHazards(MI) > 0)
return NoopHazard;
if (isRWLane(MI->getOpcode()) && checkRWLaneHazards(MI) > 0)
return NoopHazard;
if (isSGetReg(MI->getOpcode()) && checkGetRegHazards(MI) > 0)
return NoopHazard;
if (isSSetReg(MI->getOpcode()) && checkSetRegHazards(MI) > 0)
return NoopHazard;
if (isRFE(MI->getOpcode()) && checkRFEHazards(MI) > 0)
return NoopHazard;
if (ST.hasReadM0MovRelInterpHazard() &&
(TII.isVINTRP(*MI) || isSMovRel(MI->getOpcode())) &&
checkReadM0Hazards(MI) > 0)
return NoopHazard;
if (ST.hasReadM0SendMsgHazard() && isSendMsgTraceDataOrGDS(TII, *MI) &&
checkReadM0Hazards(MI) > 0)
return NoopHazard;
if (SIInstrInfo::isMAI(*MI) && checkMAIHazards(MI) > 0)
return NoopHazard;
if ((MI->mayLoad() || MI->mayStore()) && checkMAILdStHazards(MI) > 0)
return NoopHazard;
if (MI->isInlineAsm() && checkInlineAsmHazards(MI) > 0)
return NoopHazard;
if (checkAnyInstHazards(MI) > 0)
return NoopHazard;
return NoHazard;
}
static void insertNoopInBundle(MachineInstr *MI, const SIInstrInfo &TII) {
BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII.get(AMDGPU::S_NOP))
.addImm(0);
}
void GCNHazardRecognizer::processBundle() {
MachineBasicBlock::instr_iterator MI = std::next(CurrCycleInstr->getIterator());
MachineBasicBlock::instr_iterator E = CurrCycleInstr->getParent()->instr_end();
// Check bundled MachineInstr's for hazards.
for (; MI != E && MI->isInsideBundle(); ++MI) {
CurrCycleInstr = &*MI;
unsigned WaitStates = PreEmitNoopsCommon(CurrCycleInstr);
if (IsHazardRecognizerMode)
fixHazards(CurrCycleInstr);
for (unsigned i = 0; i < WaitStates; ++i)
insertNoopInBundle(CurrCycleInstr, TII);
// It's unnecessary to track more than MaxLookAhead instructions. Since we
// include the bundled MI directly after, only add a maximum of
// (MaxLookAhead - 1) noops to EmittedInstrs.
for (unsigned i = 0, e = std::min(WaitStates, MaxLookAhead - 1); i < e; ++i)
EmittedInstrs.push_front(nullptr);
EmittedInstrs.push_front(CurrCycleInstr);
EmittedInstrs.resize(MaxLookAhead);
}
CurrCycleInstr = nullptr;
}
unsigned GCNHazardRecognizer::PreEmitNoops(SUnit *SU) {
IsHazardRecognizerMode = false;
return PreEmitNoopsCommon(SU->getInstr());
}
unsigned GCNHazardRecognizer::PreEmitNoops(MachineInstr *MI) {
IsHazardRecognizerMode = true;
CurrCycleInstr = MI;
unsigned W = PreEmitNoopsCommon(MI);
fixHazards(MI);
CurrCycleInstr = nullptr;
return W;
}
unsigned GCNHazardRecognizer::PreEmitNoopsCommon(MachineInstr *MI) {
if (MI->isBundle())
return 0;
int WaitStates = std::max(0, checkAnyInstHazards(MI));
if (SIInstrInfo::isSMRD(*MI))
return std::max(WaitStates, checkSMRDHazards(MI));
if (SIInstrInfo::isVMEM(*MI) || SIInstrInfo::isFLAT(*MI))
WaitStates = std::max(WaitStates, checkVMEMHazards(MI));
if (ST.hasNSAtoVMEMBug())
WaitStates = std::max(WaitStates, checkNSAtoVMEMHazard(MI));
WaitStates = std::max(WaitStates, checkFPAtomicToDenormModeHazard(MI));
if (ST.hasNoDataDepHazard())
return WaitStates;
if (SIInstrInfo::isVALU(*MI))
WaitStates = std::max(WaitStates, checkVALUHazards(MI));
if (SIInstrInfo::isDPP(*MI))
WaitStates = std::max(WaitStates, checkDPPHazards(MI));
if (isDivFMas(MI->getOpcode()))
WaitStates = std::max(WaitStates, checkDivFMasHazards(MI));
if (isRWLane(MI->getOpcode()))
WaitStates = std::max(WaitStates, checkRWLaneHazards(MI));
if (MI->isInlineAsm())
return std::max(WaitStates, checkInlineAsmHazards(MI));
if (isSGetReg(MI->getOpcode()))
return std::max(WaitStates, checkGetRegHazards(MI));
if (isSSetReg(MI->getOpcode()))
return std::max(WaitStates, checkSetRegHazards(MI));
if (isRFE(MI->getOpcode()))
return std::max(WaitStates, checkRFEHazards(MI));
if (ST.hasReadM0MovRelInterpHazard() && (TII.isVINTRP(*MI) ||
isSMovRel(MI->getOpcode())))
return std::max(WaitStates, checkReadM0Hazards(MI));
if (ST.hasReadM0SendMsgHazard() && isSendMsgTraceDataOrGDS(TII, *MI))
return std::max(WaitStates, checkReadM0Hazards(MI));
if (SIInstrInfo::isMAI(*MI))
return std::max(WaitStates, checkMAIHazards(MI));
if (MI->mayLoad() || MI->mayStore())
return std::max(WaitStates, checkMAILdStHazards(MI));
return WaitStates;
}
void GCNHazardRecognizer::EmitNoop() {
EmittedInstrs.push_front(nullptr);
}
void GCNHazardRecognizer::AdvanceCycle() {
// When the scheduler detects a stall, it will call AdvanceCycle() without
// emitting any instructions.
if (!CurrCycleInstr)
return;
// Do not track non-instructions which do not affect the wait states.
// If included, these instructions can lead to buffer overflow such that
// detectable hazards are missed.
if (CurrCycleInstr->isImplicitDef() || CurrCycleInstr->isDebugInstr() ||
CurrCycleInstr->isKill())
return;
if (CurrCycleInstr->isBundle()) {
processBundle();
return;
}
unsigned NumWaitStates = TII.getNumWaitStates(*CurrCycleInstr);
// Keep track of emitted instructions
EmittedInstrs.push_front(CurrCycleInstr);
// Add a nullptr for each additional wait state after the first. Make sure
// not to add more than getMaxLookAhead() items to the list, since we
// truncate the list to that size right after this loop.
for (unsigned i = 1, e = std::min(NumWaitStates, getMaxLookAhead());
i < e; ++i) {
EmittedInstrs.push_front(nullptr);
}
// getMaxLookAhead() is the largest number of wait states we will ever need
// to insert, so there is no point in keeping track of more than that many
// wait states.
EmittedInstrs.resize(getMaxLookAhead());
CurrCycleInstr = nullptr;
}
void GCNHazardRecognizer::RecedeCycle() {
llvm_unreachable("hazard recognizer does not support bottom-up scheduling.");
}
//===----------------------------------------------------------------------===//
// Helper Functions
//===----------------------------------------------------------------------===//
typedef function_ref<bool(MachineInstr *, int WaitStates)> IsExpiredFn;
// Returns the minimum number of wait states since \p I, walking all
// predecessors. Scans only until \p IsExpired returns true.
// Can only be run in hazard recognizer mode.
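// At a basic-block boundary the search recurses into every predecessor and
// keeps the smallest count found, so the result is a conservative estimate of
// the distance back to the hazard (std::numeric_limits<int>::max() if none).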
static int getWaitStatesSince(GCNHazardRecognizer::IsHazardFn IsHazard,
MachineBasicBlock *MBB,
MachineBasicBlock::reverse_instr_iterator I,
int WaitStates,
IsExpiredFn IsExpired,
DenseSet<const MachineBasicBlock *> &Visited) {
for (auto E = MBB->instr_rend(); I != E; ++I) {
// Don't add WaitStates for parent BUNDLE instructions.
if (I->isBundle())
continue;
if (IsHazard(&*I))
return WaitStates;
if (I->isInlineAsm() || I->isImplicitDef() || I->isDebugInstr())
continue;
WaitStates += SIInstrInfo::getNumWaitStates(*I);
if (IsExpired(&*I, WaitStates))
return std::numeric_limits<int>::max();
}
int MinWaitStates = WaitStates;
bool Found = false;
for (MachineBasicBlock *Pred : MBB->predecessors()) {
if (!Visited.insert(Pred).second)
continue;
int W = getWaitStatesSince(IsHazard, Pred, Pred->instr_rbegin(),
WaitStates, IsExpired, Visited);
if (W == std::numeric_limits<int>::max())
continue;
MinWaitStates = Found ? std::min(MinWaitStates, W) : W;
if (IsExpired(nullptr, MinWaitStates))
return MinWaitStates;
Found = true;
}
if (Found)
return MinWaitStates;
return std::numeric_limits<int>::max();
}
static int getWaitStatesSince(GCNHazardRecognizer::IsHazardFn IsHazard,
MachineInstr *MI,
IsExpiredFn IsExpired) {
DenseSet<const MachineBasicBlock *> Visited;
return getWaitStatesSince(IsHazard, MI->getParent(),
std::next(MI->getReverseIterator()),
0, IsExpired, Visited);
}
int GCNHazardRecognizer::getWaitStatesSince(IsHazardFn IsHazard, int Limit) {
if (IsHazardRecognizerMode) {
auto IsExpiredFn = [Limit] (MachineInstr *, int WaitStates) {
return WaitStates >= Limit;
};
return ::getWaitStatesSince(IsHazard, CurrCycleInstr, IsExpiredFn);
}
int WaitStates = 0;
for (MachineInstr *MI : EmittedInstrs) {
if (MI) {
if (IsHazard(MI))
return WaitStates;
if (MI->isInlineAsm())
continue;
}
++WaitStates;
if (WaitStates >= Limit)
break;
}
return std::numeric_limits<int>::max();
}
int GCNHazardRecognizer::getWaitStatesSinceDef(unsigned Reg,
IsHazardFn IsHazardDef,
int Limit) {
const SIRegisterInfo *TRI = ST.getRegisterInfo();
auto IsHazardFn = [IsHazardDef, TRI, Reg] (MachineInstr *MI) {
return IsHazardDef(MI) && MI->modifiesRegister(Reg, TRI);
};
return getWaitStatesSince(IsHazardFn, Limit);
}
int GCNHazardRecognizer::getWaitStatesSinceSetReg(IsHazardFn IsHazard,
int Limit) {
auto IsHazardFn = [IsHazard] (MachineInstr *MI) {
return isSSetReg(MI->getOpcode()) && IsHazard(MI);
};
return getWaitStatesSince(IsHazardFn, Limit);
}
//===----------------------------------------------------------------------===//
// No-op Hazard Detection
//===----------------------------------------------------------------------===//
static void addRegUnits(const SIRegisterInfo &TRI,
BitVector &BV, unsigned Reg) {
for (MCRegUnitIterator RUI(Reg, &TRI); RUI.isValid(); ++RUI)
BV.set(*RUI);
}
static void addRegsToSet(const SIRegisterInfo &TRI,
iterator_range<MachineInstr::const_mop_iterator> Ops,
BitVector &Set) {
for (const MachineOperand &Op : Ops) {
if (Op.isReg())
addRegUnits(TRI, Set, Op.getReg());
}
}
void GCNHazardRecognizer::addClauseInst(const MachineInstr &MI) {
// XXX: Do we need to worry about implicit operands?
addRegsToSet(TRI, MI.defs(), ClauseDefs);
addRegsToSet(TRI, MI.uses(), ClauseUses);
}
int GCNHazardRecognizer::checkSoftClauseHazards(MachineInstr *MEM) {
// SMEM soft clauses are only present on VI+, and only matter if XNACK is
// enabled.
if (!ST.isXNACKEnabled())
return 0;
bool IsSMRD = TII.isSMRD(*MEM);
resetClause();
// A soft-clause is any group of consecutive SMEM instructions. The
// instructions in this group may return out of order and/or may be
// replayed (i.e. the same instruction issued more than once).
//
// In order to handle these situations correctly we need to make sure that
// when a clause has more than one instruction, no instruction in the clause
// writes to a register that is read by another instruction in the clause
// (including itself). If we encounter this situation, we need to break the
// clause by inserting a non-SMEM instruction.
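// For example (illustrative registers): if s_load_dwordx2 s[4:5], s[0:1], 0x0
// is immediately followed by s_load_dword s6, s[4:5], 0x0, the second load's
// address uses a register written inside the same clause, so the clause must
// be broken.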
for (MachineInstr *MI : EmittedInstrs) {
// When we hit a non-SMEM instruction then we have passed the start of the
// clause and we can stop.
if (!MI)
break;
if (IsSMRD != SIInstrInfo::isSMRD(*MI))
break;
addClauseInst(*MI);
}
if (ClauseDefs.none())
return 0;
// We need to make sure not to put loads and stores in the same clause if they
// use the same address. For now, just start a new clause whenever we see a
// store.
if (MEM->mayStore())
return 1;
addClauseInst(*MEM);
// If the set of defs and uses intersect then we cannot add this instruction
// to the clause, so we have a hazard.
return ClauseDefs.anyCommon(ClauseUses) ? 1 : 0;
}
int GCNHazardRecognizer::checkSMRDHazards(MachineInstr *SMRD) {
int WaitStatesNeeded = 0;
WaitStatesNeeded = checkSoftClauseHazards(SMRD);
// This SMRD hazard only affects SI.
if (!ST.hasSMRDReadVALUDefHazard())
return WaitStatesNeeded;
// A read of an SGPR by SMRD instruction requires 4 wait states when the
// SGPR was written by a VALU instruction.
int SmrdSgprWaitStates = 4;
auto IsHazardDefFn = [this] (MachineInstr *MI) { return TII.isVALU(*MI); };
auto IsBufferHazardDefFn = [this] (MachineInstr *MI) { return TII.isSALU(*MI); };
bool IsBufferSMRD = TII.isBufferSMRD(*SMRD);
for (const MachineOperand &Use : SMRD->uses()) {
if (!Use.isReg())
continue;
int WaitStatesNeededForUse =
SmrdSgprWaitStates - getWaitStatesSinceDef(Use.getReg(), IsHazardDefFn,
SmrdSgprWaitStates);
WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
// This fixes what appears to be undocumented hardware behavior in SI where
// s_mov writing a descriptor and s_buffer_load_dword reading the descriptor
// needs some number of nops in between. We don't know how many we need, but
// let's use 4. This wasn't discovered before probably because the only
// case when this happens is when we expand a 64-bit pointer into a full
// descriptor and use s_buffer_load_dword instead of s_load_dword, which was
// probably never encountered in the closed-source land.
if (IsBufferSMRD) {
int WaitStatesNeededForUse =
SmrdSgprWaitStates - getWaitStatesSinceDef(Use.getReg(),
IsBufferHazardDefFn,
SmrdSgprWaitStates);
WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
}
}
return WaitStatesNeeded;
}
int GCNHazardRecognizer::checkVMEMHazards(MachineInstr* VMEM) {
if (!ST.hasVMEMReadSGPRVALUDefHazard())
return 0;
int WaitStatesNeeded = checkSoftClauseHazards(VMEM);
// A read of an SGPR by a VMEM instruction requires 5 wait states when the
// SGPR was written by a VALU Instruction.
const int VmemSgprWaitStates = 5;
auto IsHazardDefFn = [this] (MachineInstr *MI) { return TII.isVALU(*MI); };
for (const MachineOperand &Use : VMEM->uses()) {
if (!Use.isReg() || TRI.isVGPR(MF.getRegInfo(), Use.getReg()))
continue;
int WaitStatesNeededForUse =
VmemSgprWaitStates - getWaitStatesSinceDef(Use.getReg(), IsHazardDefFn,
VmemSgprWaitStates);
WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
}
return WaitStatesNeeded;
}
int GCNHazardRecognizer::checkDPPHazards(MachineInstr *DPP) {
const SIRegisterInfo *TRI = ST.getRegisterInfo();
const SIInstrInfo *TII = ST.getInstrInfo();
// Check for DPP VGPR read after VALU VGPR write and EXEC write.
int DppVgprWaitStates = 2;
int DppExecWaitStates = 5;
int WaitStatesNeeded = 0;
auto IsHazardDefFn = [TII] (MachineInstr *MI) { return TII->isVALU(*MI); };
for (const MachineOperand &Use : DPP->uses()) {
if (!Use.isReg() || !TRI->isVGPR(MF.getRegInfo(), Use.getReg()))
continue;
int WaitStatesNeededForUse =
DppVgprWaitStates - getWaitStatesSinceDef(Use.getReg(),
[](MachineInstr *) { return true; },
DppVgprWaitStates);
WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
}
WaitStatesNeeded = std::max(
WaitStatesNeeded,
DppExecWaitStates - getWaitStatesSinceDef(AMDGPU::EXEC, IsHazardDefFn,
DppExecWaitStates));
return WaitStatesNeeded;
}
int GCNHazardRecognizer::checkDivFMasHazards(MachineInstr *DivFMas) {
const SIInstrInfo *TII = ST.getInstrInfo();
// v_div_fmas requires 4 wait states after a write to vcc from a VALU
// instruction.
const int DivFMasWaitStates = 4;
auto IsHazardDefFn = [TII] (MachineInstr *MI) { return TII->isVALU(*MI); };
int WaitStatesNeeded = getWaitStatesSinceDef(AMDGPU::VCC, IsHazardDefFn,
DivFMasWaitStates);
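// WaitStatesNeeded is really the number of wait states already elapsed since
// the vcc write; e.g. if that write was 1 state back, 4 - 1 = 3 more are
// needed. A result of zero or less means no extra wait states are required.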
return DivFMasWaitStates - WaitStatesNeeded;
}
int GCNHazardRecognizer::checkGetRegHazards(MachineInstr *GetRegInstr) {
const SIInstrInfo *TII = ST.getInstrInfo();
unsigned GetRegHWReg = getHWReg(TII, *GetRegInstr);
const int GetRegWaitStates = 2;
auto IsHazardFn = [TII, GetRegHWReg] (MachineInstr *MI) {
return GetRegHWReg == getHWReg(TII, *MI);
};
int WaitStatesNeeded = getWaitStatesSinceSetReg(IsHazardFn, GetRegWaitStates);
return GetRegWaitStates - WaitStatesNeeded;
}
int GCNHazardRecognizer::checkSetRegHazards(MachineInstr *SetRegInstr) {
const SIInstrInfo *TII = ST.getInstrInfo();
unsigned HWReg = getHWReg(TII, *SetRegInstr);
const int SetRegWaitStates = ST.getSetRegWaitStates();
auto IsHazardFn = [TII, HWReg] (MachineInstr *MI) {
return HWReg == getHWReg(TII, *MI);
};
int WaitStatesNeeded = getWaitStatesSinceSetReg(IsHazardFn, SetRegWaitStates);
return SetRegWaitStates - WaitStatesNeeded;
}
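// Returns the index of the store-data operand when \p MI is a vector-memory
// store that can participate in the VALU store-data hazard checked below, or
// -1 when no such hazard is possible.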
int GCNHazardRecognizer::createsVALUHazard(const MachineInstr &MI) {
if (!MI.mayStore())
return -1;
const SIInstrInfo *TII = ST.getInstrInfo();
unsigned Opcode = MI.getOpcode();
const MCInstrDesc &Desc = MI.getDesc();
int VDataIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdata);
int VDataRCID = -1;
if (VDataIdx != -1)
VDataRCID = Desc.OpInfo[VDataIdx].RegClass;
if (TII->isMUBUF(MI) || TII->isMTBUF(MI)) {
// There is no hazard if the instruction does not use vector regs
// (like wbinvl1)
if (VDataIdx == -1)
return -1;
// For MUBUF/MTBUF instructions this hazard only exists if the
// instruction is not using a register in the soffset field.
const MachineOperand *SOffset =
TII->getNamedOperand(MI, AMDGPU::OpName::soffset);
// If we have no soffset operand, then assume this field has been
// hardcoded to zero.
if (AMDGPU::getRegBitWidth(VDataRCID) > 64 &&
(!SOffset || !SOffset->isReg()))
return VDataIdx;
}
// MIMG instructions create a hazard if they don't use a 256-bit T# and
// the store size is greater than 8 bytes and they have more than two bits
// of their dmask set.
// All our MIMG definitions use a 256-bit T#, so we can skip checking for them.
if (TII->isMIMG(MI)) {
int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::srsrc);
assert(SRsrcIdx != -1 &&
AMDGPU::getRegBitWidth(Desc.OpInfo[SRsrcIdx].RegClass) == 256);
(void)SRsrcIdx;
}
if (TII->isFLAT(MI)) {
int DataIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdata);
if (AMDGPU::getRegBitWidth(Desc.OpInfo[DataIdx].RegClass) > 64)
return DataIdx;
}
return -1;
}
int GCNHazardRecognizer::checkVALUHazardsHelper(const MachineOperand &Def,
const MachineRegisterInfo &MRI) {
// Helper to check for the hazard where VMEM instructions that store more than
// 8 bytes can have their store data overwritten by the next instruction.
const SIRegisterInfo *TRI = ST.getRegisterInfo();
const int VALUWaitStates = 1;
int WaitStatesNeeded = 0;
if (!TRI->isVGPR(MRI, Def.getReg()))
return WaitStatesNeeded;
unsigned Reg = Def.getReg();
auto IsHazardFn = [this, Reg, TRI] (MachineInstr *MI) {
int DataIdx = createsVALUHazard(*MI);
return DataIdx >= 0 &&
TRI->regsOverlap(MI->getOperand(DataIdx).getReg(), Reg);
};
int WaitStatesNeededForDef =
VALUWaitStates - getWaitStatesSince(IsHazardFn, VALUWaitStates);
WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForDef);
return WaitStatesNeeded;
}
int GCNHazardRecognizer::checkVALUHazards(MachineInstr *VALU) {
// This checks for the hazard where VMEM instructions that store more than
// 8 bytes can have their store data overwritten by the next instruction.
if (!ST.has12DWordStoreHazard())
return 0;
const MachineRegisterInfo &MRI = MF.getRegInfo();
int WaitStatesNeeded = 0;
for (const MachineOperand &Def : VALU->defs()) {
WaitStatesNeeded = std::max(WaitStatesNeeded, checkVALUHazardsHelper(Def, MRI));
}
return WaitStatesNeeded;
}
int GCNHazardRecognizer::checkInlineAsmHazards(MachineInstr *IA) {
// This checks for hazards associated with inline asm statements.
// Since inline asms can contain just about anything, we use this
// to call/leverage other check*Hazard routines. Note that
// this function doesn't attempt to address all possible inline asm
// hazards (good luck), but is a collection of what has been
// problematic thus far.
// see checkVALUHazards()
if (!ST.has12DWordStoreHazard())
return 0;
const MachineRegisterInfo &MRI = MF.getRegInfo();
int WaitStatesNeeded = 0;
for (unsigned I = InlineAsm::MIOp_FirstOperand, E = IA->getNumOperands();
I != E; ++I) {
const MachineOperand &Op = IA->getOperand(I);
if (Op.isReg() && Op.isDef()) {
WaitStatesNeeded = std::max(WaitStatesNeeded, checkVALUHazardsHelper(Op, MRI));
}
}
return WaitStatesNeeded;
}
int GCNHazardRecognizer::checkRWLaneHazards(MachineInstr *RWLane) {
const SIInstrInfo *TII = ST.getInstrInfo();
const SIRegisterInfo *TRI = ST.getRegisterInfo();
const MachineRegisterInfo &MRI = MF.getRegInfo();
const MachineOperand *LaneSelectOp =
TII->getNamedOperand(*RWLane, AMDGPU::OpName::src1);
if (!LaneSelectOp->isReg() || !TRI->isSGPRReg(MRI, LaneSelectOp->getReg()))
return 0;
unsigned LaneSelectReg = LaneSelectOp->getReg();
auto IsHazardFn = [TII] (MachineInstr *MI) {
return TII->isVALU(*MI);
};
const int RWLaneWaitStates = 4;
int WaitStatesSince = getWaitStatesSinceDef(LaneSelectReg, IsHazardFn,
RWLaneWaitStates);
return RWLaneWaitStates - WaitStatesSince;
}
int GCNHazardRecognizer::checkRFEHazards(MachineInstr *RFE) {
if (!ST.hasRFEHazards())
return 0;
const SIInstrInfo *TII = ST.getInstrInfo();
const int RFEWaitStates = 1;
auto IsHazardFn = [TII] (MachineInstr *MI) {
return getHWReg(TII, *MI) == AMDGPU::Hwreg::ID_TRAPSTS;
};
int WaitStatesNeeded = getWaitStatesSinceSetReg(IsHazardFn, RFEWaitStates);
return RFEWaitStates - WaitStatesNeeded;
}
int GCNHazardRecognizer::checkAnyInstHazards(MachineInstr *MI) {
if (MI->isDebugInstr())
return 0;
const SIRegisterInfo *TRI = ST.getRegisterInfo();
if (!ST.hasSMovFedHazard())
return 0;
// Check for any instruction reading an SGPR after a write from
// s_mov_fed_b32.
int MovFedWaitStates = 1;
int WaitStatesNeeded = 0;
for (const MachineOperand &Use : MI->uses()) {
if (!Use.isReg() || TRI->isVGPR(MF.getRegInfo(), Use.getReg()))
continue;
auto IsHazardFn = [] (MachineInstr *MI) {
return MI->getOpcode() == AMDGPU::S_MOV_FED_B32;
};
int WaitStatesNeededForUse =
MovFedWaitStates - getWaitStatesSinceDef(Use.getReg(), IsHazardFn,
MovFedWaitStates);
WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
}
return WaitStatesNeeded;
}
int GCNHazardRecognizer::checkReadM0Hazards(MachineInstr *MI) {
const SIInstrInfo *TII = ST.getInstrInfo();
const int SMovRelWaitStates = 1;
auto IsHazardFn = [TII] (MachineInstr *MI) {
return TII->isSALU(*MI);
};
return SMovRelWaitStates - getWaitStatesSinceDef(AMDGPU::M0, IsHazardFn,
SMovRelWaitStates);
}
void GCNHazardRecognizer::fixHazards(MachineInstr *MI) {
fixVMEMtoScalarWriteHazards(MI);
fixVcmpxPermlaneHazards(MI);
fixSMEMtoVectorWriteHazards(MI);
fixVcmpxExecWARHazard(MI);
fixLdsBranchVmemWARHazard(MI);
}
bool GCNHazardRecognizer::fixVcmpxPermlaneHazards(MachineInstr *MI) {
if (!ST.hasVcmpxPermlaneHazard() || !isPermlane(*MI))
return false;
const SIInstrInfo *TII = ST.getInstrInfo();
auto IsHazardFn = [TII] (MachineInstr *MI) {
return TII->isVOPC(*MI);
};
auto IsExpiredFn = [] (MachineInstr *MI, int) {
if (!MI)
return false;
unsigned Opc = MI->getOpcode();
return SIInstrInfo::isVALU(*MI) &&
Opc != AMDGPU::V_NOP_e32 &&
Opc != AMDGPU::V_NOP_e64 &&
Opc != AMDGPU::V_NOP_sdwa;
};
if (::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn) ==
std::numeric_limits<int>::max())
return false;
// V_NOP will be discarded by SQ.
// Use V_MOV_B32 v?, v?. Register must be alive so use src0 of V_PERMLANE*
// which is always a VGPR and available.
auto *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
unsigned Reg = Src0->getReg();
bool IsUndef = Src0->isUndef();
BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
TII->get(AMDGPU::V_MOV_B32_e32))
.addReg(Reg, RegState::Define | (IsUndef ? RegState::Dead : 0))
.addReg(Reg, IsUndef ? RegState::Undef : RegState::Kill);
return true;
}
bool GCNHazardRecognizer::fixVMEMtoScalarWriteHazards(MachineInstr *MI) {
if (!ST.hasVMEMtoScalarWriteHazard())
return false;
if (!SIInstrInfo::isSALU(*MI) && !SIInstrInfo::isSMRD(*MI))
return false;
if (MI->getNumDefs() == 0)
return false;
const SIRegisterInfo *TRI = ST.getRegisterInfo();
auto IsHazardFn = [TRI, MI] (MachineInstr *I) {
if (!SIInstrInfo::isVMEM(*I) && !SIInstrInfo::isDS(*I) &&
!SIInstrInfo::isFLAT(*I))
return false;
for (const MachineOperand &Def : MI->defs()) {
MachineOperand *Op = I->findRegisterUseOperand(Def.getReg(), false, TRI);
if (!Op)
continue;
return true;
}
return false;
};
auto IsExpiredFn = [] (MachineInstr *MI, int) {
return MI && (SIInstrInfo::isVALU(*MI) ||
(MI->getOpcode() == AMDGPU::S_WAITCNT &&
!MI->getOperand(0).getImm()));
};
if (::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn) ==
std::numeric_limits<int>::max())
return false;
const SIInstrInfo *TII = ST.getInstrInfo();
BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII->get(AMDGPU::V_NOP_e32));
return true;
}
bool GCNHazardRecognizer::fixSMEMtoVectorWriteHazards(MachineInstr *MI) {
if (!ST.hasSMEMtoVectorWriteHazard())
return false;
if (!SIInstrInfo::isVALU(*MI))
return false;
unsigned SDSTName;
switch (MI->getOpcode()) {
case AMDGPU::V_READLANE_B32:
+ case AMDGPU::V_READLANE_B32_gfx10:
case AMDGPU::V_READFIRSTLANE_B32:
SDSTName = AMDGPU::OpName::vdst;
break;
default:
SDSTName = AMDGPU::OpName::sdst;
break;
}
const SIInstrInfo *TII = ST.getInstrInfo();
const SIRegisterInfo *TRI = ST.getRegisterInfo();
const AMDGPU::IsaVersion IV = AMDGPU::getIsaVersion(ST.getCPU());
const MachineOperand *SDST = TII->getNamedOperand(*MI, SDSTName);
if (!SDST) {
for (const auto &MO : MI->implicit_operands()) {
if (MO.isDef() && TRI->isSGPRClass(TRI->getPhysRegClass(MO.getReg()))) {
SDST = &MO;
break;
}
}
}
if (!SDST)
return false;
const unsigned SDSTReg = SDST->getReg();
auto IsHazardFn = [SDSTReg, TRI] (MachineInstr *I) {
return SIInstrInfo::isSMRD(*I) && I->readsRegister(SDSTReg, TRI);
};
auto IsExpiredFn = [TII, IV] (MachineInstr *MI, int) {
if (MI) {
if (TII->isSALU(*MI)) {
switch (MI->getOpcode()) {
case AMDGPU::S_SETVSKIP:
case AMDGPU::S_VERSION:
case AMDGPU::S_WAITCNT_VSCNT:
case AMDGPU::S_WAITCNT_VMCNT:
case AMDGPU::S_WAITCNT_EXPCNT:
// These instructions cannot mitigate the hazard.
return false;
case AMDGPU::S_WAITCNT_LGKMCNT:
// Reducing lgkmcnt to 0 always mitigates the hazard.
return (MI->getOperand(1).getImm() == 0) &&
(MI->getOperand(0).getReg() == AMDGPU::SGPR_NULL);
case AMDGPU::S_WAITCNT: {
const int64_t Imm = MI->getOperand(0).getImm();
AMDGPU::Waitcnt Decoded = AMDGPU::decodeWaitcnt(IV, Imm);
return (Decoded.LgkmCnt == 0);
}
default:
// SOPP instructions cannot mitigate the hazard.
if (TII->isSOPP(*MI))
return false;
// At this point the SALU can be assumed to mitigate the hazard
// because either:
// (a) it is independent of the at-risk SMEM (breaking the chain),
// or
// (b) it is dependent on the SMEM, in which case an appropriate
// s_waitcnt lgkmcnt _must_ exist between it and the at-risk
// SMEM instruction.
return true;
}
}
}
return false;
};
if (::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn) ==
std::numeric_limits<int>::max())
return false;
BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
TII->get(AMDGPU::S_MOV_B32), AMDGPU::SGPR_NULL)
.addImm(0);
return true;
}
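// Worked example for the S_WAITCNT case above, assuming the helpers used in
// this function: an encoded immediate of 0 decodes with every counter at 0,
// so it always mitigates the hazard.
//
//   AMDGPU::Waitcnt W = AMDGPU::decodeWaitcnt(IV, /*Encoded=*/0);
//   bool Mitigates = (W.LgkmCnt == 0); // true
//
// The inserted fix, "s_mov_b32 null, 0", is the cheapest independent SALU:
// per case (a) above, it breaks the chain to the at-risk SMEM.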
bool GCNHazardRecognizer::fixVcmpxExecWARHazard(MachineInstr *MI) {
if (!ST.hasVcmpxExecWARHazard() || !SIInstrInfo::isVALU(*MI))
return false;
const SIRegisterInfo *TRI = ST.getRegisterInfo();
if (!MI->modifiesRegister(AMDGPU::EXEC, TRI))
return false;
auto IsHazardFn = [TRI] (MachineInstr *I) {
if (SIInstrInfo::isVALU(*I))
return false;
return I->readsRegister(AMDGPU::EXEC, TRI);
};
const SIInstrInfo *TII = ST.getInstrInfo();
auto IsExpiredFn = [TII, TRI] (MachineInstr *MI, int) {
if (!MI)
return false;
if (SIInstrInfo::isVALU(*MI)) {
if (TII->getNamedOperand(*MI, AMDGPU::OpName::sdst))
return true;
for (auto MO : MI->implicit_operands())
if (MO.isDef() && TRI->isSGPRClass(TRI->getPhysRegClass(MO.getReg())))
return true;
}
if (MI->getOpcode() == AMDGPU::S_WAITCNT_DEPCTR &&
(MI->getOperand(0).getImm() & 0xfffe) == 0xfffe)
return true;
return false;
};
if (::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn) ==
std::numeric_limits<int>::max())
return false;
BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
TII->get(AMDGPU::S_WAITCNT_DEPCTR))
.addImm(0xfffe);
return true;
}
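// The 0xfffe immediate checked for and emitted above is s_waitcnt_depctr with
// only bit 0 clear; assuming the gfx10 depctr encoding (fields active-low, so
// all-ones 0xffff waits for nothing), this waits only on outstanding SALU
// writes of SGPRs. Either that wait, or a VALU that itself writes an SGPR,
// resolves the write-after-read hazard on exec.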
bool GCNHazardRecognizer::fixLdsBranchVmemWARHazard(MachineInstr *MI) {
if (!ST.hasLdsBranchVmemWARHazard())
return false;
auto IsHazardInst = [] (const MachineInstr *MI) {
if (SIInstrInfo::isDS(*MI))
return 1;
if (SIInstrInfo::isVMEM(*MI) || SIInstrInfo::isSegmentSpecificFLAT(*MI))
return 2;
return 0;
};
auto InstType = IsHazardInst(MI);
if (!InstType)
return false;
auto IsExpiredFn = [&IsHazardInst] (MachineInstr *I, int) {
return I && (IsHazardInst(I) ||
(I->getOpcode() == AMDGPU::S_WAITCNT_VSCNT &&
I->getOperand(0).getReg() == AMDGPU::SGPR_NULL &&
!I->getOperand(1).getImm()));
};
auto IsHazardFn = [InstType, &IsHazardInst] (MachineInstr *I) {
if (!I->isBranch())
return false;
auto IsHazardFn = [InstType, IsHazardInst] (MachineInstr *I) {
auto InstType2 = IsHazardInst(I);
return InstType2 && InstType != InstType2;
};
auto IsExpiredFn = [InstType, &IsHazardInst] (MachineInstr *I, int) {
if (!I)
return false;
auto InstType2 = IsHazardInst(I);
if (InstType == InstType2)
return true;
return I->getOpcode() == AMDGPU::S_WAITCNT_VSCNT &&
I->getOperand(0).getReg() == AMDGPU::SGPR_NULL &&
!I->getOperand(1).getImm();
};
return ::getWaitStatesSince(IsHazardFn, I, IsExpiredFn) !=
std::numeric_limits<int>::max();
};
if (::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn) ==
std::numeric_limits<int>::max())
return false;
const SIInstrInfo *TII = ST.getInstrInfo();
BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
TII->get(AMDGPU::S_WAITCNT_VSCNT))
.addReg(AMDGPU::SGPR_NULL, RegState::Undef)
.addImm(0);
return true;
}
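// Here IsHazardInst classifies DS as type 1 and VMEM/segment-FLAT as type 2,
// and the nested search only reports a hazard when the two types differ on
// opposite sides of a branch. The mitigation built above assembles to
//   s_waitcnt_vscnt null, 0x0
// which is exactly the form IsExpiredFn already accepts as present.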
int GCNHazardRecognizer::checkNSAtoVMEMHazard(MachineInstr *MI) {
int NSAtoVMEMWaitStates = 1;
if (!ST.hasNSAtoVMEMBug())
return 0;
if (!SIInstrInfo::isMUBUF(*MI) && !SIInstrInfo::isMTBUF(*MI))
return 0;
const SIInstrInfo *TII = ST.getInstrInfo();
const auto *Offset = TII->getNamedOperand(*MI, AMDGPU::OpName::offset);
if (!Offset || (Offset->getImm() & 6) == 0)
return 0;
auto IsHazardFn = [TII] (MachineInstr *I) {
if (!SIInstrInfo::isMIMG(*I))
return false;
const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(I->getOpcode());
return Info->MIMGEncoding == AMDGPU::MIMGEncGfx10NSA &&
TII->getInstSizeInBytes(*I) >= 16;
};
return NSAtoVMEMWaitStates - getWaitStatesSince(IsHazardFn, 1);
}
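// Worked example of the offset guard above: (Imm & 6) is nonzero exactly when
// the offset modulo 8 is in the range 2..7, so offsets 0, 1, 8 or 9 skip the
// check while 2 through 7 (and 10..15, and so on) require one wait state
// after an NSA-encoded MIMG instruction of at least 16 bytes, i.e. one that
// actually carries extra address dwords.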
int GCNHazardRecognizer::checkFPAtomicToDenormModeHazard(MachineInstr *MI) {
int FPAtomicToDenormModeWaitStates = 3;
if (MI->getOpcode() != AMDGPU::S_DENORM_MODE)
return 0;
auto IsHazardFn = [] (MachineInstr *I) {
if (!SIInstrInfo::isVMEM(*I) && !SIInstrInfo::isFLAT(*I))
return false;
return SIInstrInfo::isFPAtomic(*I);
};
auto IsExpiredFn = [] (MachineInstr *MI, int WaitStates) {
if (WaitStates >= 3 || SIInstrInfo::isVALU(*MI))
return true;
switch (MI->getOpcode()) {
case AMDGPU::S_WAITCNT:
case AMDGPU::S_WAITCNT_VSCNT:
case AMDGPU::S_WAITCNT_VMCNT:
case AMDGPU::S_WAITCNT_EXPCNT:
case AMDGPU::S_WAITCNT_LGKMCNT:
case AMDGPU::S_WAITCNT_IDLE:
return true;
default:
break;
}
return false;
};
return FPAtomicToDenormModeWaitStates -
::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn);
}
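// In short: s_denorm_mode needs up to 3 wait states after an FP atomic
// VMEM/FLAT instruction, and any VALU or s_waitcnt-family instruction closes
// the window early. For example, with one unrelated instruction in between,
// 3 - 1 = 2 wait states are still reported.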
int GCNHazardRecognizer::checkMAIHazards(MachineInstr *MI) {
assert(SIInstrInfo::isMAI(*MI));
int WaitStatesNeeded = 0;
unsigned Opc = MI->getOpcode();
auto IsVALUFn = [] (MachineInstr *MI) {
return SIInstrInfo::isVALU(*MI);
};
if (Opc != AMDGPU::V_ACCVGPR_READ_B32) { // MFMA or v_accvgpr_write
const int LegacyVALUWritesVGPRWaitStates = 2;
const int VALUWritesExecWaitStates = 4;
const int MaxWaitStates = 4;
int WaitStatesNeededForUse = VALUWritesExecWaitStates -
getWaitStatesSinceDef(AMDGPU::EXEC, IsVALUFn, MaxWaitStates);
WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
if (WaitStatesNeeded < MaxWaitStates) {
for (const MachineOperand &Use : MI->explicit_uses()) {
const int MaxWaitStates = 2;
if (!Use.isReg() || !TRI.isVGPR(MF.getRegInfo(), Use.getReg()))
continue;
int WaitStatesNeededForUse = LegacyVALUWritesVGPRWaitStates -
getWaitStatesSinceDef(Use.getReg(), IsVALUFn, MaxWaitStates);
WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
if (WaitStatesNeeded == MaxWaitStates)
break;
}
}
}
auto IsMFMAFn = [] (MachineInstr *MI) {
return SIInstrInfo::isMAI(*MI) &&
MI->getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32 &&
MI->getOpcode() != AMDGPU::V_ACCVGPR_READ_B32;
};
for (const MachineOperand &Op : MI->explicit_operands()) {
if (!Op.isReg() || !TRI.isAGPR(MF.getRegInfo(), Op.getReg()))
continue;
if (Op.isDef() && Opc != AMDGPU::V_ACCVGPR_WRITE_B32)
continue;
const int MFMAWritesAGPROverlappedSrcABWaitStates = 4;
const int MFMAWritesAGPROverlappedSrcCWaitStates = 2;
const int MFMA4x4WritesAGPRAccVgprReadWaitStates = 4;
const int MFMA16x16WritesAGPRAccVgprReadWaitStates = 10;
const int MFMA32x32WritesAGPRAccVgprReadWaitStates = 18;
const int MFMA4x4WritesAGPRAccVgprWriteWaitStates = 1;
const int MFMA16x16WritesAGPRAccVgprWriteWaitStates = 7;
const int MFMA32x32WritesAGPRAccVgprWriteWaitStates = 15;
const int MaxWaitStates = 18;
unsigned Reg = Op.getReg();
unsigned HazardDefLatency = 0;
auto IsOverlappedMFMAFn = [Reg, &IsMFMAFn, &HazardDefLatency, this]
(MachineInstr *MI) {
if (!IsMFMAFn(MI))
return false;
unsigned DstReg = MI->getOperand(0).getReg();
if (DstReg == Reg)
return false;
HazardDefLatency = std::max(HazardDefLatency,
TSchedModel.computeInstrLatency(MI));
return TRI.regsOverlap(DstReg, Reg);
};
int WaitStatesSinceDef = getWaitStatesSinceDef(Reg, IsOverlappedMFMAFn,
MaxWaitStates);
int NeedWaitStates = MFMAWritesAGPROverlappedSrcABWaitStates;
int SrcCIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
int OpNo = MI->getOperandNo(&Op);
if (OpNo == SrcCIdx) {
NeedWaitStates = MFMAWritesAGPROverlappedSrcCWaitStates;
} else if (Opc == AMDGPU::V_ACCVGPR_READ_B32) {
switch (HazardDefLatency) {
case 2: NeedWaitStates = MFMA4x4WritesAGPRAccVgprReadWaitStates;
break;
case 8: NeedWaitStates = MFMA16x16WritesAGPRAccVgprReadWaitStates;
break;
case 16: LLVM_FALLTHROUGH;
default: NeedWaitStates = MFMA32x32WritesAGPRAccVgprReadWaitStates;
break;
}
} else if (Opc == AMDGPU::V_ACCVGPR_WRITE_B32) {
switch (HazardDefLatency) {
case 2: NeedWaitStates = MFMA4x4WritesAGPRAccVgprWriteWaitStates;
break;
case 8: NeedWaitStates = MFMA16x16WritesAGPRAccVgprWriteWaitStates;
break;
case 16: LLVM_FALLTHROUGH;
default: NeedWaitStates = MFMA32x32WritesAGPRAccVgprWriteWaitStates;
break;
}
}
int WaitStatesNeededForUse = NeedWaitStates - WaitStatesSinceDef;
WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
if (WaitStatesNeeded == MaxWaitStates)
return WaitStatesNeeded; // Early exit.
auto IsAccVgprWriteFn = [Reg, this] (MachineInstr *MI) {
if (MI->getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32)
return false;
unsigned DstReg = MI->getOperand(0).getReg();
return TRI.regsOverlap(Reg, DstReg);
};
const int AccVGPRWriteMFMAReadSrcCWaitStates = 1;
const int AccVGPRWriteMFMAReadSrcABWaitStates = 3;
const int AccVGPRWriteAccVgprReadWaitStates = 3;
NeedWaitStates = AccVGPRWriteMFMAReadSrcABWaitStates;
if (OpNo == SrcCIdx)
NeedWaitStates = AccVGPRWriteMFMAReadSrcCWaitStates;
else if (Opc == AMDGPU::V_ACCVGPR_READ_B32)
NeedWaitStates = AccVGPRWriteAccVgprReadWaitStates;
WaitStatesNeededForUse = NeedWaitStates -
getWaitStatesSinceDef(Reg, IsAccVgprWriteFn, MaxWaitStates);
WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
if (WaitStatesNeeded == MaxWaitStates)
return WaitStatesNeeded; // Early exit.
}
if (Opc == AMDGPU::V_ACCVGPR_WRITE_B32) {
const int MFMA4x4ReadSrcCAccVgprWriteWaitStates = 0;
const int MFMA16x16ReadSrcCAccVgprWriteWaitStates = 5;
const int MFMA32x32ReadSrcCAccVgprWriteWaitStates = 13;
const int MaxWaitStates = 13;
unsigned DstReg = MI->getOperand(0).getReg();
unsigned HazardDefLatency = 0;
auto IsSrcCMFMAFn = [DstReg, &IsMFMAFn, &HazardDefLatency, this]
(MachineInstr *MI) {
if (!IsMFMAFn(MI))
return false;
unsigned Reg = TII.getNamedOperand(*MI, AMDGPU::OpName::src2)->getReg();
HazardDefLatency = std::max(HazardDefLatency,
TSchedModel.computeInstrLatency(MI));
return TRI.regsOverlap(Reg, DstReg);
};
int WaitStatesSince = getWaitStatesSince(IsSrcCMFMAFn, MaxWaitStates);
int NeedWaitStates;
switch (HazardDefLatency) {
case 2: NeedWaitStates = MFMA4x4ReadSrcCAccVgprWriteWaitStates;
break;
case 8: NeedWaitStates = MFMA16x16ReadSrcCAccVgprWriteWaitStates;
break;
case 16: LLVM_FALLTHROUGH;
default: NeedWaitStates = MFMA32x32ReadSrcCAccVgprWriteWaitStates;
break;
}
int WaitStatesNeededForUse = NeedWaitStates - WaitStatesSince;
WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
}
return WaitStatesNeeded;
}
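// The HazardDefLatency switches above key off the scheduler latency of the
// defining MFMA: latency 2 corresponds to the 4x4 variants, 8 to 16x16 and
// 16 to 32x32, with unknown latencies conservatively treated as 32x32. For
// example, a v_accvgpr_read issued immediately after a 16x16 MFMA that wrote
// an overlapping AGPR needs up to 10 wait states.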
int GCNHazardRecognizer::checkMAILdStHazards(MachineInstr *MI) {
if (!ST.hasMAIInsts())
return 0;
int WaitStatesNeeded = 0;
auto IsAccVgprReadFn = [] (MachineInstr *MI) {
return MI->getOpcode() == AMDGPU::V_ACCVGPR_READ_B32;
};
for (const MachineOperand &Op : MI->explicit_uses()) {
if (!Op.isReg() || !TRI.isVGPR(MF.getRegInfo(), Op.getReg()))
continue;
unsigned Reg = Op.getReg();
const int AccVgprReadLdStWaitStates = 2;
const int VALUWriteAccVgprReadLdStDepVALUWaitStates = 1;
const int MaxWaitStates = 2;
int WaitStatesNeededForUse = AccVgprReadLdStWaitStates -
getWaitStatesSinceDef(Reg, IsAccVgprReadFn, MaxWaitStates);
WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
if (WaitStatesNeeded == MaxWaitStates)
return WaitStatesNeeded; // Early exit.
auto IsVALUAccVgprReadCheckFn = [Reg, this] (MachineInstr *MI) {
if (MI->getOpcode() != AMDGPU::V_ACCVGPR_READ_B32)
return false;
auto IsVALUFn = [] (MachineInstr *MI) {
return SIInstrInfo::isVALU(*MI) && !SIInstrInfo::isMAI(*MI);
};
return getWaitStatesSinceDef(Reg, IsVALUFn, 2 /*MaxWaitStates*/) <
std::numeric_limits<int>::max();
};
WaitStatesNeededForUse = VALUWriteAccVgprReadLdStDepVALUWaitStates -
getWaitStatesSince(IsVALUAccVgprReadCheckFn, MaxWaitStates);
WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
}
return WaitStatesNeeded;
}
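// The individual checkers combine via std::max, so the recognizer reports the
// single largest requirement rather than a sum; a hedged caller-side sketch:
//
//   int WaitStates = 0;
//   WaitStates = std::max(WaitStates, checkMAIHazards(MI));     // MAI only
//   WaitStates = std::max(WaitStates, checkMAILdStHazards(MI)); // loads/stores
//   // ... then pad with that many wait states before MI.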
Index: stable/12/contrib/llvm-project/llvm/lib/Target/ARM/ARM.td
===================================================================
--- stable/12/contrib/llvm-project/llvm/lib/Target/ARM/ARM.td (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/lib/Target/ARM/ARM.td (revision 356465)
@@ -1,1248 +1,1254 @@
//===-- ARM.td - Describe the ARM Target Machine -----------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// Target-independent interfaces which we are implementing
//===----------------------------------------------------------------------===//
include "llvm/Target/Target.td"
//===----------------------------------------------------------------------===//
// ARM Subtarget state.
//
def ModeThumb : SubtargetFeature<"thumb-mode", "InThumbMode",
"true", "Thumb mode">;
def ModeSoftFloat : SubtargetFeature<"soft-float","UseSoftFloat",
"true", "Use software floating "
"point features.">;
//===----------------------------------------------------------------------===//
// ARM Subtarget features.
//
// Floating Point, HW Division and Neon Support
// FP loads/stores/moves, shared between VFP and MVE (even in the integer-only
// version).
def FeatureFPRegs : SubtargetFeature<"fpregs", "HasFPRegs", "true",
"Enable FP registers">;
// 16-bit FP loads/stores/moves, shared between VFP (with the v8.2A FP16
// extension) and MVE (even in the integer-only version).
def FeatureFPRegs16 : SubtargetFeature<"fpregs16", "HasFPRegs16", "true",
"Enable 16-bit FP registers",
[FeatureFPRegs]>;
def FeatureFPRegs64 : SubtargetFeature<"fpregs64", "HasFPRegs64", "true",
"Enable 64-bit FP registers",
[FeatureFPRegs]>;
def FeatureFP64 : SubtargetFeature<"fp64", "HasFP64", "true",
"Floating point unit supports "
"double precision",
[FeatureFPRegs64]>;
def FeatureD32 : SubtargetFeature<"d32", "HasD32", "true",
"Extend FP to 32 double registers">;
multiclass VFPver<string name, string query, string description,
list<SubtargetFeature> prev,
list<SubtargetFeature> otherimplies,
list<SubtargetFeature> vfp2prev = []> {
def _D16_SP: SubtargetFeature<
name#"d16sp", query#"D16SP", "true",
description#" with only 16 d-registers and no double precision",
!foreach(v, prev, !cast<SubtargetFeature>(v # "_D16_SP")) #
!foreach(v, vfp2prev, !cast<SubtargetFeature>(v # "_SP")) #
otherimplies>;
def _SP: SubtargetFeature<
name#"sp", query#"SP", "true",
description#" with no double precision",
!foreach(v, prev, !cast<SubtargetFeature>(v # "_SP")) #
otherimplies # [FeatureD32, !cast<SubtargetFeature>(NAME # "_D16_SP")]>;
def _D16: SubtargetFeature<
name#"d16", query#"D16", "true",
description#" with only 16 d-registers",
!foreach(v, prev, !cast<SubtargetFeature>(v # "_D16")) #
vfp2prev #
otherimplies # [FeatureFP64, !cast<SubtargetFeature>(NAME # "_D16_SP")]>;
def "": SubtargetFeature<
name, query, "true", description,
prev # otherimplies # [
!cast<SubtargetFeature>(NAME # "_D16"),
!cast<SubtargetFeature>(NAME # "_SP")]>;
}
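// For illustration, a hypothetical invocation
//   defm FeatureVFPFoo : VFPver<"vfpfoo", "HasVFPFoo", "Enable VFPFoo", [], []>;
// expands to four features (FeatureVFPFoo, FeatureVFPFoo_D16, FeatureVFPFoo_SP
// and FeatureVFPFoo_D16_SP), one per combination of d-register count and
// single/double precision, with implications wired up as in the VFP2 defs below.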
-def FeatureVFP2_SP : SubtargetFeature<"vfp2sp", "HasVFPv2SP", "true",
+def FeatureVFP2_D16_SP : SubtargetFeature<"vfp2d16sp", "HasVFPv2D16SP", "true",
"Enable VFP2 instructions with "
"no double precision",
[FeatureFPRegs]>;
-
+def FeatureVFP2_SP : SubtargetFeature<"vfp2sp", "HasVFPv2SP", "true",
+ "Enable VFP2 instructions with "
+ "no double precision",
+ [FeatureVFP2_D16_SP]>;
+def FeatureVFP2_D16 : SubtargetFeature<"vfp2d16", "HasVFPv2D16", "true",
+ "Enable VFP2 instructions",
+ [FeatureFP64, FeatureVFP2_D16_SP]>;
def FeatureVFP2 : SubtargetFeature<"vfp2", "HasVFPv2", "true",
"Enable VFP2 instructions",
- [FeatureFP64, FeatureVFP2_SP]>;
+ [FeatureVFP2_D16, FeatureVFP2_SP]>;
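// Net effect of the hunk above: VFP2 now forms the same four-way lattice as
// the VFPver-generated versions, with vfp2 implying vfp2d16 and vfp2sp, both
// of which imply vfp2d16sp (which in turn implies fpregs). For instance,
// -mattr=+vfp2sp pulls in vfp2d16sp but not FeatureFP64, since double
// precision now comes only via vfp2d16.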
defm FeatureVFP3: VFPver<"vfp3", "HasVFPv3", "Enable VFP3 instructions",
[], [], [FeatureVFP2]>;
def FeatureNEON : SubtargetFeature<"neon", "HasNEON", "true",
"Enable NEON instructions",
[FeatureVFP3]>;
def FeatureFP16 : SubtargetFeature<"fp16", "HasFP16", "true",
"Enable half-precision "
"floating point">;
defm FeatureVFP4: VFPver<"vfp4", "HasVFPv4", "Enable VFP4 instructions",
[FeatureVFP3], [FeatureFP16]>;
defm FeatureFPARMv8: VFPver<"fp-armv8", "HasFPARMv8", "Enable ARMv8 FP",
[FeatureVFP4], []>;
def FeatureFullFP16 : SubtargetFeature<"fullfp16", "HasFullFP16", "true",
"Enable full half-precision "
"floating point",
[FeatureFPARMv8_D16_SP, FeatureFPRegs16]>;
def FeatureFP16FML : SubtargetFeature<"fp16fml", "HasFP16FML", "true",
"Enable full half-precision "
"floating point fml instructions",
[FeatureFullFP16]>;
def FeatureHWDivThumb : SubtargetFeature<"hwdiv",
"HasHardwareDivideInThumb", "true",
"Enable divide instructions in Thumb">;
def FeatureHWDivARM : SubtargetFeature<"hwdiv-arm",
"HasHardwareDivideInARM", "true",
"Enable divide instructions in ARM mode">;
// Atomic Support
def FeatureDB : SubtargetFeature<"db", "HasDataBarrier", "true",
"Has data barrier (dmb/dsb) instructions">;
def FeatureV7Clrex : SubtargetFeature<"v7clrex", "HasV7Clrex", "true",
"Has v7 clrex instruction">;
def FeatureDFB : SubtargetFeature<"dfb", "HasFullDataBarrier", "true",
"Has full data barrier (dfb) instruction">;
def FeatureAcquireRelease : SubtargetFeature<"acquire-release",
"HasAcquireRelease", "true",
"Has v8 acquire/release (lda/ldaex "
" etc) instructions">;
def FeatureSlowFPBrcc : SubtargetFeature<"slow-fp-brcc", "SlowFPBrcc", "true",
"FP compare + branch is slow">;
def FeaturePerfMon : SubtargetFeature<"perfmon", "HasPerfMon", "true",
"Enable support for Performance "
"Monitor extensions">;
// TrustZone Security Extensions
def FeatureTrustZone : SubtargetFeature<"trustzone", "HasTrustZone", "true",
"Enable support for TrustZone "
"security extensions">;
def Feature8MSecExt : SubtargetFeature<"8msecext", "Has8MSecExt", "true",
"Enable support for ARMv8-M "
"Security Extensions">;
def FeatureSHA2 : SubtargetFeature<"sha2", "HasSHA2", "true",
"Enable SHA1 and SHA256 support", [FeatureNEON]>;
def FeatureAES : SubtargetFeature<"aes", "HasAES", "true",
"Enable AES support", [FeatureNEON]>;
def FeatureCrypto : SubtargetFeature<"crypto", "HasCrypto", "true",
"Enable support for "
"Cryptography extensions",
[FeatureNEON, FeatureSHA2, FeatureAES]>;
def FeatureCRC : SubtargetFeature<"crc", "HasCRC", "true",
"Enable support for CRC instructions">;
def FeatureDotProd : SubtargetFeature<"dotprod", "HasDotProd", "true",
"Enable support for dot product instructions",
[FeatureNEON]>;
// Not to be confused with FeatureHasRetAddrStack (return address stack)
def FeatureRAS : SubtargetFeature<"ras", "HasRAS", "true",
"Enable Reliability, Availability "
"and Serviceability extensions">;
// Fast computation of non-negative address offsets
def FeatureFPAO : SubtargetFeature<"fpao", "HasFPAO", "true",
"Enable fast computation of "
"positive address offsets">;
// Fast execution of AES crypto operations
def FeatureFuseAES : SubtargetFeature<"fuse-aes", "HasFuseAES", "true",
"CPU fuses AES crypto operations">;
// Fast execution of bottom and top halves of literal generation
def FeatureFuseLiterals : SubtargetFeature<"fuse-literals", "HasFuseLiterals", "true",
"CPU fuses literal generation operations">;
// How the thread pointer is read
def FeatureReadTp : SubtargetFeature<"read-tp-hard", "ReadTPHard", "true",
"Reading thread pointer from register">;
// Cyclone can zero VFP registers in 0 cycles.
def FeatureZCZeroing : SubtargetFeature<"zcz", "HasZeroCycleZeroing", "true",
"Has zero-cycle zeroing instructions">;
// Whether it is profitable to unpredicate certain instructions during if-conversion
def FeatureProfUnpredicate : SubtargetFeature<"prof-unpr",
"IsProfitableToUnpredicate", "true",
"Is profitable to unpredicate">;
// Some targets (e.g. Swift) have microcoded VGETLNi32.
def FeatureSlowVGETLNi32 : SubtargetFeature<"slow-vgetlni32",
"HasSlowVGETLNi32", "true",
"Has slow VGETLNi32 - prefer VMOV">;
// Some targets (e.g. Swift) have microcoded VDUP32.
def FeatureSlowVDUP32 : SubtargetFeature<"slow-vdup32", "HasSlowVDUP32",
"true",
"Has slow VDUP32 - prefer VMOV">;
// Some targets (e.g. Cortex-A9) prefer VMOVSR to VMOVDRR even when using NEON
// for scalar FP, as this allows more effective execution domain optimization.
def FeaturePreferVMOVSR : SubtargetFeature<"prefer-vmovsr", "PreferVMOVSR",
"true", "Prefer VMOVSR">;
// Swift has ISHST barriers compatible with Atomic Release semantics but weaker
// than ISH
def FeaturePrefISHSTBarrier : SubtargetFeature<"prefer-ishst", "PreferISHST",
"true", "Prefer ISHST barriers">;
// Some targets (e.g. Cortex-A9) have muxed AGU and NEON/FPU.
def FeatureMuxedUnits : SubtargetFeature<"muxed-units", "HasMuxedUnits",
"true",
"Has muxed AGU and NEON/FPU">;
// Whether VLDM/VSTM starting with an odd register number needs more microops
// than a single VLDRS
def FeatureSlowOddRegister : SubtargetFeature<"slow-odd-reg", "SlowOddRegister",
"true", "VLDM/VSTM starting "
"with an odd register is slow">;
// Some targets have a renaming dependency when loading into D subregisters.
def FeatureSlowLoadDSubreg : SubtargetFeature<"slow-load-D-subreg",
"SlowLoadDSubregister", "true",
"Loading into D subregs is slow">;
def FeatureUseWideStrideVFP : SubtargetFeature<"wide-stride-vfp",
"UseWideStrideVFP", "true",
"Use a wide stride when allocating VFP registers">;
// Some targets (e.g. Cortex-A15) never want VMOVS to be widened to VMOVD.
def FeatureDontWidenVMOVS : SubtargetFeature<"dont-widen-vmovs",
"DontWidenVMOVS", "true",
"Don't widen VMOVS to VMOVD">;
// Some targets (e.g. Cortex-A15) prefer to avoid mixing operations on different
// VFP register widths.
def FeatureSplatVFPToNeon : SubtargetFeature<"splat-vfp-neon",
"SplatVFPToNeon", "true",
"Splat register from VFP to NEON",
[FeatureDontWidenVMOVS]>;
// Whether or not it is profitable to expand VFP/NEON MLA/MLS instructions.
def FeatureExpandMLx : SubtargetFeature<"expand-fp-mlx",
"ExpandMLx", "true",
"Expand VFP/NEON MLA/MLS instructions">;
// Some targets have special RAW hazards for VFP/NEON VMLA/VMLS.
def FeatureHasVMLxHazards : SubtargetFeature<"vmlx-hazards", "HasVMLxHazards",
"true", "Has VMLx hazards">;
// Some targets (e.g. Cortex-A9) want to convert VMOVRS, VMOVSR and VMOVS from
// VFP to NEON, as an execution domain optimization.
def FeatureNEONForFPMovs : SubtargetFeature<"neon-fpmovs",
"UseNEONForFPMovs", "true",
"Convert VMOVSR, VMOVRS, "
"VMOVS to NEON">;
// Some processors benefit from using NEON instructions for scalar
// single-precision FP operations. This affects instruction selection and should
// only be enabled if the handling of denormals is not important.
def FeatureNEONForFP : SubtargetFeature<"neonfp",
"UseNEONForSinglePrecisionFP",
"true",
"Use NEON for single precision FP">;
// On some processors, VLDn instructions that access unaligned data take one
// extra cycle. Take that into account when computing operand latencies.
def FeatureCheckVLDnAlign : SubtargetFeature<"vldn-align", "CheckVLDnAlign",
"true",
"Check for VLDn unaligned access">;
// Some processors have a nonpipelined VFP coprocessor.
def FeatureNonpipelinedVFP : SubtargetFeature<"nonpipelined-vfp",
"NonpipelinedVFP", "true",
"VFP instructions are not pipelined">;
// Some processors have FP multiply-accumulate instructions that don't
// play nicely with other VFP / NEON instructions, and it's generally better
// to just not use them.
def FeatureHasSlowFPVMLx : SubtargetFeature<"slowfpvmlx", "SlowFPVMLx", "true",
"Disable VFP / NEON MAC instructions">;
// Cortex-A8 / A9 Advanced SIMD has multiplier accumulator forwarding.
def FeatureVMLxForwarding : SubtargetFeature<"vmlx-forwarding",
"HasVMLxForwarding", "true",
"Has multiplier accumulator forwarding">;
// Disable 32-bit to 16-bit narrowing for experimentation.
def FeaturePref32BitThumb : SubtargetFeature<"32bit", "Pref32BitThumb", "true",
"Prefer 32-bit Thumb instrs">;
def FeaturePrefLoopAlign32 : SubtargetFeature<"loop-align", "PrefLoopAlignment","2",
"Prefer 32-bit alignment for loops">;
/// Some instructions update CPSR partially, which can add a false dependency on
/// out-of-order implementations, e.g. Cortex-A9, unless each individual bit is
/// mapped to a separate physical register. Avoid partial CPSR updates for these
/// processors.
def FeatureAvoidPartialCPSR : SubtargetFeature<"avoid-partial-cpsr",
"AvoidCPSRPartialUpdate", "true",
"Avoid CPSR partial update for OOO execution">;
/// Disable +1 predication cost for instructions updating CPSR.
/// Enabled for Cortex-A57.
def FeatureCheapPredicableCPSR : SubtargetFeature<"cheap-predicable-cpsr",
"CheapPredicableCPSRDef",
"true",
"Disable +1 predication cost for instructions updating CPSR">;
def FeatureAvoidMOVsShOp : SubtargetFeature<"avoid-movs-shop",
"AvoidMOVsShifterOperand", "true",
"Avoid movs instructions with "
"shifter operand">;
// Some processors perform return stack prediction. CodeGen should avoid issuing
// "normal" call instructions to callees which do not return.
def FeatureHasRetAddrStack : SubtargetFeature<"ret-addr-stack",
"HasRetAddrStack", "true",
"Has return address stack">;
// Some processors have no branch predictor, which changes the expected cost of
// taking a branch; this in turn affects the choice of whether to use predicated
// instructions.
def FeatureHasNoBranchPredictor : SubtargetFeature<"no-branch-predictor",
"HasBranchPredictor", "false",
"Has no branch predictor">;
/// DSP extension.
def FeatureDSP : SubtargetFeature<"dsp", "HasDSP", "true",
"Supports DSP instructions in "
"ARM and/or Thumb2">;
// Multiprocessing extension.
def FeatureMP : SubtargetFeature<"mp", "HasMPExtension", "true",
"Supports Multiprocessing extension">;
// Virtualization extension - requires HW divide (ARMv7-AR ARMARM - 4.4.8).
def FeatureVirtualization : SubtargetFeature<"virtualization",
"HasVirtualization", "true",
"Supports Virtualization extension",
[FeatureHWDivThumb, FeatureHWDivARM]>;
// Special TRAP encoding for NaCl, which looks like a TRAP in Thumb too.
// See ARMInstrInfo.td for details.
def FeatureNaClTrap : SubtargetFeature<"nacl-trap", "UseNaClTrap", "true",
"NaCl trap">;
def FeatureStrictAlign : SubtargetFeature<"strict-align",
"StrictAlign", "true",
"Disallow all unaligned memory "
"access">;
def FeatureLongCalls : SubtargetFeature<"long-calls", "GenLongCalls", "true",
"Generate calls via indirect call "
"instructions">;
def FeatureExecuteOnly : SubtargetFeature<"execute-only",
"GenExecuteOnly", "true",
"Enable the generation of "
"execute only code.">;
def FeatureReserveR9 : SubtargetFeature<"reserve-r9", "ReserveR9", "true",
"Reserve R9, making it unavailable"
" as GPR">;
def FeatureNoMovt : SubtargetFeature<"no-movt", "NoMovt", "true",
"Don't use movt/movw pairs for "
"32-bit imms">;
def FeatureNoNegativeImmediates
: SubtargetFeature<"no-neg-immediates",
"NegativeImmediates", "false",
"Convert immediates and instructions "
"to their negated or complemented "
"equivalent when the immediate does "
"not fit in the encoding.">;
// Use the MachineScheduler for instruction scheduling for the subtarget.
def FeatureUseMISched: SubtargetFeature<"use-misched", "UseMISched", "true",
"Use the MachineScheduler">;
def FeatureNoPostRASched : SubtargetFeature<"disable-postra-scheduler",
"DisablePostRAScheduler", "true",
"Don't schedule again after register allocation">;
// Enable use of alias analysis during code generation
def FeatureUseAA : SubtargetFeature<"use-aa", "UseAA", "true",
"Use alias analysis during codegen">;
// Armv8.5-A extensions
def FeatureSB : SubtargetFeature<"sb", "HasSB", "true",
"Enable v8.5a Speculation Barrier" >;
// Armv8.1-M extensions
def FeatureLOB : SubtargetFeature<"lob", "HasLOB", "true",
"Enable Low Overhead Branch "
"extensions">;
//===----------------------------------------------------------------------===//
// ARM architecture class
//
// A-series ISA
def FeatureAClass : SubtargetFeature<"aclass", "ARMProcClass", "AClass",
"Is application profile ('A' series)">;
// R-series ISA
def FeatureRClass : SubtargetFeature<"rclass", "ARMProcClass", "RClass",
"Is realtime profile ('R' series)">;
// M-series ISA
def FeatureMClass : SubtargetFeature<"mclass", "ARMProcClass", "MClass",
"Is microcontroller profile ('M' series)">;
def FeatureThumb2 : SubtargetFeature<"thumb2", "HasThumb2", "true",
"Enable Thumb2 instructions">;
def FeatureNoARM : SubtargetFeature<"noarm", "NoARM", "true",
"Does not support ARM mode execution">;
//===----------------------------------------------------------------------===//
// ARM ISAs.
//
def HasV4TOps : SubtargetFeature<"v4t", "HasV4TOps", "true",
"Support ARM v4T instructions">;
def HasV5TOps : SubtargetFeature<"v5t", "HasV5TOps", "true",
"Support ARM v5T instructions",
[HasV4TOps]>;
def HasV5TEOps : SubtargetFeature<"v5te", "HasV5TEOps", "true",
"Support ARM v5TE, v5TEj, and "
"v5TExp instructions",
[HasV5TOps]>;
def HasV6Ops : SubtargetFeature<"v6", "HasV6Ops", "true",
"Support ARM v6 instructions",
[HasV5TEOps]>;
def HasV6MOps : SubtargetFeature<"v6m", "HasV6MOps", "true",
"Support ARM v6M instructions",
[HasV6Ops]>;
def HasV8MBaselineOps : SubtargetFeature<"v8m", "HasV8MBaselineOps", "true",
"Support ARM v8M Baseline instructions",
[HasV6MOps]>;
def HasV6KOps : SubtargetFeature<"v6k", "HasV6KOps", "true",
"Support ARM v6k instructions",
[HasV6Ops]>;
def HasV6T2Ops : SubtargetFeature<"v6t2", "HasV6T2Ops", "true",
"Support ARM v6t2 instructions",
[HasV8MBaselineOps, HasV6KOps, FeatureThumb2]>;
def HasV7Ops : SubtargetFeature<"v7", "HasV7Ops", "true",
"Support ARM v7 instructions",
[HasV6T2Ops, FeaturePerfMon,
FeatureV7Clrex]>;
def HasV8MMainlineOps :
SubtargetFeature<"v8m.main", "HasV8MMainlineOps", "true",
"Support ARM v8M Mainline instructions",
[HasV7Ops]>;
def HasV8Ops : SubtargetFeature<"v8", "HasV8Ops", "true",
"Support ARM v8 instructions",
[HasV7Ops, FeatureAcquireRelease]>;
def HasV8_1aOps : SubtargetFeature<"v8.1a", "HasV8_1aOps", "true",
"Support ARM v8.1a instructions",
[HasV8Ops]>;
def HasV8_2aOps : SubtargetFeature<"v8.2a", "HasV8_2aOps", "true",
"Support ARM v8.2a instructions",
[HasV8_1aOps]>;
def HasV8_3aOps : SubtargetFeature<"v8.3a", "HasV8_3aOps", "true",
"Support ARM v8.3a instructions",
[HasV8_2aOps]>;
def HasV8_4aOps : SubtargetFeature<"v8.4a", "HasV8_4aOps", "true",
"Support ARM v8.4a instructions",
[HasV8_3aOps, FeatureDotProd]>;
def HasV8_5aOps : SubtargetFeature<"v8.5a", "HasV8_5aOps", "true",
"Support ARM v8.5a instructions",
[HasV8_4aOps, FeatureSB]>;
def HasV8_1MMainlineOps : SubtargetFeature<
"v8.1m.main", "HasV8_1MMainlineOps", "true",
"Support ARM v8-1M Mainline instructions",
[HasV8MMainlineOps]>;
def HasMVEIntegerOps : SubtargetFeature<
"mve", "HasMVEIntegerOps", "true",
"Support M-Class Vector Extension with integer ops",
[HasV8_1MMainlineOps, FeatureDSP, FeatureFPRegs16, FeatureFPRegs64]>;
def HasMVEFloatOps : SubtargetFeature<
"mve.fp", "HasMVEFloatOps", "true",
"Support M-Class Vector Extension with integer and floating ops",
[HasMVEIntegerOps, FeatureFPARMv8_D16_SP, FeatureFullFP16]>;
//===----------------------------------------------------------------------===//
// ARM Processor subtarget features.
//
def ProcA5 : SubtargetFeature<"a5", "ARMProcFamily", "CortexA5",
"Cortex-A5 ARM processors", []>;
def ProcA7 : SubtargetFeature<"a7", "ARMProcFamily", "CortexA7",
"Cortex-A7 ARM processors", []>;
def ProcA8 : SubtargetFeature<"a8", "ARMProcFamily", "CortexA8",
"Cortex-A8 ARM processors", []>;
def ProcA9 : SubtargetFeature<"a9", "ARMProcFamily", "CortexA9",
"Cortex-A9 ARM processors", []>;
def ProcA12 : SubtargetFeature<"a12", "ARMProcFamily", "CortexA12",
"Cortex-A12 ARM processors", []>;
def ProcA15 : SubtargetFeature<"a15", "ARMProcFamily", "CortexA15",
"Cortex-A15 ARM processors", []>;
def ProcA17 : SubtargetFeature<"a17", "ARMProcFamily", "CortexA17",
"Cortex-A17 ARM processors", []>;
def ProcA32 : SubtargetFeature<"a32", "ARMProcFamily", "CortexA32",
"Cortex-A32 ARM processors", []>;
def ProcA35 : SubtargetFeature<"a35", "ARMProcFamily", "CortexA35",
"Cortex-A35 ARM processors", []>;
def ProcA53 : SubtargetFeature<"a53", "ARMProcFamily", "CortexA53",
"Cortex-A53 ARM processors", []>;
def ProcA55 : SubtargetFeature<"a55", "ARMProcFamily", "CortexA55",
"Cortex-A55 ARM processors", []>;
def ProcA57 : SubtargetFeature<"a57", "ARMProcFamily", "CortexA57",
"Cortex-A57 ARM processors", []>;
def ProcA72 : SubtargetFeature<"a72", "ARMProcFamily", "CortexA72",
"Cortex-A72 ARM processors", []>;
def ProcA73 : SubtargetFeature<"a73", "ARMProcFamily", "CortexA73",
"Cortex-A73 ARM processors", []>;
def ProcA75 : SubtargetFeature<"a75", "ARMProcFamily", "CortexA75",
"Cortex-A75 ARM processors", []>;
def ProcA76 : SubtargetFeature<"a76", "ARMProcFamily", "CortexA76",
"Cortex-A76 ARM processors", []>;
def ProcKrait : SubtargetFeature<"krait", "ARMProcFamily", "Krait",
"Qualcomm Krait processors", []>;
def ProcKryo : SubtargetFeature<"kryo", "ARMProcFamily", "Kryo",
"Qualcomm Kryo processors", []>;
def ProcSwift : SubtargetFeature<"swift", "ARMProcFamily", "Swift",
"Swift ARM processors", []>;
def ProcExynos : SubtargetFeature<"exynos", "ARMProcFamily", "Exynos",
"Samsung Exynos processors",
[FeatureZCZeroing,
FeatureUseWideStrideVFP,
FeatureUseAA,
FeatureSplatVFPToNeon,
FeatureSlowVGETLNi32,
FeatureSlowVDUP32,
FeatureSlowFPBrcc,
FeatureProfUnpredicate,
FeatureHWDivThumb,
FeatureHWDivARM,
FeatureHasSlowFPVMLx,
FeatureHasRetAddrStack,
FeatureFuseLiterals,
FeatureFuseAES,
FeatureExpandMLx,
FeatureCrypto,
FeatureCRC]>;
def ProcR4 : SubtargetFeature<"r4", "ARMProcFamily", "CortexR4",
"Cortex-R4 ARM processors", []>;
def ProcR5 : SubtargetFeature<"r5", "ARMProcFamily", "CortexR5",
"Cortex-R5 ARM processors", []>;
def ProcR7 : SubtargetFeature<"r7", "ARMProcFamily", "CortexR7",
"Cortex-R7 ARM processors", []>;
def ProcR52 : SubtargetFeature<"r52", "ARMProcFamily", "CortexR52",
"Cortex-R52 ARM processors", []>;
def ProcM3 : SubtargetFeature<"m3", "ARMProcFamily", "CortexM3",
"Cortex-M3 ARM processors", []>;
//===----------------------------------------------------------------------===//
// ARM Helper classes.
//
class Architecture<string fname, string aname, list<SubtargetFeature> features>
: SubtargetFeature<fname, "ARMArch", aname,
!strconcat(aname, " architecture"), features>;
class ProcNoItin<string Name, list<SubtargetFeature> Features>
: Processor<Name, NoItineraries, Features>;
//===----------------------------------------------------------------------===//
// ARM architectures
//
def ARMv2 : Architecture<"armv2", "ARMv2", []>;
def ARMv2a : Architecture<"armv2a", "ARMv2a", []>;
def ARMv3 : Architecture<"armv3", "ARMv3", []>;
def ARMv3m : Architecture<"armv3m", "ARMv3m", []>;
def ARMv4 : Architecture<"armv4", "ARMv4", []>;
def ARMv4t : Architecture<"armv4t", "ARMv4t", [HasV4TOps]>;
def ARMv5t : Architecture<"armv5t", "ARMv5t", [HasV5TOps]>;
def ARMv5te : Architecture<"armv5te", "ARMv5te", [HasV5TEOps]>;
def ARMv5tej : Architecture<"armv5tej", "ARMv5tej", [HasV5TEOps]>;
def ARMv6 : Architecture<"armv6", "ARMv6", [HasV6Ops,
FeatureDSP]>;
def ARMv6t2 : Architecture<"armv6t2", "ARMv6t2", [HasV6T2Ops,
FeatureDSP]>;
def ARMv6k : Architecture<"armv6k", "ARMv6k", [HasV6KOps]>;
def ARMv6kz : Architecture<"armv6kz", "ARMv6kz", [HasV6KOps,
FeatureTrustZone]>;
def ARMv6m : Architecture<"armv6-m", "ARMv6m", [HasV6MOps,
FeatureNoARM,
ModeThumb,
FeatureDB,
FeatureMClass,
FeatureStrictAlign]>;
def ARMv6sm : Architecture<"armv6s-m", "ARMv6sm", [HasV6MOps,
FeatureNoARM,
ModeThumb,
FeatureDB,
FeatureMClass,
FeatureStrictAlign]>;
def ARMv7a : Architecture<"armv7-a", "ARMv7a", [HasV7Ops,
FeatureNEON,
FeatureDB,
FeatureDSP,
FeatureAClass]>;
def ARMv7ve : Architecture<"armv7ve", "ARMv7ve", [HasV7Ops,
FeatureNEON,
FeatureDB,
FeatureDSP,
FeatureTrustZone,
FeatureMP,
FeatureVirtualization,
FeatureAClass]>;
def ARMv7r : Architecture<"armv7-r", "ARMv7r", [HasV7Ops,
FeatureDB,
FeatureDSP,
FeatureHWDivThumb,
FeatureRClass]>;
def ARMv7m : Architecture<"armv7-m", "ARMv7m", [HasV7Ops,
FeatureThumb2,
FeatureNoARM,
ModeThumb,
FeatureDB,
FeatureHWDivThumb,
FeatureMClass]>;
def ARMv7em : Architecture<"armv7e-m", "ARMv7em", [HasV7Ops,
FeatureThumb2,
FeatureNoARM,
ModeThumb,
FeatureDB,
FeatureHWDivThumb,
FeatureMClass,
FeatureDSP]>;
def ARMv8a : Architecture<"armv8-a", "ARMv8a", [HasV8Ops,
FeatureAClass,
FeatureDB,
FeatureFPARMv8,
FeatureNEON,
FeatureDSP,
FeatureTrustZone,
FeatureMP,
FeatureVirtualization,
FeatureCrypto,
FeatureCRC]>;
def ARMv81a : Architecture<"armv8.1-a", "ARMv81a", [HasV8_1aOps,
FeatureAClass,
FeatureDB,
FeatureFPARMv8,
FeatureNEON,
FeatureDSP,
FeatureTrustZone,
FeatureMP,
FeatureVirtualization,
FeatureCrypto,
FeatureCRC]>;
def ARMv82a : Architecture<"armv8.2-a", "ARMv82a", [HasV8_2aOps,
FeatureAClass,
FeatureDB,
FeatureFPARMv8,
FeatureNEON,
FeatureDSP,
FeatureTrustZone,
FeatureMP,
FeatureVirtualization,
FeatureCrypto,
FeatureCRC,
FeatureRAS]>;
def ARMv83a : Architecture<"armv8.3-a", "ARMv83a", [HasV8_3aOps,
FeatureAClass,
FeatureDB,
FeatureFPARMv8,
FeatureNEON,
FeatureDSP,
FeatureTrustZone,
FeatureMP,
FeatureVirtualization,
FeatureCrypto,
FeatureCRC,
FeatureRAS]>;
def ARMv84a : Architecture<"armv8.4-a", "ARMv84a", [HasV8_4aOps,
FeatureAClass,
FeatureDB,
FeatureFPARMv8,
FeatureNEON,
FeatureDSP,
FeatureTrustZone,
FeatureMP,
FeatureVirtualization,
FeatureCrypto,
FeatureCRC,
FeatureRAS,
FeatureDotProd]>;
def ARMv85a : Architecture<"armv8.5-a", "ARMv85a", [HasV8_5aOps,
FeatureAClass,
FeatureDB,
FeatureFPARMv8,
FeatureNEON,
FeatureDSP,
FeatureTrustZone,
FeatureMP,
FeatureVirtualization,
FeatureCrypto,
FeatureCRC,
FeatureRAS,
FeatureDotProd]>;
def ARMv8r : Architecture<"armv8-r", "ARMv8r", [HasV8Ops,
FeatureRClass,
FeatureDB,
FeatureDFB,
FeatureDSP,
FeatureCRC,
FeatureMP,
FeatureVirtualization,
FeatureFPARMv8,
FeatureNEON]>;
def ARMv8mBaseline : Architecture<"armv8-m.base", "ARMv8mBaseline",
[HasV8MBaselineOps,
FeatureNoARM,
ModeThumb,
FeatureDB,
FeatureHWDivThumb,
FeatureV7Clrex,
Feature8MSecExt,
FeatureAcquireRelease,
FeatureMClass,
FeatureStrictAlign]>;
def ARMv8mMainline : Architecture<"armv8-m.main", "ARMv8mMainline",
[HasV8MMainlineOps,
FeatureNoARM,
ModeThumb,
FeatureDB,
FeatureHWDivThumb,
Feature8MSecExt,
FeatureAcquireRelease,
FeatureMClass]>;
def ARMv81mMainline : Architecture<"armv8.1-m.main", "ARMv81mMainline",
[HasV8_1MMainlineOps,
FeatureNoARM,
ModeThumb,
FeatureDB,
FeatureHWDivThumb,
Feature8MSecExt,
FeatureAcquireRelease,
FeatureMClass,
FeatureRAS,
FeatureLOB]>;
// Aliases
def IWMMXT : Architecture<"iwmmxt", "ARMv5te", [ARMv5te]>;
def IWMMXT2 : Architecture<"iwmmxt2", "ARMv5te", [ARMv5te]>;
def XScale : Architecture<"xscale", "ARMv5te", [ARMv5te]>;
def ARMv6j : Architecture<"armv6j", "ARMv7a", [ARMv6]>;
def ARMv7k : Architecture<"armv7k", "ARMv7a", [ARMv7a]>;
def ARMv7s : Architecture<"armv7s", "ARMv7a", [ARMv7a]>;
//===----------------------------------------------------------------------===//
// ARM schedules.
//===----------------------------------------------------------------------===//
//
include "ARMPredicates.td"
include "ARMSchedule.td"
//===----------------------------------------------------------------------===//
// ARM processors
//
// Dummy CPU, used to target architectures
def : ProcessorModel<"generic", CortexA8Model, []>;
// FIXME: Several processors below are not using their own scheduler
// model, but that of a similar or previous processor. These should be fixed.
def : ProcNoItin<"arm8", [ARMv4]>;
def : ProcNoItin<"arm810", [ARMv4]>;
def : ProcNoItin<"strongarm", [ARMv4]>;
def : ProcNoItin<"strongarm110", [ARMv4]>;
def : ProcNoItin<"strongarm1100", [ARMv4]>;
def : ProcNoItin<"strongarm1110", [ARMv4]>;
def : ProcNoItin<"arm7tdmi", [ARMv4t]>;
def : ProcNoItin<"arm7tdmi-s", [ARMv4t]>;
def : ProcNoItin<"arm710t", [ARMv4t]>;
def : ProcNoItin<"arm720t", [ARMv4t]>;
def : ProcNoItin<"arm9", [ARMv4t]>;
def : ProcNoItin<"arm9tdmi", [ARMv4t]>;
def : ProcNoItin<"arm920", [ARMv4t]>;
def : ProcNoItin<"arm920t", [ARMv4t]>;
def : ProcNoItin<"arm922t", [ARMv4t]>;
def : ProcNoItin<"arm940t", [ARMv4t]>;
def : ProcNoItin<"ep9312", [ARMv4t]>;
def : ProcNoItin<"arm10tdmi", [ARMv5t]>;
def : ProcNoItin<"arm1020t", [ARMv5t]>;
def : ProcNoItin<"arm9e", [ARMv5te]>;
def : ProcNoItin<"arm926ej-s", [ARMv5te]>;
def : ProcNoItin<"arm946e-s", [ARMv5te]>;
def : ProcNoItin<"arm966e-s", [ARMv5te]>;
def : ProcNoItin<"arm968e-s", [ARMv5te]>;
def : ProcNoItin<"arm10e", [ARMv5te]>;
def : ProcNoItin<"arm1020e", [ARMv5te]>;
def : ProcNoItin<"arm1022e", [ARMv5te]>;
def : ProcNoItin<"xscale", [ARMv5te]>;
def : ProcNoItin<"iwmmxt", [ARMv5te]>;
def : Processor<"arm1136j-s", ARMV6Itineraries, [ARMv6]>;
def : Processor<"arm1136jf-s", ARMV6Itineraries, [ARMv6,
FeatureVFP2,
FeatureHasSlowFPVMLx]>;
def : Processor<"cortex-m0", ARMV6Itineraries, [ARMv6m]>;
def : Processor<"cortex-m0plus", ARMV6Itineraries, [ARMv6m]>;
def : Processor<"cortex-m1", ARMV6Itineraries, [ARMv6m]>;
def : Processor<"sc000", ARMV6Itineraries, [ARMv6m]>;
def : Processor<"arm1176j-s", ARMV6Itineraries, [ARMv6kz]>;
def : Processor<"arm1176jz-s", ARMV6Itineraries, [ARMv6kz]>;
def : Processor<"arm1176jzf-s", ARMV6Itineraries, [ARMv6kz,
FeatureVFP2,
FeatureHasSlowFPVMLx]>;
def : Processor<"mpcorenovfp", ARMV6Itineraries, [ARMv6k]>;
def : Processor<"mpcore", ARMV6Itineraries, [ARMv6k,
FeatureVFP2,
FeatureHasSlowFPVMLx]>;
def : Processor<"arm1156t2-s", ARMV6Itineraries, [ARMv6t2]>;
def : Processor<"arm1156t2f-s", ARMV6Itineraries, [ARMv6t2,
FeatureVFP2,
FeatureHasSlowFPVMLx]>;
def : ProcessorModel<"cortex-a5", CortexA8Model, [ARMv7a, ProcA5,
FeatureHasRetAddrStack,
FeatureTrustZone,
FeatureSlowFPBrcc,
FeatureHasSlowFPVMLx,
FeatureVMLxForwarding,
FeatureMP,
FeatureVFP4]>;
def : ProcessorModel<"cortex-a7", CortexA8Model, [ARMv7a, ProcA7,
FeatureHasRetAddrStack,
FeatureTrustZone,
FeatureSlowFPBrcc,
FeatureHasVMLxHazards,
FeatureHasSlowFPVMLx,
FeatureVMLxForwarding,
FeatureMP,
FeatureVFP4,
FeatureVirtualization]>;
def : ProcessorModel<"cortex-a8", CortexA8Model, [ARMv7a, ProcA8,
FeatureHasRetAddrStack,
FeatureNonpipelinedVFP,
FeatureTrustZone,
FeatureSlowFPBrcc,
FeatureHasVMLxHazards,
FeatureHasSlowFPVMLx,
FeatureVMLxForwarding]>;
def : ProcessorModel<"cortex-a9", CortexA9Model, [ARMv7a, ProcA9,
FeatureHasRetAddrStack,
FeatureTrustZone,
FeatureHasVMLxHazards,
FeatureVMLxForwarding,
FeatureFP16,
FeatureAvoidPartialCPSR,
FeatureExpandMLx,
FeaturePreferVMOVSR,
FeatureMuxedUnits,
FeatureNEONForFPMovs,
FeatureCheckVLDnAlign,
FeatureMP]>;
def : ProcessorModel<"cortex-a12", CortexA9Model, [ARMv7a, ProcA12,
FeatureHasRetAddrStack,
FeatureTrustZone,
FeatureVMLxForwarding,
FeatureVFP4,
FeatureAvoidPartialCPSR,
FeatureVirtualization,
FeatureMP]>;
def : ProcessorModel<"cortex-a15", CortexA9Model, [ARMv7a, ProcA15,
FeatureDontWidenVMOVS,
FeatureSplatVFPToNeon,
FeatureHasRetAddrStack,
FeatureMuxedUnits,
FeatureTrustZone,
FeatureVFP4,
FeatureMP,
FeatureCheckVLDnAlign,
FeatureAvoidPartialCPSR,
FeatureVirtualization]>;
def : ProcessorModel<"cortex-a17", CortexA9Model, [ARMv7a, ProcA17,
FeatureHasRetAddrStack,
FeatureTrustZone,
FeatureMP,
FeatureVMLxForwarding,
FeatureVFP4,
FeatureAvoidPartialCPSR,
FeatureVirtualization]>;
// FIXME: krait currently has the same features as A9 plus VFP4 and HWDiv
def : ProcessorModel<"krait", CortexA9Model, [ARMv7a, ProcKrait,
FeatureHasRetAddrStack,
FeatureMuxedUnits,
FeatureCheckVLDnAlign,
FeatureVMLxForwarding,
FeatureFP16,
FeatureAvoidPartialCPSR,
FeatureVFP4,
FeatureHWDivThumb,
FeatureHWDivARM]>;
def : ProcessorModel<"swift", SwiftModel, [ARMv7a, ProcSwift,
FeatureHasRetAddrStack,
FeatureNEONForFP,
FeatureVFP4,
FeatureUseWideStrideVFP,
FeatureMP,
FeatureHWDivThumb,
FeatureHWDivARM,
FeatureAvoidPartialCPSR,
FeatureAvoidMOVsShOp,
FeatureHasSlowFPVMLx,
FeatureHasVMLxHazards,
FeatureProfUnpredicate,
FeaturePrefISHSTBarrier,
FeatureSlowOddRegister,
FeatureSlowLoadDSubreg,
FeatureSlowVGETLNi32,
FeatureSlowVDUP32,
FeatureUseMISched,
FeatureNoPostRASched]>;
def : ProcessorModel<"cortex-r4", CortexA8Model, [ARMv7r, ProcR4,
FeatureHasRetAddrStack,
FeatureAvoidPartialCPSR]>;
def : ProcessorModel<"cortex-r4f", CortexA8Model, [ARMv7r, ProcR4,
FeatureHasRetAddrStack,
FeatureSlowFPBrcc,
FeatureHasSlowFPVMLx,
FeatureVFP3_D16,
FeatureAvoidPartialCPSR]>;
def : ProcessorModel<"cortex-r5", CortexA8Model, [ARMv7r, ProcR5,
FeatureHasRetAddrStack,
FeatureVFP3_D16,
FeatureSlowFPBrcc,
FeatureHWDivARM,
FeatureHasSlowFPVMLx,
FeatureAvoidPartialCPSR]>;
def : ProcessorModel<"cortex-r7", CortexA8Model, [ARMv7r, ProcR7,
FeatureHasRetAddrStack,
FeatureVFP3_D16,
FeatureFP16,
FeatureMP,
FeatureSlowFPBrcc,
FeatureHWDivARM,
FeatureHasSlowFPVMLx,
FeatureAvoidPartialCPSR]>;
def : ProcessorModel<"cortex-r8", CortexA8Model, [ARMv7r,
FeatureHasRetAddrStack,
FeatureVFP3_D16,
FeatureFP16,
FeatureMP,
FeatureSlowFPBrcc,
FeatureHWDivARM,
FeatureHasSlowFPVMLx,
FeatureAvoidPartialCPSR]>;
def : ProcessorModel<"cortex-m3", CortexM4Model, [ARMv7m,
ProcM3,
FeaturePrefLoopAlign32,
FeatureUseMISched,
FeatureUseAA,
FeatureHasNoBranchPredictor]>;
def : ProcessorModel<"sc300", CortexM4Model, [ARMv7m,
ProcM3,
FeatureUseMISched,
FeatureUseAA,
FeatureHasNoBranchPredictor]>;
def : ProcessorModel<"cortex-m4", CortexM4Model, [ARMv7em,
FeatureVFP4_D16_SP,
FeaturePrefLoopAlign32,
FeatureHasSlowFPVMLx,
FeatureUseMISched,
FeatureUseAA,
FeatureHasNoBranchPredictor]>;
def : ProcNoItin<"cortex-m7", [ARMv7em,
FeatureFPARMv8_D16]>;
def : ProcNoItin<"cortex-m23", [ARMv8mBaseline,
FeatureNoMovt]>;
def : ProcessorModel<"cortex-m33", CortexM4Model, [ARMv8mMainline,
FeatureDSP,
FeatureFPARMv8_D16_SP,
FeaturePrefLoopAlign32,
FeatureHasSlowFPVMLx,
FeatureUseMISched,
FeatureUseAA,
FeatureHasNoBranchPredictor]>;
def : ProcessorModel<"cortex-m35p", CortexM4Model, [ARMv8mMainline,
FeatureDSP,
FeatureFPARMv8_D16_SP,
FeaturePrefLoopAlign32,
FeatureHasSlowFPVMLx,
FeatureUseMISched,
FeatureUseAA,
FeatureHasNoBranchPredictor]>;
def : ProcNoItin<"cortex-a32", [ARMv8a,
FeatureHWDivThumb,
FeatureHWDivARM,
FeatureCrypto,
FeatureCRC]>;
def : ProcNoItin<"cortex-a35", [ARMv8a, ProcA35,
FeatureHWDivThumb,
FeatureHWDivARM,
FeatureCrypto,
FeatureCRC]>;
def : ProcNoItin<"cortex-a53", [ARMv8a, ProcA53,
FeatureHWDivThumb,
FeatureHWDivARM,
FeatureCrypto,
FeatureCRC,
FeatureFPAO]>;
def : ProcNoItin<"cortex-a55", [ARMv82a, ProcA55,
FeatureHWDivThumb,
FeatureHWDivARM,
FeatureDotProd]>;
def : ProcessorModel<"cortex-a57", CortexA57Model, [ARMv8a, ProcA57,
FeatureHWDivThumb,
FeatureHWDivARM,
FeatureCrypto,
FeatureCRC,
FeatureFPAO,
FeatureAvoidPartialCPSR,
FeatureCheapPredicableCPSR]>;
def : ProcessorModel<"cortex-a72", CortexA57Model, [ARMv8a, ProcA72,
FeatureHWDivThumb,
FeatureHWDivARM,
FeatureCrypto,
FeatureCRC]>;
def : ProcNoItin<"cortex-a73", [ARMv8a, ProcA73,
FeatureHWDivThumb,
FeatureHWDivARM,
FeatureCrypto,
FeatureCRC]>;
def : ProcNoItin<"cortex-a75", [ARMv82a, ProcA75,
FeatureHWDivThumb,
FeatureHWDivARM,
FeatureDotProd]>;
def : ProcNoItin<"cortex-a76", [ARMv82a, ProcA76,
FeatureHWDivThumb,
FeatureHWDivARM,
FeatureCrypto,
FeatureCRC,
FeatureFullFP16,
FeatureDotProd]>;
def : ProcNoItin<"cortex-a76ae", [ARMv82a, ProcA76,
FeatureHWDivThumb,
FeatureHWDivARM,
FeatureCrypto,
FeatureCRC,
FeatureFullFP16,
FeatureDotProd]>;
def : ProcessorModel<"cyclone", SwiftModel, [ARMv8a, ProcSwift,
FeatureHasRetAddrStack,
FeatureNEONForFP,
FeatureVFP4,
FeatureMP,
FeatureHWDivThumb,
FeatureHWDivARM,
FeatureAvoidPartialCPSR,
FeatureAvoidMOVsShOp,
FeatureHasSlowFPVMLx,
FeatureCrypto,
FeatureUseMISched,
FeatureZCZeroing,
FeatureNoPostRASched]>;
def : ProcNoItin<"exynos-m1", [ARMv8a, ProcExynos]>;
def : ProcNoItin<"exynos-m2", [ARMv8a, ProcExynos]>;
def : ProcNoItin<"exynos-m3", [ARMv8a, ProcExynos]>;
def : ProcNoItin<"exynos-m4", [ARMv82a, ProcExynos,
FeatureFullFP16,
FeatureDotProd]>;
def : ProcNoItin<"exynos-m5", [ARMv82a, ProcExynos,
FeatureFullFP16,
FeatureDotProd]>;
def : ProcNoItin<"kryo", [ARMv8a, ProcKryo,
FeatureHWDivThumb,
FeatureHWDivARM,
FeatureCrypto,
FeatureCRC]>;
def : ProcessorModel<"cortex-r52", CortexR52Model, [ARMv8r, ProcR52,
FeatureUseMISched,
FeatureFPAO,
FeatureUseAA]>;
//===----------------------------------------------------------------------===//
// Register File Description
//===----------------------------------------------------------------------===//
include "ARMRegisterInfo.td"
include "ARMRegisterBanks.td"
include "ARMCallingConv.td"
//===----------------------------------------------------------------------===//
// Instruction Descriptions
//===----------------------------------------------------------------------===//
include "ARMInstrInfo.td"
def ARMInstrInfo : InstrInfo;
//===----------------------------------------------------------------------===//
// Declare the target which we are implementing
//===----------------------------------------------------------------------===//
def ARMAsmWriter : AsmWriter {
string AsmWriterClassName = "InstPrinter";
int PassSubtarget = 1;
int Variant = 0;
bit isMCAsmWriter = 1;
}
def ARMAsmParser : AsmParser {
bit ReportMultipleNearMisses = 1;
}
def ARMAsmParserVariant : AsmParserVariant {
int Variant = 0;
string Name = "ARM";
string BreakCharacters = ".";
}
def ARM : Target {
// Pull in Instruction Info.
let InstructionSet = ARMInstrInfo;
let AssemblyWriters = [ARMAsmWriter];
let AssemblyParsers = [ARMAsmParser];
let AssemblyParserVariants = [ARMAsmParserVariant];
let AllowRegisterRenaming = 1;
}
Index: stable/12/contrib/llvm-project/llvm/lib/Target/ARM/ARMPredicates.td
===================================================================
--- stable/12/contrib/llvm-project/llvm/lib/Target/ARM/ARMPredicates.td (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/lib/Target/ARM/ARMPredicates.td (revision 356465)
@@ -1,211 +1,211 @@
//===-- ARMPredicates.td - ARM Instruction Predicates ------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
def HasV4T : Predicate<"Subtarget->hasV4TOps()">,
AssemblerPredicate<"HasV4TOps", "armv4t">;
def NoV4T : Predicate<"!Subtarget->hasV4TOps()">;
def HasV5T : Predicate<"Subtarget->hasV5TOps()">,
AssemblerPredicate<"HasV5TOps", "armv5t">;
def NoV5T : Predicate<"!Subtarget->hasV5TOps()">;
def HasV5TE : Predicate<"Subtarget->hasV5TEOps()">,
AssemblerPredicate<"HasV5TEOps", "armv5te">;
def HasV6 : Predicate<"Subtarget->hasV6Ops()">,
AssemblerPredicate<"HasV6Ops", "armv6">;
def NoV6 : Predicate<"!Subtarget->hasV6Ops()">;
def HasV6M : Predicate<"Subtarget->hasV6MOps()">,
AssemblerPredicate<"HasV6MOps",
"armv6m or armv6t2">;
def HasV8MBaseline : Predicate<"Subtarget->hasV8MBaselineOps()">,
AssemblerPredicate<"HasV8MBaselineOps",
"armv8m.base">;
def HasV8MMainline : Predicate<"Subtarget->hasV8MMainlineOps()">,
AssemblerPredicate<"HasV8MMainlineOps",
"armv8m.main">;
def HasV8_1MMainline : Predicate<"Subtarget->hasV8_1MMainlineOps()">,
AssemblerPredicate<"HasV8_1MMainlineOps",
"armv8.1m.main">;
def HasMVEInt : Predicate<"Subtarget->hasMVEIntegerOps()">,
AssemblerPredicate<"HasMVEIntegerOps",
"mve">;
def HasMVEFloat : Predicate<"Subtarget->hasMVEFloatOps()">,
AssemblerPredicate<"HasMVEFloatOps",
"mve.fp">;
def HasFPRegs : Predicate<"Subtarget->hasFPRegs()">,
AssemblerPredicate<"FeatureFPRegs",
"fp registers">;
def HasFPRegs16 : Predicate<"Subtarget->hasFPRegs16()">,
AssemblerPredicate<"FeatureFPRegs16",
"16-bit fp registers">;
def HasFPRegs64 : Predicate<"Subtarget->hasFPRegs64()">,
AssemblerPredicate<"FeatureFPRegs64",
"64-bit fp registers">;
def HasFPRegsV8_1M : Predicate<"Subtarget->hasFPRegs() && Subtarget->hasV8_1MMainlineOps()">,
AssemblerPredicate<"FeatureFPRegs,HasV8_1MMainlineOps",
"armv8.1m.main with FP or MVE">;
def HasV6T2 : Predicate<"Subtarget->hasV6T2Ops()">,
AssemblerPredicate<"HasV6T2Ops", "armv6t2">;
def NoV6T2 : Predicate<"!Subtarget->hasV6T2Ops()">;
def HasV6K : Predicate<"Subtarget->hasV6KOps()">,
AssemblerPredicate<"HasV6KOps", "armv6k">;
def NoV6K : Predicate<"!Subtarget->hasV6KOps()">;
def HasV7 : Predicate<"Subtarget->hasV7Ops()">,
AssemblerPredicate<"HasV7Ops", "armv7">;
def HasV8 : Predicate<"Subtarget->hasV8Ops()">,
AssemblerPredicate<"HasV8Ops", "armv8">;
def PreV8 : Predicate<"!Subtarget->hasV8Ops()">,
AssemblerPredicate<"!HasV8Ops", "armv7 or earlier">;
def HasV8_1a : Predicate<"Subtarget->hasV8_1aOps()">,
AssemblerPredicate<"HasV8_1aOps", "armv8.1a">;
def HasV8_2a : Predicate<"Subtarget->hasV8_2aOps()">,
AssemblerPredicate<"HasV8_2aOps", "armv8.2a">;
def HasV8_3a : Predicate<"Subtarget->hasV8_3aOps()">,
AssemblerPredicate<"HasV8_3aOps", "armv8.3a">;
def HasV8_4a : Predicate<"Subtarget->hasV8_4aOps()">,
AssemblerPredicate<"HasV8_4aOps", "armv8.4a">;
def HasV8_5a : Predicate<"Subtarget->hasV8_5aOps()">,
AssemblerPredicate<"HasV8_5aOps", "armv8.5a">;
def NoVFP : Predicate<"!Subtarget->hasVFP2Base()">;
def HasVFP2 : Predicate<"Subtarget->hasVFP2Base()">,
- AssemblerPredicate<"FeatureVFP2_SP", "VFP2">;
+ AssemblerPredicate<"FeatureVFP2_D16_SP", "VFP2">;
def HasVFP3 : Predicate<"Subtarget->hasVFP3Base()">,
AssemblerPredicate<"FeatureVFP3_D16_SP", "VFP3">;
def HasVFP4 : Predicate<"Subtarget->hasVFP4Base()">,
AssemblerPredicate<"FeatureVFP4_D16_SP", "VFP4">;
def HasDPVFP : Predicate<"Subtarget->hasFP64()">,
AssemblerPredicate<"FeatureFP64",
"double precision VFP">;
def HasFPARMv8 : Predicate<"Subtarget->hasFPARMv8Base()">,
AssemblerPredicate<"FeatureFPARMv8_D16_SP", "FPARMv8">;
def HasNEON : Predicate<"Subtarget->hasNEON()">,
AssemblerPredicate<"FeatureNEON", "NEON">;
def HasSHA2 : Predicate<"Subtarget->hasSHA2()">,
AssemblerPredicate<"FeatureSHA2", "sha2">;
def HasAES : Predicate<"Subtarget->hasAES()">,
AssemblerPredicate<"FeatureAES", "aes">;
def HasCrypto : Predicate<"Subtarget->hasCrypto()">,
AssemblerPredicate<"FeatureCrypto", "crypto">;
def HasDotProd : Predicate<"Subtarget->hasDotProd()">,
AssemblerPredicate<"FeatureDotProd", "dotprod">;
def HasCRC : Predicate<"Subtarget->hasCRC()">,
AssemblerPredicate<"FeatureCRC", "crc">;
def HasRAS : Predicate<"Subtarget->hasRAS()">,
AssemblerPredicate<"FeatureRAS", "ras">;
def HasLOB : Predicate<"Subtarget->hasLOB()">,
AssemblerPredicate<"FeatureLOB", "lob">;
def HasFP16 : Predicate<"Subtarget->hasFP16()">,
AssemblerPredicate<"FeatureFP16","half-float conversions">;
def HasFullFP16 : Predicate<"Subtarget->hasFullFP16()">,
AssemblerPredicate<"FeatureFullFP16","full half-float">;
def HasFP16FML : Predicate<"Subtarget->hasFP16FML()">,
AssemblerPredicate<"FeatureFP16FML","full half-float fml">;
def HasDivideInThumb : Predicate<"Subtarget->hasDivideInThumbMode()">,
AssemblerPredicate<"FeatureHWDivThumb", "divide in THUMB">;
def HasDivideInARM : Predicate<"Subtarget->hasDivideInARMMode()">,
AssemblerPredicate<"FeatureHWDivARM", "divide in ARM">;
def HasDSP : Predicate<"Subtarget->hasDSP()">,
AssemblerPredicate<"FeatureDSP", "dsp">;
def HasDB : Predicate<"Subtarget->hasDataBarrier()">,
AssemblerPredicate<"FeatureDB",
"data-barriers">;
def HasDFB : Predicate<"Subtarget->hasFullDataBarrier()">,
AssemblerPredicate<"FeatureDFB",
"full-data-barrier">;
def HasV7Clrex : Predicate<"Subtarget->hasV7Clrex()">,
AssemblerPredicate<"FeatureV7Clrex",
"v7 clrex">;
def HasAcquireRelease : Predicate<"Subtarget->hasAcquireRelease()">,
AssemblerPredicate<"FeatureAcquireRelease",
"acquire/release">;
def HasMP : Predicate<"Subtarget->hasMPExtension()">,
AssemblerPredicate<"FeatureMP",
"mp-extensions">;
def HasVirtualization: Predicate<"false">,
AssemblerPredicate<"FeatureVirtualization",
"virtualization-extensions">;
def HasTrustZone : Predicate<"Subtarget->hasTrustZone()">,
AssemblerPredicate<"FeatureTrustZone",
"TrustZone">;
def Has8MSecExt : Predicate<"Subtarget->has8MSecExt()">,
AssemblerPredicate<"Feature8MSecExt",
"ARMv8-M Security Extensions">;
def HasZCZ : Predicate<"Subtarget->hasZeroCycleZeroing()">;
def UseNEONForFP : Predicate<"Subtarget->useNEONForSinglePrecisionFP()">;
def DontUseNEONForFP : Predicate<"!Subtarget->useNEONForSinglePrecisionFP()">;
def IsThumb : Predicate<"Subtarget->isThumb()">,
AssemblerPredicate<"ModeThumb", "thumb">;
def IsThumb1Only : Predicate<"Subtarget->isThumb1Only()">;
def IsThumb2 : Predicate<"Subtarget->isThumb2()">,
AssemblerPredicate<"ModeThumb,FeatureThumb2",
"thumb2">;
def IsMClass : Predicate<"Subtarget->isMClass()">,
AssemblerPredicate<"FeatureMClass", "armv*m">;
def IsNotMClass : Predicate<"!Subtarget->isMClass()">,
AssemblerPredicate<"!FeatureMClass",
"!armv*m">;
def IsARM : Predicate<"!Subtarget->isThumb()">,
AssemblerPredicate<"!ModeThumb", "arm-mode">;
def IsMachO : Predicate<"Subtarget->isTargetMachO()">;
def IsNotMachO : Predicate<"!Subtarget->isTargetMachO()">;
def IsNaCl : Predicate<"Subtarget->isTargetNaCl()">;
def IsWindows : Predicate<"Subtarget->isTargetWindows()">;
def IsNotWindows : Predicate<"!Subtarget->isTargetWindows()">;
def IsReadTPHard : Predicate<"Subtarget->isReadTPHard()">;
def IsReadTPSoft : Predicate<"!Subtarget->isReadTPHard()">;
def UseNaClTrap : Predicate<"Subtarget->useNaClTrap()">,
AssemblerPredicate<"FeatureNaClTrap", "NaCl">;
def DontUseNaClTrap : Predicate<"!Subtarget->useNaClTrap()">;
def UseNegativeImmediates :
Predicate<"false">,
AssemblerPredicate<"!FeatureNoNegativeImmediates",
"NegativeImmediates">;
// FIXME: Eventually this will be just "hasV6T2Ops".
let RecomputePerFunction = 1 in {
def UseMovt : Predicate<"Subtarget->useMovt()">;
def DontUseMovt : Predicate<"!Subtarget->useMovt()">;
def UseMovtInPic : Predicate<"Subtarget->useMovt() && Subtarget->allowPositionIndependentMovt()">;
def DontUseMovtInPic : Predicate<"!Subtarget->useMovt() || !Subtarget->allowPositionIndependentMovt()">;
def UseFPVMLx: Predicate<"((Subtarget->useFPVMLx() &&"
" TM.Options.AllowFPOpFusion != FPOpFusion::Fast) ||"
"Subtarget->hasMinSize())">;
}
def UseMulOps : Predicate<"Subtarget->useMulOps()">;
// Prefer fused MAC for fp mul + add over fp VMLA / VMLS if they are available.
// But only select them if more precision in FP computation is allowed, and when
// they are not slower than a mul + add sequence.
// Do not use them for Darwin platforms.
def UseFusedMAC : Predicate<"(TM.Options.AllowFPOpFusion =="
" FPOpFusion::Fast && "
" Subtarget->hasVFP4Base()) && "
"!Subtarget->isTargetDarwin() &&"
"Subtarget->useFPVMLx()">;
def HasFastVGETLNi32 : Predicate<"!Subtarget->hasSlowVGETLNi32()">;
def HasSlowVGETLNi32 : Predicate<"Subtarget->hasSlowVGETLNi32()">;
def HasFastVDUP32 : Predicate<"!Subtarget->hasSlowVDUP32()">;
def HasSlowVDUP32 : Predicate<"Subtarget->hasSlowVDUP32()">;
def UseVMOVSR : Predicate<"Subtarget->preferVMOVSR() ||"
"!Subtarget->useNEONForSinglePrecisionFP()">;
def DontUseVMOVSR : Predicate<"!Subtarget->preferVMOVSR() &&"
"Subtarget->useNEONForSinglePrecisionFP()">;
let RecomputePerFunction = 1 in {
def IsLE : Predicate<"MF->getDataLayout().isLittleEndian()">;
def IsBE : Predicate<"MF->getDataLayout().isBigEndian()">;
}
def GenExecuteOnly : Predicate<"Subtarget->genExecuteOnly()">;
// Armv8.5-A extensions
def HasSB : Predicate<"Subtarget->hasSB()">,
AssemblerPredicate<"FeatureSB", "sb">;
Index: stable/12/contrib/llvm-project/llvm/lib/Target/ARM/ARMSubtarget.h
===================================================================
--- stable/12/contrib/llvm-project/llvm/lib/Target/ARM/ARMSubtarget.h (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/lib/Target/ARM/ARMSubtarget.h (revision 356465)
@@ -1,865 +1,867 @@
//===-- ARMSubtarget.h - Define Subtarget for the ARM ----------*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the ARM specific subclass of TargetSubtargetInfo.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_ARM_ARMSUBTARGET_H
#define LLVM_LIB_TARGET_ARM_ARMSUBTARGET_H
#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMConstantPoolValue.h"
#include "ARMFrameLowering.h"
#include "ARMISelLowering.h"
#include "ARMSelectionDAGInfo.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Target/TargetOptions.h"
#include <memory>
#include <string>
#define GET_SUBTARGETINFO_HEADER
#include "ARMGenSubtargetInfo.inc"
namespace llvm {
class ARMBaseTargetMachine;
class GlobalValue;
class StringRef;
class ARMSubtarget : public ARMGenSubtargetInfo {
protected:
enum ARMProcFamilyEnum {
Others,
CortexA12,
CortexA15,
CortexA17,
CortexA32,
CortexA35,
CortexA5,
CortexA53,
CortexA55,
CortexA57,
CortexA7,
CortexA72,
CortexA73,
CortexA75,
CortexA76,
CortexA8,
CortexA9,
CortexM3,
CortexR4,
CortexR4F,
CortexR5,
CortexR52,
CortexR7,
Exynos,
Krait,
Kryo,
Swift
};
enum ARMProcClassEnum {
None,
AClass,
MClass,
RClass
};
enum ARMArchEnum {
ARMv2,
ARMv2a,
ARMv3,
ARMv3m,
ARMv4,
ARMv4t,
ARMv5,
ARMv5t,
ARMv5te,
ARMv5tej,
ARMv6,
ARMv6k,
ARMv6kz,
ARMv6m,
ARMv6sm,
ARMv6t2,
ARMv7a,
ARMv7em,
ARMv7m,
ARMv7r,
ARMv7ve,
ARMv81a,
ARMv82a,
ARMv83a,
ARMv84a,
ARMv85a,
ARMv8a,
ARMv8mBaseline,
ARMv8mMainline,
ARMv8r,
ARMv81mMainline,
};
public:
/// What kind of timing do load multiple/store multiple instructions have.
enum ARMLdStMultipleTiming {
/// Can load/store 2 registers/cycle.
DoubleIssue,
/// Can load/store 2 registers/cycle, but needs an extra cycle if the access
/// is not 64-bit aligned.
DoubleIssueCheckUnalignedAccess,
/// Can load/store 1 register/cycle.
SingleIssue,
/// Can load/store 1 register/cycle, but needs an extra cycle for address
/// computation and potentially also for register writeback.
SingleIssuePlusExtras,
};
protected:
/// ARMProcFamily - ARM processor family: Cortex-A8, Cortex-A9, and others.
ARMProcFamilyEnum ARMProcFamily = Others;
/// ARMProcClass - ARM processor class: None, AClass, RClass or MClass.
ARMProcClassEnum ARMProcClass = None;
/// ARMArch - ARM architecture
ARMArchEnum ARMArch = ARMv4t;
/// HasV4TOps, HasV5TOps, HasV5TEOps,
/// HasV6Ops, HasV6MOps, HasV6KOps, HasV6T2Ops, HasV7Ops, HasV8Ops -
/// Specify whether the target supports specific ARM ISA variants.
bool HasV4TOps = false;
bool HasV5TOps = false;
bool HasV5TEOps = false;
bool HasV6Ops = false;
bool HasV6MOps = false;
bool HasV6KOps = false;
bool HasV6T2Ops = false;
bool HasV7Ops = false;
bool HasV8Ops = false;
bool HasV8_1aOps = false;
bool HasV8_2aOps = false;
bool HasV8_3aOps = false;
bool HasV8_4aOps = false;
bool HasV8_5aOps = false;
bool HasV8MBaselineOps = false;
bool HasV8MMainlineOps = false;
bool HasV8_1MMainlineOps = false;
bool HasMVEIntegerOps = false;
bool HasMVEFloatOps = false;
/// HasVFPv2, HasVFPv3, HasVFPv4, HasFPARMv8, HasNEON - Specify what
/// floating point ISAs are supported.
bool HasVFPv2 = false;
bool HasVFPv3 = false;
bool HasVFPv4 = false;
bool HasFPARMv8 = false;
bool HasNEON = false;
bool HasFPRegs = false;
bool HasFPRegs16 = false;
bool HasFPRegs64 = false;
/// Versions of the VFP flags restricted to single precision, or to
/// 16 d-registers, or both.
bool HasVFPv2SP = false;
bool HasVFPv3SP = false;
bool HasVFPv4SP = false;
bool HasFPARMv8SP = false;
+ bool HasVFPv2D16 = false;
bool HasVFPv3D16 = false;
bool HasVFPv4D16 = false;
bool HasFPARMv8D16 = false;
+ bool HasVFPv2D16SP = false;
bool HasVFPv3D16SP = false;
bool HasVFPv4D16SP = false;
bool HasFPARMv8D16SP = false;
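// Illustrative example: an FPv4-SP-D16 unit (e.g. as on Cortex-M4) sets only
// HasVFPv4D16SP among the VFPv4 flags, while a full VFPv4 unit with 32
// d-registers also sets HasVFPv4 and the restricted variants it implies.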
/// HasDotProd - True if the ARMv8.2A dot product instructions are supported.
bool HasDotProd = false;
/// UseNEONForSinglePrecisionFP - True if the NEONFP attribute has been
/// specified. Use the method useNEONForSinglePrecisionFP() to
/// determine if NEON should actually be used.
bool UseNEONForSinglePrecisionFP = false;
/// UseMulOps - True if non-microcoded fused integer multiply-add and
/// multiply-subtract instructions should be used.
bool UseMulOps = false;
/// SlowFPVMLx - If the VFP2 / NEON instructions are available, indicates
/// whether the FP VML[AS] instructions are slow (if so, don't use them).
bool SlowFPVMLx = false;
/// HasVMLxForwarding - If true, NEON has special multiplier accumulator
/// forwarding to allow mul + mla being issued back to back.
bool HasVMLxForwarding = false;
/// SlowFPBrcc - True if floating point compare + branch is slow.
bool SlowFPBrcc = false;
/// InThumbMode - True if compiling for Thumb, false for ARM.
bool InThumbMode = false;
/// UseSoftFloat - True if we're using software floating point features.
bool UseSoftFloat = false;
/// UseMISched - True if MachineScheduler should be used for this subtarget.
bool UseMISched = false;
/// DisablePostRAScheduler - True if scheduling should not happen again
/// after register allocation.
bool DisablePostRAScheduler = false;
/// UseAA - True if using AA during codegen (DAGCombine, MISched, etc)
bool UseAA = false;
/// HasThumb2 - True if Thumb2 instructions are supported.
bool HasThumb2 = false;
/// NoARM - True if subtarget does not support ARM mode execution.
bool NoARM = false;
/// ReserveR9 - True if R9 is not available as a general purpose register.
bool ReserveR9 = false;
/// NoMovt - True if MOVT / MOVW pairs are not used for materialization of
/// 32-bit imms (including global addresses).
bool NoMovt = false;
/// SupportsTailCall - True if the OS supports tail call. The dynamic linker
/// must be able to synthesize call stubs for interworking between ARM and
/// Thumb.
bool SupportsTailCall = false;
/// HasFP16 - True if subtarget supports half-precision FP conversions
bool HasFP16 = false;
/// HasFullFP16 - True if subtarget supports half-precision FP operations
bool HasFullFP16 = false;
/// HasFP16FML - True if subtarget supports half-precision FP fml operations
bool HasFP16FML = false;
/// HasD32 - True if subtarget has the full 32 double precision
/// FP registers for VFPv3.
bool HasD32 = false;
/// HasHardwareDivide - True if subtarget supports [su]div in Thumb mode
bool HasHardwareDivideInThumb = false;
/// HasHardwareDivideInARM - True if subtarget supports [su]div in ARM mode
bool HasHardwareDivideInARM = false;
/// HasDataBarrier - True if the subtarget supports DMB / DSB data barrier
/// instructions.
bool HasDataBarrier = false;
/// HasFullDataBarrier - True if the subtarget supports DFB data barrier
/// instruction.
bool HasFullDataBarrier = false;
/// HasV7Clrex - True if the subtarget supports CLREX instructions
bool HasV7Clrex = false;
/// HasAcquireRelease - True if the subtarget supports v8 atomic instructions
/// (LDA/LDAEX etc.)
bool HasAcquireRelease = false;
/// Pref32BitThumb - If true, codegen would prefer 32-bit Thumb instructions
/// over 16-bit ones.
bool Pref32BitThumb = false;
/// AvoidCPSRPartialUpdate - If true, codegen would avoid using instructions
/// that partially update CPSR and add false dependency on the previous
/// CPSR setting instruction.
bool AvoidCPSRPartialUpdate = false;
/// CheapPredicableCPSRDef - If true, disable +1 predication cost
/// for instructions updating CPSR. Enabled for Cortex-A57.
bool CheapPredicableCPSRDef = false;
/// AvoidMOVsShifterOperand - If true, codegen should avoid using flag setting
/// movs with shifter operand (i.e. asr, lsl, lsr).
bool AvoidMOVsShifterOperand = false;
/// HasRetAddrStack - Some processors perform return stack prediction. CodeGen should
/// avoid issuing "normal" call instructions to callees which do not return.
bool HasRetAddrStack = false;
/// HasBranchPredictor - True if the subtarget has a branch predictor. Having
/// a branch predictor or not changes the expected cost of taking a branch
/// which affects the choice of whether to use predicated instructions.
bool HasBranchPredictor = true;
/// HasMPExtension - True if the subtarget supports Multiprocessing
/// extension (ARMv7 only).
bool HasMPExtension = false;
/// HasVirtualization - True if the subtarget supports the Virtualization
/// extension.
bool HasVirtualization = false;
/// HasFP64 - If true, the floating point unit supports double
/// precision.
bool HasFP64 = false;
/// If true, the processor supports the Performance Monitor Extensions. These
/// include a generic cycle-counter as well as more fine-grained (often
/// implementation-specific) events.
bool HasPerfMon = false;
/// HasTrustZone - if true, processor supports TrustZone security extensions
bool HasTrustZone = false;
/// Has8MSecExt - if true, processor supports ARMv8-M Security Extensions
bool Has8MSecExt = false;
/// HasSHA2 - if true, processor supports SHA1 and SHA256
bool HasSHA2 = false;
/// HasAES - if true, processor supports AES
bool HasAES = false;
/// HasCrypto - if true, processor supports Cryptography extensions
bool HasCrypto = false;
/// HasCRC - if true, processor supports CRC instructions
bool HasCRC = false;
/// HasRAS - if true, the processor supports RAS extensions
bool HasRAS = false;
/// HasLOB - if true, the processor supports the Low Overhead Branch extension
bool HasLOB = false;
/// If true, the instructions "vmov.i32 d0, #0" and "vmov.i32 q0, #0" are
/// particularly effective at zeroing a VFP register.
bool HasZeroCycleZeroing = false;
/// HasFPAO - if true, processor does positive address offset computation faster
bool HasFPAO = false;
/// HasFuseAES - if true, processor executes back to back AES instruction
/// pairs faster.
bool HasFuseAES = false;
/// HasFuseLiterals - if true, processor executes back to back
/// bottom and top halves of literal generation faster.
bool HasFuseLiterals = false;
/// If true, if conversion may decide to leave some instructions unpredicated.
bool IsProfitableToUnpredicate = false;
/// If true, VMOV will be favored over VGETLNi32.
bool HasSlowVGETLNi32 = false;
/// If true, VMOV will be favored over VDUP.
bool HasSlowVDUP32 = false;
/// If true, VMOVSR will be favored over VMOVDRR.
bool PreferVMOVSR = false;
/// If true, ISHST barriers will be used for Release semantics.
bool PreferISHST = false;
/// If true, a VLDM/VSTM starting with an odd register number is considered to
/// take more microops than single VLDRS/VSTRS.
bool SlowOddRegister = false;
/// If true, loading into a D subregister will be penalized.
bool SlowLoadDSubregister = false;
/// If true, use a wider stride when allocating VFP registers.
bool UseWideStrideVFP = false;
/// If true, the AGU and NEON/FPU units are multiplexed.
bool HasMuxedUnits = false;
/// If true, VMOVS will never be widened to VMOVD.
bool DontWidenVMOVS = false;
/// If true, splat a register between VFP and NEON instructions.
bool SplatVFPToNeon = false;
/// If true, run the MLx expansion pass.
bool ExpandMLx = false;
/// If true, VFP/NEON VMLA/VMLS have special RAW hazards.
bool HasVMLxHazards = false;
// If true, read thread pointer from coprocessor register.
bool ReadTPHard = false;
/// If true, VMOVRS, VMOVSR and VMOVS will be converted from VFP to NEON.
bool UseNEONForFPMovs = false;
/// If true, VLDn instructions take an extra cycle for unaligned accesses.
bool CheckVLDnAlign = false;
/// If true, VFP instructions are not pipelined.
bool NonpipelinedVFP = false;
/// StrictAlign - If true, the subtarget disallows unaligned memory
/// accesses for some types. For details, see
/// ARMTargetLowering::allowsMisalignedMemoryAccesses().
bool StrictAlign = false;
/// RestrictIT - If true, the subtarget disallows generation of deprecated IT
/// blocks to conform to ARMv8 rule.
bool RestrictIT = false;
/// HasDSP - If true, the subtarget supports the DSP (saturating arith
/// and such) instructions.
bool HasDSP = false;
/// NaCl TRAP instruction is generated instead of the regular TRAP.
bool UseNaClTrap = false;
/// Generate calls via indirect call instructions.
bool GenLongCalls = false;
/// Generate code that does not contain data access to code sections.
bool GenExecuteOnly = false;
/// Target machine allowed unsafe FP math (such as use of NEON fp)
bool UnsafeFPMath = false;
/// UseSjLjEH - If true, the target uses SjLj exception handling (e.g. iOS).
bool UseSjLjEH = false;
/// Has speculation barrier
bool HasSB = false;
/// Implicitly convert an instruction to a different one if its immediates
/// cannot be encoded. For example, ADD r0, r1, #FFFFFFFF -> SUB r0, r1, #1.
bool NegativeImmediates = true;
/// stackAlignment - The minimum alignment known to hold of the stack frame on
/// entry to the function and which must be maintained by every function.
unsigned stackAlignment = 4;
/// CPUString - String name of used CPU.
std::string CPUString;
unsigned MaxInterleaveFactor = 1;
/// Clearance before partial register updates (in number of instructions)
unsigned PartialUpdateClearance = 0;
/// What kind of timing do load multiple/store multiple have (double issue,
/// single issue etc).
ARMLdStMultipleTiming LdStMultipleTiming = SingleIssue;
/// The adjustment that we need to apply to get the operand latency from the
/// operand cycle returned by the itinerary data for pre-ISel operands.
int PreISelOperandLatencyAdjustment = 2;
/// What alignment is preferred for loop bodies, in log2(bytes).
unsigned PrefLoopAlignment = 0;
/// OptMinSize - True if we're optimising for minimum code size, equal to
/// the function attribute.
bool OptMinSize = false;
/// IsLittle - The target is Little Endian
bool IsLittle;
/// TargetTriple - What processor and OS we're targeting.
Triple TargetTriple;
/// SchedModel - Processor specific instruction costs.
MCSchedModel SchedModel;
/// Selected instruction itineraries (one entry per itinerary class.)
InstrItineraryData InstrItins;
/// Options passed via command line that could influence the target
const TargetOptions &Options;
const ARMBaseTargetMachine &TM;
public:
/// This constructor initializes the data members to match that
/// of the specified triple.
///
ARMSubtarget(const Triple &TT, const std::string &CPU, const std::string &FS,
const ARMBaseTargetMachine &TM, bool IsLittle,
bool MinSize = false);
/// getMaxInlineSizeThreshold - Returns the maximum memset / memcpy size
/// that still makes it profitable to inline the call.
unsigned getMaxInlineSizeThreshold() const {
return 64;
}
/// ParseSubtargetFeatures - Parses features string setting specified
/// subtarget options. Definition of function is auto generated by tblgen.
void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
/// initializeSubtargetDependencies - Initializes using a CPU and feature string
/// so that we can use initializer lists for subtarget initialization.
ARMSubtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS);
const ARMSelectionDAGInfo *getSelectionDAGInfo() const override {
return &TSInfo;
}
const ARMBaseInstrInfo *getInstrInfo() const override {
return InstrInfo.get();
}
const ARMTargetLowering *getTargetLowering() const override {
return &TLInfo;
}
const ARMFrameLowering *getFrameLowering() const override {
return FrameLowering.get();
}
const ARMBaseRegisterInfo *getRegisterInfo() const override {
return &InstrInfo->getRegisterInfo();
}
const CallLowering *getCallLowering() const override;
const InstructionSelector *getInstructionSelector() const override;
const LegalizerInfo *getLegalizerInfo() const override;
const RegisterBankInfo *getRegBankInfo() const override;
private:
ARMSelectionDAGInfo TSInfo;
// Either Thumb1FrameLowering or ARMFrameLowering.
std::unique_ptr<ARMFrameLowering> FrameLowering;
// Either Thumb1InstrInfo or Thumb2InstrInfo.
std::unique_ptr<ARMBaseInstrInfo> InstrInfo;
ARMTargetLowering TLInfo;
/// GlobalISel related APIs.
std::unique_ptr<CallLowering> CallLoweringInfo;
std::unique_ptr<InstructionSelector> InstSelector;
std::unique_ptr<LegalizerInfo> Legalizer;
std::unique_ptr<RegisterBankInfo> RegBankInfo;
void initializeEnvironment();
void initSubtargetFeatures(StringRef CPU, StringRef FS);
ARMFrameLowering *initializeFrameLowering(StringRef CPU, StringRef FS);
public:
void computeIssueWidth();
bool hasV4TOps() const { return HasV4TOps; }
bool hasV5TOps() const { return HasV5TOps; }
bool hasV5TEOps() const { return HasV5TEOps; }
bool hasV6Ops() const { return HasV6Ops; }
bool hasV6MOps() const { return HasV6MOps; }
bool hasV6KOps() const { return HasV6KOps; }
bool hasV6T2Ops() const { return HasV6T2Ops; }
bool hasV7Ops() const { return HasV7Ops; }
bool hasV8Ops() const { return HasV8Ops; }
bool hasV8_1aOps() const { return HasV8_1aOps; }
bool hasV8_2aOps() const { return HasV8_2aOps; }
bool hasV8_3aOps() const { return HasV8_3aOps; }
bool hasV8_4aOps() const { return HasV8_4aOps; }
bool hasV8_5aOps() const { return HasV8_5aOps; }
bool hasV8MBaselineOps() const { return HasV8MBaselineOps; }
bool hasV8MMainlineOps() const { return HasV8MMainlineOps; }
bool hasV8_1MMainlineOps() const { return HasV8_1MMainlineOps; }
bool hasMVEIntegerOps() const { return HasMVEIntegerOps; }
bool hasMVEFloatOps() const { return HasMVEFloatOps; }
bool hasFPRegs() const { return HasFPRegs; }
bool hasFPRegs16() const { return HasFPRegs16; }
bool hasFPRegs64() const { return HasFPRegs64; }
/// @{
/// These functions are obsolete, please consider adding subtarget features
/// or properties instead of calling them.
bool isCortexA5() const { return ARMProcFamily == CortexA5; }
bool isCortexA7() const { return ARMProcFamily == CortexA7; }
bool isCortexA8() const { return ARMProcFamily == CortexA8; }
bool isCortexA9() const { return ARMProcFamily == CortexA9; }
bool isCortexA15() const { return ARMProcFamily == CortexA15; }
bool isSwift() const { return ARMProcFamily == Swift; }
bool isCortexM3() const { return ARMProcFamily == CortexM3; }
bool isLikeA9() const { return isCortexA9() || isCortexA15() || isKrait(); }
bool isCortexR5() const { return ARMProcFamily == CortexR5; }
bool isKrait() const { return ARMProcFamily == Krait; }
/// @}
bool hasARMOps() const { return !NoARM; }
- bool hasVFP2Base() const { return HasVFPv2SP; }
+ bool hasVFP2Base() const { return HasVFPv2D16SP; }
bool hasVFP3Base() const { return HasVFPv3D16SP; }
bool hasVFP4Base() const { return HasVFPv4D16SP; }
bool hasFPARMv8Base() const { return HasFPARMv8D16SP; }
bool hasNEON() const { return HasNEON; }
bool hasSHA2() const { return HasSHA2; }
bool hasAES() const { return HasAES; }
bool hasCrypto() const { return HasCrypto; }
bool hasDotProd() const { return HasDotProd; }
bool hasCRC() const { return HasCRC; }
bool hasRAS() const { return HasRAS; }
bool hasLOB() const { return HasLOB; }
bool hasVirtualization() const { return HasVirtualization; }
bool useNEONForSinglePrecisionFP() const {
return hasNEON() && UseNEONForSinglePrecisionFP;
}
bool hasDivideInThumbMode() const { return HasHardwareDivideInThumb; }
bool hasDivideInARMMode() const { return HasHardwareDivideInARM; }
bool hasDataBarrier() const { return HasDataBarrier; }
bool hasFullDataBarrier() const { return HasFullDataBarrier; }
bool hasV7Clrex() const { return HasV7Clrex; }
bool hasAcquireRelease() const { return HasAcquireRelease; }
bool hasAnyDataBarrier() const {
return HasDataBarrier || (hasV6Ops() && !isThumb());
}
bool useMulOps() const { return UseMulOps; }
bool useFPVMLx() const { return !SlowFPVMLx; }
bool hasVMLxForwarding() const { return HasVMLxForwarding; }
bool isFPBrccSlow() const { return SlowFPBrcc; }
bool hasFP64() const { return HasFP64; }
bool hasPerfMon() const { return HasPerfMon; }
bool hasTrustZone() const { return HasTrustZone; }
bool has8MSecExt() const { return Has8MSecExt; }
bool hasZeroCycleZeroing() const { return HasZeroCycleZeroing; }
bool hasFPAO() const { return HasFPAO; }
bool isProfitableToUnpredicate() const { return IsProfitableToUnpredicate; }
bool hasSlowVGETLNi32() const { return HasSlowVGETLNi32; }
bool hasSlowVDUP32() const { return HasSlowVDUP32; }
bool preferVMOVSR() const { return PreferVMOVSR; }
bool preferISHSTBarriers() const { return PreferISHST; }
bool expandMLx() const { return ExpandMLx; }
bool hasVMLxHazards() const { return HasVMLxHazards; }
bool hasSlowOddRegister() const { return SlowOddRegister; }
bool hasSlowLoadDSubregister() const { return SlowLoadDSubregister; }
bool useWideStrideVFP() const { return UseWideStrideVFP; }
bool hasMuxedUnits() const { return HasMuxedUnits; }
bool dontWidenVMOVS() const { return DontWidenVMOVS; }
bool useSplatVFPToNeon() const { return SplatVFPToNeon; }
bool useNEONForFPMovs() const { return UseNEONForFPMovs; }
bool checkVLDnAccessAlignment() const { return CheckVLDnAlign; }
bool nonpipelinedVFP() const { return NonpipelinedVFP; }
bool prefers32BitThumb() const { return Pref32BitThumb; }
bool avoidCPSRPartialUpdate() const { return AvoidCPSRPartialUpdate; }
bool cheapPredicableCPSRDef() const { return CheapPredicableCPSRDef; }
bool avoidMOVsShifterOperand() const { return AvoidMOVsShifterOperand; }
bool hasRetAddrStack() const { return HasRetAddrStack; }
bool hasBranchPredictor() const { return HasBranchPredictor; }
bool hasMPExtension() const { return HasMPExtension; }
bool hasDSP() const { return HasDSP; }
bool useNaClTrap() const { return UseNaClTrap; }
bool useSjLjEH() const { return UseSjLjEH; }
bool hasSB() const { return HasSB; }
bool genLongCalls() const { return GenLongCalls; }
bool genExecuteOnly() const { return GenExecuteOnly; }
bool hasFP16() const { return HasFP16; }
bool hasD32() const { return HasD32; }
bool hasFullFP16() const { return HasFullFP16; }
bool hasFP16FML() const { return HasFP16FML; }
bool hasFuseAES() const { return HasFuseAES; }
bool hasFuseLiterals() const { return HasFuseLiterals; }
/// Return true if the CPU supports any kind of instruction fusion.
bool hasFusion() const { return hasFuseAES() || hasFuseLiterals(); }
const Triple &getTargetTriple() const { return TargetTriple; }
bool isTargetDarwin() const { return TargetTriple.isOSDarwin(); }
bool isTargetIOS() const { return TargetTriple.isiOS(); }
bool isTargetWatchOS() const { return TargetTriple.isWatchOS(); }
bool isTargetWatchABI() const { return TargetTriple.isWatchABI(); }
bool isTargetLinux() const { return TargetTriple.isOSLinux(); }
bool isTargetNaCl() const { return TargetTriple.isOSNaCl(); }
bool isTargetNetBSD() const { return TargetTriple.isOSNetBSD(); }
bool isTargetWindows() const { return TargetTriple.isOSWindows(); }
bool isTargetCOFF() const { return TargetTriple.isOSBinFormatCOFF(); }
bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); }
bool isTargetMachO() const { return TargetTriple.isOSBinFormatMachO(); }
// ARM EABI is the bare-metal EABI described in ARM ABI documents and
// can be accessed via -target arm-none-eabi. This is NOT GNUEABI.
// FIXME: Add a flag for bare-metal for that target and set Triple::EABI
// even for GNUEABI, so we can make a distinction here and still conform to
// the EABI on GNU (and Android) mode. This requires change in Clang, too.
// FIXME: The Darwin exception is temporary, while we move users to
// "*-*-*-macho" triples as quickly as possible.
bool isTargetAEABI() const {
return (TargetTriple.getEnvironment() == Triple::EABI ||
TargetTriple.getEnvironment() == Triple::EABIHF) &&
!isTargetDarwin() && !isTargetWindows();
}
bool isTargetGNUAEABI() const {
return (TargetTriple.getEnvironment() == Triple::GNUEABI ||
TargetTriple.getEnvironment() == Triple::GNUEABIHF) &&
!isTargetDarwin() && !isTargetWindows();
}
bool isTargetMuslAEABI() const {
return (TargetTriple.getEnvironment() == Triple::MuslEABI ||
TargetTriple.getEnvironment() == Triple::MuslEABIHF) &&
!isTargetDarwin() && !isTargetWindows();
}
// ARM Targets that support EHABI exception handling standard
// Darwin uses SjLj. Other targets might need more checks.
bool isTargetEHABICompatible() const {
return (TargetTriple.getEnvironment() == Triple::EABI ||
TargetTriple.getEnvironment() == Triple::GNUEABI ||
TargetTriple.getEnvironment() == Triple::MuslEABI ||
TargetTriple.getEnvironment() == Triple::EABIHF ||
TargetTriple.getEnvironment() == Triple::GNUEABIHF ||
TargetTriple.getEnvironment() == Triple::MuslEABIHF ||
isTargetAndroid()) &&
!isTargetDarwin() && !isTargetWindows();
}
bool isTargetHardFloat() const;
bool isTargetAndroid() const { return TargetTriple.isAndroid(); }
bool isXRaySupported() const override;
bool isAPCS_ABI() const;
bool isAAPCS_ABI() const;
bool isAAPCS16_ABI() const;
bool isROPI() const;
bool isRWPI() const;
bool useMachineScheduler() const { return UseMISched; }
bool disablePostRAScheduler() const { return DisablePostRAScheduler; }
bool useSoftFloat() const { return UseSoftFloat; }
bool isThumb() const { return InThumbMode; }
bool hasMinSize() const { return OptMinSize; }
bool isThumb1Only() const { return InThumbMode && !HasThumb2; }
bool isThumb2() const { return InThumbMode && HasThumb2; }
bool hasThumb2() const { return HasThumb2; }
bool isMClass() const { return ARMProcClass == MClass; }
bool isRClass() const { return ARMProcClass == RClass; }
bool isAClass() const { return ARMProcClass == AClass; }
bool isReadTPHard() const { return ReadTPHard; }
bool isR9Reserved() const {
return isTargetMachO() ? (ReserveR9 || !HasV6Ops) : ReserveR9;
}
bool useR7AsFramePointer() const {
return isTargetDarwin() || (!isTargetWindows() && isThumb());
}
/// Returns true if the frame setup is split into two separate pushes (first
/// r0-r7,lr then r8-r11), principally so that the frame pointer is adjacent
/// to lr. This is always required on Thumb1-only targets, as the push and
/// pop instructions can't access the high registers.
bool splitFramePushPop(const MachineFunction &MF) const {
return (useR7AsFramePointer() &&
MF.getTarget().Options.DisableFramePointerElim(MF)) ||
isThumb1Only();
}
bool useStride4VFPs() const;
bool useMovt() const;
bool supportsTailCall() const { return SupportsTailCall; }
bool allowsUnalignedMem() const { return !StrictAlign; }
bool restrictIT() const { return RestrictIT; }
const std::string & getCPUString() const { return CPUString; }
bool isLittle() const { return IsLittle; }
unsigned getMispredictionPenalty() const;
/// Returns true if machine scheduler should be enabled.
bool enableMachineScheduler() const override;
/// True for some subtargets at > -O0.
bool enablePostRAScheduler() const override;
/// Enable use of alias analysis during code generation (during MI
/// scheduling, DAGCombine, etc.).
bool useAA() const override { return UseAA; }
// enableAtomicExpand - True if we need to expand our atomics.
bool enableAtomicExpand() const override;
/// getInstrItins - Return the instruction itineraries based on subtarget
/// selection.
const InstrItineraryData *getInstrItineraryData() const override {
return &InstrItins;
}
/// getStackAlignment - Returns the minimum alignment known to hold of the
/// stack frame on entry to the function and which must be maintained by every
/// function for this subtarget.
unsigned getStackAlignment() const { return stackAlignment; }
unsigned getMaxInterleaveFactor() const { return MaxInterleaveFactor; }
unsigned getPartialUpdateClearance() const { return PartialUpdateClearance; }
ARMLdStMultipleTiming getLdStMultipleTiming() const {
return LdStMultipleTiming;
}
int getPreISelOperandLatencyAdjustment() const {
return PreISelOperandLatencyAdjustment;
}
/// True if the GV will be accessed via an indirect symbol.
bool isGVIndirectSymbol(const GlobalValue *GV) const;
/// Returns the constant pool modifier needed to access the GV.
bool isGVInGOT(const GlobalValue *GV) const;
/// True if fast-isel is used.
bool useFastISel() const;
/// Returns the correct return opcode for the current feature set.
/// Use BX if available to allow mixing thumb/arm code, but fall back
/// to plain mov pc,lr on ARMv4.
unsigned getReturnOpcode() const {
if (isThumb())
return ARM::tBX_RET;
if (hasV4TOps())
return ARM::BX_RET;
return ARM::MOVPCLR;
}
/// Allow movt+movw for PIC global address calculation.
/// ELF does not have GOT relocations for movt+movw.
/// ROPI does not use GOT.
bool allowPositionIndependentMovt() const {
return isROPI() || !isTargetELF();
}
unsigned getPrefLoopAlignment() const {
return PrefLoopAlignment;
}
bool ignoreCSRForAllocationOrder(const MachineFunction &MF,
unsigned PhysReg) const override;
unsigned getGPRAllocationOrder(const MachineFunction &MF) const;
};
} // end namespace llvm
#endif // LLVM_LIB_TARGET_ARM_ARMSUBTARGET_H
Index: stable/12/contrib/llvm-project/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
===================================================================
--- stable/12/contrib/llvm-project/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp (revision 356465)
@@ -1,11891 +1,11891 @@
//===- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "ARMFeatures.h"
#include "ARMBaseInstrInfo.h"
#include "Utils/ARMBaseInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "MCTargetDesc/ARMInstPrinter.h"
#include "MCTargetDesc/ARMMCExpr.h"
#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "TargetInfo/ARMTargetInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCAsmParserExtension.h"
#include "llvm/MC/MCParser/MCAsmParserUtils.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/ARMBuildAttributes.h"
#include "llvm/Support/ARMEHABI.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#define DEBUG_TYPE "asm-parser"
using namespace llvm;
namespace llvm {
extern const MCInstrDesc ARMInsts[];
} // end namespace llvm
namespace {
enum class ImplicitItModeTy { Always, Never, ARMOnly, ThumbOnly };
static cl::opt<ImplicitItModeTy> ImplicitItMode(
"arm-implicit-it", cl::init(ImplicitItModeTy::ARMOnly),
cl::desc("Allow conditional instructions outdside of an IT block"),
cl::values(clEnumValN(ImplicitItModeTy::Always, "always",
"Accept in both ISAs, emit implicit ITs in Thumb"),
clEnumValN(ImplicitItModeTy::Never, "never",
"Warn in ARM, reject in Thumb"),
clEnumValN(ImplicitItModeTy::ARMOnly, "arm",
"Accept in ARM, reject in Thumb"),
clEnumValN(ImplicitItModeTy::ThumbOnly, "thumb",
"Warn in ARM, emit implicit ITs in Thumb")));
static cl::opt<bool> AddBuildAttributes("arm-add-build-attributes",
cl::init(false));
enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
static inline unsigned extractITMaskBit(unsigned Mask, unsigned Position) {
// Position==0 means we're not in an IT block at all. Position==1
// means we want the first state bit, which is always 0 (Then).
// Position==2 means we want the second state bit, stored at bit 3
// of Mask, and so on downwards. So (5 - Position) will shift the
// right bit down to bit 0, including the always-0 bit at bit 4 for
// the mandatory initial Then.
return (Mask >> (5 - Position) & 1);
}
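// Worked example (illustrative): "ITET" is stored as Mask = 0b1010 (slot 2's
// 'E' at bit 3, slot 3's 'T' at bit 2, trailing 1 at bit 1), so Position==2
// reads bit 3 (1 means else) and Position==3 reads bit 2 (0 means then).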
class UnwindContext {
using Locs = SmallVector<SMLoc, 4>;
MCAsmParser &Parser;
Locs FnStartLocs;
Locs CantUnwindLocs;
Locs PersonalityLocs;
Locs PersonalityIndexLocs;
Locs HandlerDataLocs;
int FPReg;
public:
UnwindContext(MCAsmParser &P) : Parser(P), FPReg(ARM::SP) {}
bool hasFnStart() const { return !FnStartLocs.empty(); }
bool cantUnwind() const { return !CantUnwindLocs.empty(); }
bool hasHandlerData() const { return !HandlerDataLocs.empty(); }
bool hasPersonality() const {
return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
}
void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); }
void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); }
void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); }
void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); }
void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); }
void saveFPReg(int Reg) { FPReg = Reg; }
int getFPReg() const { return FPReg; }
void emitFnStartLocNotes() const {
for (Locs::const_iterator FI = FnStartLocs.begin(), FE = FnStartLocs.end();
FI != FE; ++FI)
Parser.Note(*FI, ".fnstart was specified here");
}
void emitCantUnwindLocNotes() const {
for (Locs::const_iterator UI = CantUnwindLocs.begin(),
UE = CantUnwindLocs.end(); UI != UE; ++UI)
Parser.Note(*UI, ".cantunwind was specified here");
}
void emitHandlerDataLocNotes() const {
for (Locs::const_iterator HI = HandlerDataLocs.begin(),
HE = HandlerDataLocs.end(); HI != HE; ++HI)
Parser.Note(*HI, ".handlerdata was specified here");
}
void emitPersonalityLocNotes() const {
for (Locs::const_iterator PI = PersonalityLocs.begin(),
PE = PersonalityLocs.end(),
PII = PersonalityIndexLocs.begin(),
PIE = PersonalityIndexLocs.end();
PI != PE || PII != PIE;) {
if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
Parser.Note(*PI++, ".personality was specified here");
else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
Parser.Note(*PII++, ".personalityindex was specified here");
else
llvm_unreachable(".personality and .personalityindex cannot be "
"at the same location");
}
}
void reset() {
FnStartLocs = Locs();
CantUnwindLocs = Locs();
PersonalityLocs = Locs();
HandlerDataLocs = Locs();
PersonalityIndexLocs = Locs();
FPReg = ARM::SP;
}
};
class ARMAsmParser : public MCTargetAsmParser {
const MCRegisterInfo *MRI;
UnwindContext UC;
ARMTargetStreamer &getTargetStreamer() {
assert(getParser().getStreamer().getTargetStreamer() &&
"do not have a target streamer");
MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
return static_cast<ARMTargetStreamer &>(TS);
}
// Map of register aliases to registers, created via the .req directive.
StringMap<unsigned> RegisterReqs;
bool NextSymbolIsThumb;
bool useImplicitITThumb() const {
return ImplicitItMode == ImplicitItModeTy::Always ||
ImplicitItMode == ImplicitItModeTy::ThumbOnly;
}
bool useImplicitITARM() const {
return ImplicitItMode == ImplicitItModeTy::Always ||
ImplicitItMode == ImplicitItModeTy::ARMOnly;
}
struct {
ARMCC::CondCodes Cond; // Condition for IT block.
unsigned Mask:4; // Condition mask for instructions.
// Starting at first 1 (from lsb).
// '0' condition as indicated in IT ('t').
// '1' inverse of condition ('e').
// Count of instructions in IT block is
// 4 - trailingzeroes(mask)
// Note that this does not have the same encoding
// as in the IT instruction, which also depends
// on the low bit of the condition code.
unsigned CurPosition; // Current position in parsing of IT
// block. In range [0,4], with 0 being the IT
// instruction itself. Initialized according to
// count of instructions in block. ~0U if no
// active IT block.
bool IsExplicit; // true - The IT instruction was present in the
// input, we should not modify it.
// false - The IT instruction was added
// implicitly, we can extend it if that
// would be legal.
} ITState;
SmallVector<MCInst, 4> PendingConditionalInsts;
void flushPendingInstructions(MCStreamer &Out) override {
if (!inImplicitITBlock()) {
assert(PendingConditionalInsts.size() == 0);
return;
}
// Emit the IT instruction
MCInst ITInst;
ITInst.setOpcode(ARM::t2IT);
ITInst.addOperand(MCOperand::createImm(ITState.Cond));
ITInst.addOperand(MCOperand::createImm(ITState.Mask));
Out.EmitInstruction(ITInst, getSTI());
// Emit the conditional instructions
assert(PendingConditionalInsts.size() <= 4);
for (const MCInst &Inst : PendingConditionalInsts) {
Out.EmitInstruction(Inst, getSTI());
}
PendingConditionalInsts.clear();
// Clear the IT state
ITState.Mask = 0;
ITState.CurPosition = ~0U;
}
bool inITBlock() { return ITState.CurPosition != ~0U; }
bool inExplicitITBlock() { return inITBlock() && ITState.IsExplicit; }
bool inImplicitITBlock() { return inITBlock() && !ITState.IsExplicit; }
bool lastInITBlock() {
return ITState.CurPosition == 4 - countTrailingZeros(ITState.Mask);
}
void forwardITPosition() {
if (!inITBlock()) return;
// Move to the next instruction in the IT block, if there is one. If not,
// mark the block as done, except for implicit IT blocks, which we leave
// open until we find an instruction that can't be added to it.
unsigned TZ = countTrailingZeros(ITState.Mask);
if (++ITState.CurPosition == 5 - TZ && ITState.IsExplicit)
ITState.CurPosition = ~0U; // Done with the IT block after this.
}
// Rewind the state of the current IT block, removing the last slot from it.
void rewindImplicitITPosition() {
assert(inImplicitITBlock());
assert(ITState.CurPosition > 1);
ITState.CurPosition--;
unsigned TZ = countTrailingZeros(ITState.Mask);
unsigned NewMask = 0;
NewMask |= ITState.Mask & (0xC << TZ);
NewMask |= 0x2 << TZ;
ITState.Mask = NewMask;
}
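// Worked example (illustrative): rewinding Mask = 0b0100 (an "ITT" block,
// TZ = 2) keeps any condition bits above the old trailing 1 (Mask & (0xC <<
// TZ)) and re-plants the trailing 1 one bit higher (0x2 << TZ), giving
// Mask = 0b1000, a one-slot "IT".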
// Rewind the state of the current IT block, removing the last slot from it.
// If we were at the first slot, this closes the IT block.
void discardImplicitITBlock() {
assert(inImplicitITBlock());
assert(ITState.CurPosition == 1);
ITState.CurPosition = ~0U;
}
// Return the low-subreg of a given Q register.
unsigned getDRegFromQReg(unsigned QReg) const {
return MRI->getSubReg(QReg, ARM::dsub_0);
}
// Get the condition code corresponding to the current IT block slot.
ARMCC::CondCodes currentITCond() {
unsigned MaskBit = extractITMaskBit(ITState.Mask, ITState.CurPosition);
return MaskBit ? ARMCC::getOppositeCondition(ITState.Cond) : ITState.Cond;
}
// Invert the condition of the current IT block slot without changing any
// other slots in the same block.
void invertCurrentITCondition() {
if (ITState.CurPosition == 1) {
ITState.Cond = ARMCC::getOppositeCondition(ITState.Cond);
} else {
ITState.Mask ^= 1 << (5 - ITState.CurPosition);
}
}
// Returns true if the current IT block is full (all 4 slots used).
bool isITBlockFull() {
return inITBlock() && (ITState.Mask & 1);
}
// Extend the current implicit IT block to have one more slot with the given
// condition code.
void extendImplicitITBlock(ARMCC::CondCodes Cond) {
assert(inImplicitITBlock());
assert(!isITBlockFull());
assert(Cond == ITState.Cond ||
Cond == ARMCC::getOppositeCondition(ITState.Cond));
unsigned TZ = countTrailingZeros(ITState.Mask);
unsigned NewMask = 0;
// Keep any existing condition bits.
NewMask |= ITState.Mask & (0xE << TZ);
// Insert the new condition bit.
NewMask |= (Cond != ITState.Cond) << TZ;
// Move the trailing 1 down one bit.
NewMask |= 1 << (TZ - 1);
ITState.Mask = NewMask;
}
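// Worked example (illustrative): extending a fresh implicit block
// (Mask = 0b1000, TZ = 3) with the opposite condition sets the new 'e' bit
// (1 << TZ) and moves the trailing 1 down (1 << (TZ - 1)), giving
// Mask = 0b1100, i.e. an "ITE" block.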
// Create a new implicit IT block with a dummy condition code.
void startImplicitITBlock() {
assert(!inITBlock());
ITState.Cond = ARMCC::AL;
ITState.Mask = 8;
ITState.CurPosition = 1;
ITState.IsExplicit = false;
}
// Create a new explicit IT block with the given condition and mask.
// The mask should be in the format used in ARMOperand and
// MCOperand, with a 1 implying 'e', regardless of the low bit of
// the condition.
void startExplicitITBlock(ARMCC::CondCodes Cond, unsigned Mask) {
assert(!inITBlock());
ITState.Cond = Cond;
ITState.Mask = Mask;
ITState.CurPosition = 0;
ITState.IsExplicit = true;
}
struct {
unsigned Mask : 4;
unsigned CurPosition;
} VPTState;
bool inVPTBlock() { return VPTState.CurPosition != ~0U; }
void forwardVPTPosition() {
if (!inVPTBlock()) return;
unsigned TZ = countTrailingZeros(VPTState.Mask);
if (++VPTState.CurPosition == 5 - TZ)
VPTState.CurPosition = ~0U;
}
void Note(SMLoc L, const Twine &Msg, SMRange Range = None) {
return getParser().Note(L, Msg, Range);
}
bool Warning(SMLoc L, const Twine &Msg, SMRange Range = None) {
return getParser().Warning(L, Msg, Range);
}
bool Error(SMLoc L, const Twine &Msg, SMRange Range = None) {
return getParser().Error(L, Msg, Range);
}
bool validatetLDMRegList(const MCInst &Inst, const OperandVector &Operands,
unsigned ListNo, bool IsARPop = false);
bool validatetSTMRegList(const MCInst &Inst, const OperandVector &Operands,
unsigned ListNo);
int tryParseRegister();
bool tryParseRegisterWithWriteBack(OperandVector &);
int tryParseShiftRegister(OperandVector &);
bool parseRegisterList(OperandVector &, bool EnforceOrder = true);
bool parseMemory(OperandVector &);
bool parseOperand(OperandVector &, StringRef Mnemonic);
bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
unsigned &ShiftAmount);
bool parseLiteralValues(unsigned Size, SMLoc L);
bool parseDirectiveThumb(SMLoc L);
bool parseDirectiveARM(SMLoc L);
bool parseDirectiveThumbFunc(SMLoc L);
bool parseDirectiveCode(SMLoc L);
bool parseDirectiveSyntax(SMLoc L);
bool parseDirectiveReq(StringRef Name, SMLoc L);
bool parseDirectiveUnreq(SMLoc L);
bool parseDirectiveArch(SMLoc L);
bool parseDirectiveEabiAttr(SMLoc L);
bool parseDirectiveCPU(SMLoc L);
bool parseDirectiveFPU(SMLoc L);
bool parseDirectiveFnStart(SMLoc L);
bool parseDirectiveFnEnd(SMLoc L);
bool parseDirectiveCantUnwind(SMLoc L);
bool parseDirectivePersonality(SMLoc L);
bool parseDirectiveHandlerData(SMLoc L);
bool parseDirectiveSetFP(SMLoc L);
bool parseDirectivePad(SMLoc L);
bool parseDirectiveRegSave(SMLoc L, bool IsVector);
bool parseDirectiveInst(SMLoc L, char Suffix = '\0');
bool parseDirectiveLtorg(SMLoc L);
bool parseDirectiveEven(SMLoc L);
bool parseDirectivePersonalityIndex(SMLoc L);
bool parseDirectiveUnwindRaw(SMLoc L);
bool parseDirectiveTLSDescSeq(SMLoc L);
bool parseDirectiveMovSP(SMLoc L);
bool parseDirectiveObjectArch(SMLoc L);
bool parseDirectiveArchExtension(SMLoc L);
bool parseDirectiveAlign(SMLoc L);
bool parseDirectiveThumbSet(SMLoc L);
bool isMnemonicVPTPredicable(StringRef Mnemonic, StringRef ExtraToken);
StringRef splitMnemonic(StringRef Mnemonic, StringRef ExtraToken,
unsigned &PredicationCode,
unsigned &VPTPredicationCode, bool &CarrySetting,
unsigned &ProcessorIMod, StringRef &ITMask);
void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef ExtraToken,
StringRef FullInst, bool &CanAcceptCarrySet,
bool &CanAcceptPredicationCode,
bool &CanAcceptVPTPredicationCode);
void tryConvertingToTwoOperandForm(StringRef Mnemonic, bool CarrySetting,
OperandVector &Operands);
bool isThumb() const {
// FIXME: Can tablegen auto-generate this?
return getSTI().getFeatureBits()[ARM::ModeThumb];
}
bool isThumbOne() const {
return isThumb() && !getSTI().getFeatureBits()[ARM::FeatureThumb2];
}
bool isThumbTwo() const {
return isThumb() && getSTI().getFeatureBits()[ARM::FeatureThumb2];
}
bool hasThumb() const {
return getSTI().getFeatureBits()[ARM::HasV4TOps];
}
bool hasThumb2() const {
return getSTI().getFeatureBits()[ARM::FeatureThumb2];
}
bool hasV6Ops() const {
return getSTI().getFeatureBits()[ARM::HasV6Ops];
}
bool hasV6T2Ops() const {
return getSTI().getFeatureBits()[ARM::HasV6T2Ops];
}
bool hasV6MOps() const {
return getSTI().getFeatureBits()[ARM::HasV6MOps];
}
bool hasV7Ops() const {
return getSTI().getFeatureBits()[ARM::HasV7Ops];
}
bool hasV8Ops() const {
return getSTI().getFeatureBits()[ARM::HasV8Ops];
}
bool hasV8MBaseline() const {
return getSTI().getFeatureBits()[ARM::HasV8MBaselineOps];
}
bool hasV8MMainline() const {
return getSTI().getFeatureBits()[ARM::HasV8MMainlineOps];
}
bool hasV8_1MMainline() const {
return getSTI().getFeatureBits()[ARM::HasV8_1MMainlineOps];
}
bool hasMVE() const {
return getSTI().getFeatureBits()[ARM::HasMVEIntegerOps];
}
bool hasMVEFloat() const {
return getSTI().getFeatureBits()[ARM::HasMVEFloatOps];
}
bool has8MSecExt() const {
return getSTI().getFeatureBits()[ARM::Feature8MSecExt];
}
bool hasARM() const {
return !getSTI().getFeatureBits()[ARM::FeatureNoARM];
}
bool hasDSP() const {
return getSTI().getFeatureBits()[ARM::FeatureDSP];
}
bool hasD32() const {
return getSTI().getFeatureBits()[ARM::FeatureD32];
}
bool hasV8_1aOps() const {
return getSTI().getFeatureBits()[ARM::HasV8_1aOps];
}
bool hasRAS() const {
return getSTI().getFeatureBits()[ARM::FeatureRAS];
}
void SwitchMode() {
MCSubtargetInfo &STI = copySTI();
auto FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
setAvailableFeatures(FB);
}
void FixModeAfterArchChange(bool WasThumb, SMLoc Loc);
bool isMClass() const {
return getSTI().getFeatureBits()[ARM::FeatureMClass];
}
/// @name Auto-generated Match Functions
/// {
#define GET_ASSEMBLER_HEADER
#include "ARMGenAsmMatcher.inc"
/// }
OperandMatchResultTy parseITCondCode(OperandVector &);
OperandMatchResultTy parseCoprocNumOperand(OperandVector &);
OperandMatchResultTy parseCoprocRegOperand(OperandVector &);
OperandMatchResultTy parseCoprocOptionOperand(OperandVector &);
OperandMatchResultTy parseMemBarrierOptOperand(OperandVector &);
OperandMatchResultTy parseTraceSyncBarrierOptOperand(OperandVector &);
OperandMatchResultTy parseInstSyncBarrierOptOperand(OperandVector &);
OperandMatchResultTy parseProcIFlagsOperand(OperandVector &);
OperandMatchResultTy parseMSRMaskOperand(OperandVector &);
OperandMatchResultTy parseBankedRegOperand(OperandVector &);
OperandMatchResultTy parsePKHImm(OperandVector &O, StringRef Op, int Low,
int High);
OperandMatchResultTy parsePKHLSLImm(OperandVector &O) {
return parsePKHImm(O, "lsl", 0, 31);
}
OperandMatchResultTy parsePKHASRImm(OperandVector &O) {
return parsePKHImm(O, "asr", 1, 32);
}
OperandMatchResultTy parseSetEndImm(OperandVector &);
OperandMatchResultTy parseShifterImm(OperandVector &);
OperandMatchResultTy parseRotImm(OperandVector &);
OperandMatchResultTy parseModImm(OperandVector &);
OperandMatchResultTy parseBitfield(OperandVector &);
OperandMatchResultTy parsePostIdxReg(OperandVector &);
OperandMatchResultTy parseAM3Offset(OperandVector &);
OperandMatchResultTy parseFPImm(OperandVector &);
OperandMatchResultTy parseVectorList(OperandVector &);
OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
SMLoc &EndLoc);
// Asm Match Converter Methods
void cvtThumbMultiply(MCInst &Inst, const OperandVector &);
void cvtThumbBranches(MCInst &Inst, const OperandVector &);
void cvtMVEVMOVQtoDReg(MCInst &Inst, const OperandVector &);
bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
bool processInstruction(MCInst &Inst, const OperandVector &Ops, MCStreamer &Out);
bool shouldOmitCCOutOperand(StringRef Mnemonic, OperandVector &Operands);
bool shouldOmitPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
bool shouldOmitVectorPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
bool isITBlockTerminator(MCInst &Inst) const;
void fixupGNULDRDAlias(StringRef Mnemonic, OperandVector &Operands);
bool validateLDRDSTRD(MCInst &Inst, const OperandVector &Operands,
bool Load, bool ARMMode, bool Writeback);
public:
enum ARMMatchResultTy {
Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
Match_RequiresNotITBlock,
Match_RequiresV6,
Match_RequiresThumb2,
Match_RequiresV8,
Match_RequiresFlagSetting,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "ARMGenAsmMatcher.inc"
};
ARMAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
const MCInstrInfo &MII, const MCTargetOptions &Options)
: MCTargetAsmParser(Options, STI, MII), UC(Parser) {
MCAsmParserExtension::Initialize(Parser);
// Cache the MCRegisterInfo.
MRI = getContext().getRegisterInfo();
// Initialize the set of available features.
setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
// Add build attributes based on the selected target.
if (AddBuildAttributes)
getTargetStreamer().emitTargetAttributes(STI);
// Not in an ITBlock to start with.
ITState.CurPosition = ~0U;
VPTState.CurPosition = ~0U;
NextSymbolIsThumb = false;
}
// Implementation of the MCTargetAsmParser interface:
bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
SMLoc NameLoc, OperandVector &Operands) override;
bool ParseDirective(AsmToken DirectiveID) override;
unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
unsigned Kind) override;
unsigned checkTargetMatchPredicate(MCInst &Inst) override;
bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
OperandVector &Operands, MCStreamer &Out,
uint64_t &ErrorInfo,
bool MatchingInlineAsm) override;
unsigned MatchInstruction(OperandVector &Operands, MCInst &Inst,
SmallVectorImpl<NearMissInfo> &NearMisses,
bool MatchingInlineAsm, bool &EmitInITBlock,
MCStreamer &Out);
struct NearMissMessage {
SMLoc Loc;
SmallString<128> Message;
};
const char *getCustomOperandDiag(ARMMatchResultTy MatchError);
void FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
SmallVectorImpl<NearMissMessage> &NearMissesOut,
SMLoc IDLoc, OperandVector &Operands);
void ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses, SMLoc IDLoc,
OperandVector &Operands);
void doBeforeLabelEmit(MCSymbol *Symbol) override;
void onLabelParsed(MCSymbol *Symbol) override;
};
/// ARMOperand - Instances of this class represent a parsed ARM machine
/// operand.
class ARMOperand : public MCParsedAsmOperand {
enum KindTy {
k_CondCode,
k_VPTPred,
k_CCOut,
k_ITCondMask,
k_CoprocNum,
k_CoprocReg,
k_CoprocOption,
k_Immediate,
k_MemBarrierOpt,
k_InstSyncBarrierOpt,
k_TraceSyncBarrierOpt,
k_Memory,
k_PostIndexRegister,
k_MSRMask,
k_BankedReg,
k_ProcIFlags,
k_VectorIndex,
k_Register,
k_RegisterList,
k_RegisterListWithAPSR,
k_DPRRegisterList,
k_SPRRegisterList,
k_FPSRegisterListWithVPR,
k_FPDRegisterListWithVPR,
k_VectorList,
k_VectorListAllLanes,
k_VectorListIndexed,
k_ShiftedRegister,
k_ShiftedImmediate,
k_ShifterImmediate,
k_RotateImmediate,
k_ModifiedImmediate,
k_ConstantPoolImmediate,
k_BitfieldDescriptor,
k_Token,
} Kind;
SMLoc StartLoc, EndLoc, AlignmentLoc;
SmallVector<unsigned, 8> Registers;
struct CCOp {
ARMCC::CondCodes Val;
};
struct VCCOp {
ARMVCC::VPTCodes Val;
};
struct CopOp {
unsigned Val;
};
struct CoprocOptionOp {
unsigned Val;
};
struct ITMaskOp {
unsigned Mask:4;
};
struct MBOptOp {
ARM_MB::MemBOpt Val;
};
struct ISBOptOp {
ARM_ISB::InstSyncBOpt Val;
};
struct TSBOptOp {
ARM_TSB::TraceSyncBOpt Val;
};
struct IFlagsOp {
ARM_PROC::IFlags Val;
};
struct MMaskOp {
unsigned Val;
};
struct BankedRegOp {
unsigned Val;
};
struct TokOp {
const char *Data;
unsigned Length;
};
struct RegOp {
unsigned RegNum;
};
// A vector register list is a sequential list of 1 to 4 registers.
struct VectorListOp {
unsigned RegNum;
unsigned Count;
unsigned LaneIndex;
bool isDoubleSpaced;
};
struct VectorIndexOp {
unsigned Val;
};
struct ImmOp {
const MCExpr *Val;
};
/// Combined record for all forms of ARM address expressions.
struct MemoryOp {
unsigned BaseRegNum;
// Offset is in OffsetReg or OffsetImm. If both are zero, no offset
// was specified.
const MCConstantExpr *OffsetImm; // Offset immediate value
unsigned OffsetRegNum; // Offset register num, when OffsetImm == NULL
ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
unsigned ShiftImm; // shift for OffsetReg.
unsigned Alignment; // 0 = no alignment specified
// n = alignment in bytes (2, 4, 8, 16, or 32)
unsigned isNegative : 1; // Negated OffsetReg? (~'U' bit)
};
struct PostIdxRegOp {
unsigned RegNum;
bool isAdd;
ARM_AM::ShiftOpc ShiftTy;
unsigned ShiftImm;
};
struct ShifterImmOp {
bool isASR;
unsigned Imm;
};
struct RegShiftedRegOp {
ARM_AM::ShiftOpc ShiftTy;
unsigned SrcReg;
unsigned ShiftReg;
unsigned ShiftImm;
};
struct RegShiftedImmOp {
ARM_AM::ShiftOpc ShiftTy;
unsigned SrcReg;
unsigned ShiftImm;
};
struct RotImmOp {
unsigned Imm;
};
struct ModImmOp {
unsigned Bits;
unsigned Rot;
};
struct BitfieldOp {
unsigned LSB;
unsigned Width;
};
union {
struct CCOp CC;
struct VCCOp VCC;
struct CopOp Cop;
struct CoprocOptionOp CoprocOption;
struct MBOptOp MBOpt;
struct ISBOptOp ISBOpt;
struct TSBOptOp TSBOpt;
struct ITMaskOp ITMask;
struct IFlagsOp IFlags;
struct MMaskOp MMask;
struct BankedRegOp BankedReg;
struct TokOp Tok;
struct RegOp Reg;
struct VectorListOp VectorList;
struct VectorIndexOp VectorIndex;
struct ImmOp Imm;
struct MemoryOp Memory;
struct PostIdxRegOp PostIdxReg;
struct ShifterImmOp ShifterImm;
struct RegShiftedRegOp RegShiftedReg;
struct RegShiftedImmOp RegShiftedImm;
struct RotImmOp RotImm;
struct ModImmOp ModImm;
struct BitfieldOp Bitfield;
};
public:
ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
/// getStartLoc - Get the location of the first token of this operand.
SMLoc getStartLoc() const override { return StartLoc; }
/// getEndLoc - Get the location of the last token of this operand.
SMLoc getEndLoc() const override { return EndLoc; }
/// getLocRange - Get the range between the first and last token of this
/// operand.
SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
/// getAlignmentLoc - Get the location of the Alignment token of this operand.
SMLoc getAlignmentLoc() const {
assert(Kind == k_Memory && "Invalid access!");
return AlignmentLoc;
}
ARMCC::CondCodes getCondCode() const {
assert(Kind == k_CondCode && "Invalid access!");
return CC.Val;
}
ARMVCC::VPTCodes getVPTPred() const {
assert(isVPTPred() && "Invalid access!");
return VCC.Val;
}
unsigned getCoproc() const {
assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
return Cop.Val;
}
StringRef getToken() const {
assert(Kind == k_Token && "Invalid access!");
return StringRef(Tok.Data, Tok.Length);
}
unsigned getReg() const override {
assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
return Reg.RegNum;
}
const SmallVectorImpl<unsigned> &getRegList() const {
assert((Kind == k_RegisterList || Kind == k_RegisterListWithAPSR ||
Kind == k_DPRRegisterList || Kind == k_SPRRegisterList ||
Kind == k_FPSRegisterListWithVPR ||
Kind == k_FPDRegisterListWithVPR) &&
"Invalid access!");
return Registers;
}
const MCExpr *getImm() const {
assert(isImm() && "Invalid access!");
return Imm.Val;
}
const MCExpr *getConstantPoolImm() const {
assert(isConstantPoolImm() && "Invalid access!");
return Imm.Val;
}
unsigned getVectorIndex() const {
assert(Kind == k_VectorIndex && "Invalid access!");
return VectorIndex.Val;
}
ARM_MB::MemBOpt getMemBarrierOpt() const {
assert(Kind == k_MemBarrierOpt && "Invalid access!");
return MBOpt.Val;
}
ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
assert(Kind == k_InstSyncBarrierOpt && "Invalid access!");
return ISBOpt.Val;
}
ARM_TSB::TraceSyncBOpt getTraceSyncBarrierOpt() const {
assert(Kind == k_TraceSyncBarrierOpt && "Invalid access!");
return TSBOpt.Val;
}
ARM_PROC::IFlags getProcIFlags() const {
assert(Kind == k_ProcIFlags && "Invalid access!");
return IFlags.Val;
}
unsigned getMSRMask() const {
assert(Kind == k_MSRMask && "Invalid access!");
return MMask.Val;
}
unsigned getBankedReg() const {
assert(Kind == k_BankedReg && "Invalid access!");
return BankedReg.Val;
}
bool isCoprocNum() const { return Kind == k_CoprocNum; }
bool isCoprocReg() const { return Kind == k_CoprocReg; }
bool isCoprocOption() const { return Kind == k_CoprocOption; }
bool isCondCode() const { return Kind == k_CondCode; }
bool isVPTPred() const { return Kind == k_VPTPred; }
bool isCCOut() const { return Kind == k_CCOut; }
bool isITMask() const { return Kind == k_ITCondMask; }
bool isITCondCode() const { return Kind == k_CondCode; }
bool isImm() const override {
return Kind == k_Immediate;
}
bool isARMBranchTarget() const {
if (!isImm()) return false;
if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
return CE->getValue() % 4 == 0;
return true;
}
bool isThumbBranchTarget() const {
if (!isImm()) return false;
if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
return CE->getValue() % 2 == 0;
return true;
}
// Checks whether this operand is an unsigned offset which fits in a field
// of the specified width, scaled by a specific number of bits.
template<unsigned width, unsigned scale>
bool isUnsignedOffset() const {
if (!isImm()) return false;
if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
int64_t Val = CE->getValue();
int64_t Align = 1LL << scale;
int64_t Max = Align * ((1LL << width) - 1);
return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
}
return false;
}
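// For instance, isUnsignedOffset<8, 2>() accepts the multiples of 4 in
// [0, 1020]: Align = 1 << 2 = 4 and Max = 4 * ((1 << 8) - 1) = 1020.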
// Checks whether this operand is a signed offset which fits in a field
// of the specified width, scaled by a specific number of bits.
template<unsigned width, unsigned scale>
bool isSignedOffset() const {
if (!isImm()) return false;
if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
int64_t Val = CE->getValue();
int64_t Align = 1LL << scale;
int64_t Max = Align * ((1LL << (width-1)) - 1);
int64_t Min = -Align * (1LL << (width-1));
return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
}
return false;
}
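// Likewise, isSignedOffset<8, 2>() accepts the multiples of 4 in
// [-512, 508]: Min = -4 * (1 << 7) = -512, Max = 4 * ((1 << 7) - 1) = 508.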
// Checks whether this operand is an offset suitable for the LE /
// LETP instructions in Arm v8.1M.
bool isLEOffset() const {
if (!isImm()) return false;
if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
int64_t Val = CE->getValue();
return Val < 0 && Val >= -4094 && (Val & 1) == 0;
}
return false;
}
// Checks whether this operand is a memory operand computed as an offset
// applied to PC. The offset may have 8 bits of magnitude and is represented
// with two bits of shift. Textually it may be either [pc, #imm], #imm or a
// relocatable expression.
bool isThumbMemPC() const {
int64_t Val = 0;
if (isImm()) {
if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
if (!CE) return false;
Val = CE->getValue();
}
else if (isGPRMem()) {
if(!Memory.OffsetImm || Memory.OffsetRegNum) return false;
if(Memory.BaseRegNum != ARM::PC) return false;
Val = Memory.OffsetImm->getValue();
}
else return false;
return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
}
bool isFPImm() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
return Val != -1;
}
template<int64_t N, int64_t M>
bool isImmediate() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return Value >= N && Value <= M;
}
template<int64_t N, int64_t M>
bool isImmediateS4() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return ((Value & 3) == 0) && Value >= N && Value <= M;
}
template<int64_t N, int64_t M>
bool isImmediateS2() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return ((Value & 1) == 0) && Value >= N && Value <= M;
}
bool isFBits16() const {
return isImmediate<0, 17>();
}
bool isFBits32() const {
return isImmediate<1, 33>();
}
bool isImm8s4() const {
return isImmediateS4<-1020, 1020>();
}
bool isImm7s4() const {
return isImmediateS4<-508, 508>();
}
bool isImm7Shift0() const {
return isImmediate<-127, 127>();
}
bool isImm7Shift1() const {
return isImmediateS2<-255, 255>();
}
bool isImm7Shift2() const {
return isImmediateS4<-511, 511>();
}
bool isImm7() const {
return isImmediate<-127, 127>();
}
bool isImm0_1020s4() const {
return isImmediateS4<0, 1020>();
}
bool isImm0_508s4() const {
return isImmediateS4<0, 508>();
}
bool isImm0_508s4Neg() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = -CE->getValue();
// Explicitly exclude zero; we want that to use the normal 0_508 version.
return ((Value & 3) == 0) && Value > 0 && Value <= 508;
}
bool isImm0_4095Neg() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
// isImm0_4095Neg is used with 32-bit immediates only.
// 32-bit immediates are zero extended to 64-bit when parsed,
// thus simple -CE->getValue() results in a big negative number,
// not a small positive number as intended
if ((CE->getValue() >> 32) > 0) return false;
uint32_t Value = -static_cast<uint32_t>(CE->getValue());
return Value > 0 && Value < 4096;
}
bool isImm0_7() const {
return isImmediate<0, 7>();
}
bool isImm1_16() const {
return isImmediate<1, 16>();
}
bool isImm1_32() const {
return isImmediate<1, 32>();
}
bool isImm8_255() const {
return isImmediate<8, 255>();
}
bool isImm256_65535Expr() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
// If it's not a constant expression, it'll generate a fixup and be
// handled later.
if (!CE) return true;
int64_t Value = CE->getValue();
return Value >= 256 && Value < 65536;
}
bool isImm0_65535Expr() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
// If it's not a constant expression, it'll generate a fixup and be
// handled later.
if (!CE) return true;
int64_t Value = CE->getValue();
return Value >= 0 && Value < 65536;
}
bool isImm24bit() const {
return isImmediate<0, 0xffffff + 1>();
}
bool isImmThumbSR() const {
return isImmediate<1, 33>();
}
template<int shift>
bool isExpImmValue(uint64_t Value) const {
uint64_t mask = (1 << shift) - 1;
if ((Value & mask) != 0 || (Value >> shift) > 0xff)
return false;
return true;
}
template<int shift>
bool isExpImm() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
return isExpImmValue<shift>(CE->getValue());
}
template<int shift, int size>
bool isInvertedExpImm() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
uint64_t OriginalValue = CE->getValue();
uint64_t InvertedValue = OriginalValue ^ (((uint64_t)1 << size) - 1);
return isExpImmValue<shift>(InvertedValue);
}
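// An "exp imm" is an 8-bit value scaled by 1 << shift; e.g. with
// shift == 2 the accepted values are the multiples of 4 in [0, 1020]
// (low two bits clear, quotient at most 0xff). isInvertedExpImm applies
// the same test after inverting the low `size` bits of the value.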
bool isPKHLSLImm() const {
return isImmediate<0, 32>();
}
bool isPKHASRImm() const {
return isImmediate<0, 33>();
}
bool isAdrLabel() const {
// If we have an immediate that's not a constant, treat it as a label
// reference needing a fixup.
if (isImm() && !isa<MCConstantExpr>(getImm()))
return true;
// If it is a constant, it must fit into a modified immediate encoding.
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return (ARM_AM::getSOImmVal(Value) != -1 ||
ARM_AM::getSOImmVal(-Value) != -1);
}
bool isT2SOImm() const {
// If we have an immediate that's not a constant, treat it as an expression
// needing a fixup.
if (isImm() && !isa<MCConstantExpr>(getImm())) {
// We want to avoid matching :upper16: and :lower16: as we want these
// expressions to match in isImm0_65535Expr()
const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(getImm());
return (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16));
}
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return ARM_AM::getT2SOImmVal(Value) != -1;
}
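// Note (roughly, per the Thumb-2 modified-immediate rules): a t2_so_imm
// is an 8-bit value 0xXY in one of the replicated forms 0x000000XY,
// 0x00XY00XY, 0xXY00XY00 or 0xXYXYXYXY, or an 8-bit value rotated within
// the 32-bit word; getT2SOImmVal() returns -1 when no encoding exists.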
bool isT2SOImmNot() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return ARM_AM::getT2SOImmVal(Value) == -1 &&
ARM_AM::getT2SOImmVal(~Value) != -1;
}
bool isT2SOImmNeg() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
// Only use this when not representable as a plain so_imm.
return ARM_AM::getT2SOImmVal(Value) == -1 &&
ARM_AM::getT2SOImmVal(-Value) != -1;
}
bool isSetEndImm() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return Value == 1 || Value == 0;
}
bool isReg() const override { return Kind == k_Register; }
bool isRegList() const { return Kind == k_RegisterList; }
bool isRegListWithAPSR() const {
return Kind == k_RegisterListWithAPSR || Kind == k_RegisterList;
}
bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
bool isFPSRegListWithVPR() const { return Kind == k_FPSRegisterListWithVPR; }
bool isFPDRegListWithVPR() const { return Kind == k_FPDRegisterListWithVPR; }
bool isToken() const override { return Kind == k_Token; }
bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
bool isTraceSyncBarrierOpt() const { return Kind == k_TraceSyncBarrierOpt; }
bool isMem() const override {
return isGPRMem() || isMVEMem();
}
bool isMVEMem() const {
if (Kind != k_Memory)
return false;
if (Memory.BaseRegNum &&
!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum) &&
!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Memory.BaseRegNum))
return false;
if (Memory.OffsetRegNum &&
!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
Memory.OffsetRegNum))
return false;
return true;
}
bool isGPRMem() const {
if (Kind != k_Memory)
return false;
if (Memory.BaseRegNum &&
!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum))
return false;
if (Memory.OffsetRegNum &&
!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.OffsetRegNum))
return false;
return true;
}
bool isShifterImm() const { return Kind == k_ShifterImmediate; }
bool isRegShiftedReg() const {
return Kind == k_ShiftedRegister &&
ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
RegShiftedReg.SrcReg) &&
ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
RegShiftedReg.ShiftReg);
}
bool isRegShiftedImm() const {
return Kind == k_ShiftedImmediate &&
ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
RegShiftedImm.SrcReg);
}
bool isRotImm() const { return Kind == k_RotateImmediate; }
template<unsigned Min, unsigned Max>
bool isPowerTwoInRange() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return Value > 0 && countPopulation((uint64_t)Value) == 1 &&
Value >= Min && Value <= Max;
}
bool isModImm() const { return Kind == k_ModifiedImmediate; }
bool isModImmNot() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return ARM_AM::getSOImmVal(~Value) != -1;
}
bool isModImmNeg() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return ARM_AM::getSOImmVal(Value) == -1 &&
ARM_AM::getSOImmVal(-Value) != -1;
}
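// An ARM modified immediate (so_imm) is an 8-bit value rotated right by
// an even amount; e.g. 0x0000FF00 is encodable as 0xFF ror 24. The Not/
// Neg variants above accept a value whose bitwise complement or negation
// is encodable, letting e.g. a mov be rewritten as mvn.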
bool isThumbModImmNeg1_7() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int32_t Value = -(int32_t)CE->getValue();
return 0 < Value && Value < 8;
}
bool isThumbModImmNeg8_255() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int32_t Value = -(int32_t)CE->getValue();
return 7 < Value && Value < 256;
}
bool isConstantPoolImm() const { return Kind == k_ConstantPoolImmediate; }
bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
bool isPostIdxRegShifted() const {
return Kind == k_PostIndexRegister &&
ARMMCRegisterClasses[ARM::GPRRegClassID].contains(PostIdxReg.RegNum);
}
bool isPostIdxReg() const {
return isPostIdxRegShifted() && PostIdxReg.ShiftTy == ARM_AM::no_shift;
}
bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
if (!isGPRMem())
return false;
// No offset of any kind.
return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
(alignOK || Memory.Alignment == Alignment);
}
bool isMemNoOffsetT2(bool alignOK = false, unsigned Alignment = 0) const {
if (!isGPRMem())
return false;
if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
Memory.BaseRegNum))
return false;
// No offset of any kind.
return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
(alignOK || Memory.Alignment == Alignment);
}
bool isMemNoOffsetT2NoSp(bool alignOK = false, unsigned Alignment = 0) const {
if (!isGPRMem())
return false;
if (!ARMMCRegisterClasses[ARM::rGPRRegClassID].contains(
Memory.BaseRegNum))
return false;
// No offset of any kind.
return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
(alignOK || Memory.Alignment == Alignment);
}
bool isMemNoOffsetT(bool alignOK = false, unsigned Alignment = 0) const {
if (!isGPRMem())
return false;
if (!ARMMCRegisterClasses[ARM::tGPRRegClassID].contains(
Memory.BaseRegNum))
return false;
// No offset of any kind.
return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
(alignOK || Memory.Alignment == Alignment);
}
bool isMemPCRelImm12() const {
if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Base register must be PC.
if (Memory.BaseRegNum != ARM::PC)
return false;
// Immediate offset in range [-4095, 4095].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
return (Val > -4096 && Val < 4096) ||
(Val == std::numeric_limits<int32_t>::min());
}
bool isAlignedMemory() const {
return isMemNoOffset(true);
}
bool isAlignedMemoryNone() const {
return isMemNoOffset(false, 0);
}
bool isDupAlignedMemoryNone() const {
return isMemNoOffset(false, 0);
}
bool isAlignedMemory16() const {
if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
return true;
return isMemNoOffset(false, 0);
}
bool isDupAlignedMemory16() const {
if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
return true;
return isMemNoOffset(false, 0);
}
bool isAlignedMemory32() const {
if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
return true;
return isMemNoOffset(false, 0);
}
bool isDupAlignedMemory32() const {
if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
return true;
return isMemNoOffset(false, 0);
}
bool isAlignedMemory64() const {
if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
return true;
return isMemNoOffset(false, 0);
}
bool isDupAlignedMemory64() const {
if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
return true;
return isMemNoOffset(false, 0);
}
bool isAlignedMemory64or128() const {
if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
return true;
if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
return true;
return isMemNoOffset(false, 0);
}
bool isDupAlignedMemory64or128() const {
if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
return true;
if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
return true;
return isMemNoOffset(false, 0);
}
bool isAlignedMemory64or128or256() const {
if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
return true;
if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
return true;
if (isMemNoOffset(false, 32)) // alignment in bytes for 256-bits is 32.
return true;
return isMemNoOffset(false, 0);
}
bool isAddrMode2() const {
if (!isGPRMem() || Memory.Alignment != 0) return false;
// Check for register offset.
if (Memory.OffsetRegNum) return true;
// Immediate offset in range [-4095, 4095].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
return Val > -4096 && Val < 4096;
}
bool isAM2OffsetImm() const {
if (!isImm()) return false;
// Immediate offset in range [-4095, 4095].
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Val = CE->getValue();
return (Val == std::numeric_limits<int32_t>::min()) ||
(Val > -4096 && Val < 4096);
}
bool isAddrMode3() const {
// If we have an immediate that's not a constant, treat it as a label
// reference needing a fixup. If it is a constant, it's something else
// and we reject it.
if (isImm() && !isa<MCConstantExpr>(getImm()))
return true;
if (!isGPRMem() || Memory.Alignment != 0) return false;
// No shifts are legal for AM3.
if (Memory.ShiftType != ARM_AM::no_shift) return false;
// Check for register offset.
if (Memory.OffsetRegNum) return true;
// Immediate offset in range [-255, 255].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
// The #-0 offset is encoded as std::numeric_limits<int32_t>::min(), and we
// have to check for this too.
return (Val > -256 && Val < 256) ||
Val == std::numeric_limits<int32_t>::min();
}
bool isAM3Offset() const {
if (isPostIdxReg())
return true;
if (!isImm())
return false;
// Immediate offset in range [-255, 255].
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Val = CE->getValue();
// Special case, #-0 is std::numeric_limits<int32_t>::min().
return (Val > -256 && Val < 256) ||
Val == std::numeric_limits<int32_t>::min();
}
bool isAddrMode5() const {
// If we have an immediate that's not a constant, treat it as a label
// reference needing a fixup. If it is a constant, it's something else
// and we reject it.
if (isImm() && !isa<MCConstantExpr>(getImm()))
return true;
if (!isGPRMem() || Memory.Alignment != 0) return false;
// Check for register offset.
if (Memory.OffsetRegNum) return false;
// Immediate offset in range [-1020, 1020] and a multiple of 4.
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
Val == std::numeric_limits<int32_t>::min();
}
bool isAddrMode5FP16() const {
// If we have an immediate that's not a constant, treat it as a label
// reference needing a fixup. If it is a constant, it's something else
// and we reject it.
if (isImm() && !isa<MCConstantExpr>(getImm()))
return true;
if (!isGPRMem() || Memory.Alignment != 0) return false;
// Check for register offset.
if (Memory.OffsetRegNum) return false;
// Immediate offset in range [-510, 510] and a multiple of 2.
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) ||
Val == std::numeric_limits<int32_t>::min();
}
bool isMemTBB() const {
if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
return false;
return true;
}
bool isMemTBH() const {
if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
Memory.Alignment != 0 )
return false;
return true;
}
bool isMemRegOffset() const {
if (!isGPRMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
return false;
return true;
}
bool isT2MemRegOffset() const {
if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
Memory.Alignment != 0 || Memory.BaseRegNum == ARM::PC)
return false;
// Only lsl #{0, 1, 2, 3} allowed.
if (Memory.ShiftType == ARM_AM::no_shift)
return true;
if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
return false;
return true;
}
bool isMemThumbRR() const {
// Thumb reg+reg addressing is simple. Just two registers, a base and
// an offset. No shifts, negations or any other complicating factors.
if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
return false;
return isARMLowRegister(Memory.BaseRegNum) &&
(!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
}
bool isMemThumbRIs4() const {
if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
!isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
return false;
// Immediate offset, multiple of 4 in range [0, 124].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
return Val >= 0 && Val <= 124 && (Val % 4) == 0;
}
bool isMemThumbRIs2() const {
if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
!isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
return false;
// Immediate offset, multiple of 2 in range [0, 62].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
return Val >= 0 && Val <= 62 && (Val % 2) == 0;
}
bool isMemThumbRIs1() const {
if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
!isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
return false;
// Immediate offset in range [0, 31].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
return Val >= 0 && Val <= 31;
}
bool isMemThumbSPI() const {
if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
return false;
// Immediate offset, multiple of 4 in range [0, 1020].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
}
bool isMemImm8s4Offset() const {
// If we have an immediate that's not a constant, treat it as a label
// reference needing a fixup. If it is a constant, it's something else
// and we reject it.
if (isImm() && !isa<MCConstantExpr>(getImm()))
return true;
if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Immediate offset a multiple of 4 in range [-1020, 1020].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
// Special case, #-0 is std::numeric_limits<int32_t>::min().
return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) ||
Val == std::numeric_limits<int32_t>::min();
}
bool isMemImm7s4Offset() const {
// If we have an immediate that's not a constant, treat it as a label
// reference needing a fixup. If it is a constant, it's something else
// and we reject it.
if (isImm() && !isa<MCConstantExpr>(getImm()))
return true;
if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0 ||
!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
Memory.BaseRegNum))
return false;
// Immediate offset a multiple of 4 in range [-508, 508].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
// Special case, #-0 is INT32_MIN.
return (Val >= -508 && Val <= 508 && (Val & 3) == 0) || Val == INT32_MIN;
}
bool isMemImm0_1020s4Offset() const {
if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Immediate offset a multiple of 4 in range [0, 1020].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
}
bool isMemImm8Offset() const {
if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Base reg of PC isn't allowed for these encodings.
if (Memory.BaseRegNum == ARM::PC) return false;
// Immediate offset in range [-255, 255].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
return (Val == std::numeric_limits<int32_t>::min()) ||
(Val > -256 && Val < 256);
}
template<unsigned Bits, unsigned RegClassID>
bool isMemImm7ShiftedOffset() const {
if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0 ||
!ARMMCRegisterClasses[RegClassID].contains(Memory.BaseRegNum))
return false;
// Expect an immediate offset equal to an element of the range
// [-127, 127], shifted left by Bits.
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
// INT32_MIN is a special-case value (indicating the encoding with
// zero offset and the subtract bit set)
if (Val == INT32_MIN)
return true;
unsigned Divisor = 1U << Bits;
// Check that the low bits are zero
if (Val % Divisor != 0)
return false;
// Check that the remaining offset is within range.
Val /= Divisor;
return (Val >= -127 && Val <= 127);
}
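// For instance, with Bits == 1 this accepts the even offsets in
// [-254, 254] (Divisor == 2, quotient in [-127, 127]) plus the
// INT32_MIN "#-0" form.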
template <int shift> bool isMemRegRQOffset() const {
if (!isMVEMem() || Memory.OffsetImm != 0 || Memory.Alignment != 0)
return false;
if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
Memory.BaseRegNum))
return false;
if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
Memory.OffsetRegNum))
return false;
if (shift == 0 && Memory.ShiftType != ARM_AM::no_shift)
return false;
if (shift > 0 &&
(Memory.ShiftType != ARM_AM::uxtw || Memory.ShiftImm != shift))
return false;
return true;
}
template <int shift> bool isMemRegQOffset() const {
if (!isMVEMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
Memory.BaseRegNum))
return false;
if(!Memory.OffsetImm) return true;
static_assert(shift < 56,
"Such that we don't shift by a value higher than 62");
int64_t Val = Memory.OffsetImm->getValue();
// The value must be a multiple of (1 << shift)
if ((Val & ((1U << shift) - 1)) != 0)
return false;
// And be in the right range, depending on the amount that it is shifted
// by: with shift 0 the range is 7 unsigned bits; the sign bit is set
// separately.
int64_t Range = (1U << (7+shift)) - 1;
return (Val == INT32_MIN) || (Val > -Range && Val < Range);
}
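// For instance, with shift == 2 Range = (1 << 9) - 1 = 511, so the valid
// offsets are the multiples of 4 in (-511, 511), i.e. [-508, 508].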
bool isMemPosImm8Offset() const {
if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Immediate offset in range [0, 255].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
return Val >= 0 && Val < 256;
}
bool isMemNegImm8Offset() const {
if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Base reg of PC isn't allowed for these encodings.
if (Memory.BaseRegNum == ARM::PC) return false;
// Immediate offset in range [-255, -1].
if (!Memory.OffsetImm) return false;
int64_t Val = Memory.OffsetImm->getValue();
return (Val == std::numeric_limits<int32_t>::min()) ||
(Val > -256 && Val < 0);
}
bool isMemUImm12Offset() const {
if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Immediate offset in range [0, 4095].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
return (Val >= 0 && Val < 4096);
}
bool isMemImm12Offset() const {
// If we have an immediate that's not a constant, treat it as a label
// reference needing a fixup. If it is a constant, it's something else
// and we reject it.
if (isImm() && !isa<MCConstantExpr>(getImm()))
return true;
if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Immediate offset in range [-4095, 4095].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
return (Val > -4096 && Val < 4096) ||
(Val == std::numeric_limits<int32_t>::min());
}
bool isConstPoolAsmImm() const {
// Delay processing of the constant pool immediate; this will turn into
// a constant. Match no other operand.
return (isConstantPoolImm());
}
bool isPostIdxImm8() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Val = CE->getValue();
return (Val > -256 && Val < 256) ||
(Val == std::numeric_limits<int32_t>::min());
}
bool isPostIdxImm8s4() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Val = CE->getValue();
return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
(Val == std::numeric_limits<int32_t>::min());
}
bool isMSRMask() const { return Kind == k_MSRMask; }
bool isBankedReg() const { return Kind == k_BankedReg; }
bool isProcIFlags() const { return Kind == k_ProcIFlags; }
// NEON operands.
bool isSingleSpacedVectorList() const {
return Kind == k_VectorList && !VectorList.isDoubleSpaced;
}
bool isDoubleSpacedVectorList() const {
return Kind == k_VectorList && VectorList.isDoubleSpaced;
}
bool isVecListOneD() const {
if (!isSingleSpacedVectorList()) return false;
return VectorList.Count == 1;
}
bool isVecListTwoMQ() const {
return isSingleSpacedVectorList() && VectorList.Count == 2 &&
ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
VectorList.RegNum);
}
bool isVecListDPair() const {
if (!isSingleSpacedVectorList()) return false;
return (ARMMCRegisterClasses[ARM::DPairRegClassID]
.contains(VectorList.RegNum));
}
bool isVecListThreeD() const {
if (!isSingleSpacedVectorList()) return false;
return VectorList.Count == 3;
}
bool isVecListFourD() const {
if (!isSingleSpacedVectorList()) return false;
return VectorList.Count == 4;
}
bool isVecListDPairSpaced() const {
if (Kind != k_VectorList) return false;
if (isSingleSpacedVectorList()) return false;
return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
.contains(VectorList.RegNum));
}
bool isVecListThreeQ() const {
if (!isDoubleSpacedVectorList()) return false;
return VectorList.Count == 3;
}
bool isVecListFourQ() const {
if (!isDoubleSpacedVectorList()) return false;
return VectorList.Count == 4;
}
bool isVecListFourMQ() const {
return isSingleSpacedVectorList() && VectorList.Count == 4 &&
ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
VectorList.RegNum);
}
bool isSingleSpacedVectorAllLanes() const {
return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
}
bool isDoubleSpacedVectorAllLanes() const {
return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
}
bool isVecListOneDAllLanes() const {
if (!isSingleSpacedVectorAllLanes()) return false;
return VectorList.Count == 1;
}
bool isVecListDPairAllLanes() const {
if (!isSingleSpacedVectorAllLanes()) return false;
return (ARMMCRegisterClasses[ARM::DPairRegClassID]
.contains(VectorList.RegNum));
}
bool isVecListDPairSpacedAllLanes() const {
if (!isDoubleSpacedVectorAllLanes()) return false;
return VectorList.Count == 2;
}
bool isVecListThreeDAllLanes() const {
if (!isSingleSpacedVectorAllLanes()) return false;
return VectorList.Count == 3;
}
bool isVecListThreeQAllLanes() const {
if (!isDoubleSpacedVectorAllLanes()) return false;
return VectorList.Count == 3;
}
bool isVecListFourDAllLanes() const {
if (!isSingleSpacedVectorAllLanes()) return false;
return VectorList.Count == 4;
}
bool isVecListFourQAllLanes() const {
if (!isDoubleSpacedVectorAllLanes()) return false;
return VectorList.Count == 4;
}
bool isSingleSpacedVectorIndexed() const {
return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
}
bool isDoubleSpacedVectorIndexed() const {
return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
}
bool isVecListOneDByteIndexed() const {
if (!isSingleSpacedVectorIndexed()) return false;
return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
}
bool isVecListOneDHWordIndexed() const {
if (!isSingleSpacedVectorIndexed()) return false;
return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
}
bool isVecListOneDWordIndexed() const {
if (!isSingleSpacedVectorIndexed()) return false;
return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
}
bool isVecListTwoDByteIndexed() const {
if (!isSingleSpacedVectorIndexed()) return false;
return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
}
bool isVecListTwoDHWordIndexed() const {
if (!isSingleSpacedVectorIndexed()) return false;
return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
}
bool isVecListTwoQWordIndexed() const {
if (!isDoubleSpacedVectorIndexed()) return false;
return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
}
bool isVecListTwoQHWordIndexed() const {
if (!isDoubleSpacedVectorIndexed()) return false;
return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
}
bool isVecListTwoDWordIndexed() const {
if (!isSingleSpacedVectorIndexed()) return false;
return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
}
bool isVecListThreeDByteIndexed() const {
if (!isSingleSpacedVectorIndexed()) return false;
return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
}
bool isVecListThreeDHWordIndexed() const {
if (!isSingleSpacedVectorIndexed()) return false;
return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
}
bool isVecListThreeQWordIndexed() const {
if (!isDoubleSpacedVectorIndexed()) return false;
return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
}
bool isVecListThreeQHWordIndexed() const {
if (!isDoubleSpacedVectorIndexed()) return false;
return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
}
bool isVecListThreeDWordIndexed() const {
if (!isSingleSpacedVectorIndexed()) return false;
return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
}
bool isVecListFourDByteIndexed() const {
if (!isSingleSpacedVectorIndexed()) return false;
return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
}
bool isVecListFourDHWordIndexed() const {
if (!isSingleSpacedVectorIndexed()) return false;
return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
}
bool isVecListFourQWordIndexed() const {
if (!isDoubleSpacedVectorIndexed()) return false;
return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
}
bool isVecListFourQHWordIndexed() const {
if (!isDoubleSpacedVectorIndexed()) return false;
return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
}
bool isVecListFourDWordIndexed() const {
if (!isSingleSpacedVectorIndexed()) return false;
return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
}
bool isVectorIndex() const { return Kind == k_VectorIndex; }
template <unsigned NumLanes>
bool isVectorIndexInRange() const {
if (Kind != k_VectorIndex) return false;
return VectorIndex.Val < NumLanes;
}
bool isVectorIndex8() const { return isVectorIndexInRange<8>(); }
bool isVectorIndex16() const { return isVectorIndexInRange<4>(); }
bool isVectorIndex32() const { return isVectorIndexInRange<2>(); }
bool isVectorIndex64() const { return isVectorIndexInRange<1>(); }
template<int PermittedValue, int OtherPermittedValue>
bool isMVEPairVectorIndex() const {
if (Kind != k_VectorIndex) return false;
return VectorIndex.Val == PermittedValue ||
VectorIndex.Val == OtherPermittedValue;
}
bool isNEONi8splat() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
// Must be a constant.
if (!CE) return false;
int64_t Value = CE->getValue();
// i8 value splatted across 8 bytes. The immediate is just the 8-bit
// value.
return Value >= 0 && Value < 256;
}
bool isNEONi16splat() const {
if (isNEONByteReplicate(2))
return false; // Leave that for byte replication and forbid by default.
if (!isImm())
return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
// Must be a constant.
if (!CE) return false;
unsigned Value = CE->getValue();
return ARM_AM::isNEONi16splat(Value);
}
bool isNEONi16splatNot() const {
if (!isImm())
return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
// Must be a constant.
if (!CE) return false;
unsigned Value = CE->getValue();
return ARM_AM::isNEONi16splat(~Value & 0xffff);
}
bool isNEONi32splat() const {
if (isNEONByteReplicate(4))
return false; // Leave that for byte replication and forbid by default.
if (!isImm())
return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
// Must be a constant.
if (!CE) return false;
unsigned Value = CE->getValue();
return ARM_AM::isNEONi32splat(Value);
}
bool isNEONi32splatNot() const {
if (!isImm())
return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
// Must be a constant.
if (!CE) return false;
unsigned Value = CE->getValue();
return ARM_AM::isNEONi32splat(~Value);
}
static bool isValidNEONi32vmovImm(int64_t Value) {
// i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
// for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
return ((Value & 0xffffffffffffff00) == 0) ||
((Value & 0xffffffffffff00ff) == 0) ||
((Value & 0xffffffffff00ffff) == 0) ||
((Value & 0xffffffff00ffffff) == 0) ||
((Value & 0xffffffffffff00ff) == 0xff) ||
((Value & 0xffffffffff00ffff) == 0xffff);
}
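// For instance, 0x0000AB00 (00X0) and 0x0000ABFF (00Xf) are accepted,
// while 0x00AB00CD is not (set bits in more than one byte).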
bool isNEONReplicate(unsigned Width, unsigned NumElems, bool Inv) const {
assert((Width == 8 || Width == 16 || Width == 32) &&
"Invalid element width");
assert(NumElems * Width <= 64 && "Invalid result width");
if (!isImm())
return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
// Must be a constant.
if (!CE)
return false;
int64_t Value = CE->getValue();
if (!Value)
return false; // Don't bother with zero.
if (Inv)
Value = ~Value;
uint64_t Mask = (1ull << Width) - 1;
uint64_t Elem = Value & Mask;
if (Width == 16 && (Elem & 0x00ff) != 0 && (Elem & 0xff00) != 0)
return false;
if (Width == 32 && !isValidNEONi32vmovImm(Elem))
return false;
for (unsigned i = 1; i < NumElems; ++i) {
Value >>= Width;
if ((Value & Mask) != Elem)
return false;
}
return true;
}
bool isNEONByteReplicate(unsigned NumBytes) const {
return isNEONReplicate(8, NumBytes, false);
}
static void checkNeonReplicateArgs(unsigned FromW, unsigned ToW) {
assert((FromW == 8 || FromW == 16 || FromW == 32) &&
"Invalid source width");
assert((ToW == 16 || ToW == 32 || ToW == 64) &&
"Invalid destination width");
assert(FromW < ToW && "ToW is not less than FromW");
}
template<unsigned FromW, unsigned ToW>
bool isNEONmovReplicate() const {
checkNeonReplicateArgs(FromW, ToW);
if (ToW == 64 && isNEONi64splat())
return false;
return isNEONReplicate(FromW, ToW / FromW, false);
}
template<unsigned FromW, unsigned ToW>
bool isNEONinvReplicate() const {
checkNeonReplicateArgs(FromW, ToW);
return isNEONReplicate(FromW, ToW / FromW, true);
}
bool isNEONi32vmov() const {
if (isNEONByteReplicate(4))
return false; // Let it be classified as the byte-replicate case.
if (!isImm())
return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
// Must be a constant.
if (!CE)
return false;
return isValidNEONi32vmovImm(CE->getValue());
}
bool isNEONi32vmovNeg() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
// Must be a constant.
if (!CE) return false;
return isValidNEONi32vmovImm(~CE->getValue());
}
bool isNEONi64splat() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
// Must be a constant.
if (!CE) return false;
uint64_t Value = CE->getValue();
// i64 value with each byte being either 0 or 0xff.
for (unsigned i = 0; i < 8; ++i, Value >>= 8)
if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
return true;
}
template<int64_t Angle, int64_t Remainder>
bool isComplexRotation() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
uint64_t Value = CE->getValue();
return (Value % Angle == Remainder && Value <= 270);
}
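// For instance, isComplexRotation<90, 0>() accepts 0, 90, 180 and 270
// (VCMLA-style rotations), while isComplexRotation<180, 90>() accepts
// 90 and 270 (VCADD-style).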
bool isMVELongShift() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
// Must be a constant.
if (!CE) return false;
uint64_t Value = CE->getValue();
return Value >= 1 && Value <= 32;
}
bool isITCondCodeNoAL() const {
if (!isITCondCode()) return false;
ARMCC::CondCodes CC = getCondCode();
return CC != ARMCC::AL;
}
bool isITCondCodeRestrictedI() const {
if (!isITCondCode())
return false;
ARMCC::CondCodes CC = getCondCode();
return CC == ARMCC::EQ || CC == ARMCC::NE;
}
bool isITCondCodeRestrictedS() const {
if (!isITCondCode())
return false;
ARMCC::CondCodes CC = getCondCode();
return CC == ARMCC::LT || CC == ARMCC::GT || CC == ARMCC::LE ||
CC == ARMCC::GE;
}
bool isITCondCodeRestrictedU() const {
if (!isITCondCode())
return false;
ARMCC::CondCodes CC = getCondCode();
return CC == ARMCC::HS || CC == ARMCC::HI;
}
bool isITCondCodeRestrictedFP() const {
if (!isITCondCode())
return false;
ARMCC::CondCodes CC = getCondCode();
return CC == ARMCC::EQ || CC == ARMCC::NE || CC == ARMCC::LT ||
CC == ARMCC::GT || CC == ARMCC::LE || CC == ARMCC::GE;
}
void addExpr(MCInst &Inst, const MCExpr *Expr) const {
// Add as immediates when possible. Null MCExpr = 0.
if (!Expr)
Inst.addOperand(MCOperand::createImm(0));
else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
Inst.addOperand(MCOperand::createImm(CE->getValue()));
else
Inst.addOperand(MCOperand::createExpr(Expr));
}
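// Non-constant expressions are added as MCExpr operands and resolved
// later through fixups/relocations rather than being encoded here.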
void addARMBranchTargetOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
addExpr(Inst, getImm());
}
void addThumbBranchTargetOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
addExpr(Inst, getImm());
}
void addCondCodeOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
Inst.addOperand(MCOperand::createReg(RegNum));
}
void addVPTPredNOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm(unsigned(getVPTPred())));
unsigned RegNum = getVPTPred() == ARMVCC::None ? 0: ARM::P0;
Inst.addOperand(MCOperand::createReg(RegNum));
}
void addVPTPredROperands(MCInst &Inst, unsigned N) const {
assert(N == 3 && "Invalid number of operands!");
addVPTPredNOperands(Inst, N-1);
unsigned RegNum;
if (getVPTPred() == ARMVCC::None) {
RegNum = 0;
} else {
unsigned NextOpIndex = Inst.getNumOperands();
const MCInstrDesc &MCID = ARMInsts[Inst.getOpcode()];
int TiedOp = MCID.getOperandConstraint(NextOpIndex, MCOI::TIED_TO);
assert(TiedOp >= 0 &&
"Inactive register in vpred_r is not tied to an output!");
RegNum = Inst.getOperand(TiedOp).getReg();
}
Inst.addOperand(MCOperand::createReg(RegNum));
}
void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm(getCoproc()));
}
void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm(getCoproc()));
}
void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm(CoprocOption.Val));
}
void addITMaskOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm(ITMask.Mask));
}
void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
}
void addITCondCodeInvOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm(unsigned(ARMCC::getOppositeCondition(getCondCode()))));
}
void addCCOutOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(getReg()));
}
void addRegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(getReg()));
}
void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
assert(N == 3 && "Invalid number of operands!");
assert(isRegShiftedReg() &&
"addRegShiftedRegOperands() on non-RegShiftedReg!");
Inst.addOperand(MCOperand::createReg(RegShiftedReg.SrcReg));
Inst.addOperand(MCOperand::createReg(RegShiftedReg.ShiftReg));
Inst.addOperand(MCOperand::createImm(
ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
}
void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
assert(isRegShiftedImm() &&
"addRegShiftedImmOperands() on non-RegShiftedImm!");
Inst.addOperand(MCOperand::createReg(RegShiftedImm.SrcReg));
// Shift of #32 is encoded as 0 where permitted
unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
Inst.addOperand(MCOperand::createImm(
ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
}
void addShifterImmOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm((ShifterImm.isASR << 5) |
ShifterImm.Imm));
}
void addRegListOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const SmallVectorImpl<unsigned> &RegList = getRegList();
for (SmallVectorImpl<unsigned>::const_iterator
I = RegList.begin(), E = RegList.end(); I != E; ++I)
Inst.addOperand(MCOperand::createReg(*I));
}
void addRegListWithAPSROperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const SmallVectorImpl<unsigned> &RegList = getRegList();
for (SmallVectorImpl<unsigned>::const_iterator
I = RegList.begin(), E = RegList.end(); I != E; ++I)
Inst.addOperand(MCOperand::createReg(*I));
}
void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
addRegListOperands(Inst, N);
}
void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
addRegListOperands(Inst, N);
}
void addFPSRegListWithVPROperands(MCInst &Inst, unsigned N) const {
addRegListOperands(Inst, N);
}
void addFPDRegListWithVPROperands(MCInst &Inst, unsigned N) const {
addRegListOperands(Inst, N);
}
void addRotImmOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// Encoded as val>>3. The printer handles display as 8, 16, 24.
Inst.addOperand(MCOperand::createImm(RotImm.Imm >> 3));
}
void addModImmOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// Support for fixups (MCFixup)
if (isImm())
return addImmOperands(Inst, N);
Inst.addOperand(MCOperand::createImm(ModImm.Bits | (ModImm.Rot << 7)));
}
void addModImmNotOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
uint32_t Enc = ARM_AM::getSOImmVal(~CE->getValue());
Inst.addOperand(MCOperand::createImm(Enc));
}
void addModImmNegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
uint32_t Enc = ARM_AM::getSOImmVal(-CE->getValue());
Inst.addOperand(MCOperand::createImm(Enc));
}
void addThumbModImmNeg8_255Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
uint32_t Val = -CE->getValue();
Inst.addOperand(MCOperand::createImm(Val));
}
void addThumbModImmNeg1_7Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
uint32_t Val = -CE->getValue();
Inst.addOperand(MCOperand::createImm(Val));
}
void addBitfieldOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// Munge the lsb/width into a bitfield mask.
unsigned lsb = Bitfield.LSB;
unsigned width = Bitfield.Width;
// Make a 32-bit mask w/ the referenced bits clear and all other bits set.
uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
(32 - (lsb + width)));
Inst.addOperand(MCOperand::createImm(Mask));
}
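// Worked example: with lsb = 8 and width = 8 the expression yields
// ~(((0xffffffff >> 8) << 24) >> 16) = ~0x0000ff00 = 0xffff00ff, i.e. a
// mask with bits 8..15 clear, as used by BFC/BFI.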
void addImmOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
addExpr(Inst, getImm());
}
void addFBits16Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
Inst.addOperand(MCOperand::createImm(16 - CE->getValue()));
}
void addFBits32Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
Inst.addOperand(MCOperand::createImm(32 - CE->getValue()));
}
void addFPImmOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
Inst.addOperand(MCOperand::createImm(Val));
}
void addImm8s4Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// FIXME: We really want to scale the value here, but the LDRD/STRD
// instruction don't encode operands that way yet.
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
Inst.addOperand(MCOperand::createImm(CE->getValue()));
}
void addImm7s4Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// FIXME: We really want to scale the value here, but the VSTR/VLDR_VSYSR
// instructions don't encode operands that way yet.
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
Inst.addOperand(MCOperand::createImm(CE->getValue()));
}
void addImm7Shift0Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
assert(CE != nullptr && "Invalid operand type!");
Inst.addOperand(MCOperand::createImm(CE->getValue()));
}
void addImm7Shift1Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
assert(CE != nullptr && "Invalid operand type!");
Inst.addOperand(MCOperand::createImm(CE->getValue()));
}
void addImm7Shift2Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
assert(CE != nullptr && "Invalid operand type!");
Inst.addOperand(MCOperand::createImm(CE->getValue()));
}
void addImm7Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
assert(CE != nullptr && "Invalid operand type!");
Inst.addOperand(MCOperand::createImm(CE->getValue()));
}
void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The immediate is scaled by four in the encoding and is stored
// in the MCInst as such. Lop off the low two bits here.
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
}
void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The immediate is scaled by four in the encoding and is stored
// in the MCInst as such. Lop off the low two bits here.
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
Inst.addOperand(MCOperand::createImm(-(CE->getValue() / 4)));
}
void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The immediate is scaled by four in the encoding and is stored
// in the MCInst as such. Lop off the low two bits here.
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
}
void addImm1_16Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The constant encodes as the immediate-1, and we store in the instruction
// the bits as encoded, so subtract off one here.
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
}
void addImm1_32Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The constant encodes as the immediate-1, and we store in the instruction
// the bits as encoded, so subtract off one here.
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
}
void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The constant encodes as the immediate, except for 32, which encodes as
// zero.
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
unsigned Imm = CE->getValue();
Inst.addOperand(MCOperand::createImm((Imm == 32 ? 0 : Imm)));
}
void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// An ASR value of 32 encodes as 0, so that's how we want to add it to
// the instruction as well.
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
int Val = CE->getValue();
Inst.addOperand(MCOperand::createImm(Val == 32 ? 0 : Val));
}
void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The operand is actually a t2_so_imm, but we have its bitwise
// negation in the assembly source, so twiddle it here.
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
Inst.addOperand(MCOperand::createImm(~(uint32_t)CE->getValue()));
}
void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The operand is actually a t2_so_imm, but we have its
// negation in the assembly source, so twiddle it here.
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
}
void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The operand is actually an imm0_4095, but we have its
// negation in the assembly source, so twiddle it here.
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
}
void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const {
if(const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
Inst.addOperand(MCOperand::createImm(CE->getValue() >> 2));
return;
}
const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
assert(SR && "Unknown value type!");
Inst.addOperand(MCOperand::createExpr(SR));
}
void addThumbMemPCOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
if (isImm()) {
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (CE) {
Inst.addOperand(MCOperand::createImm(CE->getValue()));
return;
}
const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
assert(SR && "Unknown value type!");
Inst.addOperand(MCOperand::createExpr(SR));
return;
}
assert(isGPRMem() && "Unknown value type!");
assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!");
Inst.addOperand(MCOperand::createImm(Memory.OffsetImm->getValue()));
}
void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm(unsigned(getMemBarrierOpt())));
}
void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm(unsigned(getInstSyncBarrierOpt())));
}
void addTraceSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm(unsigned(getTraceSyncBarrierOpt())));
}
void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
}
void addMemNoOffsetT2Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
}
void addMemNoOffsetT2NoSpOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
}
void addMemNoOffsetTOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
}
void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
int32_t Imm = Memory.OffsetImm->getValue();
Inst.addOperand(MCOperand::createImm(Imm));
}
void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
assert(isImm() && "Not an immediate!");
// If we have an immediate that's not a constant, treat it as a label
// reference needing a fixup.
if (!isa<MCConstantExpr>(getImm())) {
Inst.addOperand(MCOperand::createExpr(getImm()));
return;
}
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
int Val = CE->getValue();
Inst.addOperand(MCOperand::createImm(Val));
}
void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
Inst.addOperand(MCOperand::createImm(Memory.Alignment));
}
void addDupAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
addAlignedMemoryOperands(Inst, N);
}
void addAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
addAlignedMemoryOperands(Inst, N);
}
void addAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
addAlignedMemoryOperands(Inst, N);
}
void addDupAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
addAlignedMemoryOperands(Inst, N);
}
void addAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
addAlignedMemoryOperands(Inst, N);
}
void addDupAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
addAlignedMemoryOperands(Inst, N);
}
void addAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
addAlignedMemoryOperands(Inst, N);
}
void addDupAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
addAlignedMemoryOperands(Inst, N);
}
void addAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
addAlignedMemoryOperands(Inst, N);
}
void addDupAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
addAlignedMemoryOperands(Inst, N);
}
void addAlignedMemory64or128or256Operands(MCInst &Inst, unsigned N) const {
addAlignedMemoryOperands(Inst, N);
}
void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
assert(N == 3 && "Invalid number of operands!");
int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
if (!Memory.OffsetRegNum) {
ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
// Special case for #-0
if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
if (Val < 0) Val = -Val;
Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
} else {
// For register offset, we encode the shift type and negation flag
// here.
Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
Memory.ShiftImm, Memory.ShiftType);
}
Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
Inst.addOperand(MCOperand::createImm(Val));
}
void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
assert(CE && "non-constant AM2OffsetImm operand!");
int32_t Val = CE->getValue();
ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
// Special case for #-0
if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
if (Val < 0) Val = -Val;
Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
Inst.addOperand(MCOperand::createReg(0));
Inst.addOperand(MCOperand::createImm(Val));
}
void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
assert(N == 3 && "Invalid number of operands!");
// If we have an immediate that's not a constant, treat it as a label
// reference needing a fixup. If it is a constant, it's something else
// and we reject it.
if (isImm()) {
Inst.addOperand(MCOperand::createExpr(getImm()));
Inst.addOperand(MCOperand::createReg(0));
Inst.addOperand(MCOperand::createImm(0));
return;
}
int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
if (!Memory.OffsetRegNum) {
ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
// Special case for #-0
if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
if (Val < 0) Val = -Val;
Val = ARM_AM::getAM3Opc(AddSub, Val);
} else {
// For register offset, we encode the shift type and negation flag
// here.
Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
}
Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
Inst.addOperand(MCOperand::createImm(Val));
}
void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
if (Kind == k_PostIndexRegister) {
int32_t Val =
ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
Inst.addOperand(MCOperand::createImm(Val));
return;
}
// Constant offset.
const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
int32_t Val = CE->getValue();
ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
// Special case for #-0
if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
if (Val < 0) Val = -Val;
Val = ARM_AM::getAM3Opc(AddSub, Val);
Inst.addOperand(MCOperand::createReg(0));
Inst.addOperand(MCOperand::createImm(Val));
}
void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
// If we have an immediate that's not a constant, treat it as a label
// reference needing a fixup. If it is a constant, it's something else
// and we reject it.
if (isImm()) {
Inst.addOperand(MCOperand::createExpr(getImm()));
Inst.addOperand(MCOperand::createImm(0));
return;
}
// The lower two bits are always zero and as such are not encoded.
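// For example, an offset of #-8 reaches here as -8 and encodes as (sub, 2).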
int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
// Special case for #-0
if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
if (Val < 0) Val = -Val;
Val = ARM_AM::getAM5Opc(AddSub, Val);
Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
Inst.addOperand(MCOperand::createImm(Val));
}
void addAddrMode5FP16Operands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
// If we have an immediate that's not a constant, treat it as a label
// reference needing a fixup. If it is a constant, it's something else
// and we reject it.
if (isImm()) {
Inst.addOperand(MCOperand::createExpr(getImm()));
Inst.addOperand(MCOperand::createImm(0));
return;
}
// The lower bit is always zero and as such is not encoded.
int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 2 : 0;
ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
// Special case for #-0
if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
if (Val < 0) Val = -Val;
Val = ARM_AM::getAM5FP16Opc(AddSub, Val);
Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
Inst.addOperand(MCOperand::createImm(Val));
}
void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
// If we have an immediate that's not a constant, treat it as a label
// reference needing a fixup. If it is a constant, it's something else
// and we reject it.
if (isImm()) {
Inst.addOperand(MCOperand::createExpr(getImm()));
Inst.addOperand(MCOperand::createImm(0));
return;
}
int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
Inst.addOperand(MCOperand::createImm(Val));
}
void addMemImm7s4OffsetOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
// If we have an immediate that's not a constant, treat it as a label
// reference needing a fixup. If it is a constant, it's something else
// and we reject it.
if (isImm()) {
Inst.addOperand(MCOperand::createExpr(getImm()));
Inst.addOperand(MCOperand::createImm(0));
return;
}
int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
Inst.addOperand(MCOperand::createImm(Val));
}
void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
// The lower two bits are always zero and as such are not encoded.
int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
Inst.addOperand(MCOperand::createImm(Val));
}
void addMemImmOffsetOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
Inst.addOperand(MCOperand::createImm(Val));
}
void addMemRegRQOffsetOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
}
void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
// If this is an immediate, it's a label reference.
if (isImm()) {
addExpr(Inst, getImm());
Inst.addOperand(MCOperand::createImm(0));
return;
}
// Otherwise, it's a normal memory reg+offset.
int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
Inst.addOperand(MCOperand::createImm(Val));
}
void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
// If this is an immediate, it's a label reference.
if (isImm()) {
addExpr(Inst, getImm());
Inst.addOperand(MCOperand::createImm(0));
return;
}
// Otherwise, it's a normal memory reg+offset.
int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
Inst.addOperand(MCOperand::createImm(Val));
}
void addConstPoolAsmImmOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// This is a container for the immediate that we will create the constant
// pool from.
addExpr(Inst, getConstantPoolImm());
}
void addMemTBBOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
}
void addMemTBHOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
}
void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
assert(N == 3 && "Invalid number of operands!");
unsigned Val =
ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
Memory.ShiftImm, Memory.ShiftType);
Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
Inst.addOperand(MCOperand::createImm(Val));
}
void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
assert(N == 3 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
Inst.addOperand(MCOperand::createImm(Memory.ShiftImm));
}
void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
}
void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
Inst.addOperand(MCOperand::createImm(Val));
}
void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
Inst.addOperand(MCOperand::createImm(Val));
}
void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
Inst.addOperand(MCOperand::createImm(Val));
}
void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
Inst.addOperand(MCOperand::createImm(Val));
}
void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
assert(CE && "non-constant post-idx-imm8 operand!");
int Imm = CE->getValue();
bool isAdd = Imm >= 0;
if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
Inst.addOperand(MCOperand::createImm(Imm));
}
void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
assert(CE && "non-constant post-idx-imm8s4 operand!");
int Imm = CE->getValue();
bool isAdd = Imm >= 0;
if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
// Immediate is scaled by 4.
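// For example, #8 encodes as 0x102 and #-8 as 0x002.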
Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
Inst.addOperand(MCOperand::createImm(Imm));
}
void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
Inst.addOperand(MCOperand::createImm(PostIdxReg.isAdd));
}
void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
// The sign, shift type, and shift amount are encoded in a single operand
// using the AM2 encoding helpers.
ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
PostIdxReg.ShiftTy);
Inst.addOperand(MCOperand::createImm(Imm));
}
void addPowerTwoOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
Inst.addOperand(MCOperand::createImm(CE->getValue()));
}
void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm(unsigned(getMSRMask())));
}
void addBankedRegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm(unsigned(getBankedReg())));
}
void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm(unsigned(getProcIFlags())));
}
void addVecListOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
}
void addMVEVecListOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// When we come here, the VectorList field will identify a range
// of q-registers by its base register and length, and it will
// have already been error-checked to be the expected length of
// range and contain only q-regs in the range q0-q7. So we can
// count on the base register being in the range q0-q6 (for 2
// regs) or q0-q4 (for 4)
//
// The MVE instructions taking a register range of this kind will
// need an operand in the QQPR or QQQQPR class, representing the
// entire range as a unit. So we must translate into that class,
// by finding the index of the base register in the MQPR reg
// class, and returning the super-register at the corresponding
// index in the target class.
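// For example, a two-register list based at q2 has index 2 in MQPR,
// so we emit the register stored at index 2 of the QQPR class.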
const MCRegisterClass *RC_in = &ARMMCRegisterClasses[ARM::MQPRRegClassID];
const MCRegisterClass *RC_out = (VectorList.Count == 2) ?
&ARMMCRegisterClasses[ARM::QQPRRegClassID] :
&ARMMCRegisterClasses[ARM::QQQQPRRegClassID];
unsigned I, E = RC_out->getNumRegs();
for (I = 0; I < E; I++)
if (RC_in->getRegister(I) == VectorList.RegNum)
break;
assert(I < E && "Invalid vector list start register!");
Inst.addOperand(MCOperand::createReg(RC_out->getRegister(I)));
}
void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
Inst.addOperand(MCOperand::createImm(VectorList.LaneIndex));
}
void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm(getVectorIndex()));
}
void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm(getVectorIndex()));
}
void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm(getVectorIndex()));
}
void addVectorIndex64Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm(getVectorIndex()));
}
void addMVEVectorIndexOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm(getVectorIndex()));
}
void addMVEPairVectorIndexOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm(getVectorIndex()));
}
void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The immediate encodes the type of constant as well as the value.
// Mask in that this is an i8 splat.
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
Inst.addOperand(MCOperand::createImm(CE->getValue() | 0xe00));
}
void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The immediate encodes the type of constant as well as the value.
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
unsigned Value = CE->getValue();
Value = ARM_AM::encodeNEONi16splat(Value);
Inst.addOperand(MCOperand::createImm(Value));
}
void addNEONi16splatNotOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The immediate encodes the type of constant as well as the value.
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
unsigned Value = CE->getValue();
Value = ARM_AM::encodeNEONi16splat(~Value & 0xffff);
Inst.addOperand(MCOperand::createImm(Value));
}
void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The immediate encodes the type of constant as well as the value.
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
unsigned Value = CE->getValue();
Value = ARM_AM::encodeNEONi32splat(Value);
Inst.addOperand(MCOperand::createImm(Value));
}
void addNEONi32splatNotOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The immediate encodes the type of constant as well as the value.
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
unsigned Value = CE->getValue();
Value = ARM_AM::encodeNEONi32splat(~Value);
Inst.addOperand(MCOperand::createImm(Value));
}
void addNEONi8ReplicateOperands(MCInst &Inst, bool Inv) const {
// The immediate encodes the type of constant as well as the value.
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
Inst.getOpcode() == ARM::VMOVv16i8) &&
"All instructions that wants to replicate non-zero byte "
"always must be replaced with VMOVv8i8 or VMOVv16i8.");
unsigned Value = CE->getValue();
if (Inv)
Value = ~Value;
unsigned B = Value & 0xff;
B |= 0xe00; // cmode = 0b1110
Inst.addOperand(MCOperand::createImm(B));
}
void addNEONinvi8ReplicateOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
addNEONi8ReplicateOperands(Inst, true);
}
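// Fold a 32-bit value into the NEON VMOV immediate encoding, picking the
// cmode bits from how many significant bytes the value has. For example,
// 0x1200 encodes as 0x212, 0x1234 as 0xc12, and 0x00ab0000 as 0x4ab;
// values below 256 pass through unchanged.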
static unsigned encodeNeonVMOVImmediate(unsigned Value) {
if (Value >= 256 && Value <= 0xffff)
Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
else if (Value > 0xffff && Value <= 0xffffff)
Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
else if (Value > 0xffffff)
Value = (Value >> 24) | 0x600;
return Value;
}
void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The immediate encodes the type of constant as well as the value.
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
unsigned Value = encodeNeonVMOVImmediate(CE->getValue());
Inst.addOperand(MCOperand::createImm(Value));
}
void addNEONvmovi8ReplicateOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
addNEONi8ReplicateOperands(Inst, false);
}
void addNEONvmovi16ReplicateOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
assert((Inst.getOpcode() == ARM::VMOVv4i16 ||
Inst.getOpcode() == ARM::VMOVv8i16 ||
Inst.getOpcode() == ARM::VMVNv4i16 ||
Inst.getOpcode() == ARM::VMVNv8i16) &&
"All instructions that want to replicate non-zero half-word "
"always must be replaced with V{MOV,MVN}v{4,8}i16.");
uint64_t Value = CE->getValue();
unsigned Elem = Value & 0xffff;
if (Elem >= 256)
Elem = (Elem >> 8) | 0x200;
Inst.addOperand(MCOperand::createImm(Elem));
}
void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The immediate encodes the type of constant as well as the value.
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
unsigned Value = encodeNeonVMOVImmediate(~CE->getValue());
Inst.addOperand(MCOperand::createImm(Value));
}
void addNEONvmovi32ReplicateOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
assert((Inst.getOpcode() == ARM::VMOVv2i32 ||
Inst.getOpcode() == ARM::VMOVv4i32 ||
Inst.getOpcode() == ARM::VMVNv2i32 ||
Inst.getOpcode() == ARM::VMVNv4i32) &&
"All instructions that want to replicate non-zero word "
"always must be replaced with V{MOV,MVN}v{2,4}i32.");
uint64_t Value = CE->getValue();
unsigned Elem = encodeNeonVMOVImmediate(Value & 0xffffffff);
Inst.addOperand(MCOperand::createImm(Elem));
}
void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The immediate encodes the type of constant as well as the value.
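// Each byte of the value must be 0x00 or 0xff; bit i of the encoded
// immediate is taken from the low bit of byte i. For example,
// 0x00ff00ff00ff00ff encodes as 0x55 before the 0x1e00 cmode bits are
// OR'd in.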
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
uint64_t Value = CE->getValue();
unsigned Imm = 0;
for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
Imm |= (Value & 1) << i;
}
Inst.addOperand(MCOperand::createImm(Imm | 0x1e00));
}
void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
Inst.addOperand(MCOperand::createImm(CE->getValue() / 90));
}
void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
Inst.addOperand(MCOperand::createImm((CE->getValue() - 90) / 180));
}
void print(raw_ostream &OS) const override;
static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S) {
auto Op = make_unique<ARMOperand>(k_ITCondMask);
Op->ITMask.Mask = Mask;
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
static std::unique_ptr<ARMOperand> CreateCondCode(ARMCC::CondCodes CC,
SMLoc S) {
auto Op = make_unique<ARMOperand>(k_CondCode);
Op->CC.Val = CC;
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
static std::unique_ptr<ARMOperand> CreateVPTPred(ARMVCC::VPTCodes CC,
SMLoc S) {
auto Op = make_unique<ARMOperand>(k_VPTPred);
Op->VCC.Val = CC;
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S) {
auto Op = make_unique<ARMOperand>(k_CoprocNum);
Op->Cop.Val = CopVal;
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S) {
auto Op = make_unique<ARMOperand>(k_CoprocReg);
Op->Cop.Val = CopVal;
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
static std::unique_ptr<ARMOperand> CreateCoprocOption(unsigned Val, SMLoc S,
SMLoc E) {
auto Op = make_unique<ARMOperand>(k_CoprocOption);
Op->Cop.Val = Val;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
static std::unique_ptr<ARMOperand> CreateCCOut(unsigned RegNum, SMLoc S) {
auto Op = make_unique<ARMOperand>(k_CCOut);
Op->Reg.RegNum = RegNum;
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S) {
auto Op = make_unique<ARMOperand>(k_Token);
Op->Tok.Data = Str.data();
Op->Tok.Length = Str.size();
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
static std::unique_ptr<ARMOperand> CreateReg(unsigned RegNum, SMLoc S,
SMLoc E) {
auto Op = make_unique<ARMOperand>(k_Register);
Op->Reg.RegNum = RegNum;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
static std::unique_ptr<ARMOperand>
CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
unsigned ShiftReg, unsigned ShiftImm, SMLoc S,
SMLoc E) {
auto Op = make_unique<ARMOperand>(k_ShiftedRegister);
Op->RegShiftedReg.ShiftTy = ShTy;
Op->RegShiftedReg.SrcReg = SrcReg;
Op->RegShiftedReg.ShiftReg = ShiftReg;
Op->RegShiftedReg.ShiftImm = ShiftImm;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
static std::unique_ptr<ARMOperand>
CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
unsigned ShiftImm, SMLoc S, SMLoc E) {
auto Op = make_unique<ARMOperand>(k_ShiftedImmediate);
Op->RegShiftedImm.ShiftTy = ShTy;
Op->RegShiftedImm.SrcReg = SrcReg;
Op->RegShiftedImm.ShiftImm = ShiftImm;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
static std::unique_ptr<ARMOperand> CreateShifterImm(bool isASR, unsigned Imm,
SMLoc S, SMLoc E) {
auto Op = make_unique<ARMOperand>(k_ShifterImmediate);
Op->ShifterImm.isASR = isASR;
Op->ShifterImm.Imm = Imm;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
static std::unique_ptr<ARMOperand> CreateRotImm(unsigned Imm, SMLoc S,
SMLoc E) {
auto Op = make_unique<ARMOperand>(k_RotateImmediate);
Op->RotImm.Imm = Imm;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
static std::unique_ptr<ARMOperand> CreateModImm(unsigned Bits, unsigned Rot,
SMLoc S, SMLoc E) {
auto Op = make_unique<ARMOperand>(k_ModifiedImmediate);
Op->ModImm.Bits = Bits;
Op->ModImm.Rot = Rot;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
static std::unique_ptr<ARMOperand>
CreateConstantPoolImm(const MCExpr *Val, SMLoc S, SMLoc E) {
auto Op = make_unique<ARMOperand>(k_ConstantPoolImmediate);
Op->Imm.Val = Val;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
static std::unique_ptr<ARMOperand>
CreateBitfield(unsigned LSB, unsigned Width, SMLoc S, SMLoc E) {
auto Op = make_unique<ARMOperand>(k_BitfieldDescriptor);
Op->Bitfield.LSB = LSB;
Op->Bitfield.Width = Width;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
static std::unique_ptr<ARMOperand>
CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
SMLoc StartLoc, SMLoc EndLoc) {
assert(Regs.size() > 0 && "RegList contains no registers?");
KindTy Kind = k_RegisterList;
if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
Regs.front().second)) {
if (Regs.back().second == ARM::VPR)
Kind = k_FPDRegisterListWithVPR;
else
Kind = k_DPRRegisterList;
} else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
Regs.front().second)) {
if (Regs.back().second == ARM::VPR)
Kind = k_FPSRegisterListWithVPR;
else
Kind = k_SPRRegisterList;
}
// Sort based on the register encoding values.
array_pod_sort(Regs.begin(), Regs.end());
if (Kind == k_RegisterList && Regs.back().second == ARM::APSR)
Kind = k_RegisterListWithAPSR;
auto Op = make_unique<ARMOperand>(Kind);
for (SmallVectorImpl<std::pair<unsigned, unsigned>>::const_iterator
I = Regs.begin(), E = Regs.end(); I != E; ++I)
Op->Registers.push_back(I->second);
Op->StartLoc = StartLoc;
Op->EndLoc = EndLoc;
return Op;
}
static std::unique_ptr<ARMOperand> CreateVectorList(unsigned RegNum,
unsigned Count,
bool isDoubleSpaced,
SMLoc S, SMLoc E) {
auto Op = make_unique<ARMOperand>(k_VectorList);
Op->VectorList.RegNum = RegNum;
Op->VectorList.Count = Count;
Op->VectorList.isDoubleSpaced = isDoubleSpaced;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
static std::unique_ptr<ARMOperand>
CreateVectorListAllLanes(unsigned RegNum, unsigned Count, bool isDoubleSpaced,
SMLoc S, SMLoc E) {
auto Op = make_unique<ARMOperand>(k_VectorListAllLanes);
Op->VectorList.RegNum = RegNum;
Op->VectorList.Count = Count;
Op->VectorList.isDoubleSpaced = isDoubleSpaced;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
static std::unique_ptr<ARMOperand>
CreateVectorListIndexed(unsigned RegNum, unsigned Count, unsigned Index,
bool isDoubleSpaced, SMLoc S, SMLoc E) {
auto Op = make_unique<ARMOperand>(k_VectorListIndexed);
Op->VectorList.RegNum = RegNum;
Op->VectorList.Count = Count;
Op->VectorList.LaneIndex = Index;
Op->VectorList.isDoubleSpaced = isDoubleSpaced;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
static std::unique_ptr<ARMOperand>
CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
auto Op = make_unique<ARMOperand>(k_VectorIndex);
Op->VectorIndex.Val = Idx;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
static std::unique_ptr<ARMOperand> CreateImm(const MCExpr *Val, SMLoc S,
SMLoc E) {
auto Op = make_unique<ARMOperand>(k_Immediate);
Op->Imm.Val = Val;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
static std::unique_ptr<ARMOperand>
CreateMem(unsigned BaseRegNum, const MCConstantExpr *OffsetImm,
unsigned OffsetRegNum, ARM_AM::ShiftOpc ShiftType,
unsigned ShiftImm, unsigned Alignment, bool isNegative, SMLoc S,
SMLoc E, SMLoc AlignmentLoc = SMLoc()) {
auto Op = make_unique<ARMOperand>(k_Memory);
Op->Memory.BaseRegNum = BaseRegNum;
Op->Memory.OffsetImm = OffsetImm;
Op->Memory.OffsetRegNum = OffsetRegNum;
Op->Memory.ShiftType = ShiftType;
Op->Memory.ShiftImm = ShiftImm;
Op->Memory.Alignment = Alignment;
Op->Memory.isNegative = isNegative;
Op->StartLoc = S;
Op->EndLoc = E;
Op->AlignmentLoc = AlignmentLoc;
return Op;
}
static std::unique_ptr<ARMOperand>
CreatePostIdxReg(unsigned RegNum, bool isAdd, ARM_AM::ShiftOpc ShiftTy,
unsigned ShiftImm, SMLoc S, SMLoc E) {
auto Op = make_unique<ARMOperand>(k_PostIndexRegister);
Op->PostIdxReg.RegNum = RegNum;
Op->PostIdxReg.isAdd = isAdd;
Op->PostIdxReg.ShiftTy = ShiftTy;
Op->PostIdxReg.ShiftImm = ShiftImm;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
static std::unique_ptr<ARMOperand> CreateMemBarrierOpt(ARM_MB::MemBOpt Opt,
SMLoc S) {
auto Op = make_unique<ARMOperand>(k_MemBarrierOpt);
Op->MBOpt.Val = Opt;
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
static std::unique_ptr<ARMOperand>
CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S) {
auto Op = make_unique<ARMOperand>(k_InstSyncBarrierOpt);
Op->ISBOpt.Val = Opt;
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
static std::unique_ptr<ARMOperand>
CreateTraceSyncBarrierOpt(ARM_TSB::TraceSyncBOpt Opt, SMLoc S) {
auto Op = make_unique<ARMOperand>(k_TraceSyncBarrierOpt);
Op->TSBOpt.Val = Opt;
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
static std::unique_ptr<ARMOperand> CreateProcIFlags(ARM_PROC::IFlags IFlags,
SMLoc S) {
auto Op = make_unique<ARMOperand>(k_ProcIFlags);
Op->IFlags.Val = IFlags;
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S) {
auto Op = make_unique<ARMOperand>(k_MSRMask);
Op->MMask.Val = MMask;
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
static std::unique_ptr<ARMOperand> CreateBankedReg(unsigned Reg, SMLoc S) {
auto Op = make_unique<ARMOperand>(k_BankedReg);
Op->BankedReg.Val = Reg;
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
};
} // end anonymous namespace.
void ARMOperand::print(raw_ostream &OS) const {
auto RegName = [](unsigned Reg) {
if (Reg)
return ARMInstPrinter::getRegisterName(Reg);
else
return "noreg";
};
switch (Kind) {
case k_CondCode:
OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
break;
case k_VPTPred:
OS << "<ARMVCC::" << ARMVPTPredToString(getVPTPred()) << ">";
break;
case k_CCOut:
OS << "<ccout " << RegName(getReg()) << ">";
break;
case k_ITCondMask: {
static const char *const MaskStr[] = {
"(invalid)", "(tttt)", "(ttt)", "(ttte)",
"(tt)", "(ttet)", "(tte)", "(ttee)",
"(t)", "(tett)", "(tet)", "(tete)",
"(te)", "(teet)", "(tee)", "(teee)",
};
assert((ITMask.Mask & 0xf) == ITMask.Mask);
OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
break;
}
case k_CoprocNum:
OS << "<coprocessor number: " << getCoproc() << ">";
break;
case k_CoprocReg:
OS << "<coprocessor register: " << getCoproc() << ">";
break;
case k_CoprocOption:
OS << "<coprocessor option: " << CoprocOption.Val << ">";
break;
case k_MSRMask:
OS << "<mask: " << getMSRMask() << ">";
break;
case k_BankedReg:
OS << "<banked reg: " << getBankedReg() << ">";
break;
case k_Immediate:
OS << *getImm();
break;
case k_MemBarrierOpt:
OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">";
break;
case k_InstSyncBarrierOpt:
OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">";
break;
case k_TraceSyncBarrierOpt:
OS << "<ARM_TSB::" << TraceSyncBOptToString(getTraceSyncBarrierOpt()) << ">";
break;
case k_Memory:
OS << "<memory";
if (Memory.BaseRegNum)
OS << " base:" << RegName(Memory.BaseRegNum);
if (Memory.OffsetImm)
OS << " offset-imm:" << *Memory.OffsetImm;
if (Memory.OffsetRegNum)
OS << " offset-reg:" << (Memory.isNegative ? "-" : "")
<< RegName(Memory.OffsetRegNum);
if (Memory.ShiftType != ARM_AM::no_shift) {
OS << " shift-type:" << ARM_AM::getShiftOpcStr(Memory.ShiftType);
OS << " shift-imm:" << Memory.ShiftImm;
}
if (Memory.Alignment)
OS << " alignment:" << Memory.Alignment;
OS << ">";
break;
case k_PostIndexRegister:
OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
<< RegName(PostIdxReg.RegNum);
if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
<< PostIdxReg.ShiftImm;
OS << ">";
break;
case k_ProcIFlags: {
OS << "<ARM_PROC::";
unsigned IFlags = getProcIFlags();
for (int i=2; i >= 0; --i)
if (IFlags & (1 << i))
OS << ARM_PROC::IFlagsToString(1 << i);
OS << ">";
break;
}
case k_Register:
OS << "<register " << RegName(getReg()) << ">";
break;
case k_ShifterImmediate:
OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
<< " #" << ShifterImm.Imm << ">";
break;
case k_ShiftedRegister:
OS << "<so_reg_reg " << RegName(RegShiftedReg.SrcReg) << " "
<< ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy) << " "
<< RegName(RegShiftedReg.ShiftReg) << ">";
break;
case k_ShiftedImmediate:
OS << "<so_reg_imm " << RegName(RegShiftedImm.SrcReg) << " "
<< ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy) << " #"
<< RegShiftedImm.ShiftImm << ">";
break;
case k_RotateImmediate:
OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
break;
case k_ModifiedImmediate:
OS << "<mod_imm #" << ModImm.Bits << ", #"
<< ModImm.Rot << ")>";
break;
case k_ConstantPoolImmediate:
OS << "<constant_pool_imm #" << *getConstantPoolImm();
break;
case k_BitfieldDescriptor:
OS << "<bitfield " << "lsb: " << Bitfield.LSB
<< ", width: " << Bitfield.Width << ">";
break;
case k_RegisterList:
case k_RegisterListWithAPSR:
case k_DPRRegisterList:
case k_SPRRegisterList:
case k_FPSRegisterListWithVPR:
case k_FPDRegisterListWithVPR: {
OS << "<register_list ";
const SmallVectorImpl<unsigned> &RegList = getRegList();
for (SmallVectorImpl<unsigned>::const_iterator
I = RegList.begin(), E = RegList.end(); I != E; ) {
OS << RegName(*I);
if (++I < E) OS << ", ";
}
OS << ">";
break;
}
case k_VectorList:
OS << "<vector_list " << VectorList.Count << " * "
<< RegName(VectorList.RegNum) << ">";
break;
case k_VectorListAllLanes:
OS << "<vector_list(all lanes) " << VectorList.Count << " * "
<< RegName(VectorList.RegNum) << ">";
break;
case k_VectorListIndexed:
OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
<< VectorList.Count << " * " << RegName(VectorList.RegNum) << ">";
break;
case k_Token:
OS << "'" << getToken() << "'";
break;
case k_VectorIndex:
OS << "<vectorindex " << getVectorIndex() << ">";
break;
}
}
/// @name Auto-generated Match Functions
/// {
static unsigned MatchRegisterName(StringRef Name);
/// }
bool ARMAsmParser::ParseRegister(unsigned &RegNo,
SMLoc &StartLoc, SMLoc &EndLoc) {
const AsmToken &Tok = getParser().getTok();
StartLoc = Tok.getLoc();
EndLoc = Tok.getEndLoc();
RegNo = tryParseRegister();
return (RegNo == (unsigned)-1);
}
/// Try to parse a register name. The token must be an Identifier when called,
/// and if it is a register name the token is eaten and the register number is
/// returned. Otherwise return -1.
int ARMAsmParser::tryParseRegister() {
MCAsmParser &Parser = getParser();
const AsmToken &Tok = Parser.getTok();
if (Tok.isNot(AsmToken::Identifier)) return -1;
std::string lowerCase = Tok.getString().lower();
unsigned RegNum = MatchRegisterName(lowerCase);
if (!RegNum) {
RegNum = StringSwitch<unsigned>(lowerCase)
.Case("r13", ARM::SP)
.Case("r14", ARM::LR)
.Case("r15", ARM::PC)
.Case("ip", ARM::R12)
// Additional register name aliases for 'gas' compatibility.
.Case("a1", ARM::R0)
.Case("a2", ARM::R1)
.Case("a3", ARM::R2)
.Case("a4", ARM::R3)
.Case("v1", ARM::R4)
.Case("v2", ARM::R5)
.Case("v3", ARM::R6)
.Case("v4", ARM::R7)
.Case("v5", ARM::R8)
.Case("v6", ARM::R9)
.Case("v7", ARM::R10)
.Case("v8", ARM::R11)
.Case("sb", ARM::R9)
.Case("sl", ARM::R10)
.Case("fp", ARM::R11)
.Default(0);
}
if (!RegNum) {
// Check for aliases registered via .req. Canonicalize to lower case.
// That's more consistent since register names are case insensitive, and
// it's how the original entry was passed in from MC/MCParser/AsmParser.
StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
// If no match, return failure.
if (Entry == RegisterReqs.end())
return -1;
Parser.Lex(); // Eat identifier token.
return Entry->getValue();
}
// Some FPUs only have 16 D registers, so D16-D31 are invalid
if (!hasD32() && RegNum >= ARM::D16 && RegNum <= ARM::D31)
return -1;
Parser.Lex(); // Eat identifier token.
return RegNum;
}
// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
// If a recoverable error occurs, return 1. If an irrecoverable error
// occurs, return -1. An irrecoverable error is one where tokens have been
// consumed in the process of trying to parse the shifter (i.e., when it is
// indeed a shifter operand, but malformed).
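// Accepted forms include "lsl #3", "asr r4", "ror #31" and "rrx",
// immediately following the register being shifted.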
int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
SMLoc S = Parser.getTok().getLoc();
const AsmToken &Tok = Parser.getTok();
if (Tok.isNot(AsmToken::Identifier))
return -1;
std::string lowerCase = Tok.getString().lower();
ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
.Case("asl", ARM_AM::lsl)
.Case("lsl", ARM_AM::lsl)
.Case("lsr", ARM_AM::lsr)
.Case("asr", ARM_AM::asr)
.Case("ror", ARM_AM::ror)
.Case("rrx", ARM_AM::rrx)
.Default(ARM_AM::no_shift);
if (ShiftTy == ARM_AM::no_shift)
return 1;
Parser.Lex(); // Eat the operator.
// The source register for the shift has already been added to the
// operand list, so we need to pop it off and combine it into the shifted
// register operand instead.
std::unique_ptr<ARMOperand> PrevOp(
(ARMOperand *)Operands.pop_back_val().release());
if (!PrevOp->isReg())
return Error(PrevOp->getStartLoc(), "shift must be of a register");
int SrcReg = PrevOp->getReg();
SMLoc EndLoc;
int64_t Imm = 0;
int ShiftReg = 0;
if (ShiftTy == ARM_AM::rrx) {
// RRX doesn't have an explicit shift amount. The encoder expects
// the shift register to be the same as the source register. Seems odd,
// but OK.
ShiftReg = SrcReg;
} else {
// Figure out if this is shifted by a constant or a register (for non-RRX).
if (Parser.getTok().is(AsmToken::Hash) ||
Parser.getTok().is(AsmToken::Dollar)) {
Parser.Lex(); // Eat hash.
SMLoc ImmLoc = Parser.getTok().getLoc();
const MCExpr *ShiftExpr = nullptr;
if (getParser().parseExpression(ShiftExpr, EndLoc)) {
Error(ImmLoc, "invalid immediate shift value");
return -1;
}
// The expression must be evaluatable as an immediate.
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
if (!CE) {
Error(ImmLoc, "invalid immediate shift value");
return -1;
}
// Range check the immediate.
// lsl, ror: 0 <= imm <= 31
// lsr, asr: 0 <= imm <= 32
Imm = CE->getValue();
if (Imm < 0 ||
((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
Error(ImmLoc, "immediate shift value out of range");
return -1;
}
// shift by zero is a nop. Always send it through as lsl.
// ('as' compatibility)
if (Imm == 0)
ShiftTy = ARM_AM::lsl;
} else if (Parser.getTok().is(AsmToken::Identifier)) {
SMLoc L = Parser.getTok().getLoc();
EndLoc = Parser.getTok().getEndLoc();
ShiftReg = tryParseRegister();
if (ShiftReg == -1) {
Error(L, "expected immediate or register in shift operand");
return -1;
}
} else {
Error(Parser.getTok().getLoc(),
"expected immediate or register in shift operand");
return -1;
}
}
if (ShiftReg && ShiftTy != ARM_AM::rrx)
Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
ShiftReg, Imm,
S, EndLoc));
else
Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
S, EndLoc));
return 0;
}
/// Try to parse a register name. The token must be an Identifier when called.
/// If it's a register, an AsmOperand is created. Another AsmOperand is created
/// if there is a "writeback". 'true' if it's not a register.
///
/// TODO this is likely to change to allow different register types and or to
/// parse for a specific register type.
bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
SMLoc RegStartLoc = Parser.getTok().getLoc();
SMLoc RegEndLoc = Parser.getTok().getEndLoc();
int RegNo = tryParseRegister();
if (RegNo == -1)
return true;
Operands.push_back(ARMOperand::CreateReg(RegNo, RegStartLoc, RegEndLoc));
const AsmToken &ExclaimTok = Parser.getTok();
if (ExclaimTok.is(AsmToken::Exclaim)) {
Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
ExclaimTok.getLoc()));
Parser.Lex(); // Eat exclaim token
return false;
}
// Also check for an index operand. This is only legal for vector registers,
// but that'll get caught OK in operand matching, so we don't need to
// explicitly filter everything else out here.
if (Parser.getTok().is(AsmToken::LBrac)) {
SMLoc SIdx = Parser.getTok().getLoc();
Parser.Lex(); // Eat left bracket token.
const MCExpr *ImmVal;
if (getParser().parseExpression(ImmVal))
return true;
const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
if (!MCE)
return TokError("immediate value expected for vector index");
if (Parser.getTok().isNot(AsmToken::RBrac))
return Error(Parser.getTok().getLoc(), "']' expected");
SMLoc E = Parser.getTok().getEndLoc();
Parser.Lex(); // Eat right bracket token.
Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
SIdx, E,
getContext()));
}
return false;
}
/// MatchCoprocessorOperandName - Try to parse a coprocessor-related
/// instruction with a symbolic operand name.
/// We accept "crN" syntax for GAS compatibility.
/// <operand-name> ::= <prefix><number>
/// If CoprocOp is 'c', then:
/// <prefix> ::= c | cr
/// If CoprocOp is 'p', then :
/// <prefix> ::= p
/// <number> ::= integer in range [0, 15]
static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
// Use the same layout as the tablegen'erated register name matcher. Ugly,
// but efficient.
if (Name.size() < 2 || Name[0] != CoprocOp)
return -1;
Name = (Name[1] == 'r') ? Name.drop_front(2) : Name.drop_front();
switch (Name.size()) {
default: return -1;
case 1:
switch (Name[0]) {
default: return -1;
case '0': return 0;
case '1': return 1;
case '2': return 2;
case '3': return 3;
case '4': return 4;
case '5': return 5;
case '6': return 6;
case '7': return 7;
case '8': return 8;
case '9': return 9;
}
case 2:
if (Name[0] != '1')
return -1;
switch (Name[1]) {
default: return -1;
// CP10 and CP11 are VFP/NEON and so vector instructions should be used.
// However, old cores (v5/v6) did use them in that way.
case '0': return 10;
case '1': return 11;
case '2': return 12;
case '3': return 13;
case '4': return 14;
case '5': return 15;
}
}
}
/// parseITCondCode - Try to parse a condition code for an IT instruction.
OperandMatchResultTy
ARMAsmParser::parseITCondCode(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
SMLoc S = Parser.getTok().getLoc();
const AsmToken &Tok = Parser.getTok();
if (!Tok.is(AsmToken::Identifier))
return MatchOperand_NoMatch;
unsigned CC = ARMCondCodeFromString(Tok.getString());
if (CC == ~0U)
return MatchOperand_NoMatch;
Parser.Lex(); // Eat the token.
Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
return MatchOperand_Success;
}
/// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
/// token must be an Identifier when called, and if it is a coprocessor
/// number, the token is eaten and the operand is added to the operand list.
OperandMatchResultTy
ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
SMLoc S = Parser.getTok().getLoc();
const AsmToken &Tok = Parser.getTok();
if (Tok.isNot(AsmToken::Identifier))
return MatchOperand_NoMatch;
int Num = MatchCoprocessorOperandName(Tok.getString().lower(), 'p');
if (Num == -1)
return MatchOperand_NoMatch;
if (!isValidCoprocessorNumber(Num, getSTI().getFeatureBits()))
return MatchOperand_NoMatch;
Parser.Lex(); // Eat identifier token.
Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
return MatchOperand_Success;
}
/// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
/// token must be an Identifier when called, and if it is a coprocessor
/// number, the token is eaten and the operand is added to the operand list.
OperandMatchResultTy
ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
SMLoc S = Parser.getTok().getLoc();
const AsmToken &Tok = Parser.getTok();
if (Tok.isNot(AsmToken::Identifier))
return MatchOperand_NoMatch;
int Reg = MatchCoprocessorOperandName(Tok.getString().lower(), 'c');
if (Reg == -1)
return MatchOperand_NoMatch;
Parser.Lex(); // Eat identifier token.
Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
return MatchOperand_Success;
}
/// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
/// coproc_option : '{' imm0_255 '}'
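/// For example, the trailing "{16}" that may follow the address in an
/// LDC/STC instruction.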
OperandMatchResultTy
ARMAsmParser::parseCoprocOptionOperand(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
SMLoc S = Parser.getTok().getLoc();
// If this isn't a '{', this isn't a coprocessor immediate operand.
if (Parser.getTok().isNot(AsmToken::LCurly))
return MatchOperand_NoMatch;
Parser.Lex(); // Eat the '{'
const MCExpr *Expr;
SMLoc Loc = Parser.getTok().getLoc();
if (getParser().parseExpression(Expr)) {
Error(Loc, "illegal expression");
return MatchOperand_ParseFail;
}
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
return MatchOperand_ParseFail;
}
int Val = CE->getValue();
// Check for and consume the closing '}'
if (Parser.getTok().isNot(AsmToken::RCurly))
return MatchOperand_ParseFail;
SMLoc E = Parser.getTok().getEndLoc();
Parser.Lex(); // Eat the '}'
Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
return MatchOperand_Success;
}
// For register list parsing, we need to map from raw GPR register numbering
// to the enumeration values. The enumeration values aren't sorted by
// register number due to our using "sp", "lr" and "pc" as canonical names.
static unsigned getNextRegister(unsigned Reg) {
// If this is a GPR, we need to do it manually, otherwise we can rely
// on the sort ordering of the enumeration since the other reg-classes
// are sane.
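// For example, the successor of ARM::R12 in encoding order is ARM::SP.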
if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
return Reg + 1;
switch(Reg) {
default: llvm_unreachable("Invalid GPR number!");
case ARM::R0: return ARM::R1; case ARM::R1: return ARM::R2;
case ARM::R2: return ARM::R3; case ARM::R3: return ARM::R4;
case ARM::R4: return ARM::R5; case ARM::R5: return ARM::R6;
case ARM::R6: return ARM::R7; case ARM::R7: return ARM::R8;
case ARM::R8: return ARM::R9; case ARM::R9: return ARM::R10;
case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
case ARM::R12: return ARM::SP; case ARM::SP: return ARM::LR;
case ARM::LR: return ARM::PC; case ARM::PC: return ARM::R0;
}
}
/// Parse a register list.
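/// Accepts forms such as {r0, r1, r4-r6} or {d0-d3}; Q registers are
/// interpreted as their two D sub-registers, so {q0} is equivalent to
/// {d0, d1}.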
bool ARMAsmParser::parseRegisterList(OperandVector &Operands,
bool EnforceOrder) {
MCAsmParser &Parser = getParser();
if (Parser.getTok().isNot(AsmToken::LCurly))
return TokError("Token is not a Left Curly Brace");
SMLoc S = Parser.getTok().getLoc();
Parser.Lex(); // Eat '{' token.
SMLoc RegLoc = Parser.getTok().getLoc();
// Check the first register in the list to see what register class
// this is a list of.
int Reg = tryParseRegister();
if (Reg == -1)
return Error(RegLoc, "register expected");
// The reglist instructions have at most 16 registers, so reserve
// space for that many.
int EReg = 0;
SmallVector<std::pair<unsigned, unsigned>, 16> Registers;
// Allow Q regs and just interpret them as the two D sub-registers.
if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
Reg = getDRegFromQReg(Reg);
EReg = MRI->getEncodingValue(Reg);
Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
++Reg;
}
const MCRegisterClass *RC;
if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
else if (ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
else
return Error(RegLoc, "invalid register in register list");
// Store the register.
EReg = MRI->getEncodingValue(Reg);
Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
// This starts immediately after the first register token in the list,
// so we can see either a comma or a minus (range separator) as a legal
// next token.
while (Parser.getTok().is(AsmToken::Comma) ||
Parser.getTok().is(AsmToken::Minus)) {
if (Parser.getTok().is(AsmToken::Minus)) {
Parser.Lex(); // Eat the minus.
SMLoc AfterMinusLoc = Parser.getTok().getLoc();
int EndReg = tryParseRegister();
if (EndReg == -1)
return Error(AfterMinusLoc, "register expected");
// Allow Q regs and just interpret them as the two D sub-registers.
if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
EndReg = getDRegFromQReg(EndReg) + 1;
// If the register is the same as the start reg, there's nothing
// more to do.
if (Reg == EndReg)
continue;
// The register must be in the same register class as the first.
if (!RC->contains(EndReg))
return Error(AfterMinusLoc, "invalid register in register list");
// Ranges must go from low to high.
if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
return Error(AfterMinusLoc, "bad range in register list");
// Add all the registers in the range to the register list.
while (Reg != EndReg) {
Reg = getNextRegister(Reg);
EReg = MRI->getEncodingValue(Reg);
Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
}
continue;
}
Parser.Lex(); // Eat the comma.
RegLoc = Parser.getTok().getLoc();
int OldReg = Reg;
const AsmToken RegTok = Parser.getTok();
Reg = tryParseRegister();
if (Reg == -1)
return Error(RegLoc, "register expected");
// Allow Q regs and just interpret them as the two D sub-registers.
bool isQReg = false;
if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
Reg = getDRegFromQReg(Reg);
isQReg = true;
}
if (!RC->contains(Reg) &&
RC->getID() == ARMMCRegisterClasses[ARM::GPRRegClassID].getID() &&
ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg)) {
// Switch register classes: GPRwithAPSRnospRegClassID largely overlaps
// GPRRegClassID but additionally contains APSR.
RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
}
if (Reg == ARM::VPR && (RC == &ARMMCRegisterClasses[ARM::SPRRegClassID] ||
RC == &ARMMCRegisterClasses[ARM::DPRRegClassID])) {
RC = &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID];
EReg = MRI->getEncodingValue(Reg);
Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
continue;
}
// The register must be in the same register class as the first.
if (!RC->contains(Reg))
return Error(RegLoc, "invalid register in register list");
// In most cases, the list must be monotonically increasing. An
// exception is CLRM, which is order-independent anyway, so
// there's no potential for confusion if you write clrm {r2,r1}
// instead of clrm {r1,r2}.
if (EnforceOrder &&
MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
Warning(RegLoc, "register list not in ascending order");
else if (!ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
return Error(RegLoc, "register list not in ascending order");
}
if (MRI->getEncodingValue(Reg) == MRI->getEncodingValue(OldReg)) {
Warning(RegLoc, "duplicated register (" + RegTok.getString() +
") in register list");
continue;
}
// VFP register lists must also be contiguous.
if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
RC != &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID] &&
Reg != OldReg + 1)
return Error(RegLoc, "non-contiguous register range");
EReg = MRI->getEncodingValue(Reg);
Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
if (isQReg) {
EReg = MRI->getEncodingValue(++Reg);
Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
}
}
if (Parser.getTok().isNot(AsmToken::RCurly))
return Error(Parser.getTok().getLoc(), "'}' expected");
SMLoc E = Parser.getTok().getEndLoc();
Parser.Lex(); // Eat '}' token.
// Push the register list operand.
Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
// The ARM system instruction variants for LDM/STM have a '^' token here.
if (Parser.getTok().is(AsmToken::Caret)) {
Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
Parser.Lex(); // Eat '^' token.
}
return false;
}
// Helper function to parse the lane index for vector lists.
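// Accepts "[]" for the all-lanes form (e.g. d0[]) and "[<imm>]" for a
// single indexed lane (e.g. d0[2]); with no bracket at all, NoLanes is
// returned.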
OperandMatchResultTy ARMAsmParser::
parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) {
MCAsmParser &Parser = getParser();
Index = 0; // Always return a defined index value.
if (Parser.getTok().is(AsmToken::LBrac)) {
Parser.Lex(); // Eat the '['.
if (Parser.getTok().is(AsmToken::RBrac)) {
// "Dn[]" is the 'all lanes' syntax.
LaneKind = AllLanes;
EndLoc = Parser.getTok().getEndLoc();
Parser.Lex(); // Eat the ']'.
return MatchOperand_Success;
}
// There's an optional '#' token here. Normally there wouldn't be, but
// inline assembly puts one in, and it's friendly to accept that.
if (Parser.getTok().is(AsmToken::Hash))
Parser.Lex(); // Eat the '#'.
const MCExpr *LaneIndex;
SMLoc Loc = Parser.getTok().getLoc();
if (getParser().parseExpression(LaneIndex)) {
Error(Loc, "illegal expression");
return MatchOperand_ParseFail;
}
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
if (!CE) {
Error(Loc, "lane index must be empty or an integer");
return MatchOperand_ParseFail;
}
if (Parser.getTok().isNot(AsmToken::RBrac)) {
Error(Parser.getTok().getLoc(), "']' expected");
return MatchOperand_ParseFail;
}
EndLoc = Parser.getTok().getEndLoc();
Parser.Lex(); // Eat the ']'.
int64_t Val = CE->getValue();
// FIXME: Make this range check context sensitive for .8, .16, .32.
if (Val < 0 || Val > 7) {
Error(Parser.getTok().getLoc(), "lane index out of range");
return MatchOperand_ParseFail;
}
Index = Val;
LaneKind = IndexedLane;
return MatchOperand_Success;
}
LaneKind = NoLanes;
return MatchOperand_Success;
}
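// Lane-suffix forms recognized above (illustrative, added):
//   d3      -> NoLanes
//   d3[]    -> AllLanes ('all lanes' syntax)
//   d3[1]   -> IndexedLane with Index = 1 (index must be in [0,7])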
/// parseVectorList - Parse a vector register list.
OperandMatchResultTy
ARMAsmParser::parseVectorList(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
VectorLaneTy LaneKind;
unsigned LaneIndex;
SMLoc S = Parser.getTok().getLoc();
// As an extension (to match gas), support a plain D register or Q register
// (without enclosing curly braces) as a single- or double-entry list,
// respectively.
if (!hasMVE() && Parser.getTok().is(AsmToken::Identifier)) {
SMLoc E = Parser.getTok().getEndLoc();
int Reg = tryParseRegister();
if (Reg == -1)
return MatchOperand_NoMatch;
if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
if (Res != MatchOperand_Success)
return Res;
switch (LaneKind) {
case NoLanes:
Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
break;
case AllLanes:
Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
S, E));
break;
case IndexedLane:
Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
LaneIndex,
false, S, E));
break;
}
return MatchOperand_Success;
}
if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
Reg = getDRegFromQReg(Reg);
OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
if (Res != MatchOperand_Success)
return Res;
switch (LaneKind) {
case NoLanes:
Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
&ARMMCRegisterClasses[ARM::DPairRegClassID]);
Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
break;
case AllLanes:
Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
&ARMMCRegisterClasses[ARM::DPairRegClassID]);
Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
S, E));
break;
case IndexedLane:
Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
LaneIndex,
false, S, E));
break;
}
return MatchOperand_Success;
}
Error(S, "vector register expected");
return MatchOperand_ParseFail;
}
if (Parser.getTok().isNot(AsmToken::LCurly))
return MatchOperand_NoMatch;
Parser.Lex(); // Eat '{' token.
SMLoc RegLoc = Parser.getTok().getLoc();
int Reg = tryParseRegister();
if (Reg == -1) {
Error(RegLoc, "register expected");
return MatchOperand_ParseFail;
}
unsigned Count = 1;
int Spacing = 0;
unsigned FirstReg = Reg;
if (hasMVE() && !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg)) {
Error(Parser.getTok().getLoc(), "vector register in range Q0-Q7 expected");
return MatchOperand_ParseFail;
}
// The list is of D registers, but we also allow Q regs and just interpret
// them as the two D sub-registers.
else if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
FirstReg = Reg = getDRegFromQReg(Reg);
Spacing = 1; // double-spacing requires explicit D registers, otherwise
// it's ambiguous with four-register single spaced.
++Reg;
++Count;
}
SMLoc E;
if (parseVectorLane(LaneKind, LaneIndex, E) != MatchOperand_Success)
return MatchOperand_ParseFail;
while (Parser.getTok().is(AsmToken::Comma) ||
Parser.getTok().is(AsmToken::Minus)) {
if (Parser.getTok().is(AsmToken::Minus)) {
if (!Spacing)
Spacing = 1; // Register range implies a single spaced list.
else if (Spacing == 2) {
Error(Parser.getTok().getLoc(),
"sequential registers in double spaced list");
return MatchOperand_ParseFail;
}
Parser.Lex(); // Eat the minus.
SMLoc AfterMinusLoc = Parser.getTok().getLoc();
int EndReg = tryParseRegister();
if (EndReg == -1) {
Error(AfterMinusLoc, "register expected");
return MatchOperand_ParseFail;
}
// Allow Q regs and just interpret them as the two D sub-registers.
if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
EndReg = getDRegFromQReg(EndReg) + 1;
// If the register is the same as the start reg, there's nothing
// more to do.
if (Reg == EndReg)
continue;
// The register must be in the same register class as the first.
if ((hasMVE() &&
!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(EndReg)) ||
(!hasMVE() &&
!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg))) {
Error(AfterMinusLoc, "invalid register in register list");
return MatchOperand_ParseFail;
}
// Ranges must go from low to high.
if (Reg > EndReg) {
Error(AfterMinusLoc, "bad range in register list");
return MatchOperand_ParseFail;
}
// Parse the lane specifier if present.
VectorLaneTy NextLaneKind;
unsigned NextLaneIndex;
if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
MatchOperand_Success)
return MatchOperand_ParseFail;
if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
Error(AfterMinusLoc, "mismatched lane index in register list");
return MatchOperand_ParseFail;
}
// Add all the registers in the range to the register list.
Count += EndReg - Reg;
Reg = EndReg;
continue;
}
Parser.Lex(); // Eat the comma.
RegLoc = Parser.getTok().getLoc();
int OldReg = Reg;
Reg = tryParseRegister();
if (Reg == -1) {
Error(RegLoc, "register expected");
return MatchOperand_ParseFail;
}
if (hasMVE()) {
if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg)) {
Error(RegLoc, "vector register in range Q0-Q7 expected");
return MatchOperand_ParseFail;
}
Spacing = 1;
}
// Vector register lists must be contiguous.
// It's OK to use the enumeration values directly here, as the
// VFP register classes have their enums sorted properly.
//
// The list is of D registers, but we also allow Q regs and just interpret
// them as the two D sub-registers.
else if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
if (!Spacing)
Spacing = 1; // Register range implies a single spaced list.
else if (Spacing == 2) {
Error(RegLoc,
"invalid register in double-spaced list (must be a 'D' register)");
return MatchOperand_ParseFail;
}
Reg = getDRegFromQReg(Reg);
if (Reg != OldReg + 1) {
Error(RegLoc, "non-contiguous register range");
return MatchOperand_ParseFail;
}
++Reg;
Count += 2;
// Parse the lane specifier if present.
VectorLaneTy NextLaneKind;
unsigned NextLaneIndex;
SMLoc LaneLoc = Parser.getTok().getLoc();
if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
MatchOperand_Success)
return MatchOperand_ParseFail;
if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
Error(LaneLoc, "mismatched lane index in register list");
return MatchOperand_ParseFail;
}
continue;
}
// Normal D register.
// Figure out the register spacing (single or double) of the list if
// we don't know it already.
if (!Spacing)
Spacing = 1 + (Reg == OldReg + 2);
// Just check that it's contiguous and keep going.
if (Reg != OldReg + Spacing) {
Error(RegLoc, "non-contiguous register range");
return MatchOperand_ParseFail;
}
++Count;
// Parse the lane specifier if present.
VectorLaneTy NextLaneKind;
unsigned NextLaneIndex;
SMLoc EndLoc = Parser.getTok().getLoc();
if (parseVectorLane(NextLaneKind, NextLaneIndex, E) != MatchOperand_Success)
return MatchOperand_ParseFail;
if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
Error(EndLoc, "mismatched lane index in register list");
return MatchOperand_ParseFail;
}
}
if (Parser.getTok().isNot(AsmToken::RCurly)) {
Error(Parser.getTok().getLoc(), "'}' expected");
return MatchOperand_ParseFail;
}
E = Parser.getTok().getEndLoc();
Parser.Lex(); // Eat '}' token.
switch (LaneKind) {
case NoLanes:
case AllLanes: {
// Two-register operands have been converted to the
// composite register classes.
if (Count == 2 && !hasMVE()) {
const MCRegisterClass *RC = (Spacing == 1) ?
&ARMMCRegisterClasses[ARM::DPairRegClassID] :
&ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
}
auto Create = (LaneKind == NoLanes ? ARMOperand::CreateVectorList :
ARMOperand::CreateVectorListAllLanes);
Operands.push_back(Create(FirstReg, Count, (Spacing == 2), S, E));
break;
}
case IndexedLane:
Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
LaneIndex,
(Spacing == 2),
S, E));
break;
}
return MatchOperand_Success;
}
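// Illustrative vector-list forms handled above (added for clarity):
//   {d0, d1}       @ single-spaced pair -> DPair super-register
//   {d0, d2}       @ double-spaced pair -> DPairSpc super-register
//   {d0-d3}        @ range form (single-spaced)
//   {d0[], d1[]}   @ all-lanes list; lane kinds must match across entries
//   q0             @ bare Q reg parsed as {d0, d1} (gas extension, non-MVE)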
/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
OperandMatchResultTy
ARMAsmParser::parseMemBarrierOptOperand(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
SMLoc S = Parser.getTok().getLoc();
const AsmToken &Tok = Parser.getTok();
unsigned Opt;
if (Tok.is(AsmToken::Identifier)) {
StringRef OptStr = Tok.getString();
Opt = StringSwitch<unsigned>(OptStr.lower())
.Case("sy", ARM_MB::SY)
.Case("st", ARM_MB::ST)
.Case("ld", ARM_MB::LD)
.Case("sh", ARM_MB::ISH)
.Case("ish", ARM_MB::ISH)
.Case("shst", ARM_MB::ISHST)
.Case("ishst", ARM_MB::ISHST)
.Case("ishld", ARM_MB::ISHLD)
.Case("nsh", ARM_MB::NSH)
.Case("un", ARM_MB::NSH)
.Case("nshst", ARM_MB::NSHST)
.Case("nshld", ARM_MB::NSHLD)
.Case("unst", ARM_MB::NSHST)
.Case("osh", ARM_MB::OSH)
.Case("oshst", ARM_MB::OSHST)
.Case("oshld", ARM_MB::OSHLD)
.Default(~0U);
// ishld, oshld, nshld and ld are only available from ARMv8.
if (!hasV8Ops() && (Opt == ARM_MB::ISHLD || Opt == ARM_MB::OSHLD ||
Opt == ARM_MB::NSHLD || Opt == ARM_MB::LD))
Opt = ~0U;
if (Opt == ~0U)
return MatchOperand_NoMatch;
Parser.Lex(); // Eat identifier token.
} else if (Tok.is(AsmToken::Hash) ||
Tok.is(AsmToken::Dollar) ||
Tok.is(AsmToken::Integer)) {
if (Parser.getTok().isNot(AsmToken::Integer))
Parser.Lex(); // Eat '#' or '$'.
SMLoc Loc = Parser.getTok().getLoc();
const MCExpr *MemBarrierID;
if (getParser().parseExpression(MemBarrierID)) {
Error(Loc, "illegal expression");
return MatchOperand_ParseFail;
}
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
if (!CE) {
Error(Loc, "constant expression expected");
return MatchOperand_ParseFail;
}
int Val = CE->getValue();
if (Val & ~0xf) {
Error(Loc, "immediate value out of range");
return MatchOperand_ParseFail;
}
Opt = ARM_MB::RESERVED_0 + Val;
} else
return MatchOperand_ParseFail;
Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
return MatchOperand_Success;
}
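// Illustrative barrier operands accepted above (added for clarity):
//   dmb ish   @ named option
//   dsb sy    @ full-system barrier
//   dmb #4    @ raw option value in [0,15]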
OperandMatchResultTy
ARMAsmParser::parseTraceSyncBarrierOptOperand(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
SMLoc S = Parser.getTok().getLoc();
const AsmToken &Tok = Parser.getTok();
if (Tok.isNot(AsmToken::Identifier))
return MatchOperand_NoMatch;
if (!Tok.getString().equals_lower("csync"))
return MatchOperand_NoMatch;
Parser.Lex(); // Eat identifier token.
Operands.push_back(ARMOperand::CreateTraceSyncBarrierOpt(ARM_TSB::CSYNC, S));
return MatchOperand_Success;
}
/// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options.
OperandMatchResultTy
ARMAsmParser::parseInstSyncBarrierOptOperand(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
SMLoc S = Parser.getTok().getLoc();
const AsmToken &Tok = Parser.getTok();
unsigned Opt;
if (Tok.is(AsmToken::Identifier)) {
StringRef OptStr = Tok.getString();
if (OptStr.equals_lower("sy"))
Opt = ARM_ISB::SY;
else
return MatchOperand_NoMatch;
Parser.Lex(); // Eat identifier token.
} else if (Tok.is(AsmToken::Hash) ||
Tok.is(AsmToken::Dollar) ||
Tok.is(AsmToken::Integer)) {
if (Parser.getTok().isNot(AsmToken::Integer))
Parser.Lex(); // Eat '#' or '$'.
SMLoc Loc = Parser.getTok().getLoc();
const MCExpr *ISBarrierID;
if (getParser().parseExpression(ISBarrierID)) {
Error(Loc, "illegal expression");
return MatchOperand_ParseFail;
}
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ISBarrierID);
if (!CE) {
Error(Loc, "constant expression expected");
return MatchOperand_ParseFail;
}
int Val = CE->getValue();
if (Val & ~0xf) {
Error(Loc, "immediate value out of range");
return MatchOperand_ParseFail;
}
Opt = ARM_ISB::RESERVED_0 + Val;
} else
return MatchOperand_ParseFail;
Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt(
(ARM_ISB::InstSyncBOpt)Opt, S));
return MatchOperand_Success;
}
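// Illustrative ISB operands (added): "isb sy" is the only named option;
// a raw value such as "isb #15" is also accepted.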
/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
OperandMatchResultTy
ARMAsmParser::parseProcIFlagsOperand(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
SMLoc S = Parser.getTok().getLoc();
const AsmToken &Tok = Parser.getTok();
if (!Tok.is(AsmToken::Identifier))
return MatchOperand_NoMatch;
StringRef IFlagsStr = Tok.getString();
// An iflags string of "none" is interpreted to mean that none of the AIF
// bits are set. Not a terribly useful instruction, but a valid encoding.
unsigned IFlags = 0;
if (IFlagsStr != "none") {
for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1).lower())
.Case("a", ARM_PROC::A)
.Case("i", ARM_PROC::I)
.Case("f", ARM_PROC::F)
.Default(~0U);
// If some specific iflag is already set, it means that some letter is
// present more than once; this is not acceptable.
if (Flag == ~0U || (IFlags & Flag))
return MatchOperand_NoMatch;
IFlags |= Flag;
}
}
Parser.Lex(); // Eat identifier token.
Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
return MatchOperand_Success;
}
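// Illustrative iflags operands (added; each of a/i/f may appear at most
// once):
//   cpsid aif   @ A, I and F all set
//   cpsie i     @ I only
//   cpsid none  @ valid encoding with no AIF bits set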
/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
OperandMatchResultTy
ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
SMLoc S = Parser.getTok().getLoc();
const AsmToken &Tok = Parser.getTok();
if (Tok.is(AsmToken::Integer)) {
int64_t Val = Tok.getIntVal();
if (Val > 255 || Val < 0) {
return MatchOperand_NoMatch;
}
unsigned SYSmvalue = Val & 0xFF;
Parser.Lex();
Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
return MatchOperand_Success;
}
if (!Tok.is(AsmToken::Identifier))
return MatchOperand_NoMatch;
StringRef Mask = Tok.getString();
if (isMClass()) {
auto TheReg = ARMSysReg::lookupMClassSysRegByName(Mask.lower());
if (!TheReg || !TheReg->hasRequiredFeatures(getSTI().getFeatureBits()))
return MatchOperand_NoMatch;
unsigned SYSmvalue = TheReg->Encoding & 0xFFF;
Parser.Lex(); // Eat identifier token.
Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
return MatchOperand_Success;
}
// Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
size_t Start = 0, Next = Mask.find('_');
StringRef Flags = "";
std::string SpecReg = Mask.slice(Start, Next).lower();
if (Next != StringRef::npos)
Flags = Mask.slice(Next+1, Mask.size());
// FlagsVal contains the complete mask:
// 3-0: Mask
// 4: Special Reg (cpsr, apsr => 0; spsr => 1)
unsigned FlagsVal = 0;
if (SpecReg == "apsr") {
FlagsVal = StringSwitch<unsigned>(Flags)
.Case("nzcvq", 0x8) // same as CPSR_f
.Case("g", 0x4) // same as CPSR_s
.Case("nzcvqg", 0xc) // same as CPSR_fs
.Default(~0U);
if (FlagsVal == ~0U) {
if (!Flags.empty())
return MatchOperand_NoMatch;
else
FlagsVal = 8; // No flag
}
} else if (SpecReg == "cpsr" || SpecReg == "spsr") {
// cpsr_all is an alias for cpsr_fc, as is plain cpsr.
if (Flags == "all" || Flags == "")
Flags = "fc";
for (int i = 0, e = Flags.size(); i != e; ++i) {
unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
.Case("c", 1)
.Case("x", 2)
.Case("s", 4)
.Case("f", 8)
.Default(~0U);
// If some specific flag is already set, it means that some letter is
// present more than once; this is not acceptable.
if (Flag == ~0U || (FlagsVal & Flag))
return MatchOperand_NoMatch;
FlagsVal |= Flag;
}
} else // No match for special register.
return MatchOperand_NoMatch;
// Special register without flags is NOT equivalent to "fc" flags.
// NOTE: This is a divergence from gas' behavior. Uncommenting the following
// two lines would enable gas compatibility at the expense of breaking
// round-tripping.
//
// if (!FlagsVal)
// FlagsVal = 0x9;
// Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
if (SpecReg == "spsr")
FlagsVal |= 16;
Parser.Lex(); // Eat identifier token.
Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
return MatchOperand_Success;
}
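// Worked examples for the mask encoding above (added for clarity):
//   msr apsr_nzcvq, r0  -> FlagsVal = 0x8 (same as cpsr_f)
//   msr cpsr_fc, r0     -> FlagsVal = 8 | 1 = 0x9
//   msr spsr_fc, r0     -> FlagsVal = 0x9 | 16 = 0x19 (bit 4 marks SPSR)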
/// parseBankedRegOperand - Try to parse a banked register (e.g. "lr_irq") for
/// use in the MRS/MSR instructions added to support virtualization.
OperandMatchResultTy
ARMAsmParser::parseBankedRegOperand(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
SMLoc S = Parser.getTok().getLoc();
const AsmToken &Tok = Parser.getTok();
if (!Tok.is(AsmToken::Identifier))
return MatchOperand_NoMatch;
StringRef RegName = Tok.getString();
auto TheReg = ARMBankedReg::lookupBankedRegByName(RegName.lower());
if (!TheReg)
return MatchOperand_NoMatch;
unsigned Encoding = TheReg->Encoding;
Parser.Lex(); // Eat identifier token.
Operands.push_back(ARMOperand::CreateBankedReg(Encoding, S));
return MatchOperand_Success;
}
OperandMatchResultTy
ARMAsmParser::parsePKHImm(OperandVector &Operands, StringRef Op, int Low,
int High) {
MCAsmParser &Parser = getParser();
const AsmToken &Tok = Parser.getTok();
if (Tok.isNot(AsmToken::Identifier)) {
Error(Parser.getTok().getLoc(), Op + " operand expected.");
return MatchOperand_ParseFail;
}
StringRef ShiftName = Tok.getString();
std::string LowerOp = Op.lower();
std::string UpperOp = Op.upper();
if (ShiftName != LowerOp && ShiftName != UpperOp) {
Error(Parser.getTok().getLoc(), Op + " operand expected.");
return MatchOperand_ParseFail;
}
Parser.Lex(); // Eat shift type token.
// There must be a '#' and a shift amount.
if (Parser.getTok().isNot(AsmToken::Hash) &&
Parser.getTok().isNot(AsmToken::Dollar)) {
Error(Parser.getTok().getLoc(), "'#' expected");
return MatchOperand_ParseFail;
}
Parser.Lex(); // Eat hash token.
const MCExpr *ShiftAmount;
SMLoc Loc = Parser.getTok().getLoc();
SMLoc EndLoc;
if (getParser().parseExpression(ShiftAmount, EndLoc)) {
Error(Loc, "illegal expression");
return MatchOperand_ParseFail;
}
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
if (!CE) {
Error(Loc, "constant expression expected");
return MatchOperand_ParseFail;
}
int Val = CE->getValue();
if (Val < Low || Val > High) {
Error(Loc, "immediate value out of range");
return MatchOperand_ParseFail;
}
Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc));
return MatchOperand_Success;
}
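// Illustrative PKH operands routed through this parser (added; the Op,
// Low and High arguments come from the matcher, e.g. "lsl" for PKHBT and
// "asr" for PKHTB):
//   pkhbt r0, r1, r2, lsl #8
//   pkhtb r0, r1, r2, asr #16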
OperandMatchResultTy
ARMAsmParser::parseSetEndImm(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
const AsmToken &Tok = Parser.getTok();
SMLoc S = Tok.getLoc();
if (Tok.isNot(AsmToken::Identifier)) {
Error(S, "'be' or 'le' operand expected");
return MatchOperand_ParseFail;
}
int Val = StringSwitch<int>(Tok.getString().lower())
.Case("be", 1)
.Case("le", 0)
.Default(-1);
Parser.Lex(); // Eat the token.
if (Val == -1) {
Error(S, "'be' or 'le' operand expected");
return MatchOperand_ParseFail;
}
Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::create(Val,
getContext()),
S, Tok.getEndLoc()));
return MatchOperand_Success;
}
/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
/// instructions. Legal values are:
/// lsl #n 'n' in [0,31]
/// asr #n 'n' in [1,32]
/// n == 32 encoded as n == 0.
OperandMatchResultTy
ARMAsmParser::parseShifterImm(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
const AsmToken &Tok = Parser.getTok();
SMLoc S = Tok.getLoc();
if (Tok.isNot(AsmToken::Identifier)) {
Error(S, "shift operator 'asr' or 'lsl' expected");
return MatchOperand_ParseFail;
}
StringRef ShiftName = Tok.getString();
bool isASR;
if (ShiftName == "lsl" || ShiftName == "LSL")
isASR = false;
else if (ShiftName == "asr" || ShiftName == "ASR")
isASR = true;
else {
Error(S, "shift operator 'asr' or 'lsl' expected");
return MatchOperand_ParseFail;
}
Parser.Lex(); // Eat the operator.
// A '#' and a shift amount.
if (Parser.getTok().isNot(AsmToken::Hash) &&
Parser.getTok().isNot(AsmToken::Dollar)) {
Error(Parser.getTok().getLoc(), "'#' expected");
return MatchOperand_ParseFail;
}
Parser.Lex(); // Eat hash token.
SMLoc ExLoc = Parser.getTok().getLoc();
const MCExpr *ShiftAmount;
SMLoc EndLoc;
if (getParser().parseExpression(ShiftAmount, EndLoc)) {
Error(ExLoc, "malformed shift expression");
return MatchOperand_ParseFail;
}
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
if (!CE) {
Error(ExLoc, "shift amount must be an immediate");
return MatchOperand_ParseFail;
}
int64_t Val = CE->getValue();
if (isASR) {
// Shift amount must be in [1,32]
if (Val < 1 || Val > 32) {
Error(ExLoc, "'asr' shift amount must be in range [1,32]");
return MatchOperand_ParseFail;
}
// asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
if (isThumb() && Val == 32) {
Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode");
return MatchOperand_ParseFail;
}
if (Val == 32) Val = 0;
} else {
// Shift amount must be in [0,31]
if (Val < 0 || Val > 31) {
Error(ExLoc, "'lsl' shift amount must be in range [0,31]");
return MatchOperand_ParseFail;
}
}
Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc));
return MatchOperand_Success;
}
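// Illustrative SSAT/USAT shifter operands (added for clarity):
//   ssat r0, #8, r1, lsl #4    @ lsl amount in [0,31]
//   ssat r0, #8, r1, asr #32   @ ARM mode only; encoded as asr #0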
/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
/// of instructions. Legal values are:
/// ror #n 'n' in {0, 8, 16, 24}
OperandMatchResultTy
ARMAsmParser::parseRotImm(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
const AsmToken &Tok = Parser.getTok();
SMLoc S = Tok.getLoc();
if (Tok.isNot(AsmToken::Identifier))
return MatchOperand_NoMatch;
StringRef ShiftName = Tok.getString();
if (ShiftName != "ror" && ShiftName != "ROR")
return MatchOperand_NoMatch;
Parser.Lex(); // Eat the operator.
// A '#' and a rotate amount.
if (Parser.getTok().isNot(AsmToken::Hash) &&
Parser.getTok().isNot(AsmToken::Dollar)) {
Error(Parser.getTok().getLoc(), "'#' expected");
return MatchOperand_ParseFail;
}
Parser.Lex(); // Eat hash token.
SMLoc ExLoc = Parser.getTok().getLoc();
const MCExpr *ShiftAmount;
SMLoc EndLoc;
if (getParser().parseExpression(ShiftAmount, EndLoc)) {
Error(ExLoc, "malformed rotate expression");
return MatchOperand_ParseFail;
}
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
if (!CE) {
Error(ExLoc, "rotate amount must be an immediate");
return MatchOperand_ParseFail;
}
int64_t Val = CE->getValue();
// Rotate amount must be in {0, 8, 16, 24} (0 is an undocumented
// extension); normally, zero is represented in asm by omitting the
// rotate operand entirely.
if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24");
return MatchOperand_ParseFail;
}
Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc));
return MatchOperand_Success;
}
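// Illustrative rotate operands for the SXTB/UXTB family (added):
//   sxtb r0, r1, ror #16
//   uxtb r0, r1            @ omitting the rotate is equivalent to ror #0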
OperandMatchResultTy
ARMAsmParser::parseModImm(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
MCAsmLexer &Lexer = getLexer();
int64_t Imm1, Imm2;
SMLoc S = Parser.getTok().getLoc();
// 1) A mod_imm operand can appear in the place of a register name:
// add r0, #mod_imm
// add r0, r0, #mod_imm
// to correctly handle the latter, we bail out as soon as we see an
// identifier.
//
// 2) Similarly, we do not want to parse into complex operands:
// mov r0, #mod_imm
// mov r0, :lower16:(_foo)
if (Parser.getTok().is(AsmToken::Identifier) ||
Parser.getTok().is(AsmToken::Colon))
return MatchOperand_NoMatch;
// Hash (dollar) is optional as per the ARMARM
if (Parser.getTok().is(AsmToken::Hash) ||
Parser.getTok().is(AsmToken::Dollar)) {
// Avoid parsing into complex operands (#:)
if (Lexer.peekTok().is(AsmToken::Colon))
return MatchOperand_NoMatch;
// Eat the hash (dollar)
Parser.Lex();
}
SMLoc Sx1, Ex1;
Sx1 = Parser.getTok().getLoc();
const MCExpr *Imm1Exp;
if (getParser().parseExpression(Imm1Exp, Ex1)) {
Error(Sx1, "malformed expression");
return MatchOperand_ParseFail;
}
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm1Exp);
if (CE) {
// Immediate must fit within 32 bits
Imm1 = CE->getValue();
int Enc = ARM_AM::getSOImmVal(Imm1);
if (Enc != -1 && Parser.getTok().is(AsmToken::EndOfStatement)) {
// We have a match!
Operands.push_back(ARMOperand::CreateModImm((Enc & 0xFF),
(Enc & 0xF00) >> 7,
Sx1, Ex1));
return MatchOperand_Success;
}
// We have parsed an immediate which is not for us; fall back to a plain
// immediate. This can happen for instruction aliases. For example,
// ARMInstrInfo.td defines the alias [mov <-> mvn] which can transform
// a mov (mvn) with a mod_imm_neg/mod_imm_not operand into the opposite
// instruction with a mod_imm operand. The alias is defined such that the
// parser method is shared; that's why we have to do this here.
if (Parser.getTok().is(AsmToken::EndOfStatement)) {
Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
return MatchOperand_Success;
}
} else {
// Operands like #(l1 - l2) can only be evaluated at a later stage (via an
// MCFixup). Fall back to a plain immediate.
Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
return MatchOperand_Success;
}
// From this point onward, we expect the input to be a (#bits, #rot) pair
if (Parser.getTok().isNot(AsmToken::Comma)) {
Error(Sx1, "expected modified immediate operand: #[0, 255], #even[0-30]");
return MatchOperand_ParseFail;
}
if (Imm1 & ~0xFF) {
Error(Sx1, "immediate operand must a number in the range [0, 255]");
return MatchOperand_ParseFail;
}
// Eat the comma
Parser.Lex();
// Repeat for #rot
SMLoc Sx2, Ex2;
Sx2 = Parser.getTok().getLoc();
// Eat the optional hash (dollar)
if (Parser.getTok().is(AsmToken::Hash) ||
Parser.getTok().is(AsmToken::Dollar))
Parser.Lex();
const MCExpr *Imm2Exp;
if (getParser().parseExpression(Imm2Exp, Ex2)) {
Error(Sx2, "malformed expression");
return MatchOperand_ParseFail;
}
CE = dyn_cast<MCConstantExpr>(Imm2Exp);
if (CE) {
Imm2 = CE->getValue();
if (!(Imm2 & ~0x1E)) {
// We have a match!
Operands.push_back(ARMOperand::CreateModImm(Imm1, Imm2, S, Ex2));
return MatchOperand_Success;
}
Error(Sx2, "immediate operand must an even number in the range [0, 30]");
return MatchOperand_ParseFail;
} else {
Error(Sx2, "constant expression expected");
return MatchOperand_ParseFail;
}
}
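// Worked example for the modified-immediate forms above (added for
// clarity): a mod_imm is an 8-bit value rotated right by an even amount,
// so "#0xff000000" is accepted directly (getSOImmVal yields bits = 0xff,
// rot = 8), and the explicit pair form "#255, #8" denotes the same value
// (0xff rotated right by 8 within 32 bits).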
OperandMatchResultTy
ARMAsmParser::parseBitfield(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
SMLoc S = Parser.getTok().getLoc();
// The bitfield descriptor is really two operands, the LSB and the width.
if (Parser.getTok().isNot(AsmToken::Hash) &&
Parser.getTok().isNot(AsmToken::Dollar)) {
Error(Parser.getTok().getLoc(), "'#' expected");
return MatchOperand_ParseFail;
}
Parser.Lex(); // Eat hash token.
const MCExpr *LSBExpr;
SMLoc E = Parser.getTok().getLoc();
if (getParser().parseExpression(LSBExpr)) {
Error(E, "malformed immediate expression");
return MatchOperand_ParseFail;
}
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
if (!CE) {
Error(E, "'lsb' operand must be an immediate");
return MatchOperand_ParseFail;
}
int64_t LSB = CE->getValue();
// The LSB must be in the range [0,31]
if (LSB < 0 || LSB > 31) {
Error(E, "'lsb' operand must be in the range [0,31]");
return MatchOperand_ParseFail;
}
E = Parser.getTok().getLoc();
// Expect another immediate operand.
if (Parser.getTok().isNot(AsmToken::Comma)) {
Error(Parser.getTok().getLoc(), "too few operands");
return MatchOperand_ParseFail;
}
Parser.Lex(); // Eat hash token.
if (Parser.getTok().isNot(AsmToken::Hash) &&
Parser.getTok().isNot(AsmToken::Dollar)) {
Error(Parser.getTok().getLoc(), "'#' expected");
return MatchOperand_ParseFail;
}
Parser.Lex(); // Eat hash token.
const MCExpr *WidthExpr;
SMLoc EndLoc;
if (getParser().parseExpression(WidthExpr, EndLoc)) {
Error(E, "malformed immediate expression");
return MatchOperand_ParseFail;
}
CE = dyn_cast<MCConstantExpr>(WidthExpr);
if (!CE) {
Error(E, "'width' operand must be an immediate");
return MatchOperand_ParseFail;
}
int64_t Width = CE->getValue();
// The width must be in the range [1,32-lsb]
if (Width < 1 || Width > 32 - LSB) {
Error(E, "'width' operand must be in the range [1,32-lsb]");
return MatchOperand_ParseFail;
}
Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc));
return MatchOperand_Success;
}
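// Illustrative bitfield operand (added): "bfi r0, r1, #4, #8" parses as
// LSB = 4, Width = 8; the width must satisfy 1 <= Width <= 32 - LSB.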
OperandMatchResultTy
ARMAsmParser::parsePostIdxReg(OperandVector &Operands) {
// Check for a post-index addressing register operand. Specifically:
// postidx_reg := '+' register {, shift}
// | '-' register {, shift}
// | register {, shift}
// This method must return MatchOperand_NoMatch without consuming any tokens
// in the case where there is no match, as other alternatives take other
// parse methods.
MCAsmParser &Parser = getParser();
AsmToken Tok = Parser.getTok();
SMLoc S = Tok.getLoc();
bool haveEaten = false;
bool isAdd = true;
if (Tok.is(AsmToken::Plus)) {
Parser.Lex(); // Eat the '+' token.
haveEaten = true;
} else if (Tok.is(AsmToken::Minus)) {
Parser.Lex(); // Eat the '-' token.
isAdd = false;
haveEaten = true;
}
SMLoc E = Parser.getTok().getEndLoc();
int Reg = tryParseRegister();
if (Reg == -1) {
if (!haveEaten)
return MatchOperand_NoMatch;
Error(Parser.getTok().getLoc(), "register expected");
return MatchOperand_ParseFail;
}
ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
unsigned ShiftImm = 0;
if (Parser.getTok().is(AsmToken::Comma)) {
Parser.Lex(); // Eat the ','.
if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
return MatchOperand_ParseFail;
// FIXME: Only approximates end...may include intervening whitespace.
E = Parser.getTok().getLoc();
}
Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
ShiftImm, S, E));
return MatchOperand_Success;
}
OperandMatchResultTy
ARMAsmParser::parseAM3Offset(OperandVector &Operands) {
// Check for a post-index addressing register operand. Specifically:
// am3offset := '+' register
// | '-' register
// | register
// | # imm
// | # + imm
// | # - imm
// This method must return MatchOperand_NoMatch without consuming any tokens
// in the case where there is no match, as other alternatives take other
// parse methods.
MCAsmParser &Parser = getParser();
AsmToken Tok = Parser.getTok();
SMLoc S = Tok.getLoc();
// Do immediates first, as we always parse those if we have a '#'.
if (Parser.getTok().is(AsmToken::Hash) ||
Parser.getTok().is(AsmToken::Dollar)) {
Parser.Lex(); // Eat '#' or '$'.
// Explicitly look for a '-', as we need to encode negative zero
// differently.
bool isNegative = Parser.getTok().is(AsmToken::Minus);
const MCExpr *Offset;
SMLoc E;
if (getParser().parseExpression(Offset, E))
return MatchOperand_ParseFail;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
if (!CE) {
Error(S, "constant expression expected");
return MatchOperand_ParseFail;
}
// Negative zero is encoded as the flag value
// std::numeric_limits<int32_t>::min().
int32_t Val = CE->getValue();
if (isNegative && Val == 0)
Val = std::numeric_limits<int32_t>::min();
Operands.push_back(
ARMOperand::CreateImm(MCConstantExpr::create(Val, getContext()), S, E));
return MatchOperand_Success;
}
bool haveEaten = false;
bool isAdd = true;
if (Tok.is(AsmToken::Plus)) {
Parser.Lex(); // Eat the '+' token.
haveEaten = true;
} else if (Tok.is(AsmToken::Minus)) {
Parser.Lex(); // Eat the '-' token.
isAdd = false;
haveEaten = true;
}
Tok = Parser.getTok();
int Reg = tryParseRegister();
if (Reg == -1) {
if (!haveEaten)
return MatchOperand_NoMatch;
Error(Tok.getLoc(), "register expected");
return MatchOperand_ParseFail;
}
Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
0, S, Tok.getEndLoc()));
return MatchOperand_Success;
}
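// Illustrative am3offset forms (added for clarity):
//   ldrd r0, r1, [r2], #8    @ immediate offset
//   ldrd r0, r1, [r2], -r3   @ register offset with subtraction
//   ldrd r0, r1, [r2], #-0   @ stored as INT32_MIN to keep the minus sign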
/// Convert parsed operands to MCInst. Needed here because this instruction
/// only has two register operands, but multiplication is commutative so
/// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN".
void ARMAsmParser::cvtThumbMultiply(MCInst &Inst,
const OperandVector &Operands) {
((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1);
((ARMOperand &)*Operands[1]).addCCOutOperands(Inst, 1);
// If we have a three-operand form, make sure to set Rn to be the operand
// that isn't the same as Rd.
unsigned RegOp = 4;
if (Operands.size() == 6 &&
((ARMOperand &)*Operands[4]).getReg() ==
((ARMOperand &)*Operands[3]).getReg())
RegOp = 5;
((ARMOperand &)*Operands[RegOp]).addRegOperands(Inst, 1);
Inst.addOperand(Inst.getOperand(0));
((ARMOperand &)*Operands[2]).addCondCodeOperands(Inst, 2);
}
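// Illustrative effect (added): for "mul r0, r0, r1" Operands[4] equals
// Rd, so Operands[5] (r1) is taken as Rn; for "mul r0, r1, r0" Rn is
// Operands[4] (r1). Both spellings therefore encode the same MCInst.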
void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
const OperandVector &Operands) {
int CondOp = -1, ImmOp = -1;
switch(Inst.getOpcode()) {
case ARM::tB:
case ARM::tBcc: CondOp = 1; ImmOp = 2; break;
case ARM::t2B:
case ARM::t2Bcc: CondOp = 1; ImmOp = 3; break;
default: llvm_unreachable("Unexpected instruction in cvtThumbBranches");
}
// First decide whether or not the branch should be conditional
// by looking at its location relative to an IT block.
if (inITBlock()) {
// Inside an IT block we cannot have any conditional branches. Any
// such instruction needs to be converted to unconditional form.
switch(Inst.getOpcode()) {
case ARM::tBcc: Inst.setOpcode(ARM::tB); break;
case ARM::t2Bcc: Inst.setOpcode(ARM::t2B); break;
}
} else {
// outside IT blocks we can only have unconditional branches with AL
// condition code or conditional branches with non-AL condition code
unsigned Cond = static_cast<ARMOperand &>(*Operands[CondOp]).getCondCode();
switch(Inst.getOpcode()) {
case ARM::tB:
case ARM::tBcc:
Inst.setOpcode(Cond == ARMCC::AL ? ARM::tB : ARM::tBcc);
break;
case ARM::t2B:
case ARM::t2Bcc:
Inst.setOpcode(Cond == ARMCC::AL ? ARM::t2B : ARM::t2Bcc);
break;
}
}
// now decide on encoding size based on branch target range
switch(Inst.getOpcode()) {
// classify tB as either t2B or t1B based on range of immediate operand
case ARM::tB: {
ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
if (!op.isSignedOffset<11, 1>() && isThumb() && hasV8MBaseline())
Inst.setOpcode(ARM::t2B);
break;
}
// classify tBcc as either t2Bcc or t1Bcc based on range of immediate operand
case ARM::tBcc: {
ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
if (!op.isSignedOffset<8, 1>() && isThumb() && hasV8MBaseline())
Inst.setOpcode(ARM::t2Bcc);
break;
}
}
((ARMOperand &)*Operands[ImmOp]).addImmOperands(Inst, 1);
((ARMOperand &)*Operands[CondOp]).addCondCodeOperands(Inst, 2);
}
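// Illustrative effect (added): inside an IT block "beq label" is emitted
// as an unconditional tB/t2B, since the condition comes from the IT
// block; outside one, a branch with a non-AL condition is forced to the
// tBcc/t2Bcc form and an AL-condition branch to tB/t2B.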
void ARMAsmParser::cvtMVEVMOVQtoDReg(
MCInst &Inst, const OperandVector &Operands) {
// mnemonic, condition code, Rt, Rt2, Qd, idx, Qd again, idx2
assert(Operands.size() == 8);
((ARMOperand &)*Operands[2]).addRegOperands(Inst, 1); // Rt
((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1); // Rt2
((ARMOperand &)*Operands[4]).addRegOperands(Inst, 1); // Qd
((ARMOperand &)*Operands[5]).addMVEPairVectorIndexOperands(Inst, 1); // idx
// skip second copy of Qd in Operands[6]
((ARMOperand &)*Operands[7]).addMVEPairVectorIndexOperands(Inst, 1); // idx2
((ARMOperand &)*Operands[1]).addCondCodeOperands(Inst, 2); // condition code
}
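// Illustrative source form handled above (added), matching the operand
// layout noted in the comment: "vmov r0, r1, q2[3], q2[1]"; the repeated
// Qd in Operands[6] is deliberately skipped when building the MCInst.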
/// Parse an ARM memory expression. Returns false if successful; otherwise
/// returns true (a diagnostic will have been emitted). The first token
/// must be a '[' when called.
bool ARMAsmParser::parseMemory(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
SMLoc S, E;
if (Parser.getTok().isNot(AsmToken::LBrac))
return TokError("Token is not a Left Bracket");
S = Parser.getTok().getLoc();
Parser.Lex(); // Eat left bracket token.
const AsmToken &BaseRegTok = Parser.getTok();
int BaseRegNum = tryParseRegister();
if (BaseRegNum == -1)
return Error(BaseRegTok.getLoc(), "register expected");
// The next token must either be a comma, a colon or a closing bracket.
const AsmToken &Tok = Parser.getTok();
if (!Tok.is(AsmToken::Colon) && !Tok.is(AsmToken::Comma) &&
!Tok.is(AsmToken::RBrac))
return Error(Tok.getLoc(), "malformed memory operand");
if (Tok.is(AsmToken::RBrac)) {
E = Tok.getEndLoc();
Parser.Lex(); // Eat right bracket token.
Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
ARM_AM::no_shift, 0, 0, false,
S, E));
// If there's a pre-indexing writeback marker, '!', just add it as a token
// operand. It's rather odd, but syntactically valid.
if (Parser.getTok().is(AsmToken::Exclaim)) {
Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
Parser.Lex(); // Eat the '!'.
}
return false;
}
assert((Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) &&
"Lost colon or comma in memory operand?!");
if (Tok.is(AsmToken::Comma)) {
Parser.Lex(); // Eat the comma.
}
// If we have a ':', it's an alignment specifier.
if (Parser.getTok().is(AsmToken::Colon)) {
Parser.Lex(); // Eat the ':'.
E = Parser.getTok().getLoc();
SMLoc AlignmentLoc = Tok.getLoc();
const MCExpr *Expr;
if (getParser().parseExpression(Expr))
return true;
// The expression has to be a constant. Memory references with relocations
// don't come through here, as they use the <label> forms of the relevant
// instructions.
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
if (!CE)
return Error (E, "constant expression expected");
unsigned Align = 0;
switch (CE->getValue()) {
default:
return Error(E,
"alignment specifier must be 16, 32, 64, 128, or 256 bits");
case 16: Align = 2; break;
case 32: Align = 4; break;
case 64: Align = 8; break;
case 128: Align = 16; break;
case 256: Align = 32; break;
}
// Now we should have the closing ']'
if (Parser.getTok().isNot(AsmToken::RBrac))
return Error(Parser.getTok().getLoc(), "']' expected");
E = Parser.getTok().getEndLoc();
Parser.Lex(); // Eat right bracket token.
// Don't worry about range checking the value here. That's handled by
// the is*() predicates.
Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
ARM_AM::no_shift, 0, Align,
false, S, E, AlignmentLoc));
// If there's a pre-indexing writeback marker, '!', just add it as a token
// operand.
if (Parser.getTok().is(AsmToken::Exclaim)) {
Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
Parser.Lex(); // Eat the '!'.
}
return false;
}
// If we have a '#', it's an immediate offset, else assume it's a register
// offset. Be friendly and also accept a plain integer (without a leading
// hash) for gas compatibility.
if (Parser.getTok().is(AsmToken::Hash) ||
Parser.getTok().is(AsmToken::Dollar) ||
Parser.getTok().is(AsmToken::Integer)) {
if (Parser.getTok().isNot(AsmToken::Integer))
Parser.Lex(); // Eat '#' or '$'.
E = Parser.getTok().getLoc();
bool isNegative = getParser().getTok().is(AsmToken::Minus);
const MCExpr *Offset;
if (getParser().parseExpression(Offset))
return true;
// The expression has to be a constant. Memory references with relocations
// don't come through here, as they use the <label> forms of the relevant
// instructions.
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
if (!CE)
return Error (E, "constant expression expected");
// If the constant was #-0, represent it as
// std::numeric_limits<int32_t>::min().
int32_t Val = CE->getValue();
if (isNegative && Val == 0)
CE = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
getContext());
// Now we should have the closing ']'
if (Parser.getTok().isNot(AsmToken::RBrac))
return Error(Parser.getTok().getLoc(), "']' expected");
E = Parser.getTok().getEndLoc();
Parser.Lex(); // Eat right bracket token.
// Don't worry about range checking the value here. That's handled by
// the is*() predicates.
Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
ARM_AM::no_shift, 0, 0,
false, S, E));
// If there's a pre-indexing writeback marker, '!', just add it as a token
// operand.
if (Parser.getTok().is(AsmToken::Exclaim)) {
Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
Parser.Lex(); // Eat the '!'.
}
return false;
}
// The register offset is optionally preceded by a '+' or '-'
bool isNegative = false;
if (Parser.getTok().is(AsmToken::Minus)) {
isNegative = true;
Parser.Lex(); // Eat the '-'.
} else if (Parser.getTok().is(AsmToken::Plus)) {
// Nothing to do.
Parser.Lex(); // Eat the '+'.
}
E = Parser.getTok().getLoc();
int OffsetRegNum = tryParseRegister();
if (OffsetRegNum == -1)
return Error(E, "register expected");
// If there's a shift operator, handle it.
ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
unsigned ShiftImm = 0;
if (Parser.getTok().is(AsmToken::Comma)) {
Parser.Lex(); // Eat the ','.
if (parseMemRegOffsetShift(ShiftType, ShiftImm))
return true;
}
// Now we should have the closing ']'
if (Parser.getTok().isNot(AsmToken::RBrac))
return Error(Parser.getTok().getLoc(), "']' expected");
E = Parser.getTok().getEndLoc();
Parser.Lex(); // Eat right bracket token.
Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, OffsetRegNum,
ShiftType, ShiftImm, 0, isNegative,
S, E));
// If there's a pre-indexing writeback marker, '!', just add it as a token
// operand.
if (Parser.getTok().is(AsmToken::Exclaim)) {
Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
Parser.Lex(); // Eat the '!'.
}
return false;
}
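// Illustrative memory operands covered above (added for clarity):
//   [r0]               @ bare base register
//   [r0, #16]!         @ immediate offset with pre-index writeback '!'
//   [r0, -r1, lsl #2]  @ register offset with optional sign and shift
//   [r0:128]           @ alignment specifier in bits (16/32/64/128/256)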
/// parseMemRegOffsetShift - Parse one of these two:
/// ( lsl | lsr | asr | ror ) , # shift_amount
/// rrx
/// Returns false on success and true on failure (after emitting an error).
bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
unsigned &Amount) {
MCAsmParser &Parser = getParser();
SMLoc Loc = Parser.getTok().getLoc();
const AsmToken &Tok = Parser.getTok();
if (Tok.isNot(AsmToken::Identifier))
return Error(Loc, "illegal shift operator");
StringRef ShiftName = Tok.getString();
if (ShiftName == "lsl" || ShiftName == "LSL" ||
ShiftName == "asl" || ShiftName == "ASL")
St = ARM_AM::lsl;
else if (ShiftName == "lsr" || ShiftName == "LSR")
St = ARM_AM::lsr;
else if (ShiftName == "asr" || ShiftName == "ASR")
St = ARM_AM::asr;
else if (ShiftName == "ror" || ShiftName == "ROR")
St = ARM_AM::ror;
else if (ShiftName == "rrx" || ShiftName == "RRX")
St = ARM_AM::rrx;
else if (ShiftName == "uxtw" || ShiftName == "UXTW")
St = ARM_AM::uxtw;
else
return Error(Loc, "illegal shift operator");
Parser.Lex(); // Eat shift type token.
// rrx stands alone.
Amount = 0;
if (St != ARM_AM::rrx) {
Loc = Parser.getTok().getLoc();
// A '#' and a shift amount.
const AsmToken &HashTok = Parser.getTok();
if (HashTok.isNot(AsmToken::Hash) &&
HashTok.isNot(AsmToken::Dollar))
return Error(HashTok.getLoc(), "'#' expected");
Parser.Lex(); // Eat hash token.
const MCExpr *Expr;
if (getParser().parseExpression(Expr))
return true;
// Range check the immediate.
// lsl, ror: 0 <= imm <= 31
// lsr, asr: 0 <= imm <= 32
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
if (!CE)
return Error(Loc, "shift amount must be an immediate");
int64_t Imm = CE->getValue();
if (Imm < 0 ||
((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
return Error(Loc, "immediate shift value out of range");
// If <ShiftTy> #0, turn it into a no_shift.
if (Imm == 0)
St = ARM_AM::lsl;
// For consistency, treat lsr #32 and asr #32 as having immediate value 0.
if (Imm == 32)
Imm = 0;
Amount = Imm;
}
return false;
}
/// parseFPImm - A floating point immediate expression operand.
OperandMatchResultTy
ARMAsmParser::parseFPImm(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
// Anything that can accept a floating point constant as an operand
// needs to go through here, as the regular parseExpression is
// integer only.
//
// This routine still creates a generic Immediate operand, containing
// a bitcast of the 64-bit floating point value. The various operands
// that accept floats can check whether the value is valid for them
// via the standard is*() predicates.
SMLoc S = Parser.getTok().getLoc();
if (Parser.getTok().isNot(AsmToken::Hash) &&
Parser.getTok().isNot(AsmToken::Dollar))
return MatchOperand_NoMatch;
// Disambiguate the VMOV forms that can accept an FP immediate.
// vmov.f32 <sreg>, #imm
// vmov.f64 <dreg>, #imm
// vmov.f32 <dreg>, #imm @ vector f32x2
// vmov.f32 <qreg>, #imm @ vector f32x4
//
// There are also the NEON VMOV instructions which expect an
// integer constant. Make sure we don't try to parse an FPImm
// for these:
// vmov.i{8|16|32|64} <dreg|qreg>, #imm
ARMOperand &TyOp = static_cast<ARMOperand &>(*Operands[2]);
bool isVmovf = TyOp.isToken() &&
(TyOp.getToken() == ".f32" || TyOp.getToken() == ".f64" ||
TyOp.getToken() == ".f16");
ARMOperand &Mnemonic = static_cast<ARMOperand &>(*Operands[0]);
bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() == "fconstd" ||
Mnemonic.getToken() == "fconsts");
if (!(isVmovf || isFconst))
return MatchOperand_NoMatch;
Parser.Lex(); // Eat '#' or '$'.
// Handle negation, as that still comes through as a separate token.
bool isNegative = false;
if (Parser.getTok().is(AsmToken::Minus)) {
isNegative = true;
Parser.Lex();
}
const AsmToken &Tok = Parser.getTok();
SMLoc Loc = Tok.getLoc();
if (Tok.is(AsmToken::Real) && isVmovf) {
APFloat RealVal(APFloat::IEEEsingle(), Tok.getString());
uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
// If we had a '-' in front, toggle the sign bit.
IntVal ^= (uint64_t)isNegative << 31;
Parser.Lex(); // Eat the token.
Operands.push_back(ARMOperand::CreateImm(
MCConstantExpr::create(IntVal, getContext()),
S, Parser.getTok().getLoc()));
return MatchOperand_Success;
}
// Also handle plain integers. Instructions which allow floating point
// immediates also allow a raw encoded 8-bit value.
if (Tok.is(AsmToken::Integer) && isFconst) {
int64_t Val = Tok.getIntVal();
Parser.Lex(); // Eat the token.
if (Val > 255 || Val < 0) {
Error(Loc, "encoded floating point value out of range");
return MatchOperand_ParseFail;
}
float RealVal = ARM_AM::getFPImmFloat(Val);
Val = APFloat(RealVal).bitcastToAPInt().getZExtValue();
Operands.push_back(ARMOperand::CreateImm(
MCConstantExpr::create(Val, getContext()), S,
Parser.getTok().getLoc()));
return MatchOperand_Success;
}
Error(Loc, "invalid floating point immediate");
return MatchOperand_ParseFail;
}
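// Illustrative FP immediates (added): "vmov.f32 s0, #1.0" takes the real-
// literal path above, while "fconsts s0, #112" supplies a raw encoded
// 8-bit value that is expanded via ARM_AM::getFPImmFloat.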
/// Parse an ARM instruction operand. For now this parses the operand
/// regardless of the mnemonic.
bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
MCAsmParser &Parser = getParser();
SMLoc S, E;
// Check if the current operand has a custom associated parser; if so, try
// to custom parse the operand, or fall back to the general approach.
OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
if (ResTy == MatchOperand_Success)
return false;
// If there wasn't a custom match, try the generic matcher below. Otherwise,
// there was a match, but an error occurred, in which case, just return that
// the operand parsing failed.
if (ResTy == MatchOperand_ParseFail)
return true;
switch (getLexer().getKind()) {
default:
Error(Parser.getTok().getLoc(), "unexpected token in operand");
return true;
case AsmToken::Identifier: {
// If we've seen a branch mnemonic, the next operand must be a label. This
// is true even if the label is a register name. So "b r1" means branch to
// label "r1".
bool ExpectLabel = Mnemonic == "b" || Mnemonic == "bl";
if (!ExpectLabel) {
if (!tryParseRegisterWithWriteBack(Operands))
return false;
int Res = tryParseShiftRegister(Operands);
if (Res == 0) // success
return false;
else if (Res == -1) // irrecoverable error
return true;
// If this is VMRS, check for the apsr_nzcv operand.
if (Mnemonic == "vmrs" &&
Parser.getTok().getString().equals_lower("apsr_nzcv")) {
S = Parser.getTok().getLoc();
Parser.Lex();
Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
return false;
}
}
// Fall through for the Identifier case that is not a register or a
// special name.
LLVM_FALLTHROUGH;
}
case AsmToken::LParen: // parenthesized expressions like (_strcmp-4)
case AsmToken::Integer: // things like 1f and 2b as branch targets
case AsmToken::String: // quoted label names.
case AsmToken::Dot: { // . as a branch target
// This was not a register so parse other operands that start with an
// identifier (like labels) as expressions and create them as immediates.
const MCExpr *IdVal;
S = Parser.getTok().getLoc();
if (getParser().parseExpression(IdVal))
return true;
E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
return false;
}
case AsmToken::LBrac:
return parseMemory(Operands);
case AsmToken::LCurly:
return parseRegisterList(Operands, !Mnemonic.startswith("clr"));
case AsmToken::Dollar:
case AsmToken::Hash:
// #42 -> immediate.
S = Parser.getTok().getLoc();
Parser.Lex();
if (Parser.getTok().isNot(AsmToken::Colon)) {
bool isNegative = Parser.getTok().is(AsmToken::Minus);
const MCExpr *ImmVal;
if (getParser().parseExpression(ImmVal))
return true;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
if (CE) {
int32_t Val = CE->getValue();
if (isNegative && Val == 0)
ImmVal = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
getContext());
}
E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
// There can be a trailing '!' on operands that we want as a separate
// '!' Token operand. Handle that here. For example, the compatibility
// alias for 'srsdb sp!, #imm' is 'srsdb #imm!'.
if (Parser.getTok().is(AsmToken::Exclaim)) {
Operands.push_back(ARMOperand::CreateToken(Parser.getTok().getString(),
Parser.getTok().getLoc()));
Parser.Lex(); // Eat exclaim token
}
return false;
}
// With a ':' after the '#', it's just like a plain ':'.
LLVM_FALLTHROUGH;
case AsmToken::Colon: {
S = Parser.getTok().getLoc();
// ":lower16:" and ":upper16:" expression prefixes
// FIXME: Check it's an expression prefix,
// e.g. (FOO - :lower16:BAR) isn't legal.
ARMMCExpr::VariantKind RefKind;
if (parsePrefix(RefKind))
return true;
const MCExpr *SubExprVal;
if (getParser().parseExpression(SubExprVal))
return true;
const MCExpr *ExprVal = ARMMCExpr::create(RefKind, SubExprVal,
getContext());
E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
return false;
}
case AsmToken::Equal: {
S = Parser.getTok().getLoc();
if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
return Error(S, "unexpected token in operand");
Parser.Lex(); // Eat '='
const MCExpr *SubExprVal;
if (getParser().parseExpression(SubExprVal))
return true;
E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
// execute-only: we assume that assembly programmers know what they are
// doing and allow literal pool creation here
Operands.push_back(ARMOperand::CreateConstantPoolImm(SubExprVal, S, E));
return false;
}
}
}
// parsePrefix - Parse an ARM 16-bit relocation expression prefix, i.e.
// :lower16: or :upper16:.
bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
MCAsmParser &Parser = getParser();
RefKind = ARMMCExpr::VK_ARM_None;
// consume an optional '#' (GNU compatibility)
if (getLexer().is(AsmToken::Hash))
Parser.Lex();
// :lower16: and :upper16: modifiers
assert(getLexer().is(AsmToken::Colon) && "expected a :");
Parser.Lex(); // Eat ':'
if (getLexer().isNot(AsmToken::Identifier)) {
Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
return true;
}
enum {
COFF = (1 << MCObjectFileInfo::IsCOFF),
ELF = (1 << MCObjectFileInfo::IsELF),
MACHO = (1 << MCObjectFileInfo::IsMachO),
WASM = (1 << MCObjectFileInfo::IsWasm),
};
static const struct PrefixEntry {
const char *Spelling;
ARMMCExpr::VariantKind VariantKind;
uint8_t SupportedFormats;
} PrefixEntries[] = {
{ "lower16", ARMMCExpr::VK_ARM_LO16, COFF | ELF | MACHO },
{ "upper16", ARMMCExpr::VK_ARM_HI16, COFF | ELF | MACHO },
};
StringRef IDVal = Parser.getTok().getIdentifier();
const auto &Prefix =
std::find_if(std::begin(PrefixEntries), std::end(PrefixEntries),
[&IDVal](const PrefixEntry &PE) {
return PE.Spelling == IDVal;
});
if (Prefix == std::end(PrefixEntries)) {
Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
return true;
}
uint8_t CurrentFormat;
switch (getContext().getObjectFileInfo()->getObjectFileType()) {
case MCObjectFileInfo::IsMachO:
CurrentFormat = MACHO;
break;
case MCObjectFileInfo::IsELF:
CurrentFormat = ELF;
break;
case MCObjectFileInfo::IsCOFF:
CurrentFormat = COFF;
break;
case MCObjectFileInfo::IsWasm:
CurrentFormat = WASM;
break;
case MCObjectFileInfo::IsXCOFF:
llvm_unreachable("unexpected object format");
break;
}
if (~Prefix->SupportedFormats & CurrentFormat) {
Error(Parser.getTok().getLoc(),
"cannot represent relocation in the current file format");
return true;
}
RefKind = Prefix->VariantKind;
Parser.Lex();
if (getLexer().isNot(AsmToken::Colon)) {
Error(Parser.getTok().getLoc(), "unexpected token after prefix");
return true;
}
Parser.Lex(); // Eat the last ':'
return false;
}
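// Illustrative uses of the prefixes recognized above (added):
//   movw r0, :lower16:sym
//   movt r0, :upper16:sym
// Both are rejected on Wasm targets, where neither prefix is supported.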
/// Given a mnemonic, split out possible predication code and carry
/// setting letters to form a canonical mnemonic and flags.
//
// FIXME: Would be nice to autogen this.
// FIXME: This is a bit of a maze of special cases.
StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
StringRef ExtraToken,
unsigned &PredicationCode,
unsigned &VPTPredicationCode,
bool &CarrySetting,
unsigned &ProcessorIMod,
StringRef &ITMask) {
PredicationCode = ARMCC::AL;
VPTPredicationCode = ARMVCC::None;
CarrySetting = false;
ProcessorIMod = 0;
// Ignore some mnemonics we know aren't predicated forms.
//
// FIXME: Would be nice to autogen this.
if ((Mnemonic == "movs" && isThumb()) ||
Mnemonic == "teq" || Mnemonic == "vceq" || Mnemonic == "svc" ||
Mnemonic == "mls" || Mnemonic == "smmls" || Mnemonic == "vcls" ||
Mnemonic == "vmls" || Mnemonic == "vnmls" || Mnemonic == "vacge" ||
Mnemonic == "vcge" || Mnemonic == "vclt" || Mnemonic == "vacgt" ||
Mnemonic == "vaclt" || Mnemonic == "vacle" || Mnemonic == "hlt" ||
Mnemonic == "vcgt" || Mnemonic == "vcle" || Mnemonic == "smlal" ||
Mnemonic == "umaal" || Mnemonic == "umlal" || Mnemonic == "vabal" ||
Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
Mnemonic == "fmuls" || Mnemonic == "vmaxnm" || Mnemonic == "vminnm" ||
Mnemonic == "vcvta" || Mnemonic == "vcvtn" || Mnemonic == "vcvtp" ||
Mnemonic == "vcvtm" || Mnemonic == "vrinta" || Mnemonic == "vrintn" ||
Mnemonic == "vrintp" || Mnemonic == "vrintm" || Mnemonic == "hvc" ||
Mnemonic.startswith("vsel") || Mnemonic == "vins" || Mnemonic == "vmovx" ||
Mnemonic == "bxns" || Mnemonic == "blxns" ||
Mnemonic == "vudot" || Mnemonic == "vsdot" ||
Mnemonic == "vcmla" || Mnemonic == "vcadd" ||
Mnemonic == "vfmal" || Mnemonic == "vfmsl" ||
Mnemonic == "wls" || Mnemonic == "le" || Mnemonic == "dls" ||
Mnemonic == "csel" || Mnemonic == "csinc" ||
Mnemonic == "csinv" || Mnemonic == "csneg" || Mnemonic == "cinc" ||
Mnemonic == "cinv" || Mnemonic == "cneg" || Mnemonic == "cset" ||
Mnemonic == "csetm")
return Mnemonic;
// First, split out any predication code. Ignore mnemonics we know aren't
// predicated but do have a carry-set and so weren't caught above.
if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
Mnemonic != "sbcs" && Mnemonic != "rscs" &&
!(hasMVE() &&
(Mnemonic == "vmine" ||
Mnemonic == "vshle" || Mnemonic == "vshlt" || Mnemonic == "vshllt" ||
Mnemonic == "vrshle" || Mnemonic == "vrshlt" ||
Mnemonic == "vmvne" || Mnemonic == "vorne" ||
Mnemonic == "vnege" || Mnemonic == "vnegt" ||
Mnemonic == "vmule" || Mnemonic == "vmult" ||
Mnemonic == "vrintne" ||
Mnemonic == "vcmult" || Mnemonic == "vcmule" ||
Mnemonic == "vpsele" || Mnemonic == "vpselt" ||
Mnemonic.startswith("vq")))) {
unsigned CC = ARMCondCodeFromString(Mnemonic.substr(Mnemonic.size()-2));
if (CC != ~0U) {
Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
PredicationCode = CC;
}
}
// Next, determine if we have a carry setting bit. We explicitly ignore all
// the instructions we know end in 's'.
if (Mnemonic.endswith("s") &&
!(Mnemonic == "cps" || Mnemonic == "mls" ||
Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
Mnemonic == "vfms" || Mnemonic == "vfnms" || Mnemonic == "fconsts" ||
Mnemonic == "bxns" || Mnemonic == "blxns" || Mnemonic == "vfmas" ||
Mnemonic == "vmlas" ||
(Mnemonic == "movs" && isThumb()))) {
Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
CarrySetting = true;
}
// The "cps" instruction can have a interrupt mode operand which is glued into
// the mnemonic. Check if this is the case, split it and parse the imod op
if (Mnemonic.startswith("cps")) {
// Split out any imod code.
unsigned IMod =
StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
.Case("ie", ARM_PROC::IE)
.Case("id", ARM_PROC::ID)
.Default(~0U);
if (IMod != ~0U) {
Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
ProcessorIMod = IMod;
}
}
if (isMnemonicVPTPredicable(Mnemonic, ExtraToken) && Mnemonic != "vmovlt" &&
Mnemonic != "vshllt" && Mnemonic != "vrshrnt" && Mnemonic != "vshrnt" &&
Mnemonic != "vqrshrunt" && Mnemonic != "vqshrunt" &&
Mnemonic != "vqrshrnt" && Mnemonic != "vqshrnt" && Mnemonic != "vmullt" &&
Mnemonic != "vqmovnt" && Mnemonic != "vqmovunt" &&
Mnemonic != "vqmovnt" && Mnemonic != "vmovnt" && Mnemonic != "vqdmullt" &&
Mnemonic != "vpnot" && Mnemonic != "vcvtt" && Mnemonic != "vcvt") {
unsigned CC = ARMVectorCondCodeFromString(Mnemonic.substr(Mnemonic.size()-1));
if (CC != ~0U) {
Mnemonic = Mnemonic.slice(0, Mnemonic.size()-1);
VPTPredicationCode = CC;
}
return Mnemonic;
}
// The "it" instruction has the condition mask on the end of the mnemonic.
if (Mnemonic.startswith("it")) {
ITMask = Mnemonic.slice(2, Mnemonic.size());
Mnemonic = Mnemonic.slice(0, 2);
}
if (Mnemonic.startswith("vpst")) {
ITMask = Mnemonic.slice(4, Mnemonic.size());
Mnemonic = Mnemonic.slice(0, 4);
}
else if (Mnemonic.startswith("vpt")) {
ITMask = Mnemonic.slice(3, Mnemonic.size());
Mnemonic = Mnemonic.slice(0, 3);
}
return Mnemonic;
}
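// Rough worked examples of the splitting above (illustrative only): for
// "subseq", the predication pass strips the trailing "eq" (PredicationCode =
// ARMCC::EQ, Mnemonic = "subs") and the carry pass then strips the trailing
// "s" (CarrySetting = true), returning "sub". For "cpsie", neither pass
// fires and the imod pass strips "ie" (ProcessorIMod = ARM_PROC::IE),
// returning "cps".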
/// Given a canonical mnemonic, determine if the instruction ever allows
/// inclusion of carry set or predication code operands.
//
// FIXME: It would be nice to autogen this.
void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic,
StringRef ExtraToken,
StringRef FullInst,
bool &CanAcceptCarrySet,
bool &CanAcceptPredicationCode,
bool &CanAcceptVPTPredicationCode) {
CanAcceptVPTPredicationCode = isMnemonicVPTPredicable(Mnemonic, ExtraToken);
CanAcceptCarrySet =
Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
Mnemonic == "add" || Mnemonic == "adc" || Mnemonic == "mul" ||
Mnemonic == "bic" || Mnemonic == "asr" || Mnemonic == "orr" ||
Mnemonic == "mvn" || Mnemonic == "rsb" || Mnemonic == "rsc" ||
Mnemonic == "orn" || Mnemonic == "sbc" || Mnemonic == "eor" ||
Mnemonic == "neg" || Mnemonic == "vfm" || Mnemonic == "vfnm" ||
(!isThumb() &&
(Mnemonic == "smull" || Mnemonic == "mov" || Mnemonic == "mla" ||
Mnemonic == "smlal" || Mnemonic == "umlal" || Mnemonic == "umull"));
if (Mnemonic == "bkpt" || Mnemonic == "cbnz" || Mnemonic == "setend" ||
Mnemonic == "cps" || Mnemonic == "it" || Mnemonic == "cbz" ||
Mnemonic == "trap" || Mnemonic == "hlt" || Mnemonic == "udf" ||
Mnemonic.startswith("crc32") || Mnemonic.startswith("cps") ||
Mnemonic.startswith("vsel") || Mnemonic == "vmaxnm" ||
Mnemonic == "vminnm" || Mnemonic == "vcvta" || Mnemonic == "vcvtn" ||
Mnemonic == "vcvtp" || Mnemonic == "vcvtm" || Mnemonic == "vrinta" ||
Mnemonic == "vrintn" || Mnemonic == "vrintp" || Mnemonic == "vrintm" ||
Mnemonic.startswith("aes") || Mnemonic == "hvc" || Mnemonic == "setpan" ||
Mnemonic.startswith("sha1") || Mnemonic.startswith("sha256") ||
(FullInst.startswith("vmull") && FullInst.endswith(".p64")) ||
Mnemonic == "vmovx" || Mnemonic == "vins" ||
Mnemonic == "vudot" || Mnemonic == "vsdot" ||
Mnemonic == "vcmla" || Mnemonic == "vcadd" ||
Mnemonic == "vfmal" || Mnemonic == "vfmsl" ||
Mnemonic == "sb" || Mnemonic == "ssbb" ||
Mnemonic == "pssbb" ||
Mnemonic == "bfcsel" || Mnemonic == "wls" ||
Mnemonic == "dls" || Mnemonic == "le" || Mnemonic == "csel" ||
Mnemonic == "csinc" || Mnemonic == "csinv" || Mnemonic == "csneg" ||
Mnemonic == "cinc" || Mnemonic == "cinv" || Mnemonic == "cneg" ||
Mnemonic == "cset" || Mnemonic == "csetm" ||
Mnemonic.startswith("vpt") || Mnemonic.startswith("vpst") ||
(hasMVE() &&
(Mnemonic.startswith("vst2") || Mnemonic.startswith("vld2") ||
Mnemonic.startswith("vst4") || Mnemonic.startswith("vld4") ||
Mnemonic.startswith("wlstp") || Mnemonic.startswith("dlstp") ||
Mnemonic.startswith("letp")))) {
// These mnemonics are never predicable
CanAcceptPredicationCode = false;
} else if (!isThumb()) {
// Some instructions are only predicable in Thumb mode
CanAcceptPredicationCode =
Mnemonic != "cdp2" && Mnemonic != "clrex" && Mnemonic != "mcr2" &&
Mnemonic != "mcrr2" && Mnemonic != "mrc2" && Mnemonic != "mrrc2" &&
Mnemonic != "dmb" && Mnemonic != "dfb" && Mnemonic != "dsb" &&
Mnemonic != "isb" && Mnemonic != "pld" && Mnemonic != "pli" &&
Mnemonic != "pldw" && Mnemonic != "ldc2" && Mnemonic != "ldc2l" &&
Mnemonic != "stc2" && Mnemonic != "stc2l" &&
Mnemonic != "tsb" &&
!Mnemonic.startswith("rfe") && !Mnemonic.startswith("srs");
} else if (isThumbOne()) {
if (hasV6MOps())
CanAcceptPredicationCode = Mnemonic != "movs";
else
CanAcceptPredicationCode = Mnemonic != "nop" && Mnemonic != "movs";
} else
CanAcceptPredicationCode = true;
}
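// For example (informally): "add" reports CanAcceptCarrySet = true and,
// being absent from the never-predicable list above, also
// CanAcceptPredicationCode = true, so "addseq" can be matched. "bkpt" is in
// the never-predicable list, so a parsed "bkpteq" is rejected with the
// "is not predicable" diagnostic in ParseInstruction().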
// Some Thumb instructions have two-operand forms that are not available as
// three-operand forms; convert to the two-operand form where possible.
//
// FIXME: We would really like to be able to tablegen'erate this.
void ARMAsmParser::tryConvertingToTwoOperandForm(StringRef Mnemonic,
bool CarrySetting,
OperandVector &Operands) {
if (Operands.size() != 6)
return;
const auto &Op3 = static_cast<ARMOperand &>(*Operands[3]);
auto &Op4 = static_cast<ARMOperand &>(*Operands[4]);
if (!Op3.isReg() || !Op4.isReg())
return;
auto Op3Reg = Op3.getReg();
auto Op4Reg = Op4.getReg();
// For most Thumb2 cases we just generate the 3 operand form and reduce
// it in processInstruction(), but the 3 operand form of ADD (t2ADDrr)
// won't accept SP or PC so we do the transformation here taking care
// with the immediate range in the 'add sp, sp, #imm' case.
auto &Op5 = static_cast<ARMOperand &>(*Operands[5]);
if (isThumbTwo()) {
if (Mnemonic != "add")
return;
bool TryTransform = Op3Reg == ARM::PC || Op4Reg == ARM::PC ||
(Op5.isReg() && Op5.getReg() == ARM::PC);
if (!TryTransform) {
TryTransform = (Op3Reg == ARM::SP || Op4Reg == ARM::SP ||
(Op5.isReg() && Op5.getReg() == ARM::SP)) &&
!(Op3Reg == ARM::SP && Op4Reg == ARM::SP &&
Op5.isImm() && !Op5.isImm0_508s4());
}
if (!TryTransform)
return;
} else if (!isThumbOne())
return;
if (!(Mnemonic == "add" || Mnemonic == "sub" || Mnemonic == "and" ||
Mnemonic == "eor" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
Mnemonic == "asr" || Mnemonic == "adc" || Mnemonic == "sbc" ||
Mnemonic == "ror" || Mnemonic == "orr" || Mnemonic == "bic"))
return;
// If the first two operands of a three-operand instruction are the same,
// transform to the two-operand version of the same instruction, e.g.
// 'adds r0, r0, #8' transforms to 'adds r0, #8' (note the exceptions
// below: small immediates and some register forms are excluded).
bool Transform = Op3Reg == Op4Reg;
// For commutative operations, we might be able to transform if we swap
// Op4 and Op5. The 'ADD Rdm, SP, Rdm' form is already handled specially
// as tADDrsp.
const ARMOperand *LastOp = &Op5;
bool Swap = false;
if (!Transform && Op5.isReg() && Op3Reg == Op5.getReg() &&
((Mnemonic == "add" && Op4Reg != ARM::SP) ||
Mnemonic == "and" || Mnemonic == "eor" ||
Mnemonic == "adc" || Mnemonic == "orr")) {
Swap = true;
LastOp = &Op4;
Transform = true;
}
// If both registers are the same then remove one of them from
// the operand list, with certain exceptions.
if (Transform) {
// Don't transform 'adds Rd, Rd, Rm' or 'sub{s} Rd, Rd, Rm' because the
// 2 operand forms don't exist.
if (((Mnemonic == "add" && CarrySetting) || Mnemonic == "sub") &&
LastOp->isReg())
Transform = false;
// Don't transform 'add/sub{s} Rd, Rd, #imm' if the immediate fits into
// 3-bits because the ARMARM says not to.
if ((Mnemonic == "add" || Mnemonic == "sub") && LastOp->isImm0_7())
Transform = false;
}
if (Transform) {
if (Swap)
std::swap(Op4, Op5);
Operands.erase(Operands.begin() + 3);
}
}
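// A sketch of the swap path above: for "ands r0, r1, r0", Op3 and Op5 are
// the same register and AND is commutative, so Op4 and Op5 are swapped and
// the duplicate destination is erased, yielding the two-operand form
// "ands r0, r1".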
bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
OperandVector &Operands) {
// FIXME: This is all horribly hacky. We really need a better way to deal
// with optional operands like this in the matcher table.
// The 'mov' mnemonic is special. One variant has a cc_out operand, while
// another does not. Specifically, the MOVW instruction does not. So we
// special case it here and remove the defaulted (non-setting) cc_out
// operand if that's the instruction we're trying to match.
//
// We do this as post-processing of the explicit operands rather than just
// conditionally adding the cc_out in the first place because we need
// to check the type of the parsed immediate operand.
if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
!static_cast<ARMOperand &>(*Operands[4]).isModImm() &&
static_cast<ARMOperand &>(*Operands[4]).isImm0_65535Expr() &&
static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
return true;
// Register-register 'add' for thumb does not have a cc_out operand
// when there are only two register operands.
if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
static_cast<ARMOperand &>(*Operands[3]).isReg() &&
static_cast<ARMOperand &>(*Operands[4]).isReg() &&
static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
return true;
// Register-register 'add' for thumb does not have a cc_out operand
// when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
// have to check the immediate range here since Thumb2 has a variant
// that can handle a different range and has a cc_out operand.
if (((isThumb() && Mnemonic == "add") ||
(isThumbTwo() && Mnemonic == "sub")) &&
Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
static_cast<ARMOperand &>(*Operands[4]).isReg() &&
static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::SP &&
static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
((Mnemonic == "add" && static_cast<ARMOperand &>(*Operands[5]).isReg()) ||
static_cast<ARMOperand &>(*Operands[5]).isImm0_1020s4()))
return true;
// For Thumb2, add/sub immediate does not have a cc_out operand for the
// imm0_4095 variant. That's the least-preferred variant when
// selecting via the generic "add" mnemonic, so to know that we
// should remove the cc_out operand, we have to explicitly check that
// it's not one of the other variants. Ugh.
if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
static_cast<ARMOperand &>(*Operands[4]).isReg() &&
static_cast<ARMOperand &>(*Operands[5]).isImm()) {
// Nest conditions rather than one big 'if' statement for readability.
//
// If both registers are low, we're in an IT block, and the immediate is
// in range, we should use encoding T1 instead, which has a cc_out.
if (inITBlock() &&
isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) &&
isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) &&
static_cast<ARMOperand &>(*Operands[5]).isImm0_7())
return false;
// Check against T3. If the second register is the PC, this is an
// alternate form of ADR, which uses encoding T4, so check for that too.
if (static_cast<ARMOperand &>(*Operands[4]).getReg() != ARM::PC &&
static_cast<ARMOperand &>(*Operands[5]).isT2SOImm())
return false;
// Otherwise, we use encoding T4, which does not have a cc_out
// operand.
return true;
}
// The thumb2 multiply instruction doesn't have a CCOut register, so
// if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
// use the 16-bit encoding or not.
if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
static_cast<ARMOperand &>(*Operands[3]).isReg() &&
static_cast<ARMOperand &>(*Operands[4]).isReg() &&
static_cast<ARMOperand &>(*Operands[5]).isReg() &&
// If the registers aren't low regs, the destination reg isn't the
// same as one of the source regs, or the cc_out operand is zero
// outside of an IT block, we have to use the 32-bit encoding, so
// remove the cc_out operand.
(!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
!isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
!isARMLowRegister(static_cast<ARMOperand &>(*Operands[5]).getReg()) ||
!inITBlock() || (static_cast<ARMOperand &>(*Operands[3]).getReg() !=
static_cast<ARMOperand &>(*Operands[5]).getReg() &&
static_cast<ARMOperand &>(*Operands[3]).getReg() !=
static_cast<ARMOperand &>(*Operands[4]).getReg())))
return true;
// Also check the 'mul' syntax variant that doesn't specify an explicit
// destination register.
if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
static_cast<ARMOperand &>(*Operands[3]).isReg() &&
static_cast<ARMOperand &>(*Operands[4]).isReg() &&
// If the registers aren't low regs or the cc_out operand is zero
// outside of an IT block, we have to use the 32-bit encoding, so
// remove the cc_out operand.
(!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
!isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
!inITBlock()))
return true;
// Register-register 'add/sub' for thumb does not have a cc_out operand
// when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
// the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
// right, this will result in better diagnostics (which operand is off)
// anyway.
if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
(Operands.size() == 5 || Operands.size() == 6) &&
static_cast<ARMOperand &>(*Operands[3]).isReg() &&
static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::SP &&
static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
(static_cast<ARMOperand &>(*Operands[4]).isImm() ||
(Operands.size() == 6 &&
static_cast<ARMOperand &>(*Operands[5]).isImm())))
return true;
return false;
}
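// E.g. (roughly): in ARM mode "mov r0, #0x1234" has an immediate that is
// not a modified-immediate but does fit in 16 bits, so only MOVW can match
// and the defaulted cc_out operand is removed; "mov r0, #1" is a valid
// modified-immediate, so the cc_out operand is kept for the flag-setting
// MOV variants.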
bool ARMAsmParser::shouldOmitPredicateOperand(StringRef Mnemonic,
OperandVector &Operands) {
// VRINT{Z, X} have a predicate operand in VFP, but not in NEON
unsigned RegIdx = 3;
if ((((Mnemonic == "vrintz" || Mnemonic == "vrintx") && !hasMVE()) ||
Mnemonic == "vrintr") &&
(static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f32" ||
static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f16")) {
if (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
(static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f32" ||
static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f16"))
RegIdx = 4;
if (static_cast<ARMOperand &>(*Operands[RegIdx]).isReg() &&
(ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
static_cast<ARMOperand &>(*Operands[RegIdx]).getReg()) ||
ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
static_cast<ARMOperand &>(*Operands[RegIdx]).getReg())))
return true;
}
return false;
}
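// Sketch: "vrintz.f32 d0, d1" names a NEON D register in the register
// position checked above, so the scalar predicate operand is dropped, while
// the VFP form "vrintz.f32 s0, s1" keeps it.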
bool ARMAsmParser::shouldOmitVectorPredicateOperand(StringRef Mnemonic,
OperandVector &Operands) {
if (!hasMVE() || Operands.size() < 3)
return true;
if (Mnemonic.startswith("vld2") || Mnemonic.startswith("vld4") ||
Mnemonic.startswith("vst2") || Mnemonic.startswith("vst4"))
return true;
if (Mnemonic.startswith("vctp") || Mnemonic.startswith("vpnot"))
return false;
if (Mnemonic.startswith("vmov") &&
!(Mnemonic.startswith("vmovl") || Mnemonic.startswith("vmovn") ||
Mnemonic.startswith("vmovx"))) {
for (auto &Operand : Operands) {
if (static_cast<ARMOperand &>(*Operand).isVectorIndex() ||
((*Operand).isReg() &&
(ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
(*Operand).getReg()) ||
ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
(*Operand).getReg())))) {
return true;
}
}
return false;
} else {
for (auto &Operand : Operands) {
// We check the larger class QPR instead of just the legal class
// MQPR, to more accurately report errors when using Q registers
// outside of the allowed range.
if (static_cast<ARMOperand &>(*Operand).isVectorIndex() ||
(Operand->isReg() &&
(ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
Operand->getReg()))))
return false;
}
return true;
}
}
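// Sketch of the vmov special case above: with MVE, "vmov.i32 q0, #0" only
// involves Q registers, so the vector predicate operand is kept (return
// false), whereas "vmov r0, s1" involves an S register and so the vector
// predicate is omitted (return true).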
static bool isDataTypeToken(StringRef Tok) {
return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
Tok == ".f" || Tok == ".d";
}
// FIXME: This bit should probably be handled via an explicit match class
// in the .td files that matches the suffix instead of having it be
// a literal string token the way it is now.
static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
}
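// E.g. (informally): for "vldm.32 r0!, {s0-s3}", isDataTypeToken(".32")
// holds and this predicate holds for any "vldm"/"vstm" mnemonic, so the
// ".32" suffix is silently discarded in ParseInstruction() below instead of
// being handed to the matcher.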
static void applyMnemonicAliases(StringRef &Mnemonic,
const FeatureBitset &Features,
unsigned VariantID);
// The GNU assembler has aliases of ldrd and strd with the second register
// omitted. We don't have a way to do that in tablegen, so fix it up here.
//
// We have to be careful to not emit an invalid Rt2 here, because the rest of
// the assembly parser could then generate confusing diagnostics referring to
// it. If we do find anything that prevents us from doing the transformation we
// bail out, and let the assembly parser report an error on the instruction as
// it is written.
void ARMAsmParser::fixupGNULDRDAlias(StringRef Mnemonic,
OperandVector &Operands) {
if (Mnemonic != "ldrd" && Mnemonic != "strd")
return;
if (Operands.size() < 4)
return;
ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[2]);
ARMOperand &Op3 = static_cast<ARMOperand &>(*Operands[3]);
if (!Op2.isReg())
return;
if (!Op3.isGPRMem())
return;
const MCRegisterClass &GPR = MRI->getRegClass(ARM::GPRRegClassID);
if (!GPR.contains(Op2.getReg()))
return;
unsigned RtEncoding = MRI->getEncodingValue(Op2.getReg());
if (!isThumb() && (RtEncoding & 1)) {
// In ARM mode, the registers must be from an aligned pair; this
// restriction does not apply in Thumb mode.
return;
}
if (Op2.getReg() == ARM::PC)
return;
unsigned PairedReg = GPR.getRegister(RtEncoding + 1);
if (!PairedReg || PairedReg == ARM::PC ||
(PairedReg == ARM::SP && !hasV8Ops()))
return;
Operands.insert(
Operands.begin() + 3,
ARMOperand::CreateReg(PairedReg, Op2.getStartLoc(), Op2.getEndLoc()));
}
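// Sketch: "ldrd r0, [r4]" has an even Rt encoding, so r1 is synthesised as
// Rt2 and the operand list becomes that of "ldrd r0, r1, [r4]". In ARM mode
// "ldrd r1, [r4]" is left untouched (odd Rt), so the parser will diagnose
// the instruction as written.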
/// Parse an arm instruction mnemonic followed by its operands.
bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
SMLoc NameLoc, OperandVector &Operands) {
MCAsmParser &Parser = getParser();
// Apply mnemonic aliases before doing anything else, as the destination
// mnemonic may include suffixes and we want to handle them normally.
// The generic tblgen'erated code does this later, at the start of
// MatchInstructionImpl(), but that's too late for aliases that include
// any sort of suffix.
const FeatureBitset &AvailableFeatures = getAvailableFeatures();
unsigned AssemblerDialect = getParser().getAssemblerDialect();
applyMnemonicAliases(Name, AvailableFeatures, AssemblerDialect);
// First check for the ARM-specific .req directive.
if (Parser.getTok().is(AsmToken::Identifier) &&
Parser.getTok().getIdentifier() == ".req") {
parseDirectiveReq(Name, NameLoc);
// We always return 'error' for this, as we're done with this
// statement and don't need to match the instruction.
return true;
}
// Create the leading tokens for the mnemonic, split by '.' characters.
size_t Start = 0, Next = Name.find('.');
StringRef Mnemonic = Name.slice(Start, Next);
StringRef ExtraToken = Name.slice(Next, Name.find(' ', Next + 1));
// Split out the predication code and carry setting flag from the mnemonic.
unsigned PredicationCode;
unsigned VPTPredicationCode;
unsigned ProcessorIMod;
bool CarrySetting;
StringRef ITMask;
Mnemonic = splitMnemonic(Mnemonic, ExtraToken, PredicationCode, VPTPredicationCode,
CarrySetting, ProcessorIMod, ITMask);
// In Thumb1, only the branch (B) instruction can be predicated.
if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
return Error(NameLoc, "conditional execution not supported in Thumb1");
}
Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
// Handle the mask for IT and VPT instructions. In ARMOperand and
// MCOperand, this is stored in a format independent of the
// condition code: the lowest set bit indicates the end of the
// encoding, and above that, a 1 bit indicates 'else', and a 0 bit
// indicates 'then'. E.g.
// IT -> 1000
// ITx -> x100 (ITT -> 0100, ITE -> 1100)
// ITxy -> xy10 (e.g. ITET -> 1010)
// ITxyz -> xyz1 (e.g. ITEET -> 1101)
if (Mnemonic == "it" || Mnemonic.startswith("vpt") ||
Mnemonic.startswith("vpst")) {
SMLoc Loc = Mnemonic == "it" ? SMLoc::getFromPointer(NameLoc.getPointer() + 2) :
Mnemonic == "vpt" ? SMLoc::getFromPointer(NameLoc.getPointer() + 3) :
SMLoc::getFromPointer(NameLoc.getPointer() + 4);
if (ITMask.size() > 3) {
if (Mnemonic == "it")
return Error(Loc, "too many conditions on IT instruction");
return Error(Loc, "too many conditions on VPT instruction");
}
unsigned Mask = 8;
for (unsigned i = ITMask.size(); i != 0; --i) {
char pos = ITMask[i - 1];
if (pos != 't' && pos != 'e') {
return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
}
Mask >>= 1;
if (ITMask[i - 1] == 'e')
Mask |= 8;
}
Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
}
// FIXME: This is all a pretty gross hack. We should automatically handle
// optional operands like this via tblgen.
// Next, add the CCOut and ConditionCode operands, if needed.
//
// For mnemonics which can ever incorporate a carry setting bit or predication
// code, our matching model involves us always generating CCOut and
// ConditionCode operands to match the mnemonic "as written" and then we let
// the matcher deal with finding the right instruction or generating an
// appropriate error.
bool CanAcceptCarrySet, CanAcceptPredicationCode, CanAcceptVPTPredicationCode;
getMnemonicAcceptInfo(Mnemonic, ExtraToken, Name, CanAcceptCarrySet,
CanAcceptPredicationCode, CanAcceptVPTPredicationCode);
// If we had a carry-set on an instruction that can't do that, issue an
// error.
if (!CanAcceptCarrySet && CarrySetting) {
return Error(NameLoc, "instruction '" + Mnemonic +
"' can not set flags, but 's' suffix specified");
}
// If we had a predication code on an instruction that can't do that, issue an
// error.
if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
return Error(NameLoc, "instruction '" + Mnemonic +
"' is not predicable, but condition code specified");
}
// If we had a VPT predication code on an instruction that can't do that, issue an
// error.
if (!CanAcceptVPTPredicationCode && VPTPredicationCode != ARMVCC::None) {
return Error(NameLoc, "instruction '" + Mnemonic +
"' is not VPT predicable, but VPT code T/E is specified");
}
// Add the carry setting operand, if necessary.
if (CanAcceptCarrySet) {
SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
Loc));
}
// Add the predication code operand, if necessary.
if (CanAcceptPredicationCode) {
SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
CarrySetting);
Operands.push_back(ARMOperand::CreateCondCode(
ARMCC::CondCodes(PredicationCode), Loc));
}
// Add the VPT predication code operand, if necessary.
// FIXME: We don't add them for the instructions filtered below as these can
// have custom operands which need special parsing. This parsing requires
// the operand to be in the same place in the OperandVector as their
// definition in tblgen. Since these instructions may also have the
// scalar predication operand, we do not add the vector one here; it is
// fixed up further below once the operands have been parsed.
if (CanAcceptVPTPredicationCode && Mnemonic != "vmov" &&
!Mnemonic.startswith("vcmp") &&
!(Mnemonic.startswith("vcvt") && Mnemonic != "vcvta" &&
Mnemonic != "vcvtn" && Mnemonic != "vcvtp" && Mnemonic != "vcvtm")) {
SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
CarrySetting);
Operands.push_back(ARMOperand::CreateVPTPred(
ARMVCC::VPTCodes(VPTPredicationCode), Loc));
}
// Add the processor imod operand, if necessary.
if (ProcessorIMod) {
Operands.push_back(ARMOperand::CreateImm(
MCConstantExpr::create(ProcessorIMod, getContext()),
NameLoc, NameLoc));
} else if (Mnemonic == "cps" && isMClass()) {
return Error(NameLoc, "instruction 'cps' requires effect for M-class");
}
// Add the remaining tokens in the mnemonic.
while (Next != StringRef::npos) {
Start = Next;
Next = Name.find('.', Start + 1);
ExtraToken = Name.slice(Start, Next);
// Some NEON instructions have an optional datatype suffix that is
// completely ignored. Check for that.
if (isDataTypeToken(ExtraToken) &&
doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
continue;
// For ARM mode, generate an error if the .n qualifier is used.
if (ExtraToken == ".n" && !isThumb()) {
SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
return Error(Loc, "instruction with .n (narrow) qualifier not allowed in "
"arm mode");
}
// The .n qualifier is always discarded as that is what the tables
// and matcher expect. In ARM mode the .w qualifier has no effect,
// so discard it to avoid errors that can be caused by the matcher.
if (ExtraToken != ".n" && (isThumb() || ExtraToken != ".w")) {
SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
}
}
// Read the remaining operands.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
// Read the first operand.
if (parseOperand(Operands, Mnemonic)) {
return true;
}
while (parseOptionalToken(AsmToken::Comma)) {
// Parse and remember the operand.
if (parseOperand(Operands, Mnemonic)) {
return true;
}
}
}
if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
return true;
tryConvertingToTwoOperandForm(Mnemonic, CarrySetting, Operands);
// Some instructions, mostly Thumb, have forms for the same mnemonic that
// do and don't have a cc_out optional-def operand. With some spot-checks
// of the operand list, we can figure out which variant we're trying to
// parse and adjust accordingly before actually matching. We shouldn't ever
// try to remove a cc_out operand that was explicitly set on the
// mnemonic, of course (CarrySetting == true). Reason #317 that the
// table-driven matcher doesn't fit well with the ARM instruction set.
if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands))
Operands.erase(Operands.begin() + 1);
// Some instructions have the same mnemonic, but don't always
// have a predicate. Distinguish them here and delete the
// appropriate predicate if needed. This could be either the scalar
// predication code or the vector predication code.
if (PredicationCode == ARMCC::AL &&
shouldOmitPredicateOperand(Mnemonic, Operands))
Operands.erase(Operands.begin() + 1);
if (hasMVE()) {
if (!shouldOmitVectorPredicateOperand(Mnemonic, Operands) &&
Mnemonic == "vmov" && PredicationCode == ARMCC::LT) {
// Very nasty hack to deal with the vector predicated variant of vmovlt
// versus the scalar predicated vmov with condition 'lt'. We cannot tell them
// apart until we have parsed their operands.
Operands.erase(Operands.begin() + 1);
Operands.erase(Operands.begin());
SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
Mnemonic.size() - 1 + CarrySetting);
Operands.insert(Operands.begin(),
ARMOperand::CreateVPTPred(ARMVCC::None, PLoc));
Operands.insert(Operands.begin(),
ARMOperand::CreateToken(StringRef("vmovlt"), MLoc));
} else if (Mnemonic == "vcvt" && PredicationCode == ARMCC::NE &&
!shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
// Another nasty hack to deal with the ambiguity between vcvt with scalar
// predication 'ne' and vcvtn with vector predication 'e'. As above we
// can only distinguish between the two after we have parsed their
// operands.
Operands.erase(Operands.begin() + 1);
Operands.erase(Operands.begin());
SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
Mnemonic.size() - 1 + CarrySetting);
Operands.insert(Operands.begin(),
ARMOperand::CreateVPTPred(ARMVCC::Else, PLoc));
Operands.insert(Operands.begin(),
ARMOperand::CreateToken(StringRef("vcvtn"), MLoc));
} else if (Mnemonic == "vmul" && PredicationCode == ARMCC::LT &&
!shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
// Another hack, this time to distinguish between scalar predicated vmul
// with 'lt' predication code and the vector instruction vmullt with
// vector predication code "none"
Operands.erase(Operands.begin() + 1);
Operands.erase(Operands.begin());
SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
Operands.insert(Operands.begin(),
ARMOperand::CreateToken(StringRef("vmullt"), MLoc));
}
// For vmov and vcmp, as mentioned earlier, we did not add the vector
// predication code, since these may contain operands that require
// special parsing. So now we have to see if they require vector
// predication and replace the scalar one with the vector predication
// operand if that is the case.
else if (Mnemonic == "vmov" || Mnemonic.startswith("vcmp") ||
(Mnemonic.startswith("vcvt") && !Mnemonic.startswith("vcvta") &&
!Mnemonic.startswith("vcvtn") && !Mnemonic.startswith("vcvtp") &&
!Mnemonic.startswith("vcvtm"))) {
if (!shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
// We could not split the vector predicate off vcvt because it might
// have been the scalar vcvtt instruction. Now that we know it's a vector
// instruction, we still need to check whether it's the vector
// predicated vcvt with 'Then' predication or the vector vcvtt. We can
// distinguish the two based on the suffixes, if it is any of
// ".f16.f32", ".f32.f16", ".f16.f64" or ".f64.f16" then it is the vcvtt.
if (Mnemonic.startswith("vcvtt") && Operands.size() >= 4) {
auto Sz1 = static_cast<ARMOperand &>(*Operands[2]);
auto Sz2 = static_cast<ARMOperand &>(*Operands[3]);
if (!(Sz1.isToken() && Sz1.getToken().startswith(".f") &&
Sz2.isToken() && Sz2.getToken().startswith(".f"))) {
Operands.erase(Operands.begin());
SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
VPTPredicationCode = ARMVCC::Then;
Mnemonic = Mnemonic.substr(0, 4);
Operands.insert(Operands.begin(),
ARMOperand::CreateToken(Mnemonic, MLoc));
}
}
Operands.erase(Operands.begin() + 1);
SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
Mnemonic.size() + CarrySetting);
Operands.insert(Operands.begin() + 1,
ARMOperand::CreateVPTPred(
ARMVCC::VPTCodes(VPTPredicationCode), PLoc));
}
} else if (CanAcceptVPTPredicationCode) {
// For all other instructions, make sure only one of the two
// predication operands is left behind, depending on whether we should
// use the vector predication.
if (shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
if (CanAcceptPredicationCode)
Operands.erase(Operands.begin() + 2);
else
Operands.erase(Operands.begin() + 1);
} else if (CanAcceptPredicationCode && PredicationCode == ARMCC::AL) {
Operands.erase(Operands.begin() + 1);
}
}
}
if (VPTPredicationCode != ARMVCC::None) {
bool usedVPTPredicationCode = false;
for (unsigned I = 1; I < Operands.size(); ++I)
if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred())
usedVPTPredicationCode = true;
if (!usedVPTPredicationCode) {
// If we have a VPT predication code and we haven't just turned it
// into an operand, then it was a mistake for splitMnemonic to
// separate it from the rest of the mnemonic in the first place,
// and this may lead to wrong disassembly (e.g. scalar floating
// point VCMPE is actually a different instruction from VCMP, so
// we mustn't treat them the same). In that situation, glue it
// back on.
Mnemonic = Name.slice(0, Mnemonic.size() + 1);
Operands.erase(Operands.begin());
Operands.insert(Operands.begin(),
ARMOperand::CreateToken(Mnemonic, NameLoc));
}
}
// ARM mode 'blx' needs special handling, as the register operand version
// is predicable, but the label operand version is not. So, we can't rely
// on the Mnemonic based checking to correctly figure out when to put
// a k_CondCode operand in the list. If we're trying to match the label
// version, remove the k_CondCode operand here.
if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
static_cast<ARMOperand &>(*Operands[2]).isImm())
Operands.erase(Operands.begin() + 1);
// Adjust operands of ldrexd/strexd to MCK_GPRPair.
// ldrexd/strexd require an even/odd GPR pair. To enforce this constraint,
// a single GPRPair reg operand is used in the .td file to replace the two
// GPRs. However, when parsing from asm, the two GPRs cannot automatically
// be expressed as a GPRPair, so we have to merge them manually.
// FIXME: We would really like to be able to tablegen'erate this.
if (!isThumb() && Operands.size() > 4 &&
(Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" ||
Mnemonic == "stlexd")) {
bool isLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd");
unsigned Idx = isLoad ? 2 : 3;
ARMOperand &Op1 = static_cast<ARMOperand &>(*Operands[Idx]);
ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[Idx + 1]);
const MCRegisterClass &MRC = MRI->getRegClass(ARM::GPRRegClassID);
// Adjust only if Op1 and Op2 are GPRs.
if (Op1.isReg() && Op2.isReg() && MRC.contains(Op1.getReg()) &&
MRC.contains(Op2.getReg())) {
unsigned Reg1 = Op1.getReg();
unsigned Reg2 = Op2.getReg();
unsigned Rt = MRI->getEncodingValue(Reg1);
unsigned Rt2 = MRI->getEncodingValue(Reg2);
// Rt2 must be Rt + 1 and Rt must be even.
if (Rt + 1 != Rt2 || (Rt & 1)) {
return Error(Op2.getStartLoc(),
isLoad ? "destination operands must be sequential"
: "source operands must be sequential");
}
unsigned NewReg = MRI->getMatchingSuperReg(
Reg1, ARM::gsub_0, &(MRI->getRegClass(ARM::GPRPairRegClassID)));
Operands[Idx] =
ARMOperand::CreateReg(NewReg, Op1.getStartLoc(), Op2.getEndLoc());
Operands.erase(Operands.begin() + Idx + 1);
}
}
// GNU Assembler extension (compatibility).
fixupGNULDRDAlias(Mnemonic, Operands);
// FIXME: As said above, this is all a pretty gross hack. This instruction
// does not fit with the other "subs" variants and tblgen.
// Adjust operands of B9.3.19 SUBS PC, LR, #imm (Thumb2) system instruction
// so the Mnemonic is the original name "subs" and delete the predicate
// operand so it will match the table entry.
if (isThumbTwo() && Mnemonic == "sub" && Operands.size() == 6 &&
static_cast<ARMOperand &>(*Operands[3]).isReg() &&
static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::PC &&
static_cast<ARMOperand &>(*Operands[4]).isReg() &&
static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::LR &&
static_cast<ARMOperand &>(*Operands[5]).isImm()) {
Operands.front() = ARMOperand::CreateToken(Name, NameLoc);
Operands.erase(Operands.begin() + 1);
}
return false;
}
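// An end-to-end sketch of the above (assuming a v6M or later Thumb1
// target): for "adds r0, r0, #8", splitMnemonic() yields "add" with
// CarrySetting set, getMnemonicAcceptInfo() permits both cc_out and a
// condition code, the three explicit operands are parsed, and
// tryConvertingToTwoOperandForm() then reduces them to the two-operand
// "adds r0, #8" (the immediate does not fit in 3 bits, so the ARMARM
// exception above does not apply).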
// Validate context-sensitive operand constraints.
// Return 'true' if the register list contains non-low GPR registers
// (other than the permitted HiReg), 'false' otherwise. If Reg is found in
// the register list, set 'containsReg' to true.
static bool checkLowRegisterList(const MCInst &Inst, unsigned OpNo,
unsigned Reg, unsigned HiReg,
bool &containsReg) {
containsReg = false;
for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
unsigned OpReg = Inst.getOperand(i).getReg();
if (OpReg == Reg)
containsReg = true;
// Anything other than a low register isn't legal here.
if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
return true;
}
return false;
}
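// Sketch: for Thumb1 "pop {r0, r8}", the walk above reaches r8, which is
// neither a low register nor the permitted HiReg (PC for tPOP), so the
// helper returns true and the caller emits "registers must be in range
// r0-r7 or pc".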
// Check if the specified register is in the register list of the inst,
// starting at the indicated operand number.
static bool listContainsReg(const MCInst &Inst, unsigned OpNo, unsigned Reg) {
for (unsigned i = OpNo, e = Inst.getNumOperands(); i < e; ++i) {
unsigned OpReg = Inst.getOperand(i).getReg();
if (OpReg == Reg)
return true;
}
return false;
}
// Return true if the instruction has the interesting property of being
// allowed in IT blocks, but not being predicable.
static bool instIsBreakpoint(const MCInst &Inst) {
return Inst.getOpcode() == ARM::tBKPT ||
Inst.getOpcode() == ARM::BKPT ||
Inst.getOpcode() == ARM::tHLT ||
Inst.getOpcode() == ARM::HLT;
}
bool ARMAsmParser::validatetLDMRegList(const MCInst &Inst,
const OperandVector &Operands,
unsigned ListNo, bool IsARPop) {
const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
bool ListContainsLR = listContainsReg(Inst, ListNo, ARM::LR);
bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
if (!IsARPop && ListContainsSP)
return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
"SP may not be in the register list");
else if (ListContainsPC && ListContainsLR)
return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
"PC and LR may not be in the register list simultaneously");
return false;
}
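// E.g. (informally): "ldm r0!, {sp, r1}" trips the SP check above when the
// instruction is not an ARM-mode POP, and "ldm r0!, {lr, pc}" trips the
// combined PC-and-LR check.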
bool ARMAsmParser::validatetSTMRegList(const MCInst &Inst,
const OperandVector &Operands,
unsigned ListNo) {
const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
if (ListContainsSP && ListContainsPC)
return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
"SP and PC may not be in the register list");
else if (ListContainsSP)
return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
"SP may not be in the register list");
else if (ListContainsPC)
return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
"PC may not be in the register list");
return false;
}
bool ARMAsmParser::validateLDRDSTRD(MCInst &Inst,
const OperandVector &Operands,
bool Load, bool ARMMode, bool Writeback) {
unsigned RtIndex = Load || !Writeback ? 0 : 1;
unsigned Rt = MRI->getEncodingValue(Inst.getOperand(RtIndex).getReg());
unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(RtIndex + 1).getReg());
if (ARMMode) {
// Rt can't be R14.
if (Rt == 14)
return Error(Operands[3]->getStartLoc(),
"Rt can't be R14");
// Rt must be even-numbered.
if ((Rt & 1) == 1)
return Error(Operands[3]->getStartLoc(),
"Rt must be even-numbered");
// Rt2 must be Rt + 1.
if (Rt2 != Rt + 1) {
if (Load)
return Error(Operands[3]->getStartLoc(),
"destination operands must be sequential");
else
return Error(Operands[3]->getStartLoc(),
"source operands must be sequential");
}
// FIXME: Diagnose m == 15
// FIXME: Diagnose ldrd with m == t || m == t2.
}
if (!ARMMode && Load) {
if (Rt2 == Rt)
return Error(Operands[3]->getStartLoc(),
"destination operands can't be identical");
}
if (Writeback) {
unsigned Rn = MRI->getEncodingValue(Inst.getOperand(3).getReg());
if (Rn == Rt || Rn == Rt2) {
if (Load)
return Error(Operands[3]->getStartLoc(),
"base register needs to be different from destination "
"registers");
else
return Error(Operands[3]->getStartLoc(),
"source register and base register can't be identical");
}
// FIXME: Diagnose ldrd/strd with writeback and n == 15.
// (Except the immediate form of ldrd?)
}
return false;
}
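// Sketch of the checks above: in ARM mode "ldrd r1, r2, [r0]" is rejected
// because Rt is odd, and "ldrd r0, r3, [r1]" because Rt2 is not Rt + 1; the
// Thumb-2 encoding instead rejects only identical destinations such as
// "ldrd r0, r0, [r1]".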
static int findFirstVectorPredOperandIdx(const MCInstrDesc &MCID) {
for (unsigned i = 0; i < MCID.NumOperands; ++i) {
if (ARM::isVpred(MCID.OpInfo[i].OperandType))
return i;
}
return -1;
}
static bool isVectorPredicable(const MCInstrDesc &MCID) {
return findFirstVectorPredOperandIdx(MCID) != -1;
}
// FIXME: We would really like to be able to tablegen'erate this.
bool ARMAsmParser::validateInstruction(MCInst &Inst,
const OperandVector &Operands) {
const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
SMLoc Loc = Operands[0]->getStartLoc();
// Check the IT block state first.
// NOTE: BKPT and HLT instructions have the interesting property of being
// allowed in IT blocks, but not being predicable. They just always execute.
if (inITBlock() && !instIsBreakpoint(Inst)) {
// The instruction must be predicable.
if (!MCID.isPredicable())
return Error(Loc, "instructions in IT block must be predicable");
ARMCC::CondCodes Cond = ARMCC::CondCodes(
Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm());
if (Cond != currentITCond()) {
// Find the condition code Operand to get its SMLoc information.
SMLoc CondLoc;
for (unsigned I = 1; I < Operands.size(); ++I)
if (static_cast<ARMOperand &>(*Operands[I]).isCondCode())
CondLoc = Operands[I]->getStartLoc();
return Error(CondLoc, "incorrect condition in IT block; got '" +
StringRef(ARMCondCodeToString(Cond)) +
"', but expected '" +
ARMCondCodeToString(currentITCond()) + "'");
}
// Check for non-'al' condition codes outside of the IT block.
} else if (isThumbTwo() && MCID.isPredicable() &&
Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
ARMCC::AL && Inst.getOpcode() != ARM::tBcc &&
Inst.getOpcode() != ARM::t2Bcc &&
Inst.getOpcode() != ARM::t2BFic) {
return Error(Loc, "predicated instructions must be in IT block");
} else if (!isThumb() && !useImplicitITARM() && MCID.isPredicable() &&
Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
ARMCC::AL) {
return Warning(Loc, "predicated instructions should be in IT block");
} else if (!MCID.isPredicable()) {
// Check the instruction doesn't have a predicate operand anyway
// that it's not allowed to use. Sometimes this happens in order
// to keep instructions the same shape even though one cannot
// legally be predicated, e.g. vmul.f16 vs vmul.f32.
for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
if (MCID.OpInfo[i].isPredicate()) {
if (Inst.getOperand(i).getImm() != ARMCC::AL)
return Error(Loc, "instruction is not predicable");
break;
}
}
}
// PC-setting instructions in an IT block, but not the last instruction of
// the block, are UNPREDICTABLE.
if (inExplicitITBlock() && !lastInITBlock() && isITBlockTerminator(Inst)) {
return Error(Loc, "instruction must be outside of IT block or the last instruction in an IT block");
}
if (inVPTBlock() && !instIsBreakpoint(Inst)) {
unsigned Bit = extractITMaskBit(VPTState.Mask, VPTState.CurPosition);
if (!isVectorPredicable(MCID))
return Error(Loc, "instruction in VPT block must be predicable");
unsigned Pred = Inst.getOperand(findFirstVectorPredOperandIdx(MCID)).getImm();
unsigned VPTPred = Bit ? ARMVCC::Else : ARMVCC::Then;
if (Pred != VPTPred) {
SMLoc PredLoc;
for (unsigned I = 1; I < Operands.size(); ++I)
if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred())
PredLoc = Operands[I]->getStartLoc();
return Error(PredLoc, "incorrect predication in VPT block; got '" +
StringRef(ARMVPTPredToString(ARMVCC::VPTCodes(Pred))) +
"', but expected '" +
ARMVPTPredToString(ARMVCC::VPTCodes(VPTPred)) + "'");
}
}
else if (isVectorPredicable(MCID) &&
Inst.getOperand(findFirstVectorPredOperandIdx(MCID)).getImm() !=
ARMVCC::None)
return Error(Loc, "VPT predicated instructions must be in VPT block");
const unsigned Opcode = Inst.getOpcode();
switch (Opcode) {
case ARM::t2IT: {
// Encoding is unpredictable if it ever results in a notional 'NV'
// predicate. Since we don't parse 'NV' directly this means an 'AL'
// predicate with an "else" mask bit.
unsigned Cond = Inst.getOperand(0).getImm();
unsigned Mask = Inst.getOperand(1).getImm();
// Conditions only allowing a 't' are those with no set bit except
// the lowest-order one that indicates the end of the sequence. In
// other words, powers of 2.
if (Cond == ARMCC::AL && countPopulation(Mask) != 1)
return Error(Loc, "unpredictable IT predicate sequence");
break;
}
case ARM::LDRD:
if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/true,
/*Writeback*/false))
return true;
break;
case ARM::LDRD_PRE:
case ARM::LDRD_POST:
if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/true,
/*Writeback*/true))
return true;
break;
case ARM::t2LDRDi8:
if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/false,
/*Writeback*/false))
return true;
break;
case ARM::t2LDRD_PRE:
case ARM::t2LDRD_POST:
if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/false,
/*Writeback*/true))
return true;
break;
case ARM::t2BXJ: {
const unsigned RmReg = Inst.getOperand(0).getReg();
// Rm = SP is no longer unpredictable in v8-A
if (RmReg == ARM::SP && !hasV8Ops())
return Error(Operands[2]->getStartLoc(),
"r13 (SP) is an unpredictable operand to BXJ");
return false;
}
case ARM::STRD:
if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/true,
/*Writeback*/false))
return true;
break;
case ARM::STRD_PRE:
case ARM::STRD_POST:
if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/true,
/*Writeback*/true))
return true;
break;
case ARM::t2STRD_PRE:
case ARM::t2STRD_POST:
if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/false,
/*Writeback*/true))
return true;
break;
case ARM::STR_PRE_IMM:
case ARM::STR_PRE_REG:
case ARM::t2STR_PRE:
case ARM::STR_POST_IMM:
case ARM::STR_POST_REG:
case ARM::t2STR_POST:
case ARM::STRH_PRE:
case ARM::t2STRH_PRE:
case ARM::STRH_POST:
case ARM::t2STRH_POST:
case ARM::STRB_PRE_IMM:
case ARM::STRB_PRE_REG:
case ARM::t2STRB_PRE:
case ARM::STRB_POST_IMM:
case ARM::STRB_POST_REG:
case ARM::t2STRB_POST: {
// Rt must be different from Rn.
const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
if (Rt == Rn)
return Error(Operands[3]->getStartLoc(),
"source register and base register can't be identical");
return false;
}
case ARM::LDR_PRE_IMM:
case ARM::LDR_PRE_REG:
case ARM::t2LDR_PRE:
case ARM::LDR_POST_IMM:
case ARM::LDR_POST_REG:
case ARM::t2LDR_POST:
case ARM::LDRH_PRE:
case ARM::t2LDRH_PRE:
case ARM::LDRH_POST:
case ARM::t2LDRH_POST:
case ARM::LDRSH_PRE:
case ARM::t2LDRSH_PRE:
case ARM::LDRSH_POST:
case ARM::t2LDRSH_POST:
case ARM::LDRB_PRE_IMM:
case ARM::LDRB_PRE_REG:
case ARM::t2LDRB_PRE:
case ARM::LDRB_POST_IMM:
case ARM::LDRB_POST_REG:
case ARM::t2LDRB_POST:
case ARM::LDRSB_PRE:
case ARM::t2LDRSB_PRE:
case ARM::LDRSB_POST:
case ARM::t2LDRSB_POST: {
// Rt must be different from Rn.
const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
if (Rt == Rn)
return Error(Operands[3]->getStartLoc(),
"destination register and base register can't be identical");
return false;
}
case ARM::MVE_VLDRBU8_rq:
case ARM::MVE_VLDRBU16_rq:
case ARM::MVE_VLDRBS16_rq:
case ARM::MVE_VLDRBU32_rq:
case ARM::MVE_VLDRBS32_rq:
case ARM::MVE_VLDRHU16_rq:
case ARM::MVE_VLDRHU16_rq_u:
case ARM::MVE_VLDRHU32_rq:
case ARM::MVE_VLDRHU32_rq_u:
case ARM::MVE_VLDRHS32_rq:
case ARM::MVE_VLDRHS32_rq_u:
case ARM::MVE_VLDRWU32_rq:
case ARM::MVE_VLDRWU32_rq_u:
case ARM::MVE_VLDRDU64_rq:
case ARM::MVE_VLDRDU64_rq_u:
case ARM::MVE_VLDRWU32_qi:
case ARM::MVE_VLDRWU32_qi_pre:
case ARM::MVE_VLDRDU64_qi:
case ARM::MVE_VLDRDU64_qi_pre: {
// Qd must be different from Qm.
unsigned QdIdx = 0, QmIdx = 2;
bool QmIsPointer = false;
switch (Opcode) {
case ARM::MVE_VLDRWU32_qi:
case ARM::MVE_VLDRDU64_qi:
QmIdx = 1;
QmIsPointer = true;
break;
case ARM::MVE_VLDRWU32_qi_pre:
case ARM::MVE_VLDRDU64_qi_pre:
QdIdx = 1;
QmIsPointer = true;
break;
}
const unsigned Qd = MRI->getEncodingValue(Inst.getOperand(QdIdx).getReg());
const unsigned Qm = MRI->getEncodingValue(Inst.getOperand(QmIdx).getReg());
if (Qd == Qm) {
return Error(Operands[3]->getStartLoc(),
Twine("destination vector register and vector ") +
(QmIsPointer ? "pointer" : "offset") +
" register can't be identical");
}
return false;
}
case ARM::SBFX:
case ARM::t2SBFX:
case ARM::UBFX:
case ARM::t2UBFX: {
// Width must be in range [1, 32-lsb].
unsigned LSB = Inst.getOperand(2).getImm();
unsigned Widthm1 = Inst.getOperand(3).getImm();
if (Widthm1 >= 32 - LSB)
return Error(Operands[5]->getStartLoc(),
"bitfield width must be in range [1,32-lsb]");
return false;
}
// Notionally handles ARM::tLDMIA_UPD too.
case ARM::tLDMIA: {
// If we're parsing Thumb2, the .w variant is available and handles
// most cases that are normally illegal for a Thumb1 LDM instruction.
// We'll make the transformation in processInstruction() if necessary.
//
// Thumb LDM instructions are writeback iff the base register is not
// in the register list.
unsigned Rn = Inst.getOperand(0).getReg();
bool HasWritebackToken =
(static_cast<ARMOperand &>(*Operands[3]).isToken() &&
static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
bool ListContainsBase;
if (checkLowRegisterList(Inst, 3, Rn, 0, ListContainsBase) && !isThumbTwo())
return Error(Operands[3 + HasWritebackToken]->getStartLoc(),
"registers must be in range r0-r7");
// If we should have writeback, then there should be a '!' token.
if (!ListContainsBase && !HasWritebackToken && !isThumbTwo())
return Error(Operands[2]->getStartLoc(),
"writeback operator '!' expected");
// If we should not have writeback, there must not be a '!'. This is
// true even for the 32-bit wide encodings.
if (ListContainsBase && HasWritebackToken)
return Error(Operands[3]->getStartLoc(),
"writeback operator '!' not allowed when base register "
"in register list");
if (validatetLDMRegList(Inst, Operands, 3))
return true;
break;
}
case ARM::LDMIA_UPD:
case ARM::LDMDB_UPD:
case ARM::LDMIB_UPD:
case ARM::LDMDA_UPD:
// ARM variants loading and updating the same register are only officially
// UNPREDICTABLE on v7 upwards. Goodness knows what they did before.
if (!hasV7Ops())
break;
if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
return Error(Operands.back()->getStartLoc(),
"writeback register not allowed in register list");
break;
case ARM::t2LDMIA:
case ARM::t2LDMDB:
if (validatetLDMRegList(Inst, Operands, 3))
return true;
break;
case ARM::t2STMIA:
case ARM::t2STMDB:
if (validatetSTMRegList(Inst, Operands, 3))
return true;
break;
case ARM::t2LDMIA_UPD:
case ARM::t2LDMDB_UPD:
case ARM::t2STMIA_UPD:
case ARM::t2STMDB_UPD:
if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
return Error(Operands.back()->getStartLoc(),
"writeback register not allowed in register list");
if (Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
if (validatetLDMRegList(Inst, Operands, 3))
return true;
} else {
if (validatetSTMRegList(Inst, Operands, 3))
return true;
}
break;
case ARM::sysLDMIA_UPD:
case ARM::sysLDMDA_UPD:
case ARM::sysLDMDB_UPD:
case ARM::sysLDMIB_UPD:
if (!listContainsReg(Inst, 3, ARM::PC))
return Error(Operands[4]->getStartLoc(),
"writeback register only allowed on system LDM "
"if PC in register-list");
break;
case ARM::sysSTMIA_UPD:
case ARM::sysSTMDA_UPD:
case ARM::sysSTMDB_UPD:
case ARM::sysSTMIB_UPD:
return Error(Operands[2]->getStartLoc(),
"system STM cannot have writeback register");
case ARM::tMUL:
// The second source operand must be the same register as the destination
// operand.
//
// In this case, we must directly check the parsed operands because the
// cvtThumbMultiply() function is written in such a way that it guarantees
// this first statement is always true for the new Inst. Essentially, the
// destination is unconditionally copied into the second source operand
// without checking to see if it matches what we actually parsed.
if (Operands.size() == 6 && (((ARMOperand &)*Operands[3]).getReg() !=
((ARMOperand &)*Operands[5]).getReg()) &&
(((ARMOperand &)*Operands[3]).getReg() !=
((ARMOperand &)*Operands[4]).getReg())) {
return Error(Operands[3]->getStartLoc(),
"destination register must match source register");
}
break;
// Like ldm/stm, push and pop have a hi-reg handling version in Thumb2,
// so only issue a diagnostic for thumb1. The instructions will be
// switched to the t2 encodings in processInstruction() if necessary.
case ARM::tPOP: {
bool ListContainsBase;
if (checkLowRegisterList(Inst, 2, 0, ARM::PC, ListContainsBase) &&
!isThumbTwo())
return Error(Operands[2]->getStartLoc(),
"registers must be in range r0-r7 or pc");
if (validatetLDMRegList(Inst, Operands, 2, !isMClass()))
return true;
break;
}
case ARM::tPUSH: {
bool ListContainsBase;
if (checkLowRegisterList(Inst, 2, 0, ARM::LR, ListContainsBase) &&
!isThumbTwo())
return Error(Operands[2]->getStartLoc(),
"registers must be in range r0-r7 or lr");
if (validatetSTMRegList(Inst, Operands, 2))
return true;
break;
}
case ARM::tSTMIA_UPD: {
bool ListContainsBase, InvalidLowList;
InvalidLowList = checkLowRegisterList(Inst, 4, Inst.getOperand(0).getReg(),
0, ListContainsBase);
if (InvalidLowList && !isThumbTwo())
return Error(Operands[4]->getStartLoc(),
"registers must be in range r0-r7");
// This would be converted to a 32-bit stm, but that's not valid if the
// writeback register is in the list.
if (InvalidLowList && ListContainsBase)
return Error(Operands[4]->getStartLoc(),
"writeback operator '!' not allowed when base register "
"in register list");
if (validatetSTMRegList(Inst, Operands, 4))
return true;
break;
}
case ARM::tADDrSP:
// If the non-SP source operand and the destination operand are not the
// same, we need thumb2 (for the wide encoding), or we have an error.
if (!isThumbTwo() &&
Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
return Error(Operands[4]->getStartLoc(),
"source register must be the same as destination");
}
break;
case ARM::t2ADDri:
case ARM::t2ADDri12:
case ARM::t2ADDrr:
case ARM::t2ADDrs:
case ARM::t2SUBri:
case ARM::t2SUBri12:
case ARM::t2SUBrr:
case ARM::t2SUBrs:
if (Inst.getOperand(0).getReg() == ARM::SP &&
Inst.getOperand(1).getReg() != ARM::SP)
return Error(Operands[4]->getStartLoc(),
"source register must be sp if destination is sp");
break;
// Final range checking for Thumb unconditional branch instructions.
case ARM::tB:
if (!(static_cast<ARMOperand &>(*Operands[2])).isSignedOffset<11, 1>())
return Error(Operands[2]->getStartLoc(), "branch target out of range");
break;
case ARM::t2B: {
int op = (Operands[2]->isImm()) ? 2 : 3;
if (!static_cast<ARMOperand &>(*Operands[op]).isSignedOffset<24, 1>())
return Error(Operands[op]->getStartLoc(), "branch target out of range");
break;
}
// Final range checking for Thumb conditional branch instructions.
case ARM::tBcc:
if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffset<8, 1>())
return Error(Operands[2]->getStartLoc(), "branch target out of range");
break;
case ARM::t2Bcc: {
int Op = (Operands[2]->isImm()) ? 2 : 3;
if (!static_cast<ARMOperand &>(*Operands[Op]).isSignedOffset<20, 1>())
return Error(Operands[Op]->getStartLoc(), "branch target out of range");
break;
}
case ARM::tCBZ:
case ARM::tCBNZ: {
if (!static_cast<ARMOperand &>(*Operands[2]).isUnsignedOffset<6, 1>())
return Error(Operands[2]->getStartLoc(), "branch target out of range");
break;
}
case ARM::MOVi16:
case ARM::MOVTi16:
case ARM::t2MOVi16:
case ARM::t2MOVTi16:
{
// We want to avoid misleadingly allowing something like "mov r0, <symbol>"
// especially when we turn it into a movw and the expression <symbol> does
// not have a :lower16: or :upper16: as part of the expression. We don't
// want the behavior of silently truncating, which can be unexpected and
// lead to bugs that are difficult to find since this is an easy mistake
// to make.
int i = (Operands[3]->isImm()) ? 3 : 4;
ARMOperand &Op = static_cast<ARMOperand &>(*Operands[i]);
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
if (CE) break;
const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
if (!E) break;
const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E);
if (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16))
return Error(
Op.getStartLoc(),
"immediate expression for mov requires :lower16: or :upper16");
break;
}
case ARM::HINT:
case ARM::t2HINT: {
unsigned Imm8 = Inst.getOperand(0).getImm();
unsigned Pred = Inst.getOperand(1).getImm();
// ESB is not predicable (pred must be AL). Without the RAS extension, this
// behaves as any other unallocated hint.
if (Imm8 == 0x10 && Pred != ARMCC::AL && hasRAS())
return Error(Operands[1]->getStartLoc(), "instruction 'esb' is not "
"predicable, but condition "
"code specified");
if (Imm8 == 0x14 && Pred != ARMCC::AL)
return Error(Operands[1]->getStartLoc(), "instruction 'csdb' is not "
"predicable, but condition "
"code specified");
break;
}
case ARM::t2BFi:
case ARM::t2BFr:
case ARM::t2BFLi:
case ARM::t2BFLr: {
if (!static_cast<ARMOperand &>(*Operands[2]).isUnsignedOffset<4, 1>() ||
(Inst.getOperand(0).isImm() && Inst.getOperand(0).getImm() == 0))
return Error(Operands[2]->getStartLoc(),
"branch location out of range or not a multiple of 2");
if (Opcode == ARM::t2BFi) {
if (!static_cast<ARMOperand &>(*Operands[3]).isSignedOffset<16, 1>())
return Error(Operands[3]->getStartLoc(),
"branch target out of range or not a multiple of 2");
} else if (Opcode == ARM::t2BFLi) {
if (!static_cast<ARMOperand &>(*Operands[3]).isSignedOffset<18, 1>())
return Error(Operands[3]->getStartLoc(),
"branch target out of range or not a multiple of 2");
}
break;
}
case ARM::t2BFic: {
if (!static_cast<ARMOperand &>(*Operands[1]).isUnsignedOffset<4, 1>() ||
(Inst.getOperand(0).isImm() && Inst.getOperand(0).getImm() == 0))
return Error(Operands[1]->getStartLoc(),
"branch location out of range or not a multiple of 2");
if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffset<16, 1>())
return Error(Operands[2]->getStartLoc(),
"branch target out of range or not a multiple of 2");
assert(Inst.getOperand(0).isImm() == Inst.getOperand(2).isImm() &&
"branch location and else branch target should either both be "
"immediates or both labels");
if (Inst.getOperand(0).isImm() && Inst.getOperand(2).isImm()) {
int Diff = Inst.getOperand(2).getImm() - Inst.getOperand(0).getImm();
if (Diff != 4 && Diff != 2)
return Error(
Operands[3]->getStartLoc(),
"else branch target must be 2 or 4 greater than the branch location");
}
break;
}
case ARM::t2CLRM: {
for (unsigned i = 2; i < Inst.getNumOperands(); i++) {
if (Inst.getOperand(i).isReg() &&
!ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(
Inst.getOperand(i).getReg())) {
return Error(Operands[2]->getStartLoc(),
"invalid register in register list. Valid registers are "
"r0-r12, lr/r14 and APSR.");
}
}
break;
}
case ARM::DSB:
case ARM::t2DSB: {
if (Inst.getNumOperands() < 2)
break;
unsigned Option = Inst.getOperand(0).getImm();
unsigned Pred = Inst.getOperand(1).getImm();
// SSBB and PSSBB (DSB #0|#4) are not predicable (pred must be AL); see the
// example after this case.
if (Option == 0 && Pred != ARMCC::AL)
return Error(Operands[1]->getStartLoc(),
"instruction 'ssbb' is not predicable, but condition code "
"specified");
if (Option == 4 && Pred != ARMCC::AL)
return Error(Operands[1]->getStartLoc(),
"instruction 'pssbb' is not predicable, but condition code "
"specified");
break;
}
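// (Example for the DSB option check above.) SSBB and PSSBB are the DSB
// encodings with options #0 and #4, so, schematically:
//   ssbb   (= dsb #0)   @ OK, unconditional
//   ssbbne              @ error: not predicable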
case ARM::VMOVRRS: {
// Source registers must be sequential.
const unsigned Sm = MRI->getEncodingValue(Inst.getOperand(2).getReg());
const unsigned Sm1 = MRI->getEncodingValue(Inst.getOperand(3).getReg());
if (Sm1 != Sm + 1)
return Error(Operands[5]->getStartLoc(),
"source operands must be sequential");
break;
}
case ARM::VMOVSRR: {
// Destination registers must be sequential.
const unsigned Sm = MRI->getEncodingValue(Inst.getOperand(0).getReg());
const unsigned Sm1 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
if (Sm1 != Sm + 1)
return Error(Operands[3]->getStartLoc(),
"destination operands must be sequential");
break;
}
case ARM::VLDMDIA:
case ARM::VSTMDIA: {
ARMOperand &Op = static_cast<ARMOperand&>(*Operands[3]);
auto &RegList = Op.getRegList();
if (RegList.size() < 1 || RegList.size() > 16)
return Error(Operands[3]->getStartLoc(),
"list of registers must be at least 1 and at most 16");
break;
}
case ARM::MVE_VQDMULLs32bh:
case ARM::MVE_VQDMULLs32th:
case ARM::MVE_VCMULf32:
case ARM::MVE_VMULLs32bh:
case ARM::MVE_VMULLs32th:
case ARM::MVE_VMULLu32bh:
case ARM::MVE_VMULLu32th: {
if (Operands[3]->getReg() == Operands[4]->getReg()) {
return Error(Operands[3]->getStartLoc(),
"Qd register and Qn register can't be identical");
}
if (Operands[3]->getReg() == Operands[5]->getReg()) {
return Error(Operands[3]->getStartLoc(),
"Qd register and Qm register can't be identical");
}
break;
}
case ARM::MVE_VMOV_rr_q: {
if (Operands[4]->getReg() != Operands[6]->getReg())
return Error(Operands[4]->getStartLoc(), "Q-registers must be the same");
if (static_cast<ARMOperand &>(*Operands[5]).getVectorIndex() !=
static_cast<ARMOperand &>(*Operands[7]).getVectorIndex() + 2)
return Error(Operands[5]->getStartLoc(),
"Q-register indexes must be 2 and 0 or 3 and 1");
break;
}
case ARM::MVE_VMOV_q_rr: {
if (Operands[2]->getReg() != Operands[4]->getReg())
return Error(Operands[2]->getStartLoc(), "Q-registers must be the same");
if (static_cast<ARMOperand &>(*Operands[3]).getVectorIndex() !=
static_cast<ARMOperand &>(*Operands[5]).getVectorIndex() + 2)
return Error(Operands[3]->getStartLoc(),
"Q-register indexes must be 2 and 0 or 3 and 1");
break;
}
}
return false;
}
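// The two helpers below map the parser-only "..._Asm_..." pseudo-opcodes
// onto real NEON load/store opcodes and report the register-list "Spacing".
// A sketch of how processInstruction consumes it (this relies on D registers
// being numbered consecutively in the generated register enum):
//   unsigned Base = Inst.getOperand(0).getReg();
//   // the n-th register in the list is Base + n * Spacing, i.e.
//   // Spacing == 1 gives {d0, d1, d2, ...}, Spacing == 2 gives {d0, d2, ...}
// Spacing == 2 corresponds to the even or odd D halves of a Q register list.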
static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
switch(Opc) {
default: llvm_unreachable("unexpected opcode!");
// VST1LN
case ARM::VST1LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST1LNd8_UPD;
case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
case ARM::VST1LNdWB_register_Asm_8: Spacing = 1; return ARM::VST1LNd8_UPD;
case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
case ARM::VST1LNdAsm_8: Spacing = 1; return ARM::VST1LNd8;
case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
// VST2LN
case ARM::VST2LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST2LNd8_UPD;
case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
case ARM::VST2LNdWB_register_Asm_8: Spacing = 1; return ARM::VST2LNd8_UPD;
case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
case ARM::VST2LNdAsm_8: Spacing = 1; return ARM::VST2LNd8;
case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
// VST3LN
case ARM::VST3LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST3LNd8_UPD;
case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD;
case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
case ARM::VST3LNdWB_register_Asm_8: Spacing = 1; return ARM::VST3LNd8_UPD;
case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
case ARM::VST3LNdAsm_8: Spacing = 1; return ARM::VST3LNd8;
case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
// VST3
case ARM::VST3dWB_fixed_Asm_8: Spacing = 1; return ARM::VST3d8_UPD;
case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
case ARM::VST3qWB_fixed_Asm_8: Spacing = 2; return ARM::VST3q8_UPD;
case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
case ARM::VST3dWB_register_Asm_8: Spacing = 1; return ARM::VST3d8_UPD;
case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
case ARM::VST3qWB_register_Asm_8: Spacing = 2; return ARM::VST3q8_UPD;
case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
case ARM::VST3dAsm_8: Spacing = 1; return ARM::VST3d8;
case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
case ARM::VST3qAsm_8: Spacing = 2; return ARM::VST3q8;
case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
// VST4LN
case ARM::VST4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST4LNd8_UPD;
case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD;
case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
case ARM::VST4LNdWB_register_Asm_8: Spacing = 1; return ARM::VST4LNd8_UPD;
case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
case ARM::VST4LNdAsm_8: Spacing = 1; return ARM::VST4LNd8;
case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
// VST4
case ARM::VST4dWB_fixed_Asm_8: Spacing = 1; return ARM::VST4d8_UPD;
case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
case ARM::VST4qWB_fixed_Asm_8: Spacing = 2; return ARM::VST4q8_UPD;
case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
case ARM::VST4dWB_register_Asm_8: Spacing = 1; return ARM::VST4d8_UPD;
case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
case ARM::VST4qWB_register_Asm_8: Spacing = 2; return ARM::VST4q8_UPD;
case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
case ARM::VST4dAsm_8: Spacing = 1; return ARM::VST4d8;
case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
case ARM::VST4qAsm_8: Spacing = 2; return ARM::VST4q8;
case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
}
}
static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
switch(Opc) {
default: llvm_unreachable("unexpected opcode!");
// VLD1LN
case ARM::VLD1LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD1LNd8_UPD;
case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
case ARM::VLD1LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD1LNd8_UPD;
case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
case ARM::VLD1LNdAsm_8: Spacing = 1; return ARM::VLD1LNd8;
case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
// VLD2LN
case ARM::VLD2LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD2LNd8_UPD;
case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD;
case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
case ARM::VLD2LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD2LNd8_UPD;
case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
case ARM::VLD2LNdAsm_8: Spacing = 1; return ARM::VLD2LNd8;
case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
// VLD3DUP
case ARM::VLD3DUPdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD;
case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPq8_UPD;
case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
case ARM::VLD3DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD;
case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
case ARM::VLD3DUPdAsm_8: Spacing = 1; return ARM::VLD3DUPd8;
case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
// VLD3LN
case ARM::VLD3LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3LNd8_UPD;
case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNq16_UPD;
case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
case ARM::VLD3LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD3LNd8_UPD;
case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
case ARM::VLD3LNdAsm_8: Spacing = 1; return ARM::VLD3LNd8;
case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
// VLD3
case ARM::VLD3dWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3d8_UPD;
case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
case ARM::VLD3qWB_fixed_Asm_8: Spacing = 2; return ARM::VLD3q8_UPD;
case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
case ARM::VLD3dWB_register_Asm_8: Spacing = 1; return ARM::VLD3d8_UPD;
case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
case ARM::VLD3qWB_register_Asm_8: Spacing = 2; return ARM::VLD3q8_UPD;
case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
case ARM::VLD3dAsm_8: Spacing = 1; return ARM::VLD3d8;
case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
case ARM::VLD3qAsm_8: Spacing = 2; return ARM::VLD3q8;
case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
// VLD4LN
case ARM::VLD4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD;
case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
case ARM::VLD4LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD;
case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
case ARM::VLD4LNdAsm_8: Spacing = 1; return ARM::VLD4LNd8;
case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
// VLD4DUP
case ARM::VLD4DUPdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPd8_UPD;
case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPq8_UPD;
case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPq16_UPD;
case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
case ARM::VLD4DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD4DUPd8_UPD;
case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
case ARM::VLD4DUPdAsm_8: Spacing = 1; return ARM::VLD4DUPd8;
case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
// VLD4
case ARM::VLD4dWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4d8_UPD;
case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
case ARM::VLD4qWB_fixed_Asm_8: Spacing = 2; return ARM::VLD4q8_UPD;
case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
case ARM::VLD4dWB_register_Asm_8: Spacing = 1; return ARM::VLD4d8_UPD;
case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
case ARM::VLD4qWB_register_Asm_8: Spacing = 2; return ARM::VLD4q8_UPD;
case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
case ARM::VLD4dAsm_8: Spacing = 1; return ARM::VLD4d8;
case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
case ARM::VLD4qAsm_8: Spacing = 2; return ARM::VLD4q8;
case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
}
}
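// A note on the naming above (summary only, no new behavior): "_UPD" opcodes
// write the updated base address back to Rn. The "_fixed" aliases take no Rm
// and post-increment Rn by the number of bytes transferred, while the
// "_register" aliases take an explicit Rm, e.g. in GNU syntax:
//   vld3.8 {d0, d1, d2}, [r0]!      @ fixed writeback
//   vld3.8 {d0, d1, d2}, [r0], r2   @ register writeback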
bool ARMAsmParser::processInstruction(MCInst &Inst,
const OperandVector &Operands,
MCStreamer &Out) {
// Check if we have the wide qualifier, because if it's present we
// must avoid selecting a 16-bit Thumb instruction (example below).
bool HasWideQualifier = false;
for (auto &Op : Operands) {
ARMOperand &ARMOp = static_cast<ARMOperand&>(*Op);
if (ARMOp.isToken() && ARMOp.getToken() == ".w") {
HasWideQualifier = true;
break;
}
}
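// For example (sketch, GNU Thumb-2 syntax assumed): "ldr.w r0, label" keeps
// the 32-bit t2LDRpci encoding even when the offset would fit the narrow
// tLDRpci form that plain "ldr r0, label" may select below.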
switch (Inst.getOpcode()) {
case ARM::MVE_VORNIZ0v4i32:
case ARM::MVE_VORNIZ0v8i16:
case ARM::MVE_VORNIZ8v4i32:
case ARM::MVE_VORNIZ8v8i16:
case ARM::MVE_VORNIZ16v4i32:
case ARM::MVE_VORNIZ24v4i32:
case ARM::MVE_VANDIZ0v4i32:
case ARM::MVE_VANDIZ0v8i16:
case ARM::MVE_VANDIZ8v4i32:
case ARM::MVE_VANDIZ8v8i16:
case ARM::MVE_VANDIZ16v4i32:
case ARM::MVE_VANDIZ24v4i32: {
unsigned Opcode;
bool imm16 = false;
switch(Inst.getOpcode()) {
case ARM::MVE_VORNIZ0v4i32: Opcode = ARM::MVE_VORRIZ0v4i32; break;
case ARM::MVE_VORNIZ0v8i16: Opcode = ARM::MVE_VORRIZ0v8i16; imm16 = true; break;
case ARM::MVE_VORNIZ8v4i32: Opcode = ARM::MVE_VORRIZ8v4i32; break;
case ARM::MVE_VORNIZ8v8i16: Opcode = ARM::MVE_VORRIZ8v8i16; imm16 = true; break;
case ARM::MVE_VORNIZ16v4i32: Opcode = ARM::MVE_VORRIZ16v4i32; break;
case ARM::MVE_VORNIZ24v4i32: Opcode = ARM::MVE_VORRIZ24v4i32; break;
case ARM::MVE_VANDIZ0v4i32: Opcode = ARM::MVE_VBICIZ0v4i32; break;
case ARM::MVE_VANDIZ0v8i16: Opcode = ARM::MVE_VBICIZ0v8i16; imm16 = true; break;
case ARM::MVE_VANDIZ8v4i32: Opcode = ARM::MVE_VBICIZ8v4i32; break;
case ARM::MVE_VANDIZ8v8i16: Opcode = ARM::MVE_VBICIZ8v8i16; imm16 = true; break;
case ARM::MVE_VANDIZ16v4i32: Opcode = ARM::MVE_VBICIZ16v4i32; break;
case ARM::MVE_VANDIZ24v4i32: Opcode = ARM::MVE_VBICIZ24v4i32; break;
default: llvm_unreachable("unexpected opcode");
}
MCInst TmpInst;
TmpInst.setOpcode(Opcode);
TmpInst.addOperand(Inst.getOperand(0));
TmpInst.addOperand(Inst.getOperand(1));
// Invert the immediate (see the example after this case).
unsigned imm = ~Inst.getOperand(2).getImm() & (imm16 ? 0xffff : 0xffffffff);
TmpInst.addOperand(MCOperand::createImm(imm));
TmpInst.addOperand(Inst.getOperand(3));
TmpInst.addOperand(Inst.getOperand(4));
Inst = TmpInst;
return true;
}
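// (Example for the inversion above.) Immediate VORN/VAND are pseudo-forms
// of VORR/VBIC with the complemented immediate, schematically:
//   vand.i16 q0, #0xff00   ->   vbic.i16 q0, #0x00ff
//   vorn.i32 q0, #0xff     ->   vorr.i32 q0, #0xffffff00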
// Alias for the alternate form of the 'ldr{,b}t Rt, [Rn], #imm' instruction
// (sketch after this case).
case ARM::LDRT_POST:
case ARM::LDRBT_POST: {
const unsigned Opcode =
(Inst.getOpcode() == ARM::LDRT_POST) ? ARM::LDRT_POST_IMM
: ARM::LDRBT_POST_IMM;
MCInst TmpInst;
TmpInst.setOpcode(Opcode);
TmpInst.addOperand(Inst.getOperand(0));
TmpInst.addOperand(Inst.getOperand(1));
TmpInst.addOperand(Inst.getOperand(1));
TmpInst.addOperand(MCOperand::createReg(0));
TmpInst.addOperand(MCOperand::createImm(0));
TmpInst.addOperand(Inst.getOperand(2));
TmpInst.addOperand(Inst.getOperand(3));
Inst = TmpInst;
return true;
}
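// (Sketch of the alias above.) The pseudo has no offset operand, so a bare
// post-indexed "ldrt r0, [r1]" is rewritten as LDRT_POST_IMM with Rm = 0 and
// an immediate of 0, i.e. it behaves like "ldrt r0, [r1], #0".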
// Alias for alternate form of 'str{,b}t Rt, [Rn], #imm' instruction.
case ARM::STRT_POST:
case ARM::STRBT_POST: {
const unsigned Opcode =
(Inst.getOpcode() == ARM::STRT_POST) ? ARM::STRT_POST_IMM
: ARM::STRBT_POST_IMM;
MCInst TmpInst;
TmpInst.setOpcode(Opcode);
TmpInst.addOperand(Inst.getOperand(1));
TmpInst.addOperand(Inst.getOperand(0));
TmpInst.addOperand(Inst.getOperand(1));
TmpInst.addOperand(MCOperand::createReg(0));
TmpInst.addOperand(MCOperand::createImm(0));
TmpInst.addOperand(Inst.getOperand(2));
TmpInst.addOperand(Inst.getOperand(3));
Inst = TmpInst;
return true;
}
// Alias for alternate form of 'ADR Rd, #imm' instruction.
case ARM::ADDri: {
if (Inst.getOperand(1).getReg() != ARM::PC ||
Inst.getOperand(5).getReg() != 0 ||
!(Inst.getOperand(2).isExpr() || Inst.getOperand(2).isImm()))
return false;
MCInst TmpInst;
TmpInst.setOpcode(ARM::ADR);
TmpInst.addOperand(Inst.getOperand(0));
if (Inst.getOperand(2).isImm()) {
// The immediate (mod_imm) will be in its encoded form; we must decode it
// before passing it to the ADR instruction. (Worked example after this
// case.)
unsigned Enc = Inst.getOperand(2).getImm();
TmpInst.addOperand(MCOperand::createImm(
ARM_AM::rotr32(Enc & 0xFF, (Enc & 0xF00) >> 7)));
} else {
// Turn the PC-relative expression into an absolute expression.
// Reading PC yields the address of the current instruction + 8, so
// the transform to adr must be biased by that amount.
MCSymbol *Dot = getContext().createTempSymbol();
Out.EmitLabel(Dot);
const MCExpr *OpExpr = Inst.getOperand(2).getExpr();
const MCExpr *InstPC = MCSymbolRefExpr::create(Dot,
MCSymbolRefExpr::VK_None,
getContext());
const MCExpr *Const8 = MCConstantExpr::create(8, getContext());
const MCExpr *ReadPC = MCBinaryExpr::createAdd(InstPC, Const8,
getContext());
const MCExpr *FixupAddr = MCBinaryExpr::createAdd(ReadPC, OpExpr,
getContext());
TmpInst.addOperand(MCOperand::createExpr(FixupAddr));
}
TmpInst.addOperand(Inst.getOperand(3));
TmpInst.addOperand(Inst.getOperand(4));
Inst = TmpInst;
return true;
}
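// (Worked example for the ADR alias above.) mod_imm packs an 8-bit value
// plus a 4-bit rotation field applied as rotate-right by twice the field,
// so for an encoded Enc = 0x101:
//   rotr32(Enc & 0xFF, (Enc & 0xF00) >> 7) == rotr32(0x01, 2) == 0x40000000
// For the expression form, reading PC in ARM state yields the instruction
// address + 8, which is why the fixup above adds the Const8 bias.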
// Aliases for alternate PC+imm syntax of LDR instructions.
case ARM::t2LDRpcrel:
// Select the narrow version if the immediate will fit.
if (Inst.getOperand(1).getImm() > 0 &&
Inst.getOperand(1).getImm() <= 0xff &&
!HasWideQualifier)
Inst.setOpcode(ARM::tLDRpci);
else
Inst.setOpcode(ARM::t2LDRpci);
return true;
case ARM::t2LDRBpcrel:
Inst.setOpcode(ARM::t2LDRBpci);
return true;
case ARM::t2LDRHpcrel:
Inst.setOpcode(ARM::t2LDRHpci);
return true;
case ARM::t2LDRSBpcrel:
Inst.setOpcode(ARM::t2LDRSBpci);
return true;
case ARM::t2LDRSHpcrel:
Inst.setOpcode(ARM::t2LDRSHpci);
return true;
case ARM::LDRConstPool:
case ARM::tLDRConstPool:
case ARM::t2LDRConstPool: {
// The pseudo-instruction "ldr rt, =immediate" is converted to a
// "MOV rt, immediate" if the immediate is known and representable;
// otherwise we create a constant pool entry and load from it.
// (Example after this case.)
MCInst TmpInst;
if (Inst.getOpcode() == ARM::LDRConstPool)
TmpInst.setOpcode(ARM::LDRi12);
else if (Inst.getOpcode() == ARM::tLDRConstPool)
TmpInst.setOpcode(ARM::tLDRpci);
else if (Inst.getOpcode() == ARM::t2LDRConstPool)
TmpInst.setOpcode(ARM::t2LDRpci);
const ARMOperand &PoolOperand =
(HasWideQualifier ?
static_cast<ARMOperand &>(*Operands[4]) :
static_cast<ARMOperand &>(*Operands[3]));
const MCExpr *SubExprVal = PoolOperand.getConstantPoolImm();
// If SubExprVal is a constant, we may be able to use a MOV instead.
if (isa<MCConstantExpr>(SubExprVal) &&
Inst.getOperand(0).getReg() != ARM::PC &&
Inst.getOperand(0).getReg() != ARM::SP) {
int64_t Value =
(int64_t) (cast<MCConstantExpr>(SubExprVal))->getValue();
bool UseMov = true;
bool MovHasS = true;
if (Inst.getOpcode() == ARM::LDRConstPool) {
// ARM Constant
if (ARM_AM::getSOImmVal(Value) != -1) {
Value = ARM_AM::getSOImmVal(Value);
TmpInst.setOpcode(ARM::MOVi);
}
else if (ARM_AM::getSOImmVal(~Value) != -1) {
Value = ARM_AM::getSOImmVal(~Value);
TmpInst.setOpcode(ARM::MVNi);
}
else if (hasV6T2Ops() &&
Value >= 0 && Value < 65536) {
TmpInst.setOpcode(ARM::MOVi16);
MovHasS = false;
}
else
UseMov = false;
}
else {
// Thumb/Thumb2 Constant
if (hasThumb2() &&
ARM_AM::getT2SOImmVal(Value) != -1)
TmpInst.setOpcode(ARM::t2MOVi);
else if (hasThumb2() &&
ARM_AM::getT2SOImmVal(~Value) != -1) {
TmpInst.setOpcode(ARM::t2MVNi);
Value = ~Value;
}
else if (hasV8MBaseline() &&
Value >= 0 && Value < 65536) {
TmpInst.setOpcode(ARM::t2MOVi16);
MovHasS = false;
}
else
UseMov = false;
}
if (UseMov) {
TmpInst.addOperand(Inst.getOperand(0)); // Rt
TmpInst.addOperand(MCOperand::createImm(Value)); // Immediate
TmpInst.addOperand(Inst.getOperand(2)); // CondCode
TmpInst.addOperand(Inst.getOperand(3)); // CondCode
if (MovHasS)
TmpInst.addOperand(MCOperand::createReg(0)); // S
Inst = TmpInst;
return true;
}
}
// No opportunity to use MOV/MVN; create a constant pool entry instead.
const MCExpr *CPLoc =
getTargetStreamer().addConstantPoolEntry(SubExprVal,
PoolOperand.getStartLoc());
TmpInst.addOperand(Inst.getOperand(0)); // Rt
TmpInst.addOperand(MCOperand::createExpr(CPLoc)); // offset to constpool
if (TmpInst.getOpcode() == ARM::LDRi12)
TmpInst.addOperand(MCOperand::createImm(0)); // unused offset
TmpInst.addOperand(Inst.getOperand(2)); // CondCode
TmpInst.addOperand(Inst.getOperand(3)); // CondCode
Inst = TmpInst;
return true;
}
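// (Example for the ldr-pseudo lowering above; GNU syntax, ARM state.)
//   ldr r0, =0x1          @ becomes: mov r0, #1
//   ldr r0, =0xffffff00   @ becomes: mvn r0, #0xff
//   ldr r0, =0x12345678   @ becomes: ldr r0, [pc, #off] plus a pool entry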
// Handle NEON VST complex aliases.
case ARM::VST1LNdWB_register_Asm_8:
case ARM::VST1LNdWB_register_Asm_16:
case ARM::VST1LNdWB_register_Asm_32: {
MCInst TmpInst;
// Shuffle the operands around so the lane index operand is in the
// right place (operand-order sketch after this case).
unsigned Spacing;
TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
TmpInst.addOperand(Inst.getOperand(2)); // Rn
TmpInst.addOperand(Inst.getOperand(3)); // alignment
TmpInst.addOperand(Inst.getOperand(4)); // Rm
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(Inst.getOperand(1)); // lane
TmpInst.addOperand(Inst.getOperand(5)); // CondCode
TmpInst.addOperand(Inst.getOperand(6));
Inst = TmpInst;
return true;
}
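// (Operand-order sketch for the VSTnLN/VLDnLN rewrites in this switch.) The
// parsed pseudo carries {Vd, lane, Rn, align, (Rm,) pred}, while the real MC
// opcodes want the address operands first; e.g. for
// "vst1.8 {d0[2]}, [r1]!, r2" the rewrite above emits
//   {Rn_wb, Rn, align, Rm, Vd, lane, pred}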
case ARM::VST2LNdWB_register_Asm_8:
case ARM::VST2LNdWB_register_Asm_16:
case ARM::VST2LNdWB_register_Asm_32:
case ARM::VST2LNqWB_register_Asm_16:
case ARM::VST2LNqWB_register_Asm_32: {
MCInst TmpInst;
// Shuffle the operands around so the lane index operand is in the
// right place.
unsigned Spacing;
TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
TmpInst.addOperand(Inst.getOperand(2)); // Rn
TmpInst.addOperand(Inst.getOperand(3)); // alignment
TmpInst.addOperand(Inst.getOperand(4)); // Rm
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(Inst.getOperand(1)); // lane
TmpInst.addOperand(Inst.getOperand(5)); // CondCode
TmpInst.addOperand(Inst.getOperand(6));
Inst = TmpInst;
return true;
}
case ARM::VST3LNdWB_register_Asm_8:
case ARM::VST3LNdWB_register_Asm_16:
case ARM::VST3LNdWB_register_Asm_32:
case ARM::VST3LNqWB_register_Asm_16:
case ARM::VST3LNqWB_register_Asm_32: {
MCInst TmpInst;
// Shuffle the operands around so the lane index operand is in the
// right place.
unsigned Spacing;
TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
TmpInst.addOperand(Inst.getOperand(2)); // Rn
TmpInst.addOperand(Inst.getOperand(3)); // alignment
TmpInst.addOperand(Inst.getOperand(4)); // Rm
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 2));
TmpInst.addOperand(Inst.getOperand(1)); // lane
TmpInst.addOperand(Inst.getOperand(5)); // CondCode
TmpInst.addOperand(Inst.getOperand(6));
Inst = TmpInst;
return true;
}
case ARM::VST4LNdWB_register_Asm_8:
case ARM::VST4LNdWB_register_Asm_16:
case ARM::VST4LNdWB_register_Asm_32:
case ARM::VST4LNqWB_register_Asm_16:
case ARM::VST4LNqWB_register_Asm_32: {
MCInst TmpInst;
// Shuffle the operands around so the lane index operand is in the
// right place.
unsigned Spacing;
TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
TmpInst.addOperand(Inst.getOperand(2)); // Rn
TmpInst.addOperand(Inst.getOperand(3)); // alignment
TmpInst.addOperand(Inst.getOperand(4)); // Rm
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 2));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 3));
TmpInst.addOperand(Inst.getOperand(1)); // lane
TmpInst.addOperand(Inst.getOperand(5)); // CondCode
TmpInst.addOperand(Inst.getOperand(6));
Inst = TmpInst;
return true;
}
case ARM::VST1LNdWB_fixed_Asm_8:
case ARM::VST1LNdWB_fixed_Asm_16:
case ARM::VST1LNdWB_fixed_Asm_32: {
MCInst TmpInst;
// Shuffle the operands around so the lane index operand is in the
// right place.
unsigned Spacing;
TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
TmpInst.addOperand(Inst.getOperand(2)); // Rn
TmpInst.addOperand(Inst.getOperand(3)); // alignment
TmpInst.addOperand(MCOperand::createReg(0)); // Rm
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(Inst.getOperand(1)); // lane
TmpInst.addOperand(Inst.getOperand(4)); // CondCode
TmpInst.addOperand(Inst.getOperand(5));
Inst = TmpInst;
return true;
}
case ARM::VST2LNdWB_fixed_Asm_8:
case ARM::VST2LNdWB_fixed_Asm_16:
case ARM::VST2LNdWB_fixed_Asm_32:
case ARM::VST2LNqWB_fixed_Asm_16:
case ARM::VST2LNqWB_fixed_Asm_32: {
MCInst TmpInst;
// Shuffle the operands around so the lane index operand is in the
// right place.
unsigned Spacing;
TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
TmpInst.addOperand(Inst.getOperand(2)); // Rn
TmpInst.addOperand(Inst.getOperand(3)); // alignment
TmpInst.addOperand(MCOperand::createReg(0)); // Rm
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(Inst.getOperand(1)); // lane
TmpInst.addOperand(Inst.getOperand(4)); // CondCode
TmpInst.addOperand(Inst.getOperand(5));
Inst = TmpInst;
return true;
}
case ARM::VST3LNdWB_fixed_Asm_8:
case ARM::VST3LNdWB_fixed_Asm_16:
case ARM::VST3LNdWB_fixed_Asm_32:
case ARM::VST3LNqWB_fixed_Asm_16:
case ARM::VST3LNqWB_fixed_Asm_32: {
MCInst TmpInst;
// Shuffle the operands around so the lane index operand is in the
// right place.
unsigned Spacing;
TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
TmpInst.addOperand(Inst.getOperand(2)); // Rn
TmpInst.addOperand(Inst.getOperand(3)); // alignment
TmpInst.addOperand(MCOperand::createReg(0)); // Rm
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 2));
TmpInst.addOperand(Inst.getOperand(1)); // lane
TmpInst.addOperand(Inst.getOperand(4)); // CondCode
TmpInst.addOperand(Inst.getOperand(5));
Inst = TmpInst;
return true;
}
case ARM::VST4LNdWB_fixed_Asm_8:
case ARM::VST4LNdWB_fixed_Asm_16:
case ARM::VST4LNdWB_fixed_Asm_32:
case ARM::VST4LNqWB_fixed_Asm_16:
case ARM::VST4LNqWB_fixed_Asm_32: {
MCInst TmpInst;
// Shuffle the operands around so the lane index operand is in the
// right place.
unsigned Spacing;
TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
TmpInst.addOperand(Inst.getOperand(2)); // Rn
TmpInst.addOperand(Inst.getOperand(3)); // alignment
TmpInst.addOperand(MCOperand::createReg(0)); // Rm
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 2));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 3));
TmpInst.addOperand(Inst.getOperand(1)); // lane
TmpInst.addOperand(Inst.getOperand(4)); // CondCode
TmpInst.addOperand(Inst.getOperand(5));
Inst = TmpInst;
return true;
}
case ARM::VST1LNdAsm_8:
case ARM::VST1LNdAsm_16:
case ARM::VST1LNdAsm_32: {
MCInst TmpInst;
// Shuffle the operands around so the lane index operand is in the
// right place.
unsigned Spacing;
TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(2)); // Rn
TmpInst.addOperand(Inst.getOperand(3)); // alignment
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(Inst.getOperand(1)); // lane
TmpInst.addOperand(Inst.getOperand(4)); // CondCode
TmpInst.addOperand(Inst.getOperand(5));
Inst = TmpInst;
return true;
}
case ARM::VST2LNdAsm_8:
case ARM::VST2LNdAsm_16:
case ARM::VST2LNdAsm_32:
case ARM::VST2LNqAsm_16:
case ARM::VST2LNqAsm_32: {
MCInst TmpInst;
// Shuffle the operands around so the lane index operand is in the
// right place.
unsigned Spacing;
TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(2)); // Rn
TmpInst.addOperand(Inst.getOperand(3)); // alignment
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(Inst.getOperand(1)); // lane
TmpInst.addOperand(Inst.getOperand(4)); // CondCode
TmpInst.addOperand(Inst.getOperand(5));
Inst = TmpInst;
return true;
}
case ARM::VST3LNdAsm_8:
case ARM::VST3LNdAsm_16:
case ARM::VST3LNdAsm_32:
case ARM::VST3LNqAsm_16:
case ARM::VST3LNqAsm_32: {
MCInst TmpInst;
// Shuffle the operands around so the lane index operand is in the
// right place.
unsigned Spacing;
TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(2)); // Rn
TmpInst.addOperand(Inst.getOperand(3)); // alignment
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 2));
TmpInst.addOperand(Inst.getOperand(1)); // lane
TmpInst.addOperand(Inst.getOperand(4)); // CondCode
TmpInst.addOperand(Inst.getOperand(5));
Inst = TmpInst;
return true;
}
case ARM::VST4LNdAsm_8:
case ARM::VST4LNdAsm_16:
case ARM::VST4LNdAsm_32:
case ARM::VST4LNqAsm_16:
case ARM::VST4LNqAsm_32: {
MCInst TmpInst;
// Shuffle the operands around so the lane index operand is in the
// right place.
unsigned Spacing;
TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(2)); // Rn
TmpInst.addOperand(Inst.getOperand(3)); // alignment
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 2));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 3));
TmpInst.addOperand(Inst.getOperand(1)); // lane
TmpInst.addOperand(Inst.getOperand(4)); // CondCode
TmpInst.addOperand(Inst.getOperand(5));
Inst = TmpInst;
return true;
}
// Handle NEON VLD complex aliases.
case ARM::VLD1LNdWB_register_Asm_8:
case ARM::VLD1LNdWB_register_Asm_16:
case ARM::VLD1LNdWB_register_Asm_32: {
MCInst TmpInst;
// Shuffle the operands around so the lane index operand is in the
// right place.
unsigned Spacing;
TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
TmpInst.addOperand(Inst.getOperand(2)); // Rn
TmpInst.addOperand(Inst.getOperand(3)); // alignment
TmpInst.addOperand(Inst.getOperand(4)); // Rm
TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd); see note after this case
TmpInst.addOperand(Inst.getOperand(1)); // lane
TmpInst.addOperand(Inst.getOperand(5)); // CondCode
TmpInst.addOperand(Inst.getOperand(6));
Inst = TmpInst;
return true;
}
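// (Why loads also carry a tied source operand.) A per-lane load such as
// "vld1.8 {d0[2]}, [r1]" writes only one lane and must preserve the rest of
// d0, so the real opcode also reads Vd as a tied input; that is the extra
// "Tied operand src (== Vd)" operand appended above and in the cases below.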
case ARM::VLD2LNdWB_register_Asm_8:
case ARM::VLD2LNdWB_register_Asm_16:
case ARM::VLD2LNdWB_register_Asm_32:
case ARM::VLD2LNqWB_register_Asm_16:
case ARM::VLD2LNqWB_register_Asm_32: {
MCInst TmpInst;
// Shuffle the operands around so the lane index operand is in the
// right place.
unsigned Spacing;
TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
TmpInst.addOperand(Inst.getOperand(2)); // Rn
TmpInst.addOperand(Inst.getOperand(3)); // alignment
TmpInst.addOperand(Inst.getOperand(4)); // Rm
TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(Inst.getOperand(1)); // lane
TmpInst.addOperand(Inst.getOperand(5)); // CondCode
TmpInst.addOperand(Inst.getOperand(6));
Inst = TmpInst;
return true;
}
case ARM::VLD3LNdWB_register_Asm_8:
case ARM::VLD3LNdWB_register_Asm_16:
case ARM::VLD3LNdWB_register_Asm_32:
case ARM::VLD3LNqWB_register_Asm_16:
case ARM::VLD3LNqWB_register_Asm_32: {
MCInst TmpInst;
// Shuffle the operands around so the lane index operand is in the
// right place.
unsigned Spacing;
TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 2));
TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
TmpInst.addOperand(Inst.getOperand(2)); // Rn
TmpInst.addOperand(Inst.getOperand(3)); // alignment
TmpInst.addOperand(Inst.getOperand(4)); // Rm
TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 2));
TmpInst.addOperand(Inst.getOperand(1)); // lane
TmpInst.addOperand(Inst.getOperand(5)); // CondCode
TmpInst.addOperand(Inst.getOperand(6));
Inst = TmpInst;
return true;
}
case ARM::VLD4LNdWB_register_Asm_8:
case ARM::VLD4LNdWB_register_Asm_16:
case ARM::VLD4LNdWB_register_Asm_32:
case ARM::VLD4LNqWB_register_Asm_16:
case ARM::VLD4LNqWB_register_Asm_32: {
MCInst TmpInst;
// Shuffle the operands around so the lane index operand is in the
// right place.
unsigned Spacing;
TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 2));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 3));
TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
TmpInst.addOperand(Inst.getOperand(2)); // Rn
TmpInst.addOperand(Inst.getOperand(3)); // alignment
TmpInst.addOperand(Inst.getOperand(4)); // Rm
TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 2));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 3));
TmpInst.addOperand(Inst.getOperand(1)); // lane
TmpInst.addOperand(Inst.getOperand(5)); // CondCode
TmpInst.addOperand(Inst.getOperand(6));
Inst = TmpInst;
return true;
}
case ARM::VLD1LNdWB_fixed_Asm_8:
case ARM::VLD1LNdWB_fixed_Asm_16:
case ARM::VLD1LNdWB_fixed_Asm_32: {
MCInst TmpInst;
// Shuffle the operands around so the lane index operand is in the
// right place.
unsigned Spacing;
TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
TmpInst.addOperand(Inst.getOperand(2)); // Rn
TmpInst.addOperand(Inst.getOperand(3)); // alignment
TmpInst.addOperand(MCOperand::createReg(0)); // Rm
TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
TmpInst.addOperand(Inst.getOperand(1)); // lane
TmpInst.addOperand(Inst.getOperand(4)); // CondCode
TmpInst.addOperand(Inst.getOperand(5));
Inst = TmpInst;
return true;
}
case ARM::VLD2LNdWB_fixed_Asm_8:
case ARM::VLD2LNdWB_fixed_Asm_16:
case ARM::VLD2LNdWB_fixed_Asm_32:
case ARM::VLD2LNqWB_fixed_Asm_16:
case ARM::VLD2LNqWB_fixed_Asm_32: {
MCInst TmpInst;
// Shuffle the operands around so the lane index operand is in the
// right place.
unsigned Spacing;
TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
TmpInst.addOperand(Inst.getOperand(2)); // Rn
TmpInst.addOperand(Inst.getOperand(3)); // alignment
TmpInst.addOperand(MCOperand::createReg(0)); // Rm
TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(Inst.getOperand(1)); // lane
TmpInst.addOperand(Inst.getOperand(4)); // CondCode
TmpInst.addOperand(Inst.getOperand(5));
Inst = TmpInst;
return true;
}
case ARM::VLD3LNdWB_fixed_Asm_8:
case ARM::VLD3LNdWB_fixed_Asm_16:
case ARM::VLD3LNdWB_fixed_Asm_32:
case ARM::VLD3LNqWB_fixed_Asm_16:
case ARM::VLD3LNqWB_fixed_Asm_32: {
MCInst TmpInst;
// Shuffle the operands around so the lane index operand is in the
// right place.
unsigned Spacing;
TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 2));
TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
TmpInst.addOperand(Inst.getOperand(2)); // Rn
TmpInst.addOperand(Inst.getOperand(3)); // alignment
TmpInst.addOperand(MCOperand::createReg(0)); // Rm
TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 2));
TmpInst.addOperand(Inst.getOperand(1)); // lane
TmpInst.addOperand(Inst.getOperand(4)); // CondCode
TmpInst.addOperand(Inst.getOperand(5));
Inst = TmpInst;
return true;
}
case ARM::VLD4LNdWB_fixed_Asm_8:
case ARM::VLD4LNdWB_fixed_Asm_16:
case ARM::VLD4LNdWB_fixed_Asm_32:
case ARM::VLD4LNqWB_fixed_Asm_16:
case ARM::VLD4LNqWB_fixed_Asm_32: {
MCInst TmpInst;
// Shuffle the operands around so the lane index operand is in the
// right place.
unsigned Spacing;
TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 2));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 3));
TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
TmpInst.addOperand(Inst.getOperand(2)); // Rn
TmpInst.addOperand(Inst.getOperand(3)); // alignment
TmpInst.addOperand(MCOperand::createReg(0)); // Rm
TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 2));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 3));
TmpInst.addOperand(Inst.getOperand(1)); // lane
TmpInst.addOperand(Inst.getOperand(4)); // CondCode
TmpInst.addOperand(Inst.getOperand(5));
Inst = TmpInst;
return true;
}
case ARM::VLD1LNdAsm_8:
case ARM::VLD1LNdAsm_16:
case ARM::VLD1LNdAsm_32: {
MCInst TmpInst;
// Shuffle the operands around so the lane index operand is in the
// right place.
unsigned Spacing;
TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(Inst.getOperand(2)); // Rn
TmpInst.addOperand(Inst.getOperand(3)); // alignment
TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
TmpInst.addOperand(Inst.getOperand(1)); // lane
TmpInst.addOperand(Inst.getOperand(4)); // CondCode
TmpInst.addOperand(Inst.getOperand(5));
Inst = TmpInst;
return true;
}
case ARM::VLD2LNdAsm_8:
case ARM::VLD2LNdAsm_16:
case ARM::VLD2LNdAsm_32:
case ARM::VLD2LNqAsm_16:
case ARM::VLD2LNqAsm_32: {
MCInst TmpInst;
// Shuffle the operands around so the lane index operand is in the
// right place.
unsigned Spacing;
TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(Inst.getOperand(2)); // Rn
TmpInst.addOperand(Inst.getOperand(3)); // alignment
TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(Inst.getOperand(1)); // lane
TmpInst.addOperand(Inst.getOperand(4)); // CondCode
TmpInst.addOperand(Inst.getOperand(5));
Inst = TmpInst;
return true;
}
case ARM::VLD3LNdAsm_8:
case ARM::VLD3LNdAsm_16:
case ARM::VLD3LNdAsm_32:
case ARM::VLD3LNqAsm_16:
case ARM::VLD3LNqAsm_32: {
MCInst TmpInst;
// Shuffle the operands around so the lane index operand is in the
// right place.
unsigned Spacing;
TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 2));
TmpInst.addOperand(Inst.getOperand(2)); // Rn
TmpInst.addOperand(Inst.getOperand(3)); // alignment
TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 2));
TmpInst.addOperand(Inst.getOperand(1)); // lane
TmpInst.addOperand(Inst.getOperand(4)); // CondCode
TmpInst.addOperand(Inst.getOperand(5));
Inst = TmpInst;
return true;
}
case ARM::VLD4LNdAsm_8:
case ARM::VLD4LNdAsm_16:
case ARM::VLD4LNdAsm_32:
case ARM::VLD4LNqAsm_16:
case ARM::VLD4LNqAsm_32: {
MCInst TmpInst;
// Shuffle the operands around so the lane index operand is in the
// right place.
unsigned Spacing;
TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 2));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 3));
TmpInst.addOperand(Inst.getOperand(2)); // Rn
TmpInst.addOperand(Inst.getOperand(3)); // alignment
TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 2));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 3));
TmpInst.addOperand(Inst.getOperand(1)); // lane
TmpInst.addOperand(Inst.getOperand(4)); // CondCode
TmpInst.addOperand(Inst.getOperand(5));
Inst = TmpInst;
return true;
}
// VLD3DUP single 3-element structure to all lanes instructions.
case ARM::VLD3DUPdAsm_8:
case ARM::VLD3DUPdAsm_16:
case ARM::VLD3DUPdAsm_32:
case ARM::VLD3DUPqAsm_8:
case ARM::VLD3DUPqAsm_16:
case ARM::VLD3DUPqAsm_32: {
MCInst TmpInst;
unsigned Spacing;
TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 2));
TmpInst.addOperand(Inst.getOperand(1)); // Rn
TmpInst.addOperand(Inst.getOperand(2)); // alignment
TmpInst.addOperand(Inst.getOperand(3)); // CondCode
TmpInst.addOperand(Inst.getOperand(4));
Inst = TmpInst;
return true;
}
case ARM::VLD3DUPdWB_fixed_Asm_8:
case ARM::VLD3DUPdWB_fixed_Asm_16:
case ARM::VLD3DUPdWB_fixed_Asm_32:
case ARM::VLD3DUPqWB_fixed_Asm_8:
case ARM::VLD3DUPqWB_fixed_Asm_16:
case ARM::VLD3DUPqWB_fixed_Asm_32: {
MCInst TmpInst;
unsigned Spacing;
TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 2));
TmpInst.addOperand(Inst.getOperand(1)); // Rn
TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
TmpInst.addOperand(Inst.getOperand(2)); // alignment
TmpInst.addOperand(MCOperand::createReg(0)); // Rm
TmpInst.addOperand(Inst.getOperand(3)); // CondCode
TmpInst.addOperand(Inst.getOperand(4));
Inst = TmpInst;
return true;
}
case ARM::VLD3DUPdWB_register_Asm_8:
case ARM::VLD3DUPdWB_register_Asm_16:
case ARM::VLD3DUPdWB_register_Asm_32:
case ARM::VLD3DUPqWB_register_Asm_8:
case ARM::VLD3DUPqWB_register_Asm_16:
case ARM::VLD3DUPqWB_register_Asm_32: {
MCInst TmpInst;
unsigned Spacing;
TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 2));
TmpInst.addOperand(Inst.getOperand(1)); // Rn
TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
TmpInst.addOperand(Inst.getOperand(2)); // alignment
TmpInst.addOperand(Inst.getOperand(3)); // Rm
TmpInst.addOperand(Inst.getOperand(4)); // CondCode
TmpInst.addOperand(Inst.getOperand(5));
Inst = TmpInst;
return true;
}
// VLD3 multiple 3-element structure instructions.
case ARM::VLD3dAsm_8:
case ARM::VLD3dAsm_16:
case ARM::VLD3dAsm_32:
case ARM::VLD3qAsm_8:
case ARM::VLD3qAsm_16:
case ARM::VLD3qAsm_32: {
MCInst TmpInst;
unsigned Spacing;
TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 2));
TmpInst.addOperand(Inst.getOperand(1)); // Rn
TmpInst.addOperand(Inst.getOperand(2)); // alignment
TmpInst.addOperand(Inst.getOperand(3)); // CondCode
TmpInst.addOperand(Inst.getOperand(4));
Inst = TmpInst;
return true;
}
case ARM::VLD3dWB_fixed_Asm_8:
case ARM::VLD3dWB_fixed_Asm_16:
case ARM::VLD3dWB_fixed_Asm_32:
case ARM::VLD3qWB_fixed_Asm_8:
case ARM::VLD3qWB_fixed_Asm_16:
case ARM::VLD3qWB_fixed_Asm_32: {
MCInst TmpInst;
unsigned Spacing;
TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 2));
TmpInst.addOperand(Inst.getOperand(1)); // Rn
TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
TmpInst.addOperand(Inst.getOperand(2)); // alignment
TmpInst.addOperand(MCOperand::createReg(0)); // Rm
TmpInst.addOperand(Inst.getOperand(3)); // CondCode
TmpInst.addOperand(Inst.getOperand(4));
Inst = TmpInst;
return true;
}
case ARM::VLD3dWB_register_Asm_8:
case ARM::VLD3dWB_register_Asm_16:
case ARM::VLD3dWB_register_Asm_32:
case ARM::VLD3qWB_register_Asm_8:
case ARM::VLD3qWB_register_Asm_16:
case ARM::VLD3qWB_register_Asm_32: {
MCInst TmpInst;
unsigned Spacing;
TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 2));
TmpInst.addOperand(Inst.getOperand(1)); // Rn
TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
TmpInst.addOperand(Inst.getOperand(2)); // alignment
TmpInst.addOperand(Inst.getOperand(3)); // Rm
TmpInst.addOperand(Inst.getOperand(4)); // CondCode
TmpInst.addOperand(Inst.getOperand(5));
Inst = TmpInst;
return true;
}
// VLD4DUP single 4-element structure to all lanes instructions.
case ARM::VLD4DUPdAsm_8:
case ARM::VLD4DUPdAsm_16:
case ARM::VLD4DUPdAsm_32:
case ARM::VLD4DUPqAsm_8:
case ARM::VLD4DUPqAsm_16:
case ARM::VLD4DUPqAsm_32: {
MCInst TmpInst;
unsigned Spacing;
TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 2));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 3));
TmpInst.addOperand(Inst.getOperand(1)); // Rn
TmpInst.addOperand(Inst.getOperand(2)); // alignment
TmpInst.addOperand(Inst.getOperand(3)); // CondCode
TmpInst.addOperand(Inst.getOperand(4));
Inst = TmpInst;
return true;
}
case ARM::VLD4DUPdWB_fixed_Asm_8:
case ARM::VLD4DUPdWB_fixed_Asm_16:
case ARM::VLD4DUPdWB_fixed_Asm_32:
case ARM::VLD4DUPqWB_fixed_Asm_8:
case ARM::VLD4DUPqWB_fixed_Asm_16:
case ARM::VLD4DUPqWB_fixed_Asm_32: {
MCInst TmpInst;
unsigned Spacing;
TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 2));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 3));
TmpInst.addOperand(Inst.getOperand(1)); // Rn
TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
TmpInst.addOperand(Inst.getOperand(2)); // alignment
TmpInst.addOperand(MCOperand::createReg(0)); // Rm
TmpInst.addOperand(Inst.getOperand(3)); // CondCode
TmpInst.addOperand(Inst.getOperand(4));
Inst = TmpInst;
return true;
}
case ARM::VLD4DUPdWB_register_Asm_8:
case ARM::VLD4DUPdWB_register_Asm_16:
case ARM::VLD4DUPdWB_register_Asm_32:
case ARM::VLD4DUPqWB_register_Asm_8:
case ARM::VLD4DUPqWB_register_Asm_16:
case ARM::VLD4DUPqWB_register_Asm_32: {
MCInst TmpInst;
unsigned Spacing;
TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 2));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 3));
TmpInst.addOperand(Inst.getOperand(1)); // Rn
TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
TmpInst.addOperand(Inst.getOperand(2)); // alignment
TmpInst.addOperand(Inst.getOperand(3)); // Rm
TmpInst.addOperand(Inst.getOperand(4)); // CondCode
TmpInst.addOperand(Inst.getOperand(5));
Inst = TmpInst;
return true;
}
// VLD4 multiple 4-element structure instructions.
case ARM::VLD4dAsm_8:
case ARM::VLD4dAsm_16:
case ARM::VLD4dAsm_32:
case ARM::VLD4qAsm_8:
case ARM::VLD4qAsm_16:
case ARM::VLD4qAsm_32: {
MCInst TmpInst;
unsigned Spacing;
TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 2));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 3));
TmpInst.addOperand(Inst.getOperand(1)); // Rn
TmpInst.addOperand(Inst.getOperand(2)); // alignment
TmpInst.addOperand(Inst.getOperand(3)); // CondCode
TmpInst.addOperand(Inst.getOperand(4));
Inst = TmpInst;
return true;
}
case ARM::VLD4dWB_fixed_Asm_8:
case ARM::VLD4dWB_fixed_Asm_16:
case ARM::VLD4dWB_fixed_Asm_32:
case ARM::VLD4qWB_fixed_Asm_8:
case ARM::VLD4qWB_fixed_Asm_16:
case ARM::VLD4qWB_fixed_Asm_32: {
MCInst TmpInst;
unsigned Spacing;
TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 2));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 3));
TmpInst.addOperand(Inst.getOperand(1)); // Rn
TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
TmpInst.addOperand(Inst.getOperand(2)); // alignment
TmpInst.addOperand(MCOperand::createReg(0)); // Rm
TmpInst.addOperand(Inst.getOperand(3)); // CondCode
TmpInst.addOperand(Inst.getOperand(4));
Inst = TmpInst;
return true;
}
case ARM::VLD4dWB_register_Asm_8:
case ARM::VLD4dWB_register_Asm_16:
case ARM::VLD4dWB_register_Asm_32:
case ARM::VLD4qWB_register_Asm_8:
case ARM::VLD4qWB_register_Asm_16:
case ARM::VLD4qWB_register_Asm_32: {
MCInst TmpInst;
unsigned Spacing;
TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 2));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 3));
TmpInst.addOperand(Inst.getOperand(1)); // Rn
TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
TmpInst.addOperand(Inst.getOperand(2)); // alignment
TmpInst.addOperand(Inst.getOperand(3)); // Rm
TmpInst.addOperand(Inst.getOperand(4)); // CondCode
TmpInst.addOperand(Inst.getOperand(5));
Inst = TmpInst;
return true;
}
// VST3 multiple 3-element structure instructions.
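// For example, 'vst3.8 {d0, d1, d2}, [r0]' is rewritten into the real
// VST3d8, with the second and third registers reconstructed from Vd and the
// spacing returned by getRealVSTOpcode().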
case ARM::VST3dAsm_8:
case ARM::VST3dAsm_16:
case ARM::VST3dAsm_32:
case ARM::VST3qAsm_8:
case ARM::VST3qAsm_16:
case ARM::VST3qAsm_32: {
MCInst TmpInst;
unsigned Spacing;
TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(1)); // Rn
TmpInst.addOperand(Inst.getOperand(2)); // alignment
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 2));
TmpInst.addOperand(Inst.getOperand(3)); // CondCode
TmpInst.addOperand(Inst.getOperand(4));
Inst = TmpInst;
return true;
}
case ARM::VST3dWB_fixed_Asm_8:
case ARM::VST3dWB_fixed_Asm_16:
case ARM::VST3dWB_fixed_Asm_32:
case ARM::VST3qWB_fixed_Asm_8:
case ARM::VST3qWB_fixed_Asm_16:
case ARM::VST3qWB_fixed_Asm_32: {
MCInst TmpInst;
unsigned Spacing;
TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(1)); // Rn
TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
TmpInst.addOperand(Inst.getOperand(2)); // alignment
TmpInst.addOperand(MCOperand::createReg(0)); // Rm
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 2));
TmpInst.addOperand(Inst.getOperand(3)); // CondCode
TmpInst.addOperand(Inst.getOperand(4));
Inst = TmpInst;
return true;
}
case ARM::VST3dWB_register_Asm_8:
case ARM::VST3dWB_register_Asm_16:
case ARM::VST3dWB_register_Asm_32:
case ARM::VST3qWB_register_Asm_8:
case ARM::VST3qWB_register_Asm_16:
case ARM::VST3qWB_register_Asm_32: {
MCInst TmpInst;
unsigned Spacing;
TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(1)); // Rn
TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
TmpInst.addOperand(Inst.getOperand(2)); // alignment
TmpInst.addOperand(Inst.getOperand(3)); // Rm
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 2));
TmpInst.addOperand(Inst.getOperand(4)); // CondCode
TmpInst.addOperand(Inst.getOperand(5));
Inst = TmpInst;
return true;
}
// VST4 multiple 4-element structure instructions.
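// For example, 'vst4.8 {d0, d1, d2, d3}, [r0]' is rewritten into the real
// VST4d8, with the trailing three registers reconstructed from Vd and the
// spacing, as above.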
case ARM::VST4dAsm_8:
case ARM::VST4dAsm_16:
case ARM::VST4dAsm_32:
case ARM::VST4qAsm_8:
case ARM::VST4qAsm_16:
case ARM::VST4qAsm_32: {
MCInst TmpInst;
unsigned Spacing;
TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(1)); // Rn
TmpInst.addOperand(Inst.getOperand(2)); // alignment
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 2));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 3));
TmpInst.addOperand(Inst.getOperand(3)); // CondCode
TmpInst.addOperand(Inst.getOperand(4));
Inst = TmpInst;
return true;
}
case ARM::VST4dWB_fixed_Asm_8:
case ARM::VST4dWB_fixed_Asm_16:
case ARM::VST4dWB_fixed_Asm_32:
case ARM::VST4qWB_fixed_Asm_8:
case ARM::VST4qWB_fixed_Asm_16:
case ARM::VST4qWB_fixed_Asm_32: {
MCInst TmpInst;
unsigned Spacing;
TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(1)); // Rn
TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
TmpInst.addOperand(Inst.getOperand(2)); // alignment
TmpInst.addOperand(MCOperand::createReg(0)); // Rm
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 2));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 3));
TmpInst.addOperand(Inst.getOperand(3)); // CondCode
TmpInst.addOperand(Inst.getOperand(4));
Inst = TmpInst;
return true;
}
case ARM::VST4dWB_register_Asm_8:
case ARM::VST4dWB_register_Asm_16:
case ARM::VST4dWB_register_Asm_32:
case ARM::VST4qWB_register_Asm_8:
case ARM::VST4qWB_register_Asm_16:
case ARM::VST4qWB_register_Asm_32: {
MCInst TmpInst;
unsigned Spacing;
TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
TmpInst.addOperand(Inst.getOperand(1)); // Rn
TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
TmpInst.addOperand(Inst.getOperand(2)); // alignment
TmpInst.addOperand(Inst.getOperand(3)); // Rm
TmpInst.addOperand(Inst.getOperand(0)); // Vd
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 2));
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
Spacing * 3));
TmpInst.addOperand(Inst.getOperand(4)); // CondCode
TmpInst.addOperand(Inst.getOperand(5));
Inst = TmpInst;
return true;
}
// Handle encoding choice for the shift-immediate instructions.
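// For example, outside an IT block 'lsls r0, r1, #2' can use the 16-bit
// tLSLri encoding, while an explicit '.w' qualifier keeps the 32-bit
// t2LSLri encoding.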
case ARM::t2LSLri:
case ARM::t2LSRri:
case ARM::t2ASRri:
if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
isARMLowRegister(Inst.getOperand(1).getReg()) &&
Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
!HasWideQualifier) {
unsigned NewOpc;
switch (Inst.getOpcode()) {
default: llvm_unreachable("unexpected opcode");
case ARM::t2LSLri: NewOpc = ARM::tLSLri; break;
case ARM::t2LSRri: NewOpc = ARM::tLSRri; break;
case ARM::t2ASRri: NewOpc = ARM::tASRri; break;
}
// The Thumb1 operands aren't in the same order. Awesome, eh?
MCInst TmpInst;
TmpInst.setOpcode(NewOpc);
TmpInst.addOperand(Inst.getOperand(0));
TmpInst.addOperand(Inst.getOperand(5));
TmpInst.addOperand(Inst.getOperand(1));
TmpInst.addOperand(Inst.getOperand(2));
TmpInst.addOperand(Inst.getOperand(3));
TmpInst.addOperand(Inst.getOperand(4));
Inst = TmpInst;
return true;
}
return false;
// Handle the Thumb2 mode MOV complex aliases.
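// For example, 'mov r0, r1, lsl r2' is rewritten into the shift instruction
// 'lsl r0, r1, r2' (tLSLrr or t2LSLrr, depending on the operands and the IT
// state).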
case ARM::t2MOVsr:
case ARM::t2MOVSsr: {
// Which instruction to expand to depends on the CCOut operand and
// whether we're in an IT block if the register operands are low
// registers.
bool isNarrow = false;
if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
isARMLowRegister(Inst.getOperand(1).getReg()) &&
isARMLowRegister(Inst.getOperand(2).getReg()) &&
Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr) &&
!HasWideQualifier)
isNarrow = true;
MCInst TmpInst;
unsigned newOpc;
switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
default: llvm_unreachable("unexpected opcode!");
case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR : ARM::t2RORrr; break;
}
TmpInst.setOpcode(newOpc);
TmpInst.addOperand(Inst.getOperand(0)); // Rd
if (isNarrow)
TmpInst.addOperand(MCOperand::createReg(
Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
TmpInst.addOperand(Inst.getOperand(1)); // Rn
TmpInst.addOperand(Inst.getOperand(2)); // Rm
TmpInst.addOperand(Inst.getOperand(4)); // CondCode
TmpInst.addOperand(Inst.getOperand(5));
if (!isNarrow)
TmpInst.addOperand(MCOperand::createReg(
Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
Inst = TmpInst;
return true;
}
case ARM::t2MOVsi:
case ARM::t2MOVSsi: {
// Which instruction to expand to depends on the CCOut operand and
// whether we're in an IT block if the register operands are low
// registers.
bool isNarrow = false;
if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
isARMLowRegister(Inst.getOperand(1).getReg()) &&
inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi) &&
!HasWideQualifier)
isNarrow = true;
MCInst TmpInst;
unsigned newOpc;
unsigned Shift = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
bool isMov = false;
// MOV rd, rm, LSL #0 is actually a MOV instruction
if (Shift == ARM_AM::lsl && Amount == 0) {
isMov = true;
// The 16-bit encoding of 'MOV rd, rm, LSL #0' is explicitly encoding T2 of
// MOV (register) in the ARMv8-A and ARMv8-M manuals; that encoding sets the
// flags and is unpredictable in an IT block, so the 32-bit encoding T3 has
// to be used instead.
if (inITBlock()) {
isNarrow = false;
}
newOpc = isNarrow ? ARM::tMOVSr : ARM::t2MOVr;
} else {
switch(Shift) {
default: llvm_unreachable("unexpected opcode!");
case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
}
}
if (Amount == 32) Amount = 0;
TmpInst.setOpcode(newOpc);
TmpInst.addOperand(Inst.getOperand(0)); // Rd
if (isNarrow && !isMov)
TmpInst.addOperand(MCOperand::createReg(
Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
TmpInst.addOperand(Inst.getOperand(1)); // Rn
if (newOpc != ARM::t2RRX && !isMov)
TmpInst.addOperand(MCOperand::createImm(Amount));
TmpInst.addOperand(Inst.getOperand(3)); // CondCode
TmpInst.addOperand(Inst.getOperand(4));
if (!isNarrow)
TmpInst.addOperand(MCOperand::createReg(
Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
Inst = TmpInst;
return true;
}
// Handle the ARM mode MOV complex aliases.
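// For example, ARM-mode 'asr r0, r1, r2' has no encoding of its own and is
// assembled as 'mov r0, r1, asr r2' (MOVsr).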
case ARM::ASRr:
case ARM::LSRr:
case ARM::LSLr:
case ARM::RORr: {
ARM_AM::ShiftOpc ShiftTy;
switch(Inst.getOpcode()) {
default: llvm_unreachable("unexpected opcode!");
case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
case ARM::RORr: ShiftTy = ARM_AM::ror; break;
}
unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
MCInst TmpInst;
TmpInst.setOpcode(ARM::MOVsr);
TmpInst.addOperand(Inst.getOperand(0)); // Rd
TmpInst.addOperand(Inst.getOperand(1)); // Rn
TmpInst.addOperand(Inst.getOperand(2)); // Rm
TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
TmpInst.addOperand(Inst.getOperand(3)); // CondCode
TmpInst.addOperand(Inst.getOperand(4));
TmpInst.addOperand(Inst.getOperand(5)); // cc_out
Inst = TmpInst;
return true;
}
case ARM::ASRi:
case ARM::LSRi:
case ARM::LSLi:
case ARM::RORi: {
ARM_AM::ShiftOpc ShiftTy;
switch(Inst.getOpcode()) {
default: llvm_unreachable("unexpected opcode!");
case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
case ARM::RORi: ShiftTy = ARM_AM::ror; break;
}
// A shift by zero is a plain MOVr, not a MOVsi.
unsigned Amt = Inst.getOperand(2).getImm();
unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
// A shift by 32 should be encoded as 0 when permitted
if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr))
Amt = 0;
unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
MCInst TmpInst;
TmpInst.setOpcode(Opc);
TmpInst.addOperand(Inst.getOperand(0)); // Rd
TmpInst.addOperand(Inst.getOperand(1)); // Rn
if (Opc == ARM::MOVsi)
TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
TmpInst.addOperand(Inst.getOperand(3)); // CondCode
TmpInst.addOperand(Inst.getOperand(4));
TmpInst.addOperand(Inst.getOperand(5)); // cc_out
Inst = TmpInst;
return true;
}
case ARM::RRXi: {
unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
MCInst TmpInst;
TmpInst.setOpcode(ARM::MOVsi);
TmpInst.addOperand(Inst.getOperand(0)); // Rd
TmpInst.addOperand(Inst.getOperand(1)); // Rn
TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
TmpInst.addOperand(Inst.getOperand(2)); // CondCode
TmpInst.addOperand(Inst.getOperand(3));
TmpInst.addOperand(Inst.getOperand(4)); // cc_out
Inst = TmpInst;
return true;
}
case ARM::t2LDMIA_UPD: {
// If this is a load of a single register, then we should use
// a post-indexed LDR instruction instead, per the ARM ARM.
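// For example, 'ldmia r0!, {r1}' is emitted as 'ldr r1, [r0], #4'.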
if (Inst.getNumOperands() != 5)
return false;
MCInst TmpInst;
TmpInst.setOpcode(ARM::t2LDR_POST);
TmpInst.addOperand(Inst.getOperand(4)); // Rt
TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
TmpInst.addOperand(Inst.getOperand(1)); // Rn
TmpInst.addOperand(MCOperand::createImm(4));
TmpInst.addOperand(Inst.getOperand(2)); // CondCode
TmpInst.addOperand(Inst.getOperand(3));
Inst = TmpInst;
return true;
}
case ARM::t2STMDB_UPD: {
// If this is a store of a single register, then we should use
// a pre-indexed STR instruction instead, per the ARM ARM.
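// For example, 'stmdb r0!, {r1}' is emitted as 'str r1, [r0, #-4]!'.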
if (Inst.getNumOperands() != 5)
return false;
MCInst TmpInst;
TmpInst.setOpcode(ARM::t2STR_PRE);
TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
TmpInst.addOperand(Inst.getOperand(4)); // Rt
TmpInst.addOperand(Inst.getOperand(1)); // Rn
TmpInst.addOperand(MCOperand::createImm(-4));
TmpInst.addOperand(Inst.getOperand(2)); // CondCode
TmpInst.addOperand(Inst.getOperand(3));
Inst = TmpInst;
return true;
}
case ARM::LDMIA_UPD:
// If this is a load of a single register via a 'pop', then we should use
// a post-indexed LDR instruction instead, per the ARM ARM.
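// For example, 'pop {r0}' is emitted as 'ldr r0, [sp], #4'.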
if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "pop" &&
Inst.getNumOperands() == 5) {
MCInst TmpInst;
TmpInst.setOpcode(ARM::LDR_POST_IMM);
TmpInst.addOperand(Inst.getOperand(4)); // Rt
TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
TmpInst.addOperand(Inst.getOperand(1)); // Rn
TmpInst.addOperand(MCOperand::createReg(0)); // am2offset
TmpInst.addOperand(MCOperand::createImm(4));
TmpInst.addOperand(Inst.getOperand(2)); // CondCode
TmpInst.addOperand(Inst.getOperand(3));
Inst = TmpInst;
return true;
}
break;
case ARM::STMDB_UPD:
// If this is a store of a single register via a 'push', then we should use
// a pre-indexed STR instruction instead, per the ARM ARM.
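// For example, 'push {r0}' is emitted as 'str r0, [sp, #-4]!'.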
if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "push" &&
Inst.getNumOperands() == 5) {
MCInst TmpInst;
TmpInst.setOpcode(ARM::STR_PRE_IMM);
TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
TmpInst.addOperand(Inst.getOperand(4)); // Rt
TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
TmpInst.addOperand(MCOperand::createImm(-4));
TmpInst.addOperand(Inst.getOperand(2)); // CondCode
TmpInst.addOperand(Inst.getOperand(3));
Inst = TmpInst;
}
break;
case ARM::t2ADDri12:
// If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
// mnemonic was used (not "addw"), encoding T3 is preferred.
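// For example, 'add r0, r1, #16' selects T3 here (16 is a valid modified
// immediate), while 'addw r0, r1, #16' keeps the 12-bit immediate encoding.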
if (static_cast<ARMOperand &>(*Operands[0]).getToken() != "add" ||
ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
break;
Inst.setOpcode(ARM::t2ADDri);
Inst.addOperand(MCOperand::createReg(0)); // cc_out
break;
case ARM::t2SUBri12:
// If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
// mnemonic was used (not "subw"), encoding T3 is preferred.
if (static_cast<ARMOperand &>(*Operands[0]).getToken() != "sub" ||
ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
break;
Inst.setOpcode(ARM::t2SUBri);
Inst.addOperand(MCOperand::createReg(0)); // cc_out
break;
case ARM::tADDi8:
// If the immediate is in the range 0-7, we want tADDi3 iff Rd was
// explicitly specified. From the ARM ARM: "Encoding T1 is preferred
// to encoding T2 if <Rd> is specified and encoding T2 is preferred
// to encoding T1 if <Rd> is omitted."
if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
Inst.setOpcode(ARM::tADDi3);
return true;
}
break;
case ARM::tSUBi8:
// If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
// explicitly specified. From the ARM ARM: "Encoding T1 is preferred
// to encoding T2 if <Rd> is specified and encoding T2 is preferred
// to encoding T1 if <Rd> is omitted."
if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
Inst.setOpcode(ARM::tSUBi3);
return true;
}
break;
case ARM::t2ADDri:
case ARM::t2SUBri: {
// If the destination and first source operand are the same, and
// the flags are compatible with the current IT status, use encoding T2
// instead of T3. For compatibility with the system 'as'. Make sure the
// wide encoding wasn't explicit.
if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
!isARMLowRegister(Inst.getOperand(0).getReg()) ||
(Inst.getOperand(2).isImm() &&
(unsigned)Inst.getOperand(2).getImm() > 255) ||
Inst.getOperand(5).getReg() != (inITBlock() ? 0 : ARM::CPSR) ||
HasWideQualifier)
break;
MCInst TmpInst;
TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
ARM::tADDi8 : ARM::tSUBi8);
TmpInst.addOperand(Inst.getOperand(0));
TmpInst.addOperand(Inst.getOperand(5));
TmpInst.addOperand(Inst.getOperand(0));
TmpInst.addOperand(Inst.getOperand(2));
TmpInst.addOperand(Inst.getOperand(3));
TmpInst.addOperand(Inst.getOperand(4));
Inst = TmpInst;
return true;
}
case ARM::t2ADDrr: {
// If the destination and first source operand are the same, and
// there's no setting of the flags, use encoding T2 instead of T3.
// Note that this is only for ADD, not SUB. This mirrors the system
// 'as' behaviour. Also take advantage of ADD being commutative.
// Make sure the wide encoding wasn't explicit.
bool Swap = false;
auto DestReg = Inst.getOperand(0).getReg();
bool Transform = DestReg == Inst.getOperand(1).getReg();
if (!Transform && DestReg == Inst.getOperand(2).getReg()) {
Transform = true;
Swap = true;
}
if (!Transform ||
Inst.getOperand(5).getReg() != 0 ||
HasWideQualifier)
break;
MCInst TmpInst;
TmpInst.setOpcode(ARM::tADDhirr);
TmpInst.addOperand(Inst.getOperand(0));
TmpInst.addOperand(Inst.getOperand(0));
TmpInst.addOperand(Inst.getOperand(Swap ? 1 : 2));
TmpInst.addOperand(Inst.getOperand(3));
TmpInst.addOperand(Inst.getOperand(4));
Inst = TmpInst;
return true;
}
case ARM::tADDrSP:
// If the non-SP source operand and the destination operand are not the
// same, we need to use the 32-bit encoding if it's available.
if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
Inst.setOpcode(ARM::t2ADDrr);
Inst.addOperand(MCOperand::createReg(0)); // cc_out
return true;
}
break;
case ARM::tB:
// A Thumb conditional branch outside of an IT block is a tBcc.
if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
Inst.setOpcode(ARM::tBcc);
return true;
}
break;
case ARM::t2B:
// A Thumb2 conditional branch outside of an IT block is a t2Bcc.
if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
Inst.setOpcode(ARM::t2Bcc);
return true;
}
break;
case ARM::t2Bcc:
// If the conditional is AL or we're in an IT block, we really want t2B.
if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
Inst.setOpcode(ARM::t2B);
return true;
}
break;
case ARM::tBcc:
// If the conditional is AL, we really want tB.
if (Inst.getOperand(1).getImm() == ARMCC::AL) {
Inst.setOpcode(ARM::tB);
return true;
}
break;
case ARM::tLDMIA: {
// If the register list contains any high registers, or if the writeback
// doesn't match what tLDMIA can do, we need to use the 32-bit encoding
// instead if we're in Thumb2. Otherwise, this should have generated
// an error in validateInstruction().
unsigned Rn = Inst.getOperand(0).getReg();
bool hasWritebackToken =
(static_cast<ARMOperand &>(*Operands[3]).isToken() &&
static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
bool listContainsBase;
if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
(!listContainsBase && !hasWritebackToken) ||
(listContainsBase && hasWritebackToken)) {
// 16-bit encoding isn't sufficient. Switch to the 32-bit version.
assert(isThumbTwo());
Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
// If we're switching to the updating version, we need to insert
// the writeback tied operand.
if (hasWritebackToken)
Inst.insert(Inst.begin(),
MCOperand::createReg(Inst.getOperand(0).getReg()));
return true;
}
break;
}
case ARM::tSTMIA_UPD: {
// If the register list contains any high registers, we need to use
// the 32-bit encoding instead if we're in Thumb2. Otherwise, this
// should have generated an error in validateInstruction().
unsigned Rn = Inst.getOperand(0).getReg();
bool listContainsBase;
if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
// 16-bit encoding isn't sufficient. Switch to the 32-bit version.
assert(isThumbTwo());
Inst.setOpcode(ARM::t2STMIA_UPD);
return true;
}
break;
}
case ARM::tPOP: {
bool listContainsBase;
// If the register list contains any high registers, we need to use
// the 32-bit encoding instead if we're in Thumb2. Otherwise, this
// should have generated an error in validateInstruction().
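// For example, 'pop {r0, r8}' cannot use the 16-bit tPOP encoding, so it is
// rewritten as the 32-bit 'ldmia sp!, {r0, r8}' (t2LDMIA_UPD) below.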
if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
return false;
assert(isThumbTwo());
Inst.setOpcode(ARM::t2LDMIA_UPD);
// Add the base register and writeback operands.
Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
return true;
}
case ARM::tPUSH: {
bool listContainsBase;
if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
return false;
assert(isThumbTwo());
Inst.setOpcode(ARM::t2STMDB_UPD);
// Add the base register and writeback operands.
Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
return true;
}
case ARM::t2MOVi:
// If we can use the 16-bit encoding and the user didn't explicitly
// request the 32-bit variant, transform it here.
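// For example, 'movs r0, #42' outside an IT block narrows to the 16-bit
// tMOVi8; a plain 'mov' there must keep the 32-bit t2MOVi, since the narrow
// encoding sets the flags.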
if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
(Inst.getOperand(1).isImm() &&
(unsigned)Inst.getOperand(1).getImm() <= 255) &&
Inst.getOperand(4).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
!HasWideQualifier) {
// The operands aren't in the same order for tMOVi8...
MCInst TmpInst;
TmpInst.setOpcode(ARM::tMOVi8);
TmpInst.addOperand(Inst.getOperand(0));
TmpInst.addOperand(Inst.getOperand(4));
TmpInst.addOperand(Inst.getOperand(1));
TmpInst.addOperand(Inst.getOperand(2));
TmpInst.addOperand(Inst.getOperand(3));
Inst = TmpInst;
return true;
}
break;
case ARM::t2MOVr:
// If we can use the 16-bit encoding and the user didn't explicitly
// request the 32-bit variant, transform it here.
if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
isARMLowRegister(Inst.getOperand(1).getReg()) &&
Inst.getOperand(2).getImm() == ARMCC::AL &&
Inst.getOperand(4).getReg() == ARM::CPSR &&
!HasWideQualifier) {
// The operands aren't the same for tMOV[S]r... (no cc_out)
MCInst TmpInst;
TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
TmpInst.addOperand(Inst.getOperand(0));
TmpInst.addOperand(Inst.getOperand(1));
TmpInst.addOperand(Inst.getOperand(2));
TmpInst.addOperand(Inst.getOperand(3));
Inst = TmpInst;
return true;
}
break;
case ARM::t2SXTH:
case ARM::t2SXTB:
case ARM::t2UXTH:
case ARM::t2UXTB:
// If we can use the 16-bit encoding and the user didn't explicitly
// request the 32-bit variant, transform it here.
if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
isARMLowRegister(Inst.getOperand(1).getReg()) &&
Inst.getOperand(2).getImm() == 0 &&
!HasWideQualifier) {
unsigned NewOpc;
switch (Inst.getOpcode()) {
default: llvm_unreachable("Illegal opcode!");
case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
}
// The operands aren't the same for thumb1 (no rotate operand).
MCInst TmpInst;
TmpInst.setOpcode(NewOpc);
TmpInst.addOperand(Inst.getOperand(0));
TmpInst.addOperand(Inst.getOperand(1));
TmpInst.addOperand(Inst.getOperand(3));
TmpInst.addOperand(Inst.getOperand(4));
Inst = TmpInst;
return true;
}
break;
case ARM::MOVsi: {
ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
// rrx shifts and asr/lsr of #32 are encoded as 0
if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr)
return false;
if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
// Shifting by zero is accepted as a vanilla 'MOVr'
MCInst TmpInst;
TmpInst.setOpcode(ARM::MOVr);
TmpInst.addOperand(Inst.getOperand(0));
TmpInst.addOperand(Inst.getOperand(1));
TmpInst.addOperand(Inst.getOperand(3));
TmpInst.addOperand(Inst.getOperand(4));
TmpInst.addOperand(Inst.getOperand(5));
Inst = TmpInst;
return true;
}
return false;
}
case ARM::ANDrsi:
case ARM::ORRrsi:
case ARM::EORrsi:
case ARM::BICrsi:
case ARM::SUBrsi:
case ARM::ADDrsi: {
unsigned newOpc;
ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
if (SOpc == ARM_AM::rrx) return false;
switch (Inst.getOpcode()) {
default: llvm_unreachable("unexpected opcode!");
case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
case ARM::EORrsi: newOpc = ARM::EORrr; break;
case ARM::BICrsi: newOpc = ARM::BICrr; break;
case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
}
// If the shift is by zero, use the non-shifted instruction definition.
// The exception is for right shifts, where 0 == 32
if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0 &&
!(SOpc == ARM_AM::lsr || SOpc == ARM_AM::asr)) {
MCInst TmpInst;
TmpInst.setOpcode(newOpc);
TmpInst.addOperand(Inst.getOperand(0));
TmpInst.addOperand(Inst.getOperand(1));
TmpInst.addOperand(Inst.getOperand(2));
TmpInst.addOperand(Inst.getOperand(4));
TmpInst.addOperand(Inst.getOperand(5));
TmpInst.addOperand(Inst.getOperand(6));
Inst = TmpInst;
return true;
}
return false;
}
case ARM::ITasm:
case ARM::t2IT: {
// Set up the IT block state according to the IT instruction we just
// matched.
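// For example, 'itte eq' records condition EQ plus a mask encoding the
// then/then/else pattern for the following three instructions.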
assert(!inITBlock() && "nested IT blocks?!");
startExplicitITBlock(ARMCC::CondCodes(Inst.getOperand(0).getImm()),
Inst.getOperand(1).getImm());
break;
}
case ARM::t2LSLrr:
case ARM::t2LSRrr:
case ARM::t2ASRrr:
case ARM::t2SBCrr:
case ARM::t2RORrr:
case ARM::t2BICrr:
// Assemblers should use the narrow encodings of these instructions when permissible.
if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
isARMLowRegister(Inst.getOperand(2).getReg())) &&
Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
!HasWideQualifier) {
unsigned NewOpc;
switch (Inst.getOpcode()) {
default: llvm_unreachable("unexpected opcode");
case ARM::t2LSLrr: NewOpc = ARM::tLSLrr; break;
case ARM::t2LSRrr: NewOpc = ARM::tLSRrr; break;
case ARM::t2ASRrr: NewOpc = ARM::tASRrr; break;
case ARM::t2SBCrr: NewOpc = ARM::tSBC; break;
case ARM::t2RORrr: NewOpc = ARM::tROR; break;
case ARM::t2BICrr: NewOpc = ARM::tBIC; break;
}
MCInst TmpInst;
TmpInst.setOpcode(NewOpc);
TmpInst.addOperand(Inst.getOperand(0));
TmpInst.addOperand(Inst.getOperand(5));
TmpInst.addOperand(Inst.getOperand(1));
TmpInst.addOperand(Inst.getOperand(2));
TmpInst.addOperand(Inst.getOperand(3));
TmpInst.addOperand(Inst.getOperand(4));
Inst = TmpInst;
return true;
}
return false;
case ARM::t2ANDrr:
case ARM::t2EORrr:
case ARM::t2ADCrr:
case ARM::t2ORRrr:
// Assemblers should use the narrow encodings of these instructions when permissible.
// These instructions are special in that they are commutable, so shorter encodings
// are available more often.
if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
isARMLowRegister(Inst.getOperand(2).getReg())) &&
(Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() ||
Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) &&
Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
!HasWideQualifier) {
unsigned NewOpc;
switch (Inst.getOpcode()) {
default: llvm_unreachable("unexpected opcode");
case ARM::t2ADCrr: NewOpc = ARM::tADC; break;
case ARM::t2ANDrr: NewOpc = ARM::tAND; break;
case ARM::t2EORrr: NewOpc = ARM::tEOR; break;
case ARM::t2ORRrr: NewOpc = ARM::tORR; break;
}
MCInst TmpInst;
TmpInst.setOpcode(NewOpc);
TmpInst.addOperand(Inst.getOperand(0));
TmpInst.addOperand(Inst.getOperand(5));
if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) {
TmpInst.addOperand(Inst.getOperand(1));
TmpInst.addOperand(Inst.getOperand(2));
} else {
TmpInst.addOperand(Inst.getOperand(2));
TmpInst.addOperand(Inst.getOperand(1));
}
TmpInst.addOperand(Inst.getOperand(3));
TmpInst.addOperand(Inst.getOperand(4));
Inst = TmpInst;
return true;
}
return false;
case ARM::MVE_VPST:
case ARM::MVE_VPTv16i8:
case ARM::MVE_VPTv8i16:
case ARM::MVE_VPTv4i32:
case ARM::MVE_VPTv16u8:
case ARM::MVE_VPTv8u16:
case ARM::MVE_VPTv4u32:
case ARM::MVE_VPTv16s8:
case ARM::MVE_VPTv8s16:
case ARM::MVE_VPTv4s32:
case ARM::MVE_VPTv4f32:
case ARM::MVE_VPTv8f16:
case ARM::MVE_VPTv16i8r:
case ARM::MVE_VPTv8i16r:
case ARM::MVE_VPTv4i32r:
case ARM::MVE_VPTv16u8r:
case ARM::MVE_VPTv8u16r:
case ARM::MVE_VPTv4u32r:
case ARM::MVE_VPTv16s8r:
case ARM::MVE_VPTv8s16r:
case ARM::MVE_VPTv4s32r:
case ARM::MVE_VPTv4f32r:
case ARM::MVE_VPTv8f16r: {
assert(!inVPTBlock() && "Nested VPT blocks are not allowed");
MCOperand &MO = Inst.getOperand(0);
VPTState.Mask = MO.getImm();
VPTState.CurPosition = 0;
break;
}
}
return false;
}
unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
// 16-bit thumb arithmetic instructions either require or preclude the 'S'
// suffix depending on whether they're in an IT block or not.
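// For example, outside an IT block only the flag-setting 'adds r0, r0, r1'
// has a 16-bit encoding; inside an IT block only the non-flag-setting 'add'
// form does.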
unsigned Opc = Inst.getOpcode();
const MCInstrDesc &MCID = MII.get(Opc);
if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
assert(MCID.hasOptionalDef() &&
"optionally flag setting instruction missing optional def operand");
assert(MCID.NumOperands == Inst.getNumOperands() &&
"operand count mismatch!");
// Find the optional-def operand (cc_out).
unsigned OpNo;
for (OpNo = 0;
OpNo < MCID.NumOperands && !MCID.OpInfo[OpNo].isOptionalDef();
++OpNo)
;
// If we're parsing Thumb1, reject it completely.
if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
return Match_RequiresFlagSetting;
// If we're parsing Thumb2, which form is legal depends on whether we're
// in an IT block.
if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
!inITBlock())
return Match_RequiresITBlock;
if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
inITBlock())
return Match_RequiresNotITBlock;
// LSL with zero immediate is not allowed in an IT block
if (Opc == ARM::tLSLri && Inst.getOperand(3).getImm() == 0 && inITBlock())
return Match_RequiresNotITBlock;
} else if (isThumbOne()) {
// Some Thumb1 encodings that support high registers only allow both
// registers to be from r0-r7 when Thumb2 is available.
if (Opc == ARM::tADDhirr && !hasV6MOps() &&
isARMLowRegister(Inst.getOperand(1).getReg()) &&
isARMLowRegister(Inst.getOperand(2).getReg()))
return Match_RequiresThumb2;
// Others only require ARMv6 or later.
else if (Opc == ARM::tMOVr && !hasV6Ops() &&
isARMLowRegister(Inst.getOperand(0).getReg()) &&
isARMLowRegister(Inst.getOperand(1).getReg()))
return Match_RequiresV6;
}
// Before ARMv8 the rules for when SP is allowed in t2MOVr are more complex
// than the loop below can handle, so it uses the GPRnopc register class and
// we do SP handling here.
if (Opc == ARM::t2MOVr && !hasV8Ops())
{
// SP as both source and destination is not allowed
if (Inst.getOperand(0).getReg() == ARM::SP &&
Inst.getOperand(1).getReg() == ARM::SP)
return Match_RequiresV8;
// When flags-setting SP as either source or destination is not allowed
if (Inst.getOperand(4).getReg() == ARM::CPSR &&
(Inst.getOperand(0).getReg() == ARM::SP ||
Inst.getOperand(1).getReg() == ARM::SP))
return Match_RequiresV8;
}
switch (Inst.getOpcode()) {
case ARM::VMRS:
case ARM::VMSR:
case ARM::VMRS_FPCXTS:
case ARM::VMRS_FPCXTNS:
case ARM::VMSR_FPCXTS:
case ARM::VMSR_FPCXTNS:
case ARM::VMRS_FPSCR_NZCVQC:
case ARM::VMSR_FPSCR_NZCVQC:
case ARM::FMSTAT:
case ARM::VMRS_VPR:
case ARM::VMRS_P0:
case ARM::VMSR_VPR:
case ARM::VMSR_P0:
// Use of SP for VMRS/VMSR is only allowed in ARM mode, or in Thumb mode
// from ARMv8-A onwards.
if (Inst.getOperand(0).isReg() && Inst.getOperand(0).getReg() == ARM::SP &&
(isThumb() && !hasV8Ops()))
return Match_InvalidOperand;
break;
default:
break;
}
for (unsigned I = 0; I < MCID.NumOperands; ++I)
if (MCID.OpInfo[I].RegClass == ARM::rGPRRegClassID) {
// rGPRRegClass excludes PC; it also excluded SP before ARMv8.
const auto &Op = Inst.getOperand(I);
if (!Op.isReg()) {
// This can happen in awkward cases with tied operands, e.g. a
// writeback load/store with a complex addressing mode in
// which there's an output operand corresponding to the
// updated written-back base register: the Tablegen-generated
// AsmMatcher will have written a placeholder operand to that
// slot in the form of an immediate 0, because it can't
// generate the register part of the complex addressing-mode
// operand ahead of time.
continue;
}
unsigned Reg = Op.getReg();
if ((Reg == ARM::SP) && !hasV8Ops())
return Match_RequiresV8;
else if (Reg == ARM::PC)
return Match_InvalidOperand;
}
return Match_Success;
}
namespace llvm {
template <> inline bool IsCPSRDead<MCInst>(const MCInst *Instr) {
return true; // In an assembly source, no need to second-guess
}
} // end namespace llvm
// Returns true if Inst is unpredictable if it is in an IT block, but is not
// the last instruction in the block.
bool ARMAsmParser::isITBlockTerminator(MCInst &Inst) const {
const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
// All branch & call instructions terminate IT blocks with the exception of
// SVC.
if (MCID.isTerminator() || (MCID.isCall() && Inst.getOpcode() != ARM::tSVC) ||
MCID.isReturn() || MCID.isBranch() || MCID.isIndirectBranch())
return true;
// Any arithmetic instruction which writes to the PC also terminates the IT
// block.
if (MCID.hasDefOfPhysReg(Inst, ARM::PC, *MRI))
return true;
return false;
}
unsigned ARMAsmParser::MatchInstruction(OperandVector &Operands, MCInst &Inst,
SmallVectorImpl<NearMissInfo> &NearMisses,
bool MatchingInlineAsm,
bool &EmitInITBlock,
MCStreamer &Out) {
// If we can't use an implicit IT block here, just match as normal.
if (inExplicitITBlock() || !isThumbTwo() || !useImplicitITThumb())
return MatchInstructionImpl(Operands, Inst, &NearMisses, MatchingInlineAsm);
// Try to match the instruction in an extension of the current IT block (if
// there is one).
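// For example, a second consecutive 'addeq' extends a pending implicit
// 'it eq' to 'itt eq' instead of starting a new block.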
if (inImplicitITBlock()) {
extendImplicitITBlock(ITState.Cond);
if (MatchInstructionImpl(Operands, Inst, nullptr, MatchingInlineAsm) ==
Match_Success) {
// The match succeeded, but we still have to check that the instruction is
// valid in this implicit IT block.
const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
if (MCID.isPredicable()) {
ARMCC::CondCodes InstCond =
(ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
.getImm();
ARMCC::CondCodes ITCond = currentITCond();
if (InstCond == ITCond) {
EmitInITBlock = true;
return Match_Success;
} else if (InstCond == ARMCC::getOppositeCondition(ITCond)) {
invertCurrentITCondition();
EmitInITBlock = true;
return Match_Success;
}
}
}
rewindImplicitITPosition();
}
// Finish the current IT block, and try to match outside any IT block.
flushPendingInstructions(Out);
unsigned PlainMatchResult =
MatchInstructionImpl(Operands, Inst, &NearMisses, MatchingInlineAsm);
if (PlainMatchResult == Match_Success) {
const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
if (MCID.isPredicable()) {
ARMCC::CondCodes InstCond =
(ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
.getImm();
// Some forms of the branch instruction have their own condition code
// fields, so can be conditionally executed without an IT block.
if (Inst.getOpcode() == ARM::tBcc || Inst.getOpcode() == ARM::t2Bcc) {
EmitInITBlock = false;
return Match_Success;
}
if (InstCond == ARMCC::AL) {
EmitInITBlock = false;
return Match_Success;
}
} else {
EmitInITBlock = false;
return Match_Success;
}
}
// Try to match in a new IT block. The matcher doesn't check the actual
// condition, so we create an IT block with a dummy condition, and fix it up
// once we know the actual condition.
startImplicitITBlock();
if (MatchInstructionImpl(Operands, Inst, nullptr, MatchingInlineAsm) ==
Match_Success) {
const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
if (MCID.isPredicable()) {
ITState.Cond =
(ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
.getImm();
EmitInITBlock = true;
return Match_Success;
}
}
discardImplicitITBlock();
// If none of these succeed, return the error we got when trying to match
// outside any IT blocks.
EmitInITBlock = false;
return PlainMatchResult;
}
static std::string ARMMnemonicSpellCheck(StringRef S, const FeatureBitset &FBS,
unsigned VariantID = 0);
static const char *getSubtargetFeatureName(uint64_t Val);
bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
OperandVector &Operands,
MCStreamer &Out, uint64_t &ErrorInfo,
bool MatchingInlineAsm) {
MCInst Inst;
unsigned MatchResult;
bool PendConditionalInstruction = false;
SmallVector<NearMissInfo, 4> NearMisses;
MatchResult = MatchInstruction(Operands, Inst, NearMisses, MatchingInlineAsm,
PendConditionalInstruction, Out);
switch (MatchResult) {
case Match_Success:
LLVM_DEBUG(dbgs() << "Parsed as: ";
Inst.dump_pretty(dbgs(), MII.getName(Inst.getOpcode()));
dbgs() << "\n");
// Context sensitive operand constraints aren't handled by the matcher,
// so check them here.
if (validateInstruction(Inst, Operands)) {
// Still progress the IT block, otherwise one wrong condition causes
// nasty cascading errors.
forwardITPosition();
forwardVPTPosition();
return true;
}
{ // processInstruction() updates inITBlock state, we need to save it away
bool wasInITBlock = inITBlock();
// Some instructions need post-processing to, for example, tweak which
// encoding is selected. Loop on it while changes happen so the
// individual transformations can chain off each other. E.g.,
// tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
while (processInstruction(Inst, Operands, Out))
LLVM_DEBUG(dbgs() << "Changed to: ";
Inst.dump_pretty(dbgs(), MII.getName(Inst.getOpcode()));
dbgs() << "\n");
// Only after the instruction is fully processed, we can validate it
if (wasInITBlock && hasV8Ops() && isThumb() &&
!isV8EligibleForIT(&Inst)) {
Warning(IDLoc, "deprecated instruction in IT block");
}
}
// Only move forward at the very end so that everything in validate
// and process gets a consistent answer about whether we're in an IT
// block.
forwardITPosition();
forwardVPTPosition();
// ITasm is an ARM mode pseudo-instruction that just sets the ITblock and
// doesn't actually encode.
if (Inst.getOpcode() == ARM::ITasm)
return false;
Inst.setLoc(IDLoc);
if (PendConditionalInstruction) {
PendingConditionalInsts.push_back(Inst);
if (isITBlockFull() || isITBlockTerminator(Inst))
flushPendingInstructions(Out);
} else {
Out.EmitInstruction(Inst, getSTI());
}
return false;
case Match_NearMisses:
ReportNearMisses(NearMisses, IDLoc, Operands);
return true;
case Match_MnemonicFail: {
FeatureBitset FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
std::string Suggestion = ARMMnemonicSpellCheck(
((ARMOperand &)*Operands[0]).getToken(), FBS);
return Error(IDLoc, "invalid instruction" + Suggestion,
((ARMOperand &)*Operands[0]).getLocRange());
}
}
llvm_unreachable("Implement any new match types added!");
}
/// parseDirective parses the ARM-specific directives.
bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
const MCObjectFileInfo::Environment Format =
getContext().getObjectFileInfo()->getObjectFileType();
bool IsMachO = Format == MCObjectFileInfo::IsMachO;
bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
StringRef IDVal = DirectiveID.getIdentifier();
if (IDVal == ".word")
parseLiteralValues(4, DirectiveID.getLoc());
else if (IDVal == ".short" || IDVal == ".hword")
parseLiteralValues(2, DirectiveID.getLoc());
else if (IDVal == ".thumb")
parseDirectiveThumb(DirectiveID.getLoc());
else if (IDVal == ".arm")
parseDirectiveARM(DirectiveID.getLoc());
else if (IDVal == ".thumb_func")
parseDirectiveThumbFunc(DirectiveID.getLoc());
else if (IDVal == ".code")
parseDirectiveCode(DirectiveID.getLoc());
else if (IDVal == ".syntax")
parseDirectiveSyntax(DirectiveID.getLoc());
else if (IDVal == ".unreq")
parseDirectiveUnreq(DirectiveID.getLoc());
else if (IDVal == ".fnend")
parseDirectiveFnEnd(DirectiveID.getLoc());
else if (IDVal == ".cantunwind")
parseDirectiveCantUnwind(DirectiveID.getLoc());
else if (IDVal == ".personality")
parseDirectivePersonality(DirectiveID.getLoc());
else if (IDVal == ".handlerdata")
parseDirectiveHandlerData(DirectiveID.getLoc());
else if (IDVal == ".setfp")
parseDirectiveSetFP(DirectiveID.getLoc());
else if (IDVal == ".pad")
parseDirectivePad(DirectiveID.getLoc());
else if (IDVal == ".save")
parseDirectiveRegSave(DirectiveID.getLoc(), false);
else if (IDVal == ".vsave")
parseDirectiveRegSave(DirectiveID.getLoc(), true);
else if (IDVal == ".ltorg" || IDVal == ".pool")
parseDirectiveLtorg(DirectiveID.getLoc());
else if (IDVal == ".even")
parseDirectiveEven(DirectiveID.getLoc());
else if (IDVal == ".personalityindex")
parseDirectivePersonalityIndex(DirectiveID.getLoc());
else if (IDVal == ".unwind_raw")
parseDirectiveUnwindRaw(DirectiveID.getLoc());
else if (IDVal == ".movsp")
parseDirectiveMovSP(DirectiveID.getLoc());
else if (IDVal == ".arch_extension")
parseDirectiveArchExtension(DirectiveID.getLoc());
else if (IDVal == ".align")
return parseDirectiveAlign(DirectiveID.getLoc()); // Use Generic on failure.
else if (IDVal == ".thumb_set")
parseDirectiveThumbSet(DirectiveID.getLoc());
else if (IDVal == ".inst")
parseDirectiveInst(DirectiveID.getLoc());
else if (IDVal == ".inst.n")
parseDirectiveInst(DirectiveID.getLoc(), 'n');
else if (IDVal == ".inst.w")
parseDirectiveInst(DirectiveID.getLoc(), 'w');
else if (!IsMachO && !IsCOFF) {
if (IDVal == ".arch")
parseDirectiveArch(DirectiveID.getLoc());
else if (IDVal == ".cpu")
parseDirectiveCPU(DirectiveID.getLoc());
else if (IDVal == ".eabi_attribute")
parseDirectiveEabiAttr(DirectiveID.getLoc());
else if (IDVal == ".fpu")
parseDirectiveFPU(DirectiveID.getLoc());
else if (IDVal == ".fnstart")
parseDirectiveFnStart(DirectiveID.getLoc());
else if (IDVal == ".object_arch")
parseDirectiveObjectArch(DirectiveID.getLoc());
else if (IDVal == ".tlsdescseq")
parseDirectiveTLSDescSeq(DirectiveID.getLoc());
else
return true;
} else
return true;
return false;
}
/// parseLiteralValues
/// ::= .hword expression [, expression]*
/// ::= .short expression [, expression]*
/// ::= .word expression [, expression]*
bool ARMAsmParser::parseLiteralValues(unsigned Size, SMLoc L) {
auto parseOne = [&]() -> bool {
const MCExpr *Value;
if (getParser().parseExpression(Value))
return true;
getParser().getStreamer().EmitValue(Value, Size, L);
return false;
};
return (parseMany(parseOne));
}
/// parseDirectiveThumb
/// ::= .thumb
bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive") ||
check(!hasThumb(), L, "target does not support Thumb mode"))
return true;
if (!isThumb())
SwitchMode();
getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
return false;
}
/// parseDirectiveARM
/// ::= .arm
bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive") ||
check(!hasARM(), L, "target does not support ARM mode"))
return true;
if (isThumb())
SwitchMode();
getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
return false;
}
void ARMAsmParser::doBeforeLabelEmit(MCSymbol *Symbol) {
// We need to flush the current implicit IT block on a label, because it is
// not legal to branch into an IT block.
flushPendingInstructions(getStreamer());
}
void ARMAsmParser::onLabelParsed(MCSymbol *Symbol) {
if (NextSymbolIsThumb) {
getParser().getStreamer().EmitThumbFunc(Symbol);
NextSymbolIsThumb = false;
}
}
/// parseDirectiveThumbFunc
/// ::= .thumbfunc symbol_name
bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
MCAsmParser &Parser = getParser();
const auto Format = getContext().getObjectFileInfo()->getObjectFileType();
bool IsMachO = Format == MCObjectFileInfo::IsMachO;
// Darwin asm has an optional function name after the .thumb_func directive;
// ELF doesn't.
if (IsMachO) {
if (Parser.getTok().is(AsmToken::Identifier) ||
Parser.getTok().is(AsmToken::String)) {
MCSymbol *Func = getParser().getContext().getOrCreateSymbol(
Parser.getTok().getIdentifier());
getParser().getStreamer().EmitThumbFunc(Func);
Parser.Lex();
if (parseToken(AsmToken::EndOfStatement,
"unexpected token in '.thumb_func' directive"))
return true;
return false;
}
}
if (parseToken(AsmToken::EndOfStatement,
"unexpected token in '.thumb_func' directive"))
return true;
NextSymbolIsThumb = true;
return false;
}
/// parseDirectiveSyntax
/// ::= .syntax unified | divided
bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
MCAsmParser &Parser = getParser();
const AsmToken &Tok = Parser.getTok();
if (Tok.isNot(AsmToken::Identifier)) {
Error(L, "unexpected token in .syntax directive");
return false;
}
StringRef Mode = Tok.getString();
Parser.Lex();
if (check(Mode == "divided" || Mode == "DIVIDED", L,
"'.syntax divided' arm assembly not supported") ||
check(Mode != "unified" && Mode != "UNIFIED", L,
"unrecognized syntax mode in .syntax directive") ||
parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
return true;
// TODO tell the MC streamer the mode
// getParser().getStreamer().Emit???();
return false;
}
/// parseDirectiveCode
/// ::= .code 16 | 32
bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
MCAsmParser &Parser = getParser();
const AsmToken &Tok = Parser.getTok();
if (Tok.isNot(AsmToken::Integer))
return Error(L, "unexpected token in .code directive");
int64_t Val = Parser.getTok().getIntVal();
if (Val != 16 && Val != 32) {
Error(L, "invalid operand to .code directive");
return false;
}
Parser.Lex();
if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
return true;
if (Val == 16) {
if (!hasThumb())
return Error(L, "target does not support Thumb mode");
if (!isThumb())
SwitchMode();
getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
} else {
if (!hasARM())
return Error(L, "target does not support ARM mode");
if (isThumb())
SwitchMode();
getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
}
return false;
}
/// parseDirectiveReq
/// ::= name .req registername
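/// For example, 'fp .req r11' lets subsequent instructions refer to r11 as
/// 'fp'.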
bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
MCAsmParser &Parser = getParser();
Parser.Lex(); // Eat the '.req' token.
unsigned Reg;
SMLoc SRegLoc, ERegLoc;
if (check(ParseRegister(Reg, SRegLoc, ERegLoc), SRegLoc,
"register name expected") ||
parseToken(AsmToken::EndOfStatement,
"unexpected input in .req directive."))
return true;
if (RegisterReqs.insert(std::make_pair(Name, Reg)).first->second != Reg)
return Error(SRegLoc,
"redefinition of '" + Name + "' does not match original.");
return false;
}
/// parseDirectiveUnreq
/// ::= .unreq registername
bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
MCAsmParser &Parser = getParser();
if (Parser.getTok().isNot(AsmToken::Identifier))
return Error(L, "unexpected input in .unreq directive.");
RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
Parser.Lex(); // Eat the identifier.
if (parseToken(AsmToken::EndOfStatement,
"unexpected input in '.unreq' directive"))
return true;
return false;
}
// After changing arch/CPU, try to put the ARM/Thumb mode back to what it was
// before, if supported by the new target, or emit mapping symbols for the mode
// switch.
void ARMAsmParser::FixModeAfterArchChange(bool WasThumb, SMLoc Loc) {
if (WasThumb != isThumb()) {
if (WasThumb && hasThumb()) {
// Stay in Thumb mode
SwitchMode();
} else if (!WasThumb && hasARM()) {
// Stay in ARM mode
SwitchMode();
} else {
// Mode switch forced, because the new arch doesn't support the old mode.
getParser().getStreamer().EmitAssemblerFlag(isThumb() ? MCAF_Code16
: MCAF_Code32);
// Warn about the implicit mode switch. GAS does not switch modes here,
// but instead stays in the old mode, reporting an error on any following
// instructions as the mode does not exist on the target.
Warning(Loc, Twine("new target does not support ") +
(WasThumb ? "thumb" : "arm") + " mode, switching to " +
(!WasThumb ? "thumb" : "arm") + " mode");
}
}
}
/// parseDirectiveArch
/// ::= .arch token
bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
StringRef Arch = getParser().parseStringToEndOfStatement().trim();
ARM::ArchKind ID = ARM::parseArch(Arch);
if (ID == ARM::ArchKind::INVALID)
return Error(L, "Unknown arch name");
bool WasThumb = isThumb();
Triple T;
MCSubtargetInfo &STI = copySTI();
STI.setDefaultFeatures("", ("+" + ARM::getArchName(ID)).str());
setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
FixModeAfterArchChange(WasThumb, L);
getTargetStreamer().emitArch(ID);
return false;
}
/// parseDirectiveEabiAttr
/// ::= .eabi_attribute int, int [, "str"]
/// ::= .eabi_attribute Tag_name, int [, "str"]
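/// For example, '.eabi_attribute 6, 10' sets Tag_CPU_arch (tag 6) to the
/// value denoting ARM v7 (10, per the ARM EABI attribute encoding).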
bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
MCAsmParser &Parser = getParser();
int64_t Tag;
SMLoc TagLoc;
TagLoc = Parser.getTok().getLoc();
if (Parser.getTok().is(AsmToken::Identifier)) {
StringRef Name = Parser.getTok().getIdentifier();
Tag = ARMBuildAttrs::AttrTypeFromString(Name);
if (Tag == -1) {
Error(TagLoc, "attribute name not recognised: " + Name);
return false;
}
Parser.Lex();
} else {
const MCExpr *AttrExpr;
TagLoc = Parser.getTok().getLoc();
if (Parser.parseExpression(AttrExpr))
return true;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(AttrExpr);
if (check(!CE, TagLoc, "expected numeric constant"))
return true;
Tag = CE->getValue();
}
if (Parser.parseToken(AsmToken::Comma, "comma expected"))
return true;
StringRef StringValue = "";
bool IsStringValue = false;
int64_t IntegerValue = 0;
bool IsIntegerValue = false;
if (Tag == ARMBuildAttrs::CPU_raw_name || Tag == ARMBuildAttrs::CPU_name)
IsStringValue = true;
else if (Tag == ARMBuildAttrs::compatibility) {
IsStringValue = true;
IsIntegerValue = true;
} else if (Tag < 32 || Tag % 2 == 0)
IsIntegerValue = true;
else if (Tag % 2 == 1)
IsStringValue = true;
else
llvm_unreachable("invalid tag type");
if (IsIntegerValue) {
const MCExpr *ValueExpr;
SMLoc ValueExprLoc = Parser.getTok().getLoc();
if (Parser.parseExpression(ValueExpr))
return true;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ValueExpr);
if (!CE)
return Error(ValueExprLoc, "expected numeric constant");
IntegerValue = CE->getValue();
}
if (Tag == ARMBuildAttrs::compatibility) {
if (Parser.parseToken(AsmToken::Comma, "comma expected"))
return true;
}
if (IsStringValue) {
if (Parser.getTok().isNot(AsmToken::String))
return Error(Parser.getTok().getLoc(), "bad string constant");
StringValue = Parser.getTok().getStringContents();
Parser.Lex();
}
if (Parser.parseToken(AsmToken::EndOfStatement,
"unexpected token in '.eabi_attribute' directive"))
return true;
if (IsIntegerValue && IsStringValue) {
assert(Tag == ARMBuildAttrs::compatibility);
getTargetStreamer().emitIntTextAttribute(Tag, IntegerValue, StringValue);
} else if (IsIntegerValue)
getTargetStreamer().emitAttribute(Tag, IntegerValue);
else if (IsStringValue)
getTargetStreamer().emitTextAttribute(Tag, StringValue);
return false;
}
/// parseDirectiveCPU
/// ::= .cpu str
bool ARMAsmParser::parseDirectiveCPU(SMLoc L) {
StringRef CPU = getParser().parseStringToEndOfStatement().trim();
getTargetStreamer().emitTextAttribute(ARMBuildAttrs::CPU_name, CPU);
// FIXME: This is using table-gen data, but should be moved to
// ARMTargetParser once that is table-gen'd.
if (!getSTI().isCPUStringValid(CPU))
return Error(L, "Unknown CPU name");
bool WasThumb = isThumb();
MCSubtargetInfo &STI = copySTI();
STI.setDefaultFeatures(CPU, "");
setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
FixModeAfterArchChange(WasThumb, L);
return false;
}
/// parseDirectiveFPU
/// ::= .fpu str
bool ARMAsmParser::parseDirectiveFPU(SMLoc L) {
SMLoc FPUNameLoc = getTok().getLoc();
StringRef FPU = getParser().parseStringToEndOfStatement().trim();
unsigned ID = ARM::parseFPU(FPU);
std::vector<StringRef> Features;
if (!ARM::getFPUFeatures(ID, Features))
return Error(FPUNameLoc, "Unknown FPU name");
MCSubtargetInfo &STI = copySTI();
for (auto Feature : Features)
STI.ApplyFeatureFlag(Feature);
setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
getTargetStreamer().emitFPU(ID);
return false;
}
/// parseDirectiveFnStart
/// ::= .fnstart
bool ARMAsmParser::parseDirectiveFnStart(SMLoc L) {
if (parseToken(AsmToken::EndOfStatement,
"unexpected token in '.fnstart' directive"))
return true;
if (UC.hasFnStart()) {
Error(L, ".fnstart starts before the end of previous one");
UC.emitFnStartLocNotes();
return true;
}
// Reset the unwind directives parser state
UC.reset();
getTargetStreamer().emitFnStart();
UC.recordFnStart(L);
return false;
}
/// parseDirectiveFnEnd
/// ::= .fnend
bool ARMAsmParser::parseDirectiveFnEnd(SMLoc L) {
if (parseToken(AsmToken::EndOfStatement,
"unexpected token in '.fnend' directive"))
return true;
// Check the ordering of unwind directives
if (!UC.hasFnStart())
return Error(L, ".fnstart must precede .fnend directive");
// Reset the unwind directives parser state
getTargetStreamer().emitFnEnd();
UC.reset();
return false;
}
/// parseDirectiveCantUnwind
/// ::= .cantunwind
bool ARMAsmParser::parseDirectiveCantUnwind(SMLoc L) {
if (parseToken(AsmToken::EndOfStatement,
"unexpected token in '.cantunwind' directive"))
return true;
UC.recordCantUnwind(L);
// Check the ordering of unwind directives
if (check(!UC.hasFnStart(), L, ".fnstart must precede .cantunwind directive"))
return true;
if (UC.hasHandlerData()) {
Error(L, ".cantunwind can't be used with .handlerdata directive");
UC.emitHandlerDataLocNotes();
return true;
}
if (UC.hasPersonality()) {
Error(L, ".cantunwind can't be used with .personality directive");
UC.emitPersonalityLocNotes();
return true;
}
getTargetStreamer().emitCantUnwind();
return false;
}
/// parseDirectivePersonality
/// ::= .personality name
bool ARMAsmParser::parseDirectivePersonality(SMLoc L) {
MCAsmParser &Parser = getParser();
bool HasExistingPersonality = UC.hasPersonality();
// Parse the name of the personality routine
if (Parser.getTok().isNot(AsmToken::Identifier))
return Error(L, "unexpected input in .personality directive.");
StringRef Name(Parser.getTok().getIdentifier());
Parser.Lex();
if (parseToken(AsmToken::EndOfStatement,
"unexpected token in '.personality' directive"))
return true;
UC.recordPersonality(L);
// Check the ordering of unwind directives
if (!UC.hasFnStart())
return Error(L, ".fnstart must precede .personality directive");
if (UC.cantUnwind()) {
Error(L, ".personality can't be used with .cantunwind directive");
UC.emitCantUnwindLocNotes();
return true;
}
if (UC.hasHandlerData()) {
Error(L, ".personality must precede .handlerdata directive");
UC.emitHandlerDataLocNotes();
return true;
}
if (HasExistingPersonality) {
Error(L, "multiple personality directives");
UC.emitPersonalityLocNotes();
return true;
}
MCSymbol *PR = getParser().getContext().getOrCreateSymbol(Name);
getTargetStreamer().emitPersonality(PR);
return false;
}
/// parseDirectiveHandlerData
/// ::= .handlerdata
bool ARMAsmParser::parseDirectiveHandlerData(SMLoc L) {
if (parseToken(AsmToken::EndOfStatement,
"unexpected token in '.handlerdata' directive"))
return true;
UC.recordHandlerData(L);
// Check the ordering of unwind directives
if (!UC.hasFnStart())
return Error(L, ".fnstart must precede .personality directive");
if (UC.cantUnwind()) {
Error(L, ".handlerdata can't be used with .cantunwind directive");
UC.emitCantUnwindLocNotes();
return true;
}
getTargetStreamer().emitHandlerData();
return false;
}
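// Illustrative ordering (a sketch; __gxx_personality_v0 stands in for
// whatever personality routine the EH runtime actually uses):
//   .fnstart
//   .personality __gxx_personality_v0   @ must precede .handlerdata
//   .handlerdata
//   .fnend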
/// parseDirectiveSetFP
/// ::= .setfp fpreg, spreg [, offset]
bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) {
MCAsmParser &Parser = getParser();
// Check the ordering of unwind directives
if (check(!UC.hasFnStart(), L, ".fnstart must precede .setfp directive") ||
check(UC.hasHandlerData(), L,
".setfp must precede .handlerdata directive"))
return true;
// Parse fpreg
SMLoc FPRegLoc = Parser.getTok().getLoc();
int FPReg = tryParseRegister();
if (check(FPReg == -1, FPRegLoc, "frame pointer register expected") ||
Parser.parseToken(AsmToken::Comma, "comma expected"))
return true;
// Parse spreg
SMLoc SPRegLoc = Parser.getTok().getLoc();
int SPReg = tryParseRegister();
if (check(SPReg == -1, SPRegLoc, "stack pointer register expected") ||
check(SPReg != ARM::SP && SPReg != UC.getFPReg(), SPRegLoc,
"register should be either $sp or the latest fp register"))
return true;
// Update the frame pointer register
UC.saveFPReg(FPReg);
// Parse offset
int64_t Offset = 0;
if (Parser.parseOptionalToken(AsmToken::Comma)) {
if (Parser.getTok().isNot(AsmToken::Hash) &&
Parser.getTok().isNot(AsmToken::Dollar))
return Error(Parser.getTok().getLoc(), "'#' expected");
Parser.Lex(); // skip hash token.
const MCExpr *OffsetExpr;
SMLoc ExLoc = Parser.getTok().getLoc();
SMLoc EndLoc;
if (getParser().parseExpression(OffsetExpr, EndLoc))
return Error(ExLoc, "malformed setfp offset");
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
if (check(!CE, ExLoc, "setfp offset must be an immediate"))
return true;
Offset = CE->getValue();
}
if (Parser.parseToken(AsmToken::EndOfStatement))
return true;
getTargetStreamer().emitSetFP(static_cast<unsigned>(FPReg),
static_cast<unsigned>(SPReg), Offset);
return false;
}
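// Illustrative usage (a sketch): the second register must be sp, or the
// frame-pointer register recorded by a previous .setfp:
//   .setfp fp, sp, #8    @ fp = sp + 8; the offset needs '#' or '$'
//   .setfp r7, sp        @ offset omitted, defaults to 0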
/// parseDirectivePad
/// ::= .pad offset
bool ARMAsmParser::parseDirectivePad(SMLoc L) {
MCAsmParser &Parser = getParser();
// Check the ordering of unwind directives
if (!UC.hasFnStart())
return Error(L, ".fnstart must precede .pad directive");
if (UC.hasHandlerData())
return Error(L, ".pad must precede .handlerdata directive");
// Parse the offset
if (Parser.getTok().isNot(AsmToken::Hash) &&
Parser.getTok().isNot(AsmToken::Dollar))
return Error(Parser.getTok().getLoc(), "'#' expected");
Parser.Lex(); // skip hash token.
const MCExpr *OffsetExpr;
SMLoc ExLoc = Parser.getTok().getLoc();
SMLoc EndLoc;
if (getParser().parseExpression(OffsetExpr, EndLoc))
return Error(ExLoc, "malformed pad offset");
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
if (!CE)
return Error(ExLoc, "pad offset must be an immediate");
if (parseToken(AsmToken::EndOfStatement,
"unexpected token in '.pad' directive"))
return true;
getTargetStreamer().emitPad(CE->getValue());
return false;
}
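// Illustrative usage (a sketch; the operand must be a '#'- or
// '$'-prefixed constant):
//   .pad #16          @ records a 16-byte stack adjustment for unwinding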
/// parseDirectiveRegSave
/// ::= .save { registers }
/// ::= .vsave { registers }
bool ARMAsmParser::parseDirectiveRegSave(SMLoc L, bool IsVector) {
// Check the ordering of unwind directives
if (!UC.hasFnStart())
return Error(L, ".fnstart must precede .save or .vsave directives");
if (UC.hasHandlerData())
return Error(L, ".save or .vsave must precede .handlerdata directive");
// RAII object to make sure parsed operands are deleted.
SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;
// Parse the register list
if (parseRegisterList(Operands) ||
parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
return true;
ARMOperand &Op = (ARMOperand &)*Operands[0];
if (!IsVector && !Op.isRegList())
return Error(L, ".save expects GPR registers");
if (IsVector && !Op.isDPRRegList())
return Error(L, ".vsave expects DPR registers");
getTargetStreamer().emitRegSave(Op.getRegList(), IsVector);
return false;
}
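// Illustrative usage (a sketch):
//   .save  {r4-r7, lr}    @ GPR register list
//   .vsave {d8-d15}       @ DPR register list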
/// parseDirectiveInst
/// ::= .inst opcode [, ...]
/// ::= .inst.n opcode [, ...]
/// ::= .inst.w opcode [, ...]
bool ARMAsmParser::parseDirectiveInst(SMLoc Loc, char Suffix) {
int Width = 4;
if (isThumb()) {
switch (Suffix) {
case 'n':
Width = 2;
break;
case 'w':
break;
default:
Width = 0;
break;
}
} else {
if (Suffix)
return Error(Loc, "width suffixes are invalid in ARM mode");
}
auto parseOne = [&]() -> bool {
const MCExpr *Expr;
if (getParser().parseExpression(Expr))
return true;
const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
if (!Value) {
return Error(Loc, "expected constant expression");
}
char CurSuffix = Suffix;
switch (Width) {
case 2:
if (Value->getValue() > 0xffff)
return Error(Loc, "inst.n operand is too big, use inst.w instead");
break;
case 4:
if (Value->getValue() > 0xffffffff)
return Error(Loc, StringRef(Suffix ? "inst.w" : "inst") +
" operand is too big");
break;
case 0:
// Thumb mode, no width indicated. Guess from the opcode, if possible.
if (Value->getValue() < 0xe800)
CurSuffix = 'n';
else if (Value->getValue() >= 0xe8000000)
CurSuffix = 'w';
else
return Error(Loc, "cannot determine Thumb instruction size, "
"use inst.n/inst.w instead");
break;
default:
llvm_unreachable("only supported widths are 2 and 4");
}
getTargetStreamer().emitInst(Value->getValue(), CurSuffix);
return false;
};
if (parseOptionalToken(AsmToken::EndOfStatement))
return Error(Loc, "expected expression following directive");
if (parseMany(parseOne))
return true;
return false;
}
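// Illustrative usage (a sketch):
//   .inst   0xe1a00000    @ ARM mode: one 32-bit word ("mov r0, r0")
//   .inst.n 0xbf00        @ Thumb: forced 16-bit encoding ("nop")
//   .inst.w 0xf3af8000    @ Thumb: forced 32-bit encoding ("nop.w")
// With no suffix in Thumb mode the width is guessed as above: opcodes
// below 0xe800 are 16-bit, opcodes at or above 0xe8000000 are 32-bit,
// and anything in between is rejected.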
/// parseDirectiveLtorg
/// ::= .ltorg | .pool
bool ARMAsmParser::parseDirectiveLtorg(SMLoc L) {
if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
return true;
getTargetStreamer().emitCurrentConstantPool();
return false;
}
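// Illustrative usage (a sketch):
//   ldr r0, =0x12345678   @ may allocate a literal-pool entry
//   ...
//   .ltorg                @ flush the pool while the load is still in range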
bool ARMAsmParser::parseDirectiveEven(SMLoc L) {
const MCSection *Section = getStreamer().getCurrentSectionOnly();
if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
return true;
if (!Section) {
getStreamer().InitSections(false);
Section = getStreamer().getCurrentSectionOnly();
}
assert(Section && "must have section to emit alignment");
if (Section->UseCodeAlign())
getStreamer().EmitCodeAlignment(2);
else
getStreamer().EmitValueToAlignment(2);
return false;
}
/// parseDirectivePersonalityIndex
/// ::= .personalityindex index
bool ARMAsmParser::parseDirectivePersonalityIndex(SMLoc L) {
MCAsmParser &Parser = getParser();
bool HasExistingPersonality = UC.hasPersonality();
const MCExpr *IndexExpression;
SMLoc IndexLoc = Parser.getTok().getLoc();
if (Parser.parseExpression(IndexExpression) ||
parseToken(AsmToken::EndOfStatement,
"unexpected token in '.personalityindex' directive")) {
return true;
}
UC.recordPersonalityIndex(L);
if (!UC.hasFnStart()) {
return Error(L, ".fnstart must precede .personalityindex directive");
}
if (UC.cantUnwind()) {
Error(L, ".personalityindex cannot be used with .cantunwind");
UC.emitCantUnwindLocNotes();
return true;
}
if (UC.hasHandlerData()) {
Error(L, ".personalityindex must precede .handlerdata directive");
UC.emitHandlerDataLocNotes();
return true;
}
if (HasExistingPersonality) {
Error(L, "multiple personality directives");
UC.emitPersonalityLocNotes();
return true;
}
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(IndexExpression);
if (!CE)
return Error(IndexLoc, "index must be a constant number");
if (CE->getValue() < 0 || CE->getValue() >= ARM::EHABI::NUM_PERSONALITY_INDEX)
return Error(IndexLoc,
"personality routine index should be in range [0-3]");
getTargetStreamer().emitPersonalityIndex(CE->getValue());
return false;
}
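// Illustrative usage (a sketch):
//   .fnstart
//   .personalityindex 0   @ selects one of the __aeabi_unwind_cpp_pr*
//                         @ personality routines by index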
/// parseDirectiveUnwindRaw
/// ::= .unwind_raw offset, opcode [, opcode...]
bool ARMAsmParser::parseDirectiveUnwindRaw(SMLoc L) {
MCAsmParser &Parser = getParser();
int64_t StackOffset;
const MCExpr *OffsetExpr;
SMLoc OffsetLoc = getLexer().getLoc();
if (!UC.hasFnStart())
return Error(L, ".fnstart must precede .unwind_raw directives");
if (getParser().parseExpression(OffsetExpr))
return Error(OffsetLoc, "expected expression");
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
if (!CE)
return Error(OffsetLoc, "offset must be a constant");
StackOffset = CE->getValue();
if (Parser.parseToken(AsmToken::Comma, "expected comma"))
return true;
SmallVector<uint8_t, 16> Opcodes;
auto parseOne = [&]() -> bool {
const MCExpr *OE;
SMLoc OpcodeLoc = getLexer().getLoc();
if (check(getLexer().is(AsmToken::EndOfStatement) ||
Parser.parseExpression(OE),
OpcodeLoc, "expected opcode expression"))
return true;
const MCConstantExpr *OC = dyn_cast<MCConstantExpr>(OE);
if (!OC)
return Error(OpcodeLoc, "opcode value must be a constant");
const int64_t Opcode = OC->getValue();
if (Opcode & ~0xff)
return Error(OpcodeLoc, "invalid opcode");
Opcodes.push_back(uint8_t(Opcode));
return false;
};
// Must have at least 1 element
SMLoc OpcodeLoc = getLexer().getLoc();
if (parseOptionalToken(AsmToken::EndOfStatement))
return Error(OpcodeLoc, "expected opcode expression");
if (parseMany(parseOne))
return true;
getTargetStreamer().emitUnwindRaw(StackOffset, Opcodes);
return false;
}
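// Illustrative usage (a sketch): a stack offset followed by raw EHABI
// unwind opcode bytes, each of which must fit in 8 bits:
//   .unwind_raw 16, 0xb0   @ 0xb0 is the EHABI "finish" opcode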
/// parseDirectiveTLSDescSeq
/// ::= .tlsdescseq tls-variable
bool ARMAsmParser::parseDirectiveTLSDescSeq(SMLoc L) {
MCAsmParser &Parser = getParser();
if (getLexer().isNot(AsmToken::Identifier))
return TokError("expected variable after '.tlsdescseq' directive");
const MCSymbolRefExpr *SRE =
MCSymbolRefExpr::create(Parser.getTok().getIdentifier(),
MCSymbolRefExpr::VK_ARM_TLSDESCSEQ, getContext());
Lex();
if (parseToken(AsmToken::EndOfStatement,
"unexpected token in '.tlsdescseq' directive"))
return true;
getTargetStreamer().AnnotateTLSDescriptorSequence(SRE);
return false;
}
/// parseDirectiveMovSP
/// ::= .movsp reg [, #offset]
bool ARMAsmParser::parseDirectiveMovSP(SMLoc L) {
MCAsmParser &Parser = getParser();
if (!UC.hasFnStart())
return Error(L, ".fnstart must precede .movsp directives");
if (UC.getFPReg() != ARM::SP)
return Error(L, "unexpected .movsp directive");
SMLoc SPRegLoc = Parser.getTok().getLoc();
int SPReg = tryParseRegister();
if (SPReg == -1)
return Error(SPRegLoc, "register expected");
if (SPReg == ARM::SP || SPReg == ARM::PC)
return Error(SPRegLoc, "sp and pc are not permitted in .movsp directive");
int64_t Offset = 0;
if (Parser.parseOptionalToken(AsmToken::Comma)) {
if (Parser.parseToken(AsmToken::Hash, "expected #constant"))
return true;
const MCExpr *OffsetExpr;
SMLoc OffsetLoc = Parser.getTok().getLoc();
if (Parser.parseExpression(OffsetExpr))
return Error(OffsetLoc, "malformed offset expression");
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
if (!CE)
return Error(OffsetLoc, "offset must be an immediate constant");
Offset = CE->getValue();
}
if (parseToken(AsmToken::EndOfStatement,
"unexpected token in '.movsp' directive"))
return true;
getTargetStreamer().emitMovSP(SPReg, Offset);
UC.saveFPReg(SPReg);
return false;
}
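// Illustrative usage (a sketch; only legal while sp is still the frame
// pointer, and r4 is just an example register):
//   .fnstart
//   .movsp r4, #8     @ record that r4 now addresses the stack, with an
//                     @ optional '#' constant offset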
/// parseDirectiveObjectArch
/// ::= .object_arch name
bool ARMAsmParser::parseDirectiveObjectArch(SMLoc L) {
MCAsmParser &Parser = getParser();
if (getLexer().isNot(AsmToken::Identifier))
return Error(getLexer().getLoc(), "unexpected token");
StringRef Arch = Parser.getTok().getString();
SMLoc ArchLoc = Parser.getTok().getLoc();
Lex();
ARM::ArchKind ID = ARM::parseArch(Arch);
if (ID == ARM::ArchKind::INVALID)
return Error(ArchLoc, "unknown architecture '" + Arch + "'");
if (parseToken(AsmToken::EndOfStatement))
return true;
getTargetStreamer().emitObjectArch(ID);
return false;
}
/// parseDirectiveAlign
/// ::= .align
bool ARMAsmParser::parseDirectiveAlign(SMLoc L) {
// NOTE: if this is not the end of the statement, fall back to the
// target-agnostic handling for this directive, which handles it correctly.
if (parseOptionalToken(AsmToken::EndOfStatement)) {
// '.align' is handled target-specifically here to mean 2**2 (4-byte) alignment.
const MCSection *Section = getStreamer().getCurrentSectionOnly();
assert(Section && "must have section to emit alignment");
if (Section->UseCodeAlign())
getStreamer().EmitCodeAlignment(4, 0);
else
getStreamer().EmitValueToAlignment(4, 0, 1, 0);
return false;
}
return true;
}
/// parseDirectiveThumbSet
/// ::= .thumb_set name, value
bool ARMAsmParser::parseDirectiveThumbSet(SMLoc L) {
MCAsmParser &Parser = getParser();
StringRef Name;
if (check(Parser.parseIdentifier(Name),
"expected identifier after '.thumb_set'") ||
parseToken(AsmToken::Comma, "expected comma after name '" + Name + "'"))
return true;
MCSymbol *Sym;
const MCExpr *Value;
if (MCParserUtils::parseAssignmentExpression(Name, /* allow_redef */ true,
Parser, Sym, Value))
return true;
getTargetStreamer().emitThumbSet(Sym, Value);
return false;
}
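// Illustrative usage (a sketch; 'alias' and 'target' are hypothetical
// symbol names):
//   .thumb_set alias, target   @ like .set, but also marks 'alias' as
//                              @ a Thumb function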
/// Force static initialization.
extern "C" void LLVMInitializeARMAsmParser() {
RegisterMCAsmParser<ARMAsmParser> X(getTheARMLETarget());
RegisterMCAsmParser<ARMAsmParser> Y(getTheARMBETarget());
RegisterMCAsmParser<ARMAsmParser> A(getTheThumbLETarget());
RegisterMCAsmParser<ARMAsmParser> B(getTheThumbBETarget());
}
#define GET_REGISTER_MATCHER
#define GET_SUBTARGET_FEATURE_NAME
#define GET_MATCHER_IMPLEMENTATION
#define GET_MNEMONIC_SPELL_CHECKER
#include "ARMGenAsmMatcher.inc"
// Some diagnostics need to vary with subtarget features, so they are handled
// here. For example, the DPR class has either 16 or 32 registers, depending
// on the FPU available.
const char *
ARMAsmParser::getCustomOperandDiag(ARMMatchResultTy MatchError) {
switch (MatchError) {
// rGPR contains sp starting with ARMv8.
case Match_rGPR:
return hasV8Ops() ? "operand must be a register in range [r0, r14]"
: "operand must be a register in range [r0, r12] or r14";
// DPR contains 16 registers for some FPUs, and 32 for others.
case Match_DPR:
return hasD32() ? "operand must be a register in range [d0, d31]"
: "operand must be a register in range [d0, d15]";
case Match_DPR_RegList:
return hasD32() ? "operand must be a list of registers in range [d0, d31]"
: "operand must be a list of registers in range [d0, d15]";
// For all other diags, use the static string from tablegen.
default:
return getMatchKindDiag(MatchError);
}
}
// Process the list of near-misses, throwing away ones we don't want to report
// to the user, and converting the rest to a source location and string that
// should be reported.
void
ARMAsmParser::FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
SmallVectorImpl<NearMissMessage> &NearMissesOut,
SMLoc IDLoc, OperandVector &Operands) {
// TODO: If operand didn't match, sub in a dummy one and run target
// predicate, so that we can avoid reporting near-misses that are invalid?
// TODO: Many operand types don't have SuperClasses set, so we report
// redundant ones.
// TODO: Some operands are superclasses of registers (e.g.
// MCK_RegShiftedImm), we don't have any way to represent that currently.
// TODO: This is not all ARM-specific, can some of it be factored out?
// Record some information about near-misses that we have already seen, so
// that we can avoid reporting redundant ones. For example, if there are
// variants of an instruction that take 8- and 16-bit immediates, we want
// to only report the widest one.
std::multimap<unsigned, unsigned> OperandMissesSeen;
SmallSet<FeatureBitset, 4> FeatureMissesSeen;
bool ReportedTooFewOperands = false;
// Process the near-misses in reverse order, so that we see more general ones
// first, and so can avoid emitting more specific ones.
for (NearMissInfo &I : reverse(NearMissesIn)) {
switch (I.getKind()) {
case NearMissInfo::NearMissOperand: {
SMLoc OperandLoc =
((ARMOperand &)*Operands[I.getOperandIndex()]).getStartLoc();
const char *OperandDiag =
getCustomOperandDiag((ARMMatchResultTy)I.getOperandError());
// If we have already emitted a message for a superclass, don't also report
// the sub-class. We consider all operand classes that we don't have a
// specialised diagnostic for to be equal for the purpose of this check,
// so that we don't report the generic error multiple times on the same
// operand.
unsigned DupCheckMatchClass = OperandDiag ? I.getOperandClass() : ~0U;
auto PrevReports = OperandMissesSeen.equal_range(I.getOperandIndex());
if (std::any_of(PrevReports.first, PrevReports.second,
[DupCheckMatchClass](
const std::pair<unsigned, unsigned> Pair) {
if (DupCheckMatchClass == ~0U || Pair.second == ~0U)
return Pair.second == DupCheckMatchClass;
else
return isSubclass((MatchClassKind)DupCheckMatchClass,
(MatchClassKind)Pair.second);
}))
break;
OperandMissesSeen.insert(
std::make_pair(I.getOperandIndex(), DupCheckMatchClass));
NearMissMessage Message;
Message.Loc = OperandLoc;
if (OperandDiag) {
Message.Message = OperandDiag;
} else if (I.getOperandClass() == InvalidMatchClass) {
Message.Message = "too many operands for instruction";
} else {
Message.Message = "invalid operand for instruction";
LLVM_DEBUG(
dbgs() << "Missing diagnostic string for operand class "
<< getMatchClassName((MatchClassKind)I.getOperandClass())
<< I.getOperandClass() << ", error " << I.getOperandError()
<< ", opcode " << MII.getName(I.getOpcode()) << "\n");
}
NearMissesOut.emplace_back(Message);
break;
}
case NearMissInfo::NearMissFeature: {
const FeatureBitset &MissingFeatures = I.getFeatures();
// Don't report the same set of features twice.
if (FeatureMissesSeen.count(MissingFeatures))
break;
FeatureMissesSeen.insert(MissingFeatures);
// Special case: don't report a feature set which includes arm-mode for
// targets that don't have ARM mode.
if (MissingFeatures.test(Feature_IsARMBit) && !hasARM())
break;
// Don't report any near-misses that both require switching instruction
// set, and adding other subtarget features.
if (isThumb() && MissingFeatures.test(Feature_IsARMBit) &&
MissingFeatures.count() > 1)
break;
if (!isThumb() && MissingFeatures.test(Feature_IsThumbBit) &&
MissingFeatures.count() > 1)
break;
if (!isThumb() && MissingFeatures.test(Feature_IsThumb2Bit) &&
(MissingFeatures & ~FeatureBitset({Feature_IsThumb2Bit,
Feature_IsThumbBit})).any())
break;
if (isMClass() && MissingFeatures.test(Feature_HasNEONBit))
break;
NearMissMessage Message;
Message.Loc = IDLoc;
raw_svector_ostream OS(Message.Message);
OS << "instruction requires:";
for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i)
if (MissingFeatures.test(i))
OS << ' ' << getSubtargetFeatureName(i);
NearMissesOut.emplace_back(Message);
break;
}
case NearMissInfo::NearMissPredicate: {
NearMissMessage Message;
Message.Loc = IDLoc;
switch (I.getPredicateError()) {
case Match_RequiresNotITBlock:
Message.Message = "flag setting instruction only valid outside IT block";
break;
case Match_RequiresITBlock:
Message.Message = "instruction only valid inside IT block";
break;
case Match_RequiresV6:
Message.Message = "instruction variant requires ARMv6 or later";
break;
case Match_RequiresThumb2:
Message.Message = "instruction variant requires Thumb2";
break;
case Match_RequiresV8:
Message.Message = "instruction variant requires ARMv8 or later";
break;
case Match_RequiresFlagSetting:
Message.Message = "no flag-preserving variant of this instruction available";
break;
case Match_InvalidOperand:
Message.Message = "invalid operand for instruction";
break;
default:
llvm_unreachable("Unhandled target predicate error");
break;
}
NearMissesOut.emplace_back(Message);
break;
}
case NearMissInfo::NearMissTooFewOperands: {
if (!ReportedTooFewOperands) {
SMLoc EndLoc = ((ARMOperand &)*Operands.back()).getEndLoc();
NearMissesOut.emplace_back(NearMissMessage{
EndLoc, StringRef("too few operands for instruction")});
ReportedTooFewOperands = true;
}
break;
}
case NearMissInfo::NoNearMiss:
// This should never leave the matcher.
llvm_unreachable("not a near-miss");
break;
}
}
}
void ARMAsmParser::ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses,
SMLoc IDLoc, OperandVector &Operands) {
SmallVector<NearMissMessage, 4> Messages;
FilterNearMisses(NearMisses, Messages, IDLoc, Operands);
if (Messages.size() == 0) {
// No near-misses were found, so the best we can do is "invalid
// instruction".
Error(IDLoc, "invalid instruction");
} else if (Messages.size() == 1) {
// One near miss was found, report it as the sole error.
Error(Messages[0].Loc, Messages[0].Message);
} else {
// More than one near miss, so report a generic "invalid instruction"
// error, followed by notes for each of the near-misses.
Error(IDLoc, "invalid instruction, any one of the following would fix this:");
for (auto &M : Messages) {
Note(M.Loc, M.Message);
}
}
}
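// For illustration, a single mismatched instruction can therefore produce
// output shaped like this (paraphrased sketch, not captured from a run):
//   error: invalid instruction, any one of the following would fix this:
//   note: instruction requires: thumb2
//   note: operand must be a register in range [r0, r12] or r14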
/// parseDirectiveArchExtension
/// ::= .arch_extension [no]feature
bool ARMAsmParser::parseDirectiveArchExtension(SMLoc L) {
// FIXME: This structure should be moved inside ARMTargetParser
// when we start to table-generate them, and we can use the ARM
// flags below, that were generated by table-gen.
static const struct {
const unsigned Kind;
const FeatureBitset ArchCheck;
const FeatureBitset Features;
} Extensions[] = {
{ ARM::AEK_CRC, {Feature_HasV8Bit}, {ARM::FeatureCRC} },
{ ARM::AEK_CRYPTO, {Feature_HasV8Bit},
{ARM::FeatureCrypto, ARM::FeatureNEON, ARM::FeatureFPARMv8} },
{ ARM::AEK_FP, {Feature_HasV8Bit},
- {ARM::FeatureVFP2_SP, ARM::FeatureFPARMv8} },
+ {ARM::FeatureVFP2_D16_SP, ARM::FeatureFPARMv8} },
{ (ARM::AEK_HWDIVTHUMB | ARM::AEK_HWDIVARM),
{Feature_HasV7Bit, Feature_IsNotMClassBit},
{ARM::FeatureHWDivThumb, ARM::FeatureHWDivARM} },
{ ARM::AEK_MP, {Feature_HasV7Bit, Feature_IsNotMClassBit},
{ARM::FeatureMP} },
{ ARM::AEK_SIMD, {Feature_HasV8Bit},
- {ARM::FeatureNEON, ARM::FeatureVFP2_SP, ARM::FeatureFPARMv8} },
+ {ARM::FeatureNEON, ARM::FeatureVFP2_D16_SP, ARM::FeatureFPARMv8} },
{ ARM::AEK_SEC, {Feature_HasV6KBit}, {ARM::FeatureTrustZone} },
// FIXME: Only available in A-class, isel not predicated
{ ARM::AEK_VIRT, {Feature_HasV7Bit}, {ARM::FeatureVirtualization} },
{ ARM::AEK_FP16, {Feature_HasV8_2aBit},
{ARM::FeatureFPARMv8, ARM::FeatureFullFP16} },
{ ARM::AEK_RAS, {Feature_HasV8Bit}, {ARM::FeatureRAS} },
{ ARM::AEK_LOB, {Feature_HasV8_1MMainlineBit}, {ARM::FeatureLOB} },
// FIXME: Unsupported extensions.
{ ARM::AEK_OS, {}, {} },
{ ARM::AEK_IWMMXT, {}, {} },
{ ARM::AEK_IWMMXT2, {}, {} },
{ ARM::AEK_MAVERICK, {}, {} },
{ ARM::AEK_XSCALE, {}, {} },
};
MCAsmParser &Parser = getParser();
if (getLexer().isNot(AsmToken::Identifier))
return Error(getLexer().getLoc(), "expected architecture extension name");
StringRef Name = Parser.getTok().getString();
SMLoc ExtLoc = Parser.getTok().getLoc();
Lex();
if (parseToken(AsmToken::EndOfStatement,
"unexpected token in '.arch_extension' directive"))
return true;
bool EnableFeature = true;
if (Name.startswith_lower("no")) {
EnableFeature = false;
Name = Name.substr(2);
}
unsigned FeatureKind = ARM::parseArchExt(Name);
if (FeatureKind == ARM::AEK_INVALID)
return Error(ExtLoc, "unknown architectural extension: " + Name);
for (const auto &Extension : Extensions) {
if (Extension.Kind != FeatureKind)
continue;
if (Extension.Features.none())
return Error(ExtLoc, "unsupported architectural extension: " + Name);
if ((getAvailableFeatures() & Extension.ArchCheck) != Extension.ArchCheck)
return Error(ExtLoc, "architectural extension '" + Name +
"' is not "
"allowed for the current base architecture");
MCSubtargetInfo &STI = copySTI();
if (EnableFeature) {
STI.SetFeatureBitsTransitively(Extension.Features);
} else {
STI.ClearFeatureBitsTransitively(Extension.Features);
}
FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
setAvailableFeatures(Features);
return false;
}
return Error(ExtLoc, "unknown architectural extension: " + Name);
}
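// Illustrative usage (a sketch; 'crc' is one of the extension names in
// the table above):
//   .arch_extension crc      @ enables FeatureCRC on a v8 base arch
//   .arch_extension nocrc    @ the "no" prefix disables it again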
// Define this matcher function after the auto-generated include so we
// have the match class enum definitions.
unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
unsigned Kind) {
ARMOperand &Op = static_cast<ARMOperand &>(AsmOp);
// If the kind is a token for a literal immediate, check if our asm
// operand matches. This is for InstAliases which have a fixed-value
// immediate in the syntax.
switch (Kind) {
default: break;
case MCK__35_0:
if (Op.isImm())
if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
if (CE->getValue() == 0)
return Match_Success;
break;
case MCK__35_8:
if (Op.isImm())
if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
if (CE->getValue() == 8)
return Match_Success;
break;
case MCK__35_16:
if (Op.isImm())
if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
if (CE->getValue() == 16)
return Match_Success;
break;
case MCK_ModImm:
if (Op.isImm()) {
const MCExpr *SOExpr = Op.getImm();
int64_t Value;
if (!SOExpr->evaluateAsAbsolute(Value))
return Match_Success;
assert((Value >= std::numeric_limits<int32_t>::min() &&
Value <= std::numeric_limits<uint32_t>::max()) &&
"expression value must be representable in 32 bits");
}
break;
case MCK_rGPR:
if (hasV8Ops() && Op.isReg() && Op.getReg() == ARM::SP)
return Match_Success;
return Match_rGPR;
case MCK_GPRPair:
if (Op.isReg() &&
MRI->getRegClass(ARM::GPRRegClassID).contains(Op.getReg()))
return Match_Success;
break;
}
return Match_InvalidOperand;
}
bool ARMAsmParser::isMnemonicVPTPredicable(StringRef Mnemonic,
StringRef ExtraToken) {
if (!hasMVE())
return false;
return Mnemonic.startswith("vabav") || Mnemonic.startswith("vaddv") ||
Mnemonic.startswith("vaddlv") || Mnemonic.startswith("vminnmv") ||
Mnemonic.startswith("vminnmav") || Mnemonic.startswith("vminv") ||
Mnemonic.startswith("vminav") || Mnemonic.startswith("vmaxnmv") ||
Mnemonic.startswith("vmaxnmav") || Mnemonic.startswith("vmaxv") ||
Mnemonic.startswith("vmaxav") || Mnemonic.startswith("vmladav") ||
Mnemonic.startswith("vrmlaldavh") || Mnemonic.startswith("vrmlalvh") ||
Mnemonic.startswith("vmlsdav") || Mnemonic.startswith("vmlav") ||
Mnemonic.startswith("vmlaldav") || Mnemonic.startswith("vmlalv") ||
Mnemonic.startswith("vmaxnm") || Mnemonic.startswith("vminnm") ||
Mnemonic.startswith("vmax") || Mnemonic.startswith("vmin") ||
Mnemonic.startswith("vshlc") || Mnemonic.startswith("vmovlt") ||
Mnemonic.startswith("vmovlb") || Mnemonic.startswith("vshll") ||
Mnemonic.startswith("vrshrn") || Mnemonic.startswith("vshrn") ||
Mnemonic.startswith("vqrshrun") || Mnemonic.startswith("vqshrun") ||
Mnemonic.startswith("vqrshrn") || Mnemonic.startswith("vqshrn") ||
Mnemonic.startswith("vbic") || Mnemonic.startswith("vrev64") ||
Mnemonic.startswith("vrev32") || Mnemonic.startswith("vrev16") ||
Mnemonic.startswith("vmvn") || Mnemonic.startswith("veor") ||
Mnemonic.startswith("vorn") || Mnemonic.startswith("vorr") ||
Mnemonic.startswith("vand") || Mnemonic.startswith("vmul") ||
Mnemonic.startswith("vqrdmulh") || Mnemonic.startswith("vqdmulh") ||
Mnemonic.startswith("vsub") || Mnemonic.startswith("vadd") ||
Mnemonic.startswith("vqsub") || Mnemonic.startswith("vqadd") ||
Mnemonic.startswith("vabd") || Mnemonic.startswith("vrhadd") ||
Mnemonic.startswith("vhsub") || Mnemonic.startswith("vhadd") ||
Mnemonic.startswith("vdup") || Mnemonic.startswith("vcls") ||
Mnemonic.startswith("vclz") || Mnemonic.startswith("vneg") ||
Mnemonic.startswith("vabs") || Mnemonic.startswith("vqneg") ||
Mnemonic.startswith("vqabs") ||
(Mnemonic.startswith("vrint") && Mnemonic != "vrintr") ||
Mnemonic.startswith("vcmla") || Mnemonic.startswith("vfma") ||
Mnemonic.startswith("vfms") || Mnemonic.startswith("vcadd") ||
Mnemonic.startswith("vadd") || Mnemonic.startswith("vsub") ||
Mnemonic.startswith("vshl") || Mnemonic.startswith("vqshl") ||
Mnemonic.startswith("vqrshl") || Mnemonic.startswith("vrshl") ||
Mnemonic.startswith("vsri") || Mnemonic.startswith("vsli") ||
Mnemonic.startswith("vrshr") || Mnemonic.startswith("vshr") ||
Mnemonic.startswith("vpsel") || Mnemonic.startswith("vcmp") ||
Mnemonic.startswith("vqdmladh") || Mnemonic.startswith("vqrdmladh") ||
Mnemonic.startswith("vqdmlsdh") || Mnemonic.startswith("vqrdmlsdh") ||
Mnemonic.startswith("vcmul") || Mnemonic.startswith("vrmulh") ||
Mnemonic.startswith("vqmovn") || Mnemonic.startswith("vqmovun") ||
Mnemonic.startswith("vmovnt") || Mnemonic.startswith("vmovnb") ||
Mnemonic.startswith("vmaxa") || Mnemonic.startswith("vmaxnma") ||
Mnemonic.startswith("vhcadd") || Mnemonic.startswith("vadc") ||
Mnemonic.startswith("vsbc") || Mnemonic.startswith("vrshr") ||
Mnemonic.startswith("vshr") || Mnemonic.startswith("vstrb") ||
Mnemonic.startswith("vldrb") ||
(Mnemonic.startswith("vstrh") && Mnemonic != "vstrhi") ||
(Mnemonic.startswith("vldrh") && Mnemonic != "vldrhi") ||
Mnemonic.startswith("vstrw") || Mnemonic.startswith("vldrw") ||
Mnemonic.startswith("vldrd") || Mnemonic.startswith("vstrd") ||
Mnemonic.startswith("vqdmull") || Mnemonic.startswith("vbrsr") ||
Mnemonic.startswith("vfmas") || Mnemonic.startswith("vmlas") ||
Mnemonic.startswith("vmla") || Mnemonic.startswith("vqdmlash") ||
Mnemonic.startswith("vqdmlah") || Mnemonic.startswith("vqrdmlash") ||
Mnemonic.startswith("vqrdmlah") || Mnemonic.startswith("viwdup") ||
Mnemonic.startswith("vdwdup") || Mnemonic.startswith("vidup") ||
Mnemonic.startswith("vddup") || Mnemonic.startswith("vctp") ||
Mnemonic.startswith("vpnot") || Mnemonic.startswith("vbic") ||
Mnemonic.startswith("vrmlsldavh") || Mnemonic.startswith("vmlsldav") ||
Mnemonic.startswith("vcvt") ||
(Mnemonic.startswith("vmov") &&
!(ExtraToken == ".f16" || ExtraToken == ".32" ||
ExtraToken == ".16" || ExtraToken == ".8"));
}
Index: stable/12/contrib/llvm-project/llvm/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp
===================================================================
--- stable/12/contrib/llvm-project/llvm/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp (revision 356465)
@@ -1,300 +1,300 @@
//===- ARMTargetStreamer.cpp - ARMTargetStreamer class --*- C++ -*---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the ARMTargetStreamer class.
//
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "llvm/MC/ConstantPools.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/ARMBuildAttributes.h"
#include "llvm/Support/TargetParser.h"
using namespace llvm;
//
// ARMTargetStreamer Implementation
//
ARMTargetStreamer::ARMTargetStreamer(MCStreamer &S)
: MCTargetStreamer(S), ConstantPools(new AssemblerConstantPools()) {}
ARMTargetStreamer::~ARMTargetStreamer() = default;
// The constant pool handling is shared by all ARMTargetStreamer
// implementations.
const MCExpr *ARMTargetStreamer::addConstantPoolEntry(const MCExpr *Expr, SMLoc Loc) {
return ConstantPools->addEntry(Streamer, Expr, 4, Loc);
}
void ARMTargetStreamer::emitCurrentConstantPool() {
ConstantPools->emitForCurrentSection(Streamer);
ConstantPools->clearCacheForCurrentSection(Streamer);
}
// finish() - write out any non-empty assembler constant pools.
void ARMTargetStreamer::finish() { ConstantPools->emitAll(Streamer); }
// reset() - Reset any state
void ARMTargetStreamer::reset() {}
void ARMTargetStreamer::emitInst(uint32_t Inst, char Suffix) {
unsigned Size;
char Buffer[4];
const bool LittleEndian = getStreamer().getContext().getAsmInfo()->isLittleEndian();
switch (Suffix) {
case '\0':
Size = 4;
for (unsigned II = 0, IE = Size; II != IE; II++) {
const unsigned I = LittleEndian ? (Size - II - 1) : II;
Buffer[Size - II - 1] = uint8_t(Inst >> I * CHAR_BIT);
}
break;
case 'n':
case 'w':
Size = (Suffix == 'n' ? 2 : 4);
// Thumb wide instructions are emitted as a pair of 16-bit words of the
// appropriate endianness.
for (unsigned II = 0, IE = Size; II != IE; II = II + 2) {
const unsigned I0 = LittleEndian ? II + 0 : II + 1;
const unsigned I1 = LittleEndian ? II + 1 : II + 0;
Buffer[Size - II - 2] = uint8_t(Inst >> I0 * CHAR_BIT);
Buffer[Size - II - 1] = uint8_t(Inst >> I1 * CHAR_BIT);
}
break;
default:
llvm_unreachable("Invalid Suffix");
}
getStreamer().EmitBytes(StringRef(Buffer, Size));
}
// The remaining callbacks should be handled separately by each
// streamer.
void ARMTargetStreamer::emitFnStart() {}
void ARMTargetStreamer::emitFnEnd() {}
void ARMTargetStreamer::emitCantUnwind() {}
void ARMTargetStreamer::emitPersonality(const MCSymbol *Personality) {}
void ARMTargetStreamer::emitPersonalityIndex(unsigned Index) {}
void ARMTargetStreamer::emitHandlerData() {}
void ARMTargetStreamer::emitSetFP(unsigned FpReg, unsigned SpReg,
int64_t Offset) {}
void ARMTargetStreamer::emitMovSP(unsigned Reg, int64_t Offset) {}
void ARMTargetStreamer::emitPad(int64_t Offset) {}
void ARMTargetStreamer::emitRegSave(const SmallVectorImpl<unsigned> &RegList,
bool isVector) {}
void ARMTargetStreamer::emitUnwindRaw(int64_t StackOffset,
const SmallVectorImpl<uint8_t> &Opcodes) {
}
void ARMTargetStreamer::switchVendor(StringRef Vendor) {}
void ARMTargetStreamer::emitAttribute(unsigned Attribute, unsigned Value) {}
void ARMTargetStreamer::emitTextAttribute(unsigned Attribute,
StringRef String) {}
void ARMTargetStreamer::emitIntTextAttribute(unsigned Attribute,
unsigned IntValue,
StringRef StringValue) {}
void ARMTargetStreamer::emitArch(ARM::ArchKind Arch) {}
void ARMTargetStreamer::emitArchExtension(unsigned ArchExt) {}
void ARMTargetStreamer::emitObjectArch(ARM::ArchKind Arch) {}
void ARMTargetStreamer::emitFPU(unsigned FPU) {}
void ARMTargetStreamer::finishAttributeSection() {}
void
ARMTargetStreamer::AnnotateTLSDescriptorSequence(const MCSymbolRefExpr *SRE) {}
void ARMTargetStreamer::emitThumbSet(MCSymbol *Symbol, const MCExpr *Value) {}
static ARMBuildAttrs::CPUArch getArchForCPU(const MCSubtargetInfo &STI) {
if (STI.getCPU() == "xscale")
return ARMBuildAttrs::v5TEJ;
if (STI.hasFeature(ARM::HasV8Ops)) {
if (STI.hasFeature(ARM::FeatureRClass))
return ARMBuildAttrs::v8_R;
return ARMBuildAttrs::v8_A;
} else if (STI.hasFeature(ARM::HasV8_1MMainlineOps))
return ARMBuildAttrs::v8_1_M_Main;
else if (STI.hasFeature(ARM::HasV8MMainlineOps))
return ARMBuildAttrs::v8_M_Main;
else if (STI.hasFeature(ARM::HasV7Ops)) {
if (STI.hasFeature(ARM::FeatureMClass) && STI.hasFeature(ARM::FeatureDSP))
return ARMBuildAttrs::v7E_M;
return ARMBuildAttrs::v7;
} else if (STI.hasFeature(ARM::HasV6T2Ops))
return ARMBuildAttrs::v6T2;
else if (STI.hasFeature(ARM::HasV8MBaselineOps))
return ARMBuildAttrs::v8_M_Base;
else if (STI.hasFeature(ARM::HasV6MOps))
return ARMBuildAttrs::v6S_M;
else if (STI.hasFeature(ARM::HasV6Ops))
return ARMBuildAttrs::v6;
else if (STI.hasFeature(ARM::HasV5TEOps))
return ARMBuildAttrs::v5TE;
else if (STI.hasFeature(ARM::HasV5TOps))
return ARMBuildAttrs::v5T;
else if (STI.hasFeature(ARM::HasV4TOps))
return ARMBuildAttrs::v4T;
else
return ARMBuildAttrs::v4;
}
static bool isV8M(const MCSubtargetInfo &STI) {
// Note that v8M Baseline is a subset of v6T2!
return (STI.hasFeature(ARM::HasV8MBaselineOps) &&
!STI.hasFeature(ARM::HasV6T2Ops)) ||
STI.hasFeature(ARM::HasV8MMainlineOps);
}
/// Emit the build attributes that only depend on the hardware that we expect
/// to be available, and not on the ABI, or any source-language choices.
void ARMTargetStreamer::emitTargetAttributes(const MCSubtargetInfo &STI) {
switchVendor("aeabi");
const StringRef CPUString = STI.getCPU();
if (!CPUString.empty() && !CPUString.startswith("generic")) {
// FIXME: remove krait check when GNU tools support krait cpu
if (STI.hasFeature(ARM::ProcKrait)) {
emitTextAttribute(ARMBuildAttrs::CPU_name, "cortex-a9");
// We consider krait as a "cortex-a9" + hwdiv CPU
// Enable hwdiv through ".arch_extension idiv"
if (STI.hasFeature(ARM::FeatureHWDivThumb) ||
STI.hasFeature(ARM::FeatureHWDivARM))
emitArchExtension(ARM::AEK_HWDIVTHUMB | ARM::AEK_HWDIVARM);
} else {
emitTextAttribute(ARMBuildAttrs::CPU_name, CPUString);
}
}
emitAttribute(ARMBuildAttrs::CPU_arch, getArchForCPU(STI));
if (STI.hasFeature(ARM::FeatureAClass)) {
emitAttribute(ARMBuildAttrs::CPU_arch_profile,
ARMBuildAttrs::ApplicationProfile);
} else if (STI.hasFeature(ARM::FeatureRClass)) {
emitAttribute(ARMBuildAttrs::CPU_arch_profile,
ARMBuildAttrs::RealTimeProfile);
} else if (STI.hasFeature(ARM::FeatureMClass)) {
emitAttribute(ARMBuildAttrs::CPU_arch_profile,
ARMBuildAttrs::MicroControllerProfile);
}
emitAttribute(ARMBuildAttrs::ARM_ISA_use, STI.hasFeature(ARM::FeatureNoARM)
? ARMBuildAttrs::Not_Allowed
: ARMBuildAttrs::Allowed);
if (isV8M(STI)) {
emitAttribute(ARMBuildAttrs::THUMB_ISA_use,
ARMBuildAttrs::AllowThumbDerived);
} else if (STI.hasFeature(ARM::FeatureThumb2)) {
emitAttribute(ARMBuildAttrs::THUMB_ISA_use,
ARMBuildAttrs::AllowThumb32);
} else if (STI.hasFeature(ARM::HasV4TOps)) {
emitAttribute(ARMBuildAttrs::THUMB_ISA_use, ARMBuildAttrs::Allowed);
}
if (STI.hasFeature(ARM::FeatureNEON)) {
/* NEON is not exactly a VFP architecture, but GAS emits one of
* neon/neon-fp-armv8/neon-vfpv4/vfpv3/vfpv2 for .fpu parameters */
if (STI.hasFeature(ARM::FeatureFPARMv8)) {
if (STI.hasFeature(ARM::FeatureCrypto))
emitFPU(ARM::FK_CRYPTO_NEON_FP_ARMV8);
else
emitFPU(ARM::FK_NEON_FP_ARMV8);
} else if (STI.hasFeature(ARM::FeatureVFP4))
emitFPU(ARM::FK_NEON_VFPV4);
else
emitFPU(STI.hasFeature(ARM::FeatureFP16) ? ARM::FK_NEON_FP16
: ARM::FK_NEON);
// Emit Tag_Advanced_SIMD_arch for ARMv8 architecture
if (STI.hasFeature(ARM::HasV8Ops))
emitAttribute(ARMBuildAttrs::Advanced_SIMD_arch,
STI.hasFeature(ARM::HasV8_1aOps)
? ARMBuildAttrs::AllowNeonARMv8_1a
: ARMBuildAttrs::AllowNeonARMv8);
} else {
if (STI.hasFeature(ARM::FeatureFPARMv8_D16_SP))
// FPv5 and FP-ARMv8 have the same instructions, so are modeled as one
// FPU, but there are two different names for it depending on the CPU.
emitFPU(STI.hasFeature(ARM::FeatureD32)
? ARM::FK_FP_ARMV8
: (STI.hasFeature(ARM::FeatureFP64) ? ARM::FK_FPV5_D16
: ARM::FK_FPV5_SP_D16));
else if (STI.hasFeature(ARM::FeatureVFP4_D16_SP))
emitFPU(STI.hasFeature(ARM::FeatureD32)
? ARM::FK_VFPV4
: (STI.hasFeature(ARM::FeatureFP64) ? ARM::FK_VFPV4_D16
: ARM::FK_FPV4_SP_D16));
else if (STI.hasFeature(ARM::FeatureVFP3_D16_SP))
emitFPU(
STI.hasFeature(ARM::FeatureD32)
// +d32
? (STI.hasFeature(ARM::FeatureFP16) ? ARM::FK_VFPV3_FP16
: ARM::FK_VFPV3)
// -d32
: (STI.hasFeature(ARM::FeatureFP64)
? (STI.hasFeature(ARM::FeatureFP16)
? ARM::FK_VFPV3_D16_FP16
: ARM::FK_VFPV3_D16)
: (STI.hasFeature(ARM::FeatureFP16) ? ARM::FK_VFPV3XD_FP16
: ARM::FK_VFPV3XD)));
- else if (STI.hasFeature(ARM::FeatureVFP2_SP))
+ else if (STI.hasFeature(ARM::FeatureVFP2_D16_SP))
emitFPU(ARM::FK_VFPV2);
}
// ABI_HardFP_use attribute to indicate single precision FP.
- if (STI.hasFeature(ARM::FeatureVFP2_SP) && !STI.hasFeature(ARM::FeatureFP64))
+ if (STI.hasFeature(ARM::FeatureVFP2_D16_SP) && !STI.hasFeature(ARM::FeatureFP64))
emitAttribute(ARMBuildAttrs::ABI_HardFP_use,
ARMBuildAttrs::HardFPSinglePrecision);
if (STI.hasFeature(ARM::FeatureFP16))
emitAttribute(ARMBuildAttrs::FP_HP_extension, ARMBuildAttrs::AllowHPFP);
if (STI.hasFeature(ARM::FeatureMP))
emitAttribute(ARMBuildAttrs::MPextension_use, ARMBuildAttrs::AllowMP);
if (STI.hasFeature(ARM::HasMVEFloatOps))
emitAttribute(ARMBuildAttrs::MVE_arch, ARMBuildAttrs::AllowMVEIntegerAndFloat);
else if (STI.hasFeature(ARM::HasMVEIntegerOps))
emitAttribute(ARMBuildAttrs::MVE_arch, ARMBuildAttrs::AllowMVEInteger);
// Hardware divide in ARM mode is part of base arch, starting from ARMv8.
// If only Thumb hwdiv is present, it must also be in base arch (ARMv7-R/M).
// It is not possible to produce DisallowDIV: if hwdiv is present in the base
// arch, supplying -hwdiv downgrades the effective arch, via ClearImpliedBits.
// AllowDIVExt is only emitted if hwdiv isn't available in the base arch;
// otherwise, the default value (AllowDIVIfExists) applies.
if (STI.hasFeature(ARM::FeatureHWDivARM) && !STI.hasFeature(ARM::HasV8Ops))
emitAttribute(ARMBuildAttrs::DIV_use, ARMBuildAttrs::AllowDIVExt);
if (STI.hasFeature(ARM::FeatureDSP) && isV8M(STI))
emitAttribute(ARMBuildAttrs::DSP_extension, ARMBuildAttrs::Allowed);
if (STI.hasFeature(ARM::FeatureStrictAlign))
emitAttribute(ARMBuildAttrs::CPU_unaligned_access,
ARMBuildAttrs::Not_Allowed);
else
emitAttribute(ARMBuildAttrs::CPU_unaligned_access,
ARMBuildAttrs::Allowed);
if (STI.hasFeature(ARM::FeatureTrustZone) &&
STI.hasFeature(ARM::FeatureVirtualization))
emitAttribute(ARMBuildAttrs::Virtualization_use,
ARMBuildAttrs::AllowTZVirtualization);
else if (STI.hasFeature(ARM::FeatureTrustZone))
emitAttribute(ARMBuildAttrs::Virtualization_use, ARMBuildAttrs::AllowTZ);
else if (STI.hasFeature(ARM::FeatureVirtualization))
emitAttribute(ARMBuildAttrs::Virtualization_use,
ARMBuildAttrs::AllowVirtualization);
}
Index: stable/12/contrib/llvm-project/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
===================================================================
--- stable/12/contrib/llvm-project/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp (revision 356465)
@@ -1,8530 +1,8589 @@
//===-- MipsAsmParser.cpp - Parse Mips assembly to MCInst instructions ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/MipsABIFlagsSection.h"
#include "MCTargetDesc/MipsABIInfo.h"
#include "MCTargetDesc/MipsBaseInfo.h"
#include "MCTargetDesc/MipsMCExpr.h"
#include "MCTargetDesc/MipsMCTargetDesc.h"
#include "MipsTargetStreamer.h"
#include "TargetInfo/MipsTargetInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCAsmParserExtension.h"
#include "llvm/MC/MCParser/MCAsmParserUtils.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCSymbolELF.h"
#include "llvm/MC/MCValue.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
using namespace llvm;
#define DEBUG_TYPE "mips-asm-parser"
namespace llvm {
class MCInstrInfo;
} // end namespace llvm
extern cl::opt<bool> EmitJalrReloc;
namespace {
class MipsAssemblerOptions {
public:
MipsAssemblerOptions(const FeatureBitset &Features_) : Features(Features_) {}
MipsAssemblerOptions(const MipsAssemblerOptions *Opts) {
ATReg = Opts->getATRegIndex();
Reorder = Opts->isReorder();
Macro = Opts->isMacro();
Features = Opts->getFeatures();
}
unsigned getATRegIndex() const { return ATReg; }
bool setATRegIndex(unsigned Reg) {
if (Reg > 31)
return false;
ATReg = Reg;
return true;
}
bool isReorder() const { return Reorder; }
void setReorder() { Reorder = true; }
void setNoReorder() { Reorder = false; }
bool isMacro() const { return Macro; }
void setMacro() { Macro = true; }
void setNoMacro() { Macro = false; }
const FeatureBitset &getFeatures() const { return Features; }
void setFeatures(const FeatureBitset &Features_) { Features = Features_; }
// Set of features that are either architecture features or referenced
// by them (e.g.: FeatureNaN2008 implied by FeatureMips32r6).
// The full table can be found in MipsGenSubtargetInfo.inc (MipsFeatureKV[]).
// The reason we need this mask is explained in the selectArch function.
// FIXME: Ideally we would like TableGen to generate this information.
static const FeatureBitset AllArchRelatedMask;
private:
unsigned ATReg = 1;
bool Reorder = true;
bool Macro = true;
FeatureBitset Features;
};
} // end anonymous namespace
const FeatureBitset MipsAssemblerOptions::AllArchRelatedMask = {
Mips::FeatureMips1, Mips::FeatureMips2, Mips::FeatureMips3,
Mips::FeatureMips3_32, Mips::FeatureMips3_32r2, Mips::FeatureMips4,
Mips::FeatureMips4_32, Mips::FeatureMips4_32r2, Mips::FeatureMips5,
Mips::FeatureMips5_32r2, Mips::FeatureMips32, Mips::FeatureMips32r2,
Mips::FeatureMips32r3, Mips::FeatureMips32r5, Mips::FeatureMips32r6,
Mips::FeatureMips64, Mips::FeatureMips64r2, Mips::FeatureMips64r3,
Mips::FeatureMips64r5, Mips::FeatureMips64r6, Mips::FeatureCnMips,
Mips::FeatureCnMipsP, Mips::FeatureFP64Bit, Mips::FeatureGP64Bit,
Mips::FeatureNaN2008
};
namespace {
class MipsAsmParser : public MCTargetAsmParser {
MipsTargetStreamer &getTargetStreamer() {
MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
return static_cast<MipsTargetStreamer &>(TS);
}
MipsABIInfo ABI;
SmallVector<std::unique_ptr<MipsAssemblerOptions>, 2> AssemblerOptions;
MCSymbol *CurrentFn; // Pointer to the function being parsed. It may be a
// nullptr, which indicates that no function is currently
// selected. This usually happens after an '.end func'
// directive.
bool IsLittleEndian;
bool IsPicEnabled;
bool IsCpRestoreSet;
int CpRestoreOffset;
unsigned GPReg;
unsigned CpSaveLocation;
/// If true, then CpSaveLocation is a register, otherwise it's an offset.
bool CpSaveLocationIsRegister;
// Map of register aliases created via the .set directive.
StringMap<AsmToken> RegisterSets;
// Print a warning along with its fix-it message at the given range.
void printWarningWithFixIt(const Twine &Msg, const Twine &FixMsg,
SMRange Range, bool ShowColors = true);
void ConvertXWPOperands(MCInst &Inst, const OperandVector &Operands);
#define GET_ASSEMBLER_HEADER
#include "MipsGenAsmMatcher.inc"
unsigned
checkEarlyTargetMatchPredicate(MCInst &Inst,
const OperandVector &Operands) override;
unsigned checkTargetMatchPredicate(MCInst &Inst) override;
bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
OperandVector &Operands, MCStreamer &Out,
uint64_t &ErrorInfo,
bool MatchingInlineAsm) override;
/// Parse a register as used in CFI directives
bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
bool parseParenSuffix(StringRef Name, OperandVector &Operands);
bool parseBracketSuffix(StringRef Name, OperandVector &Operands);
bool mnemonicIsValid(StringRef Mnemonic, unsigned VariantID);
bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
SMLoc NameLoc, OperandVector &Operands) override;
bool ParseDirective(AsmToken DirectiveID) override;
OperandMatchResultTy parseMemOperand(OperandVector &Operands);
OperandMatchResultTy
matchAnyRegisterNameWithoutDollar(OperandVector &Operands,
StringRef Identifier, SMLoc S);
OperandMatchResultTy matchAnyRegisterWithoutDollar(OperandVector &Operands,
const AsmToken &Token,
SMLoc S);
OperandMatchResultTy matchAnyRegisterWithoutDollar(OperandVector &Operands,
SMLoc S);
OperandMatchResultTy parseAnyRegister(OperandVector &Operands);
OperandMatchResultTy parseImm(OperandVector &Operands);
OperandMatchResultTy parseJumpTarget(OperandVector &Operands);
OperandMatchResultTy parseInvNum(OperandVector &Operands);
OperandMatchResultTy parseRegisterList(OperandVector &Operands);
bool searchSymbolAlias(OperandVector &Operands);
bool parseOperand(OperandVector &, StringRef Mnemonic);
enum MacroExpanderResultTy {
MER_NotAMacro,
MER_Success,
MER_Fail,
};
// Expands assembly pseudo instructions.
MacroExpanderResultTy tryExpandInstruction(MCInst &Inst, SMLoc IDLoc,
MCStreamer &Out,
const MCSubtargetInfo *STI);
bool expandJalWithRegs(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI);
bool loadImmediate(int64_t ImmValue, unsigned DstReg, unsigned SrcReg,
bool Is32BitImm, bool IsAddress, SMLoc IDLoc,
MCStreamer &Out, const MCSubtargetInfo *STI);
bool loadAndAddSymbolAddress(const MCExpr *SymExpr, unsigned DstReg,
unsigned SrcReg, bool Is32BitSym, SMLoc IDLoc,
MCStreamer &Out, const MCSubtargetInfo *STI);
bool emitPartialAddress(MipsTargetStreamer &TOut, SMLoc IDLoc, MCSymbol *Sym);
bool expandLoadImm(MCInst &Inst, bool Is32BitImm, SMLoc IDLoc,
MCStreamer &Out, const MCSubtargetInfo *STI);
- bool expandLoadImmReal(MCInst &Inst, bool IsSingle, bool IsGPR, bool Is64FPU,
- SMLoc IDLoc, MCStreamer &Out,
- const MCSubtargetInfo *STI);
+ bool expandLoadSingleImmToGPR(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI);
+ bool expandLoadSingleImmToFPR(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI);
+ bool expandLoadDoubleImmToGPR(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI);
+ bool expandLoadDoubleImmToFPR(MCInst &Inst, bool Is64FPU, SMLoc IDLoc,
+ MCStreamer &Out, const MCSubtargetInfo *STI);
bool expandLoadAddress(unsigned DstReg, unsigned BaseReg,
const MCOperand &Offset, bool Is32BitAddress,
SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI);
bool expandUncondBranchMMPseudo(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI);
void expandMemInst(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI, bool IsLoad);
bool expandLoadStoreMultiple(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI);
bool expandAliasImmediate(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI);
bool expandBranchImm(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI);
bool expandCondBranches(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI);
bool expandDivRem(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI, const bool IsMips64,
const bool Signed);
bool expandTrunc(MCInst &Inst, bool IsDouble, bool Is64FPU, SMLoc IDLoc,
MCStreamer &Out, const MCSubtargetInfo *STI);
bool expandUlh(MCInst &Inst, bool Signed, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI);
bool expandUsh(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI);
bool expandUxw(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI);
bool expandSge(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI);
bool expandSgeImm(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI);
bool expandSgtImm(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI);
bool expandRotation(MCInst &Inst, SMLoc IDLoc,
MCStreamer &Out, const MCSubtargetInfo *STI);
bool expandRotationImm(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI);
bool expandDRotation(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI);
bool expandDRotationImm(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI);
bool expandAbs(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI);
bool expandMulImm(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI);
bool expandMulO(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI);
bool expandMulOU(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI);
bool expandDMULMacro(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI);
bool expandLoadStoreDMacro(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI, bool IsLoad);
bool expandStoreDM1Macro(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI);
bool expandSeq(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI);
bool expandSeqI(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI);
bool expandMXTRAlias(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI);
bool expandSaaAddr(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI);
bool reportParseError(Twine ErrorMsg);
bool reportParseError(SMLoc Loc, Twine ErrorMsg);
bool parseMemOffset(const MCExpr *&Res, bool isParenExpr);
bool isEvaluated(const MCExpr *Expr);
bool parseSetMips0Directive();
bool parseSetArchDirective();
bool parseSetFeature(uint64_t Feature);
bool isPicAndNotNxxAbi(); // Used by .cpload, .cprestore, and .cpsetup.
bool parseDirectiveCpLoad(SMLoc Loc);
bool parseDirectiveCpLocal(SMLoc Loc);
bool parseDirectiveCpRestore(SMLoc Loc);
bool parseDirectiveCPSetup();
bool parseDirectiveCPReturn();
bool parseDirectiveNaN();
bool parseDirectiveSet();
bool parseDirectiveOption();
bool parseInsnDirective();
bool parseRSectionDirective(StringRef Section);
bool parseSSectionDirective(StringRef Section, unsigned Type);
bool parseSetAtDirective();
bool parseSetNoAtDirective();
bool parseSetMacroDirective();
bool parseSetNoMacroDirective();
bool parseSetMsaDirective();
bool parseSetNoMsaDirective();
bool parseSetNoDspDirective();
bool parseSetReorderDirective();
bool parseSetNoReorderDirective();
bool parseSetMips16Directive();
bool parseSetNoMips16Directive();
bool parseSetFpDirective();
bool parseSetOddSPRegDirective();
bool parseSetNoOddSPRegDirective();
bool parseSetPopDirective();
bool parseSetPushDirective();
bool parseSetSoftFloatDirective();
bool parseSetHardFloatDirective();
bool parseSetMtDirective();
bool parseSetNoMtDirective();
bool parseSetNoCRCDirective();
bool parseSetNoVirtDirective();
bool parseSetNoGINVDirective();
bool parseSetAssignment();
bool parseDirectiveGpWord();
bool parseDirectiveGpDWord();
bool parseDirectiveDtpRelWord();
bool parseDirectiveDtpRelDWord();
bool parseDirectiveTpRelWord();
bool parseDirectiveTpRelDWord();
bool parseDirectiveModule();
bool parseDirectiveModuleFP();
bool parseFpABIValue(MipsABIFlagsSection::FpABIKind &FpABI,
StringRef Directive);
bool parseInternalDirectiveReallowModule();
bool eatComma(StringRef ErrorStr);
int matchCPURegisterName(StringRef Symbol);
int matchHWRegsRegisterName(StringRef Symbol);
int matchFPURegisterName(StringRef Name);
int matchFCCRegisterName(StringRef Name);
int matchACRegisterName(StringRef Name);
int matchMSA128RegisterName(StringRef Name);
int matchMSA128CtrlRegisterName(StringRef Name);
unsigned getReg(int RC, int RegNo);
/// Returns the internal register number for the current AT. Also checks if
/// the current AT is unavailable (set to $0) and gives an error if it is.
/// This should be used in pseudo-instruction expansions which need AT.
unsigned getATReg(SMLoc Loc);
bool canUseATReg();
bool processInstruction(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI);
// Helper function that checks if the value of a vector index is within the
// boundaries of accepted values for each RegisterKind
// Example: INSERT.B $w0[n], $1 => 16 > n >= 0
bool validateMSAIndex(int Val, int RegKind);
// Selects a new architecture by updating the FeatureBits with the necessary
// info including implied dependencies.
// Internally, it clears all the feature bits related to *any* architecture
// and selects the new one using the ToggleFeature functionality of the
// MCSubtargetInfo object that handles implied dependencies. The reason we
// clear all the arch related bits manually is because ToggleFeature only
// clears the features that imply the feature being cleared and not the
// features implied by the feature being cleared. This is easier to see
// with an example:
// --------------------------------------------------
// | Feature | Implies |
// | -------------------------------------------------|
// | FeatureMips1 | None |
// | FeatureMips2 | FeatureMips1 |
// | FeatureMips3 | FeatureMips2 | FeatureMipsGP64 |
// | FeatureMips4 | FeatureMips3 |
// | ... | |
// --------------------------------------------------
//
// Setting Mips3 is equivalent to setting: (FeatureMips3 | FeatureMips2 |
// FeatureMipsGP64 | FeatureMips1)
// Clearing Mips3 is equivalent to clearing: (FeatureMips3 | FeatureMips4).
void selectArch(StringRef ArchFeature) {
MCSubtargetInfo &STI = copySTI();
FeatureBitset FeatureBits = STI.getFeatureBits();
FeatureBits &= ~MipsAssemblerOptions::AllArchRelatedMask;
STI.setFeatureBits(FeatureBits);
setAvailableFeatures(
ComputeAvailableFeatures(STI.ToggleFeature(ArchFeature)));
AssemblerOptions.back()->setFeatures(STI.getFeatureBits());
}
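// Illustrative example (a sketch of the intended behaviour, not extra
// upstream code): a ".set mips3" directive ends up calling
// selectArch("mips3"); all architecture bits are first cleared through
// AllArchRelatedMask, and ToggleFeature then re-enables FeatureMips3
// together with everything it implies (FeatureMips2, FeatureMipsGP64,
// FeatureMips1), exactly as the table above describes.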
void setFeatureBits(uint64_t Feature, StringRef FeatureString) {
if (!(getSTI().getFeatureBits()[Feature])) {
MCSubtargetInfo &STI = copySTI();
setAvailableFeatures(
ComputeAvailableFeatures(STI.ToggleFeature(FeatureString)));
AssemblerOptions.back()->setFeatures(STI.getFeatureBits());
}
}
void clearFeatureBits(uint64_t Feature, StringRef FeatureString) {
if (getSTI().getFeatureBits()[Feature]) {
MCSubtargetInfo &STI = copySTI();
setAvailableFeatures(
ComputeAvailableFeatures(STI.ToggleFeature(FeatureString)));
AssemblerOptions.back()->setFeatures(STI.getFeatureBits());
}
}
void setModuleFeatureBits(uint64_t Feature, StringRef FeatureString) {
setFeatureBits(Feature, FeatureString);
AssemblerOptions.front()->setFeatures(getSTI().getFeatureBits());
}
void clearModuleFeatureBits(uint64_t Feature, StringRef FeatureString) {
clearFeatureBits(Feature, FeatureString);
AssemblerOptions.front()->setFeatures(getSTI().getFeatureBits());
}
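// Note the asymmetry between the two pairs above: setFeatureBits and
// clearFeatureBits only touch the innermost ".set push"/".set pop" scope
// (AssemblerOptions.back()), while the *Module* variants additionally
// update the initial, file-level options (AssemblerOptions.front()),
// which is what ".module"-style directives require.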
public:
enum MipsMatchResultTy {
Match_RequiresDifferentSrcAndDst = FIRST_TARGET_MATCH_RESULT_TY,
Match_RequiresDifferentOperands,
Match_RequiresNoZeroRegister,
Match_RequiresSameSrcAndDst,
Match_NoFCCRegisterForCurrentISA,
Match_NonZeroOperandForSync,
Match_NonZeroOperandForMTCX,
Match_RequiresPosSizeRange0_32,
Match_RequiresPosSizeRange33_64,
Match_RequiresPosSizeUImm6,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "MipsGenAsmMatcher.inc"
#undef GET_OPERAND_DIAGNOSTIC_TYPES
};
MipsAsmParser(const MCSubtargetInfo &sti, MCAsmParser &parser,
const MCInstrInfo &MII, const MCTargetOptions &Options)
: MCTargetAsmParser(Options, sti, MII),
ABI(MipsABIInfo::computeTargetABI(Triple(sti.getTargetTriple()),
sti.getCPU(), Options)) {
MCAsmParserExtension::Initialize(parser);
parser.addAliasForDirective(".asciiz", ".asciz");
parser.addAliasForDirective(".hword", ".2byte");
parser.addAliasForDirective(".word", ".4byte");
parser.addAliasForDirective(".dword", ".8byte");
// Initialize the set of available features.
setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
// Remember the initial assembler options. The user cannot modify these.
AssemblerOptions.push_back(
llvm::make_unique<MipsAssemblerOptions>(getSTI().getFeatureBits()));
// Create an assembler options environment for the user to modify.
AssemblerOptions.push_back(
llvm::make_unique<MipsAssemblerOptions>(getSTI().getFeatureBits()));
getTargetStreamer().updateABIInfo(*this);
if (!isABI_O32() && !useOddSPReg() != 0)
report_fatal_error("-mno-odd-spreg requires the O32 ABI");
CurrentFn = nullptr;
IsPicEnabled = getContext().getObjectFileInfo()->isPositionIndependent();
IsCpRestoreSet = false;
CpRestoreOffset = -1;
GPReg = ABI.GetGlobalPtr();
const Triple &TheTriple = sti.getTargetTriple();
IsLittleEndian = TheTriple.isLittleEndian();
if (getSTI().getCPU() == "mips64r6" && inMicroMipsMode())
report_fatal_error("microMIPS64R6 is not supported", false);
if (!isABI_O32() && inMicroMipsMode())
report_fatal_error("microMIPS64 is not supported", false);
}
/// True if all of $fcc0 - $fcc7 exist for the current ISA.
bool hasEightFccRegisters() const { return hasMips4() || hasMips32(); }
bool isGP64bit() const {
return getSTI().getFeatureBits()[Mips::FeatureGP64Bit];
}
bool isFP64bit() const {
return getSTI().getFeatureBits()[Mips::FeatureFP64Bit];
}
const MipsABIInfo &getABI() const { return ABI; }
bool isABI_N32() const { return ABI.IsN32(); }
bool isABI_N64() const { return ABI.IsN64(); }
bool isABI_O32() const { return ABI.IsO32(); }
bool isABI_FPXX() const {
return getSTI().getFeatureBits()[Mips::FeatureFPXX];
}
bool useOddSPReg() const {
return !(getSTI().getFeatureBits()[Mips::FeatureNoOddSPReg]);
}
bool inMicroMipsMode() const {
return getSTI().getFeatureBits()[Mips::FeatureMicroMips];
}
bool hasMips1() const {
return getSTI().getFeatureBits()[Mips::FeatureMips1];
}
bool hasMips2() const {
return getSTI().getFeatureBits()[Mips::FeatureMips2];
}
bool hasMips3() const {
return getSTI().getFeatureBits()[Mips::FeatureMips3];
}
bool hasMips4() const {
return getSTI().getFeatureBits()[Mips::FeatureMips4];
}
bool hasMips5() const {
return getSTI().getFeatureBits()[Mips::FeatureMips5];
}
bool hasMips32() const {
return getSTI().getFeatureBits()[Mips::FeatureMips32];
}
bool hasMips64() const {
return getSTI().getFeatureBits()[Mips::FeatureMips64];
}
bool hasMips32r2() const {
return getSTI().getFeatureBits()[Mips::FeatureMips32r2];
}
bool hasMips64r2() const {
return getSTI().getFeatureBits()[Mips::FeatureMips64r2];
}
bool hasMips32r3() const {
return (getSTI().getFeatureBits()[Mips::FeatureMips32r3]);
}
bool hasMips64r3() const {
return (getSTI().getFeatureBits()[Mips::FeatureMips64r3]);
}
bool hasMips32r5() const {
return (getSTI().getFeatureBits()[Mips::FeatureMips32r5]);
}
bool hasMips64r5() const {
return (getSTI().getFeatureBits()[Mips::FeatureMips64r5]);
}
bool hasMips32r6() const {
return getSTI().getFeatureBits()[Mips::FeatureMips32r6];
}
bool hasMips64r6() const {
return getSTI().getFeatureBits()[Mips::FeatureMips64r6];
}
bool hasDSP() const {
return getSTI().getFeatureBits()[Mips::FeatureDSP];
}
bool hasDSPR2() const {
return getSTI().getFeatureBits()[Mips::FeatureDSPR2];
}
bool hasDSPR3() const {
return getSTI().getFeatureBits()[Mips::FeatureDSPR3];
}
bool hasMSA() const {
return getSTI().getFeatureBits()[Mips::FeatureMSA];
}
bool hasCnMips() const {
return (getSTI().getFeatureBits()[Mips::FeatureCnMips]);
}
bool hasCnMipsP() const {
return (getSTI().getFeatureBits()[Mips::FeatureCnMipsP]);
}
bool inPicMode() {
return IsPicEnabled;
}
bool inMips16Mode() const {
return getSTI().getFeatureBits()[Mips::FeatureMips16];
}
bool useTraps() const {
return getSTI().getFeatureBits()[Mips::FeatureUseTCCInDIV];
}
bool useSoftFloat() const {
return getSTI().getFeatureBits()[Mips::FeatureSoftFloat];
}
bool hasMT() const {
return getSTI().getFeatureBits()[Mips::FeatureMT];
}
bool hasCRC() const {
return getSTI().getFeatureBits()[Mips::FeatureCRC];
}
bool hasVirt() const {
return getSTI().getFeatureBits()[Mips::FeatureVirt];
}
bool hasGINV() const {
return getSTI().getFeatureBits()[Mips::FeatureGINV];
}
/// Warn if RegIndex is the same as the current AT.
void warnIfRegIndexIsAT(unsigned RegIndex, SMLoc Loc);
void warnIfNoMacro(SMLoc Loc);
bool isLittle() const { return IsLittleEndian; }
const MCExpr *createTargetUnaryExpr(const MCExpr *E,
AsmToken::TokenKind OperatorToken,
MCContext &Ctx) override {
switch(OperatorToken) {
default:
llvm_unreachable("Unknown token");
return nullptr;
case AsmToken::PercentCall16:
return MipsMCExpr::create(MipsMCExpr::MEK_GOT_CALL, E, Ctx);
case AsmToken::PercentCall_Hi:
return MipsMCExpr::create(MipsMCExpr::MEK_CALL_HI16, E, Ctx);
case AsmToken::PercentCall_Lo:
return MipsMCExpr::create(MipsMCExpr::MEK_CALL_LO16, E, Ctx);
case AsmToken::PercentDtprel_Hi:
return MipsMCExpr::create(MipsMCExpr::MEK_DTPREL_HI, E, Ctx);
case AsmToken::PercentDtprel_Lo:
return MipsMCExpr::create(MipsMCExpr::MEK_DTPREL_LO, E, Ctx);
case AsmToken::PercentGot:
return MipsMCExpr::create(MipsMCExpr::MEK_GOT, E, Ctx);
case AsmToken::PercentGot_Disp:
return MipsMCExpr::create(MipsMCExpr::MEK_GOT_DISP, E, Ctx);
case AsmToken::PercentGot_Hi:
return MipsMCExpr::create(MipsMCExpr::MEK_GOT_HI16, E, Ctx);
case AsmToken::PercentGot_Lo:
return MipsMCExpr::create(MipsMCExpr::MEK_GOT_LO16, E, Ctx);
case AsmToken::PercentGot_Ofst:
return MipsMCExpr::create(MipsMCExpr::MEK_GOT_OFST, E, Ctx);
case AsmToken::PercentGot_Page:
return MipsMCExpr::create(MipsMCExpr::MEK_GOT_PAGE, E, Ctx);
case AsmToken::PercentGottprel:
return MipsMCExpr::create(MipsMCExpr::MEK_GOTTPREL, E, Ctx);
case AsmToken::PercentGp_Rel:
return MipsMCExpr::create(MipsMCExpr::MEK_GPREL, E, Ctx);
case AsmToken::PercentHi:
return MipsMCExpr::create(MipsMCExpr::MEK_HI, E, Ctx);
case AsmToken::PercentHigher:
return MipsMCExpr::create(MipsMCExpr::MEK_HIGHER, E, Ctx);
case AsmToken::PercentHighest:
return MipsMCExpr::create(MipsMCExpr::MEK_HIGHEST, E, Ctx);
case AsmToken::PercentLo:
return MipsMCExpr::create(MipsMCExpr::MEK_LO, E, Ctx);
case AsmToken::PercentNeg:
return MipsMCExpr::create(MipsMCExpr::MEK_NEG, E, Ctx);
case AsmToken::PercentPcrel_Hi:
return MipsMCExpr::create(MipsMCExpr::MEK_PCREL_HI16, E, Ctx);
case AsmToken::PercentPcrel_Lo:
return MipsMCExpr::create(MipsMCExpr::MEK_PCREL_LO16, E, Ctx);
case AsmToken::PercentTlsgd:
return MipsMCExpr::create(MipsMCExpr::MEK_TLSGD, E, Ctx);
case AsmToken::PercentTlsldm:
return MipsMCExpr::create(MipsMCExpr::MEK_TLSLDM, E, Ctx);
case AsmToken::PercentTprel_Hi:
return MipsMCExpr::create(MipsMCExpr::MEK_TPREL_HI, E, Ctx);
case AsmToken::PercentTprel_Lo:
return MipsMCExpr::create(MipsMCExpr::MEK_TPREL_LO, E, Ctx);
}
}
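// Illustrative example: for an operand written as "%hi(foo)" the lexer
// hands us AsmToken::PercentHi, and the switch above wraps the
// sub-expression in a MipsMCExpr of kind MEK_HI, which later stages of
// the assembler typically lower to the R_MIPS_HI16 relocation family.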
};
/// MipsOperand - Instances of this class represent a parsed operand of a
/// Mips machine instruction.
class MipsOperand : public MCParsedAsmOperand {
public:
/// Broad categories of register classes
/// The exact class is finalized by the render method.
enum RegKind {
RegKind_GPR = 1, /// GPR32 and GPR64 (depending on isGP64bit())
RegKind_FGR = 2, /// FGR32, FGR64, AFGR64 (depending on context and
/// isFP64bit())
RegKind_FCC = 4, /// FCC
RegKind_MSA128 = 8, /// MSA128[BHWD] (makes no difference which)
RegKind_MSACtrl = 16, /// MSA control registers
RegKind_COP2 = 32, /// COP2
RegKind_ACC = 64, /// HI32DSP, LO32DSP, and ACC64DSP (depending on
/// context).
RegKind_CCR = 128, /// CCR
RegKind_HWRegs = 256, /// HWRegs
RegKind_COP3 = 512, /// COP3
RegKind_COP0 = 1024, /// COP0
/// Potentially any (e.g. $1)
RegKind_Numeric = RegKind_GPR | RegKind_FGR | RegKind_FCC | RegKind_MSA128 |
RegKind_MSACtrl | RegKind_COP2 | RegKind_ACC |
RegKind_CCR | RegKind_HWRegs | RegKind_COP3 | RegKind_COP0
};
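// Illustrative example: a bare numeric register such as "$1" parses as
// RegKind_Numeric (it could still be a GPR, FGR, MSA register, etc.),
// whereas a named register such as "$f0" is created directly as
// RegKind_FGR; the ambiguity is only resolved once an instruction
// pattern matches and a render method picks the concrete class.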
private:
enum KindTy {
k_Immediate, /// An immediate (possibly involving symbol references)
k_Memory, /// Base + Offset Memory Address
k_RegisterIndex, /// A register index in one or more RegKind.
k_Token, /// A simple token
k_RegList, /// A physical register list
} Kind;
public:
MipsOperand(KindTy K, MipsAsmParser &Parser)
: MCParsedAsmOperand(), Kind(K), AsmParser(Parser) {}
~MipsOperand() override {
switch (Kind) {
case k_Memory:
delete Mem.Base;
break;
case k_RegList:
delete RegList.List;
break;
case k_Immediate:
case k_RegisterIndex:
case k_Token:
break;
}
}
private:
/// For diagnostics, and checking the assembler temporary
MipsAsmParser &AsmParser;
struct Token {
const char *Data;
unsigned Length;
};
struct RegIdxOp {
unsigned Index; /// Index into the register class
RegKind Kind; /// Bitfield of the kinds it could possibly be
struct Token Tok; /// The input token this operand originated from.
const MCRegisterInfo *RegInfo;
};
struct ImmOp {
const MCExpr *Val;
};
struct MemOp {
MipsOperand *Base;
const MCExpr *Off;
};
struct RegListOp {
SmallVector<unsigned, 10> *List;
};
union {
struct Token Tok;
struct RegIdxOp RegIdx;
struct ImmOp Imm;
struct MemOp Mem;
struct RegListOp RegList;
};
SMLoc StartLoc, EndLoc;
/// Internal constructor for register kinds
static std::unique_ptr<MipsOperand> CreateReg(unsigned Index, StringRef Str,
RegKind RegKind,
const MCRegisterInfo *RegInfo,
SMLoc S, SMLoc E,
MipsAsmParser &Parser) {
auto Op = llvm::make_unique<MipsOperand>(k_RegisterIndex, Parser);
Op->RegIdx.Index = Index;
Op->RegIdx.RegInfo = RegInfo;
Op->RegIdx.Kind = RegKind;
Op->RegIdx.Tok.Data = Str.data();
Op->RegIdx.Tok.Length = Str.size();
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
public:
/// Coerce the register to GPR32 and return the real register for the current
/// target.
unsigned getGPR32Reg() const {
assert(isRegIdx() && (RegIdx.Kind & RegKind_GPR) && "Invalid access!");
AsmParser.warnIfRegIndexIsAT(RegIdx.Index, StartLoc);
unsigned ClassID = Mips::GPR32RegClassID;
return RegIdx.RegInfo->getRegClass(ClassID).getRegister(RegIdx.Index);
}
/// Coerce the register to GPRMM16 and return the real register for the
/// current target.
unsigned getGPRMM16Reg() const {
assert(isRegIdx() && (RegIdx.Kind & RegKind_GPR) && "Invalid access!");
unsigned ClassID = Mips::GPR32RegClassID;
return RegIdx.RegInfo->getRegClass(ClassID).getRegister(RegIdx.Index);
}
/// Coerce the register to GPR64 and return the real register for the current
/// target.
unsigned getGPR64Reg() const {
assert(isRegIdx() && (RegIdx.Kind & RegKind_GPR) && "Invalid access!");
unsigned ClassID = Mips::GPR64RegClassID;
return RegIdx.RegInfo->getRegClass(ClassID).getRegister(RegIdx.Index);
}
private:
/// Coerce the register to AFGR64 and return the real register for the current
/// target.
unsigned getAFGR64Reg() const {
assert(isRegIdx() && (RegIdx.Kind & RegKind_FGR) && "Invalid access!");
if (RegIdx.Index % 2 != 0)
AsmParser.Warning(StartLoc, "Float register should be even.");
return RegIdx.RegInfo->getRegClass(Mips::AFGR64RegClassID)
.getRegister(RegIdx.Index / 2);
}
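// Illustrative example: in AFGR64 mode the FPU registers are paired, so
// an operand written as "$f4" (Index == 4) maps to AFGR64 register
// index 2, while an odd index such as "$f5" triggers the warning above.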
/// Coerce the register to FGR64 and return the real register for the current
/// target.
unsigned getFGR64Reg() const {
assert(isRegIdx() && (RegIdx.Kind & RegKind_FGR) && "Invalid access!");
return RegIdx.RegInfo->getRegClass(Mips::FGR64RegClassID)
.getRegister(RegIdx.Index);
}
/// Coerce the register to FGR32 and return the real register for the current
/// target.
unsigned getFGR32Reg() const {
assert(isRegIdx() && (RegIdx.Kind & RegKind_FGR) && "Invalid access!");
return RegIdx.RegInfo->getRegClass(Mips::FGR32RegClassID)
.getRegister(RegIdx.Index);
}
/// Coerce the register to FCC and return the real register for the current
/// target.
unsigned getFCCReg() const {
assert(isRegIdx() && (RegIdx.Kind & RegKind_FCC) && "Invalid access!");
return RegIdx.RegInfo->getRegClass(Mips::FCCRegClassID)
.getRegister(RegIdx.Index);
}
/// Coerce the register to MSA128 and return the real register for the current
/// target.
unsigned getMSA128Reg() const {
assert(isRegIdx() && (RegIdx.Kind & RegKind_MSA128) && "Invalid access!");
// It doesn't matter which of the MSA128[BHWD] classes we use; they are
// all identical.
unsigned ClassID = Mips::MSA128BRegClassID;
return RegIdx.RegInfo->getRegClass(ClassID).getRegister(RegIdx.Index);
}
/// Coerce the register to MSACtrl and return the real register for the
/// current target.
unsigned getMSACtrlReg() const {
assert(isRegIdx() && (RegIdx.Kind & RegKind_MSACtrl) && "Invalid access!");
unsigned ClassID = Mips::MSACtrlRegClassID;
return RegIdx.RegInfo->getRegClass(ClassID).getRegister(RegIdx.Index);
}
/// Coerce the register to COP0 and return the real register for the
/// current target.
unsigned getCOP0Reg() const {
assert(isRegIdx() && (RegIdx.Kind & RegKind_COP0) && "Invalid access!");
unsigned ClassID = Mips::COP0RegClassID;
return RegIdx.RegInfo->getRegClass(ClassID).getRegister(RegIdx.Index);
}
/// Coerce the register to COP2 and return the real register for the
/// current target.
unsigned getCOP2Reg() const {
assert(isRegIdx() && (RegIdx.Kind & RegKind_COP2) && "Invalid access!");
unsigned ClassID = Mips::COP2RegClassID;
return RegIdx.RegInfo->getRegClass(ClassID).getRegister(RegIdx.Index);
}
/// Coerce the register to COP3 and return the real register for the
/// current target.
unsigned getCOP3Reg() const {
assert(isRegIdx() && (RegIdx.Kind & RegKind_COP3) && "Invalid access!");
unsigned ClassID = Mips::COP3RegClassID;
return RegIdx.RegInfo->getRegClass(ClassID).getRegister(RegIdx.Index);
}
/// Coerce the register to ACC64DSP and return the real register for the
/// current target.
unsigned getACC64DSPReg() const {
assert(isRegIdx() && (RegIdx.Kind & RegKind_ACC) && "Invalid access!");
unsigned ClassID = Mips::ACC64DSPRegClassID;
return RegIdx.RegInfo->getRegClass(ClassID).getRegister(RegIdx.Index);
}
/// Coerce the register to HI32DSP and return the real register for the
/// current target.
unsigned getHI32DSPReg() const {
assert(isRegIdx() && (RegIdx.Kind & RegKind_ACC) && "Invalid access!");
unsigned ClassID = Mips::HI32DSPRegClassID;
return RegIdx.RegInfo->getRegClass(ClassID).getRegister(RegIdx.Index);
}
/// Coerce the register to LO32DSP and return the real register for the
/// current target.
unsigned getLO32DSPReg() const {
assert(isRegIdx() && (RegIdx.Kind & RegKind_ACC) && "Invalid access!");
unsigned ClassID = Mips::LO32DSPRegClassID;
return RegIdx.RegInfo->getRegClass(ClassID).getRegister(RegIdx.Index);
}
/// Coerce the register to CCR and return the real register for the
/// current target.
unsigned getCCRReg() const {
assert(isRegIdx() && (RegIdx.Kind & RegKind_CCR) && "Invalid access!");
unsigned ClassID = Mips::CCRRegClassID;
return RegIdx.RegInfo->getRegClass(ClassID).getRegister(RegIdx.Index);
}
/// Coerce the register to HWRegs and return the real register for the
/// current target.
unsigned getHWRegsReg() const {
assert(isRegIdx() && (RegIdx.Kind & RegKind_HWRegs) && "Invalid access!");
unsigned ClassID = Mips::HWRegsRegClassID;
return RegIdx.RegInfo->getRegClass(ClassID).getRegister(RegIdx.Index);
}
public:
void addExpr(MCInst &Inst, const MCExpr *Expr) const {
// Add as immediate when possible. Null MCExpr = 0.
if (!Expr)
Inst.addOperand(MCOperand::createImm(0));
else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
Inst.addOperand(MCOperand::createImm(CE->getValue()));
else
Inst.addOperand(MCOperand::createExpr(Expr));
}
void addRegOperands(MCInst &Inst, unsigned N) const {
llvm_unreachable("Use a custom parser instead");
}
/// Render the operand to an MCInst as a GPR32
/// Asserts if the wrong number of operands are requested, or the operand
/// is not a k_RegisterIndex compatible with RegKind_GPR
void addGPR32ZeroAsmRegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(getGPR32Reg()));
}
void addGPR32NonZeroAsmRegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(getGPR32Reg()));
}
void addGPR32AsmRegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(getGPR32Reg()));
}
void addGPRMM16AsmRegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(getGPRMM16Reg()));
}
void addGPRMM16AsmRegZeroOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(getGPRMM16Reg()));
}
void addGPRMM16AsmRegMovePOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(getGPRMM16Reg()));
}
void addGPRMM16AsmRegMovePPairFirstOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(getGPRMM16Reg()));
}
void addGPRMM16AsmRegMovePPairSecondOperands(MCInst &Inst,
unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(getGPRMM16Reg()));
}
/// Render the operand to an MCInst as a GPR64
/// Asserts if the wrong number of operands are requested, or the operand
/// is not a k_RegisterIndex compatible with RegKind_GPR
void addGPR64AsmRegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(getGPR64Reg()));
}
void addAFGR64AsmRegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(getAFGR64Reg()));
}
void addStrictlyAFGR64AsmRegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(getAFGR64Reg()));
}
void addStrictlyFGR64AsmRegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(getFGR64Reg()));
}
void addFGR64AsmRegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(getFGR64Reg()));
}
void addFGR32AsmRegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(getFGR32Reg()));
// FIXME: We ought to do this for -integrated-as without -via-file-asm too.
// FIXME: This should propagate failure up to parseStatement.
if (!AsmParser.useOddSPReg() && RegIdx.Index & 1)
AsmParser.getParser().printError(
StartLoc, "-mno-odd-spreg prohibits the use of odd FPU "
"registers");
}
void addStrictlyFGR32AsmRegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(getFGR32Reg()));
// FIXME: We ought to do this for -integrated-as without -via-file-asm too.
if (!AsmParser.useOddSPReg() && RegIdx.Index & 1)
AsmParser.Error(StartLoc, "-mno-odd-spreg prohibits the use of odd FPU "
"registers");
}
void addFCCAsmRegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(getFCCReg()));
}
void addMSA128AsmRegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(getMSA128Reg()));
}
void addMSACtrlAsmRegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(getMSACtrlReg()));
}
void addCOP0AsmRegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(getCOP0Reg()));
}
void addCOP2AsmRegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(getCOP2Reg()));
}
void addCOP3AsmRegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(getCOP3Reg()));
}
void addACC64DSPAsmRegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(getACC64DSPReg()));
}
void addHI32DSPAsmRegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(getHI32DSPReg()));
}
void addLO32DSPAsmRegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(getLO32DSPReg()));
}
void addCCRAsmRegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(getCCRReg()));
}
void addHWRegsAsmRegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(getHWRegsReg()));
}
template <unsigned Bits, int Offset = 0, int AdjustOffset = 0>
void addConstantUImmOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
uint64_t Imm = getConstantImm() - Offset;
Imm &= (1ULL << Bits) - 1;
Imm += Offset;
Imm += AdjustOffset;
Inst.addOperand(MCOperand::createImm(Imm));
}
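// Worked example (illustrative): with Bits == 2 and Offset == 1 (a
// "uimm2+1" style operand), a parsed immediate of 4 becomes
// ((4 - 1) & 0b11) + 1 == 4, i.e. the raw value is reduced modulo the
// encodable range relative to Offset before AdjustOffset is applied.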
template <unsigned Bits>
void addSImmOperands(MCInst &Inst, unsigned N) const {
if (isImm() && !isConstantImm()) {
addExpr(Inst, getImm());
return;
}
addConstantSImmOperands<Bits, 0, 0>(Inst, N);
}
template <unsigned Bits>
void addUImmOperands(MCInst &Inst, unsigned N) const {
if (isImm() && !isConstantImm()) {
addExpr(Inst, getImm());
return;
}
addConstantUImmOperands<Bits, 0, 0>(Inst, N);
}
template <unsigned Bits, int Offset = 0, int AdjustOffset = 0>
void addConstantSImmOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
int64_t Imm = getConstantImm() - Offset;
Imm = SignExtend64<Bits>(Imm);
Imm += Offset;
Imm += AdjustOffset;
Inst.addOperand(MCOperand::createImm(Imm));
}
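// Worked example (illustrative): with Bits == 16 and Offset == 0, a
// parsed immediate of 0xFFFF becomes SignExtend64<16>(0xFFFF) == -1
// before Offset and AdjustOffset are added back, matching the
// hardware's sign-extended 16-bit immediate fields.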
void addImmOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const MCExpr *Expr = getImm();
addExpr(Inst, Expr);
}
void addMemOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(AsmParser.getABI().ArePtrs64bit()
? getMemBase()->getGPR64Reg()
: getMemBase()->getGPR32Reg()));
const MCExpr *Expr = getMemOff();
addExpr(Inst, Expr);
}
void addMicroMipsMemOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(getMemBase()->getGPRMM16Reg()));
const MCExpr *Expr = getMemOff();
addExpr(Inst, Expr);
}
void addRegListOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
for (auto RegNo : getRegList())
Inst.addOperand(MCOperand::createReg(RegNo));
}
bool isReg() const override {
// As a special case until we sort out the definition of div/divu, accept
// $0/$zero here so that MCK_ZERO works correctly.
return isGPRAsmReg() && RegIdx.Index == 0;
}
bool isRegIdx() const { return Kind == k_RegisterIndex; }
bool isImm() const override { return Kind == k_Immediate; }
bool isConstantImm() const {
int64_t Res;
return isImm() && getImm()->evaluateAsAbsolute(Res);
}
bool isConstantImmz() const {
return isConstantImm() && getConstantImm() == 0;
}
template <unsigned Bits, int Offset = 0> bool isConstantUImm() const {
return isConstantImm() && isUInt<Bits>(getConstantImm() - Offset);
}
template <unsigned Bits> bool isSImm() const {
return isConstantImm() ? isInt<Bits>(getConstantImm()) : isImm();
}
template <unsigned Bits> bool isUImm() const {
return isConstantImm() ? isUInt<Bits>(getConstantImm()) : isImm();
}
template <unsigned Bits> bool isAnyImm() const {
return isConstantImm() ? (isInt<Bits>(getConstantImm()) ||
isUInt<Bits>(getConstantImm()))
: isImm();
}
template <unsigned Bits, int Offset = 0> bool isConstantSImm() const {
return isConstantImm() && isInt<Bits>(getConstantImm() - Offset);
}
template <unsigned Bottom, unsigned Top> bool isConstantUImmRange() const {
return isConstantImm() && getConstantImm() >= Bottom &&
getConstantImm() <= Top;
}
bool isToken() const override {
// Note: It's not possible to pretend that other operand kinds are tokens.
// The matcher emitter checks tokens first.
return Kind == k_Token;
}
bool isMem() const override { return Kind == k_Memory; }
bool isConstantMemOff() const {
return isMem() && isa<MCConstantExpr>(getMemOff());
}
// Allow relocation operators.
// FIXME: This predicate and others need to look through binary expressions
// and determine whether a Value is a constant or not.
template <unsigned Bits, unsigned ShiftAmount = 0>
bool isMemWithSimmOffset() const {
if (!isMem())
return false;
if (!getMemBase()->isGPRAsmReg())
return false;
if (isa<MCTargetExpr>(getMemOff()) ||
(isConstantMemOff() &&
isShiftedInt<Bits, ShiftAmount>(getConstantMemOff())))
return true;
MCValue Res;
bool IsReloc = getMemOff()->evaluateAsRelocatable(Res, nullptr, nullptr);
return IsReloc && isShiftedInt<Bits, ShiftAmount>(Res.getConstant());
}
bool isMemWithPtrSizeOffset() const {
if (!isMem())
return false;
if (!getMemBase()->isGPRAsmReg())
return false;
const unsigned PtrBits = AsmParser.getABI().ArePtrs64bit() ? 64 : 32;
if (isa<MCTargetExpr>(getMemOff()) ||
(isConstantMemOff() && isIntN(PtrBits, getConstantMemOff())))
return true;
MCValue Res;
bool IsReloc = getMemOff()->evaluateAsRelocatable(Res, nullptr, nullptr);
return IsReloc && isIntN(PtrBits, Res.getConstant());
}
bool isMemWithGRPMM16Base() const {
return isMem() && getMemBase()->isMM16AsmReg();
}
template <unsigned Bits> bool isMemWithUimmOffsetSP() const {
return isMem() && isConstantMemOff() && isUInt<Bits>(getConstantMemOff())
&& getMemBase()->isRegIdx() && (getMemBase()->getGPR32Reg() == Mips::SP);
}
template <unsigned Bits> bool isMemWithUimmWordAlignedOffsetSP() const {
return isMem() && isConstantMemOff() && isUInt<Bits>(getConstantMemOff())
&& (getConstantMemOff() % 4 == 0) && getMemBase()->isRegIdx()
&& (getMemBase()->getGPR32Reg() == Mips::SP);
}
template <unsigned Bits> bool isMemWithSimmWordAlignedOffsetGP() const {
return isMem() && isConstantMemOff() && isInt<Bits>(getConstantMemOff())
&& (getConstantMemOff() % 4 == 0) && getMemBase()->isRegIdx()
&& (getMemBase()->getGPR32Reg() == Mips::GP);
}
template <unsigned Bits, unsigned ShiftLeftAmount>
bool isScaledUImm() const {
return isConstantImm() &&
isShiftedUInt<Bits, ShiftLeftAmount>(getConstantImm());
}
template <unsigned Bits, unsigned ShiftLeftAmount>
bool isScaledSImm() const {
if (isConstantImm() &&
isShiftedInt<Bits, ShiftLeftAmount>(getConstantImm()))
return true;
// Operand can also be a symbol or symbol plus
// offset in case of relocations.
if (Kind != k_Immediate)
return false;
MCValue Res;
bool Success = getImm()->evaluateAsRelocatable(Res, nullptr, nullptr);
return Success && isShiftedInt<Bits, ShiftLeftAmount>(Res.getConstant());
}
bool isRegList16() const {
if (!isRegList())
return false;
int Size = RegList.List->size();
if (Size < 2 || Size > 5)
return false;
unsigned R0 = RegList.List->front();
unsigned R1 = RegList.List->back();
if (!((R0 == Mips::S0 && R1 == Mips::RA) ||
(R0 == Mips::S0_64 && R1 == Mips::RA_64)))
return false;
int PrevReg = *RegList.List->begin();
for (int i = 1; i < Size - 1; i++) {
int Reg = (*(RegList.List))[i];
if (Reg != PrevReg + 1)
return false;
PrevReg = Reg;
}
return true;
}
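// Illustrative example: the 16-bit SAVE/RESTORE-style register lists
// accepted here look like "$16-$18, $31" (i.e. $s0-$s2, $ra): the list
// must start at $s0, end at $ra, and be consecutive in between.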
bool isInvNum() const { return Kind == k_Immediate; }
bool isLSAImm() const {
if (!isConstantImm())
return false;
int64_t Val = getConstantImm();
return 1 <= Val && Val <= 4;
}
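// Illustrative note: LSA/DLSA accept a shift amount of 1..4 in assembly
// (e.g. "lsa $rd, $rs, $rt, 2"), hence the 1 <= Val <= 4 check rather
// than the usual zero-based immediate range.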
bool isRegList() const { return Kind == k_RegList; }
StringRef getToken() const {
assert(Kind == k_Token && "Invalid access!");
return StringRef(Tok.Data, Tok.Length);
}
unsigned getReg() const override {
// As a special case until we sort out the definition of div/divu, accept
// $0/$zero here so that MCK_ZERO works correctly.
if (Kind == k_RegisterIndex && RegIdx.Index == 0 &&
RegIdx.Kind & RegKind_GPR)
return getGPR32Reg(); // FIXME: GPR64 too
llvm_unreachable("Invalid access!");
return 0;
}
const MCExpr *getImm() const {
assert((Kind == k_Immediate) && "Invalid access!");
return Imm.Val;
}
int64_t getConstantImm() const {
const MCExpr *Val = getImm();
int64_t Value = 0;
(void)Val->evaluateAsAbsolute(Value);
return Value;
}
MipsOperand *getMemBase() const {
assert((Kind == k_Memory) && "Invalid access!");
return Mem.Base;
}
const MCExpr *getMemOff() const {
assert((Kind == k_Memory) && "Invalid access!");
return Mem.Off;
}
int64_t getConstantMemOff() const {
return static_cast<const MCConstantExpr *>(getMemOff())->getValue();
}
const SmallVectorImpl<unsigned> &getRegList() const {
assert((Kind == k_RegList) && "Invalid access!");
return *(RegList.List);
}
static std::unique_ptr<MipsOperand> CreateToken(StringRef Str, SMLoc S,
MipsAsmParser &Parser) {
auto Op = llvm::make_unique<MipsOperand>(k_Token, Parser);
Op->Tok.Data = Str.data();
Op->Tok.Length = Str.size();
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
/// Create a numeric register (e.g. $1). The exact register remains
/// unresolved until an instruction successfully matches
static std::unique_ptr<MipsOperand>
createNumericReg(unsigned Index, StringRef Str, const MCRegisterInfo *RegInfo,
SMLoc S, SMLoc E, MipsAsmParser &Parser) {
LLVM_DEBUG(dbgs() << "createNumericReg(" << Index << ", ...)\n");
return CreateReg(Index, Str, RegKind_Numeric, RegInfo, S, E, Parser);
}
/// Create a register that is definitely a GPR.
/// This is typically only used for named registers such as $gp.
static std::unique_ptr<MipsOperand>
createGPRReg(unsigned Index, StringRef Str, const MCRegisterInfo *RegInfo,
SMLoc S, SMLoc E, MipsAsmParser &Parser) {
return CreateReg(Index, Str, RegKind_GPR, RegInfo, S, E, Parser);
}
/// Create a register that is definitely a FGR.
/// This is typically only used for named registers such as $f0.
static std::unique_ptr<MipsOperand>
createFGRReg(unsigned Index, StringRef Str, const MCRegisterInfo *RegInfo,
SMLoc S, SMLoc E, MipsAsmParser &Parser) {
return CreateReg(Index, Str, RegKind_FGR, RegInfo, S, E, Parser);
}
/// Create a register that is definitely a HWReg.
/// This is typically only used for named registers such as $hwr_cpunum.
static std::unique_ptr<MipsOperand>
createHWRegsReg(unsigned Index, StringRef Str, const MCRegisterInfo *RegInfo,
SMLoc S, SMLoc E, MipsAsmParser &Parser) {
return CreateReg(Index, Str, RegKind_HWRegs, RegInfo, S, E, Parser);
}
/// Create a register that is definitely an FCC.
/// This is typically only used for named registers such as $fcc0.
static std::unique_ptr<MipsOperand>
createFCCReg(unsigned Index, StringRef Str, const MCRegisterInfo *RegInfo,
SMLoc S, SMLoc E, MipsAsmParser &Parser) {
return CreateReg(Index, Str, RegKind_FCC, RegInfo, S, E, Parser);
}
/// Create a register that is definitely an ACC.
/// This is typically only used for named registers such as $ac0.
static std::unique_ptr<MipsOperand>
createACCReg(unsigned Index, StringRef Str, const MCRegisterInfo *RegInfo,
SMLoc S, SMLoc E, MipsAsmParser &Parser) {
return CreateReg(Index, Str, RegKind_ACC, RegInfo, S, E, Parser);
}
/// Create a register that is definitely an MSA128.
/// This is typically only used for named registers such as $w0.
static std::unique_ptr<MipsOperand>
createMSA128Reg(unsigned Index, StringRef Str, const MCRegisterInfo *RegInfo,
SMLoc S, SMLoc E, MipsAsmParser &Parser) {
return CreateReg(Index, Str, RegKind_MSA128, RegInfo, S, E, Parser);
}
/// Create a register that is definitely an MSACtrl.
/// This is typically only used for named registers such as $msaaccess.
static std::unique_ptr<MipsOperand>
createMSACtrlReg(unsigned Index, StringRef Str, const MCRegisterInfo *RegInfo,
SMLoc S, SMLoc E, MipsAsmParser &Parser) {
return CreateReg(Index, Str, RegKind_MSACtrl, RegInfo, S, E, Parser);
}
static std::unique_ptr<MipsOperand>
CreateImm(const MCExpr *Val, SMLoc S, SMLoc E, MipsAsmParser &Parser) {
auto Op = llvm::make_unique<MipsOperand>(k_Immediate, Parser);
Op->Imm.Val = Val;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
static std::unique_ptr<MipsOperand>
CreateMem(std::unique_ptr<MipsOperand> Base, const MCExpr *Off, SMLoc S,
SMLoc E, MipsAsmParser &Parser) {
auto Op = llvm::make_unique<MipsOperand>(k_Memory, Parser);
Op->Mem.Base = Base.release();
Op->Mem.Off = Off;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
static std::unique_ptr<MipsOperand>
CreateRegList(SmallVectorImpl<unsigned> &Regs, SMLoc StartLoc, SMLoc EndLoc,
MipsAsmParser &Parser) {
assert(Regs.size() > 0 && "Empty list not allowed");
auto Op = llvm::make_unique<MipsOperand>(k_RegList, Parser);
Op->RegList.List = new SmallVector<unsigned, 10>(Regs.begin(), Regs.end());
Op->StartLoc = StartLoc;
Op->EndLoc = EndLoc;
return Op;
}
bool isGPRZeroAsmReg() const {
return isRegIdx() && RegIdx.Kind & RegKind_GPR && RegIdx.Index == 0;
}
bool isGPRNonZeroAsmReg() const {
return isRegIdx() && RegIdx.Kind & RegKind_GPR && RegIdx.Index > 0 &&
RegIdx.Index <= 31;
}
bool isGPRAsmReg() const {
return isRegIdx() && RegIdx.Kind & RegKind_GPR && RegIdx.Index <= 31;
}
bool isMM16AsmReg() const {
if (!(isRegIdx() && RegIdx.Kind))
return false;
return ((RegIdx.Index >= 2 && RegIdx.Index <= 7)
|| RegIdx.Index == 16 || RegIdx.Index == 17);
}
bool isMM16AsmRegZero() const {
if (!(isRegIdx() && RegIdx.Kind))
return false;
return (RegIdx.Index == 0 ||
(RegIdx.Index >= 2 && RegIdx.Index <= 7) ||
RegIdx.Index == 17);
}
bool isMM16AsmRegMoveP() const {
if (!(isRegIdx() && RegIdx.Kind))
return false;
return (RegIdx.Index == 0 || (RegIdx.Index >= 2 && RegIdx.Index <= 3) ||
(RegIdx.Index >= 16 && RegIdx.Index <= 20));
}
bool isMM16AsmRegMovePPairFirst() const {
if (!(isRegIdx() && RegIdx.Kind))
return false;
return RegIdx.Index >= 4 && RegIdx.Index <= 6;
}
bool isMM16AsmRegMovePPairSecond() const {
if (!(isRegIdx() && RegIdx.Kind))
return false;
return (RegIdx.Index == 21 || RegIdx.Index == 22 ||
(RegIdx.Index >= 5 && RegIdx.Index <= 7));
}
bool isFGRAsmReg() const {
// AFGR64 is $0-$15 but we handle this in getAFGR64()
return isRegIdx() && RegIdx.Kind & RegKind_FGR && RegIdx.Index <= 31;
}
bool isStrictlyFGRAsmReg() const {
// AFGR64 is $0-$15 but we handle this in getAFGR64()
return isRegIdx() && RegIdx.Kind == RegKind_FGR && RegIdx.Index <= 31;
}
bool isHWRegsAsmReg() const {
return isRegIdx() && RegIdx.Kind & RegKind_HWRegs && RegIdx.Index <= 31;
}
bool isCCRAsmReg() const {
return isRegIdx() && RegIdx.Kind & RegKind_CCR && RegIdx.Index <= 31;
}
bool isFCCAsmReg() const {
if (!(isRegIdx() && RegIdx.Kind & RegKind_FCC))
return false;
return RegIdx.Index <= 7;
}
bool isACCAsmReg() const {
return isRegIdx() && RegIdx.Kind & RegKind_ACC && RegIdx.Index <= 3;
}
bool isCOP0AsmReg() const {
return isRegIdx() && RegIdx.Kind & RegKind_COP0 && RegIdx.Index <= 31;
}
bool isCOP2AsmReg() const {
return isRegIdx() && RegIdx.Kind & RegKind_COP2 && RegIdx.Index <= 31;
}
bool isCOP3AsmReg() const {
return isRegIdx() && RegIdx.Kind & RegKind_COP3 && RegIdx.Index <= 31;
}
bool isMSA128AsmReg() const {
return isRegIdx() && RegIdx.Kind & RegKind_MSA128 && RegIdx.Index <= 31;
}
bool isMSACtrlAsmReg() const {
return isRegIdx() && RegIdx.Kind & RegKind_MSACtrl && RegIdx.Index <= 7;
}
/// getStartLoc - Get the location of the first token of this operand.
SMLoc getStartLoc() const override { return StartLoc; }
/// getEndLoc - Get the location of the last token of this operand.
SMLoc getEndLoc() const override { return EndLoc; }
void print(raw_ostream &OS) const override {
switch (Kind) {
case k_Immediate:
OS << "Imm<";
OS << *Imm.Val;
OS << ">";
break;
case k_Memory:
OS << "Mem<";
Mem.Base->print(OS);
OS << ", ";
OS << *Mem.Off;
OS << ">";
break;
case k_RegisterIndex:
OS << "RegIdx<" << RegIdx.Index << ":" << RegIdx.Kind << ", "
<< StringRef(RegIdx.Tok.Data, RegIdx.Tok.Length) << ">";
break;
case k_Token:
OS << getToken();
break;
case k_RegList:
OS << "RegList< ";
for (auto Reg : (*RegList.List))
OS << Reg << " ";
OS << ">";
break;
}
}
bool isValidForTie(const MipsOperand &Other) const {
if (Kind != Other.Kind)
return false;
switch (Kind) {
default:
llvm_unreachable("Unexpected kind");
return false;
case k_RegisterIndex: {
StringRef Token(RegIdx.Tok.Data, RegIdx.Tok.Length);
StringRef OtherToken(Other.RegIdx.Tok.Data, Other.RegIdx.Tok.Length);
return Token == OtherToken;
}
}
}
}; // class MipsOperand
} // end anonymous namespace
namespace llvm {
extern const MCInstrDesc MipsInsts[];
} // end namespace llvm
static const MCInstrDesc &getInstDesc(unsigned Opcode) {
return MipsInsts[Opcode];
}
static bool hasShortDelaySlot(MCInst &Inst) {
switch (Inst.getOpcode()) {
case Mips::BEQ_MM:
case Mips::BNE_MM:
case Mips::BLTZ_MM:
case Mips::BGEZ_MM:
case Mips::BLEZ_MM:
case Mips::BGTZ_MM:
case Mips::JRC16_MM:
case Mips::JALS_MM:
case Mips::JALRS_MM:
case Mips::JALRS16_MM:
case Mips::BGEZALS_MM:
case Mips::BLTZALS_MM:
return true;
case Mips::J_MM:
return !Inst.getOperand(0).isReg();
default:
return false;
}
}
static const MCSymbol *getSingleMCSymbol(const MCExpr *Expr) {
if (const MCSymbolRefExpr *SRExpr = dyn_cast<MCSymbolRefExpr>(Expr)) {
return &SRExpr->getSymbol();
}
if (const MCBinaryExpr *BExpr = dyn_cast<MCBinaryExpr>(Expr)) {
const MCSymbol *LHSSym = getSingleMCSymbol(BExpr->getLHS());
const MCSymbol *RHSSym = getSingleMCSymbol(BExpr->getRHS());
if (LHSSym)
return LHSSym;
if (RHSSym)
return RHSSym;
return nullptr;
}
if (const MCUnaryExpr *UExpr = dyn_cast<MCUnaryExpr>(Expr))
return getSingleMCSymbol(UExpr->getSubExpr());
return nullptr;
}
static unsigned countMCSymbolRefExpr(const MCExpr *Expr) {
if (isa<MCSymbolRefExpr>(Expr))
return 1;
if (const MCBinaryExpr *BExpr = dyn_cast<MCBinaryExpr>(Expr))
return countMCSymbolRefExpr(BExpr->getLHS()) +
countMCSymbolRefExpr(BExpr->getRHS());
if (const MCUnaryExpr *UExpr = dyn_cast<MCUnaryExpr>(Expr))
return countMCSymbolRefExpr(UExpr->getSubExpr());
return 0;
}
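// Illustrative example: countMCSymbolRefExpr returns 1 for "foo + 4"
// but 2 for "foo - bar"; the jal expansion below uses this count to
// reject PIC call targets that mention more than one symbol.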
bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
MCStreamer &Out,
const MCSubtargetInfo *STI) {
MipsTargetStreamer &TOut = getTargetStreamer();
const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
bool ExpandedJalSym = false;
Inst.setLoc(IDLoc);
if (MCID.isBranch() || MCID.isCall()) {
const unsigned Opcode = Inst.getOpcode();
MCOperand Offset;
switch (Opcode) {
default:
break;
case Mips::BBIT0:
case Mips::BBIT032:
case Mips::BBIT1:
case Mips::BBIT132:
assert(hasCnMips() && "instruction only valid for octeon cpus");
LLVM_FALLTHROUGH;
case Mips::BEQ:
case Mips::BNE:
case Mips::BEQ_MM:
case Mips::BNE_MM:
assert(MCID.getNumOperands() == 3 && "unexpected number of operands");
Offset = Inst.getOperand(2);
if (!Offset.isImm())
break; // We'll deal with this situation later on when applying fixups.
if (!isIntN(inMicroMipsMode() ? 17 : 18, Offset.getImm()))
return Error(IDLoc, "branch target out of range");
if (OffsetToAlignment(Offset.getImm(),
1LL << (inMicroMipsMode() ? 1 : 2)))
return Error(IDLoc, "branch to misaligned address");
break;
case Mips::BGEZ:
case Mips::BGTZ:
case Mips::BLEZ:
case Mips::BLTZ:
case Mips::BGEZAL:
case Mips::BLTZAL:
case Mips::BC1F:
case Mips::BC1T:
case Mips::BGEZ_MM:
case Mips::BGTZ_MM:
case Mips::BLEZ_MM:
case Mips::BLTZ_MM:
case Mips::BGEZAL_MM:
case Mips::BLTZAL_MM:
case Mips::BC1F_MM:
case Mips::BC1T_MM:
case Mips::BC1EQZC_MMR6:
case Mips::BC1NEZC_MMR6:
case Mips::BC2EQZC_MMR6:
case Mips::BC2NEZC_MMR6:
assert(MCID.getNumOperands() == 2 && "unexpected number of operands");
Offset = Inst.getOperand(1);
if (!Offset.isImm())
break; // We'll deal with this situation later on when applying fixups.
if (!isIntN(inMicroMipsMode() ? 17 : 18, Offset.getImm()))
return Error(IDLoc, "branch target out of range");
if (OffsetToAlignment(Offset.getImm(),
1LL << (inMicroMipsMode() ? 1 : 2)))
return Error(IDLoc, "branch to misaligned address");
break;
case Mips::BGEC: case Mips::BGEC_MMR6:
case Mips::BLTC: case Mips::BLTC_MMR6:
case Mips::BGEUC: case Mips::BGEUC_MMR6:
case Mips::BLTUC: case Mips::BLTUC_MMR6:
case Mips::BEQC: case Mips::BEQC_MMR6:
case Mips::BNEC: case Mips::BNEC_MMR6:
assert(MCID.getNumOperands() == 3 && "unexpected number of operands");
Offset = Inst.getOperand(2);
if (!Offset.isImm())
break; // We'll deal with this situation later on when applying fixups.
if (!isIntN(18, Offset.getImm()))
return Error(IDLoc, "branch target out of range");
if (OffsetToAlignment(Offset.getImm(), 1LL << 2))
return Error(IDLoc, "branch to misaligned address");
break;
case Mips::BLEZC: case Mips::BLEZC_MMR6:
case Mips::BGEZC: case Mips::BGEZC_MMR6:
case Mips::BGTZC: case Mips::BGTZC_MMR6:
case Mips::BLTZC: case Mips::BLTZC_MMR6:
assert(MCID.getNumOperands() == 2 && "unexpected number of operands");
Offset = Inst.getOperand(1);
if (!Offset.isImm())
break; // We'll deal with this situation later on when applying fixups.
if (!isIntN(18, Offset.getImm()))
return Error(IDLoc, "branch target out of range");
if (OffsetToAlignment(Offset.getImm(), 1LL << 2))
return Error(IDLoc, "branch to misaligned address");
break;
case Mips::BEQZC: case Mips::BEQZC_MMR6:
case Mips::BNEZC: case Mips::BNEZC_MMR6:
assert(MCID.getNumOperands() == 2 && "unexpected number of operands");
Offset = Inst.getOperand(1);
if (!Offset.isImm())
break; // We'll deal with this situation later on when applying fixups.
if (!isIntN(23, Offset.getImm()))
return Error(IDLoc, "branch target out of range");
if (OffsetToAlignment(Offset.getImm(), 1LL << 2))
return Error(IDLoc, "branch to misaligned address");
break;
case Mips::BEQZ16_MM:
case Mips::BEQZC16_MMR6:
case Mips::BNEZ16_MM:
case Mips::BNEZC16_MMR6:
assert(MCID.getNumOperands() == 2 && "unexpected number of operands");
Offset = Inst.getOperand(1);
if (!Offset.isImm())
break; // We'll deal with this situation later on when applying fixups.
if (!isInt<8>(Offset.getImm()))
return Error(IDLoc, "branch target out of range");
if (OffsetToAlignment(Offset.getImm(), 2LL))
return Error(IDLoc, "branch to misaligned address");
break;
}
}
// SSNOP is deprecated on MIPS32r6/MIPS64r6.
// We still accept it, but it is treated as a normal NOP.
if (hasMips32r6() && Inst.getOpcode() == Mips::SSNOP) {
std::string ISA = hasMips64r6() ? "MIPS64r6" : "MIPS32r6";
Warning(IDLoc, "ssnop is deprecated for " + ISA + " and is equivalent to a "
"nop instruction");
}
if (hasCnMips()) {
const unsigned Opcode = Inst.getOpcode();
MCOperand Opnd;
int Imm;
switch (Opcode) {
default:
break;
case Mips::BBIT0:
case Mips::BBIT032:
case Mips::BBIT1:
case Mips::BBIT132:
assert(MCID.getNumOperands() == 3 && "unexpected number of operands");
// The offset is handled above
Opnd = Inst.getOperand(1);
if (!Opnd.isImm())
return Error(IDLoc, "expected immediate operand kind");
Imm = Opnd.getImm();
if (Imm < 0 || Imm > (Opcode == Mips::BBIT0 ||
Opcode == Mips::BBIT1 ? 63 : 31))
return Error(IDLoc, "immediate operand value out of range");
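// Illustrative example: "bbit0 $a0, 35, target" arrives here as BBIT0
// with Imm == 35; since BBIT0 itself can only test bits 0-31, the
// rewrite below retargets it to BBIT032 with immediate 35 - 32 == 3.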
if (Imm > 31) {
Inst.setOpcode(Opcode == Mips::BBIT0 ? Mips::BBIT032
: Mips::BBIT132);
Inst.getOperand(1).setImm(Imm - 32);
}
break;
case Mips::SEQi:
case Mips::SNEi:
assert(MCID.getNumOperands() == 3 && "unexpected number of operands");
Opnd = Inst.getOperand(2);
if (!Opnd.isImm())
return Error(IDLoc, "expected immediate operand kind");
Imm = Opnd.getImm();
if (!isInt<10>(Imm))
return Error(IDLoc, "immediate operand value out of range");
break;
}
}
// Warn on division by zero. We're checking here as all instructions get
// processed here, not just the macros that need expansion.
//
// The MIPS backend models most of the division instructions and macros as
// three operand instructions. The pre-R6 divide instructions however have
// two operands and explicitly define HI/LO as part of the instruction,
// not in the operands.
unsigned FirstOp = 1;
unsigned SecondOp = 2;
switch (Inst.getOpcode()) {
default:
break;
case Mips::SDivIMacro:
case Mips::UDivIMacro:
case Mips::DSDivIMacro:
case Mips::DUDivIMacro:
if (Inst.getOperand(2).getImm() == 0) {
if (Inst.getOperand(1).getReg() == Mips::ZERO ||
Inst.getOperand(1).getReg() == Mips::ZERO_64)
Warning(IDLoc, "dividing zero by zero");
else
Warning(IDLoc, "division by zero");
}
break;
case Mips::DSDIV:
case Mips::SDIV:
case Mips::UDIV:
case Mips::DUDIV:
case Mips::UDIV_MM:
case Mips::SDIV_MM:
FirstOp = 0;
SecondOp = 1;
LLVM_FALLTHROUGH;
case Mips::SDivMacro:
case Mips::DSDivMacro:
case Mips::UDivMacro:
case Mips::DUDivMacro:
case Mips::DIV:
case Mips::DIVU:
case Mips::DDIV:
case Mips::DDIVU:
case Mips::DIVU_MMR6:
case Mips::DIV_MMR6:
if (Inst.getOperand(SecondOp).getReg() == Mips::ZERO ||
Inst.getOperand(SecondOp).getReg() == Mips::ZERO_64) {
if (Inst.getOperand(FirstOp).getReg() == Mips::ZERO ||
Inst.getOperand(FirstOp).getReg() == Mips::ZERO_64)
Warning(IDLoc, "dividing zero by zero");
else
Warning(IDLoc, "division by zero");
}
break;
}
// For PIC code convert unconditional jump to unconditional branch.
if ((Inst.getOpcode() == Mips::J || Inst.getOpcode() == Mips::J_MM) &&
inPicMode()) {
MCInst BInst;
BInst.setOpcode(inMicroMipsMode() ? Mips::BEQ_MM : Mips::BEQ);
BInst.addOperand(MCOperand::createReg(Mips::ZERO));
BInst.addOperand(MCOperand::createReg(Mips::ZERO));
BInst.addOperand(Inst.getOperand(0));
Inst = BInst;
}
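// Illustrative example: in PIC mode "j label" is re-emitted above as
// "beq $zero, $zero, label", which is PC-relative and therefore avoids
// the absolute-address relocation an unconditional jump would need.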
// This expansion is not in a function called by tryExpandInstruction()
// because the pseudo-instruction doesn't have a distinct opcode.
if ((Inst.getOpcode() == Mips::JAL || Inst.getOpcode() == Mips::JAL_MM) &&
inPicMode()) {
warnIfNoMacro(IDLoc);
const MCExpr *JalExpr = Inst.getOperand(0).getExpr();
// We can do this expansion if there's only 1 symbol in the argument
// expression.
if (countMCSymbolRefExpr(JalExpr) > 1)
return Error(IDLoc, "jal doesn't support multiple symbols in PIC mode");
// FIXME: This is checking that the expression can be handled by the later
// stages of the assembler. We ought to leave it to those later stages.
const MCSymbol *JalSym = getSingleMCSymbol(JalExpr);
// FIXME: Add support for label+offset operands (currently causes an error).
// FIXME: Add support for forward-declared local symbols.
// FIXME: Add expansion for when the LargeGOT option is enabled.
if (JalSym->isInSection() || JalSym->isTemporary() ||
(JalSym->isELF() &&
cast<MCSymbolELF>(JalSym)->getBinding() == ELF::STB_LOCAL)) {
if (isABI_O32()) {
// If it's a local symbol and the O32 ABI is being used, we expand to:
// lw $25, 0($gp)
// R_(MICRO)MIPS_GOT16 label
// addiu $25, $25, 0
// R_(MICRO)MIPS_LO16 label
// jalr $25
const MCExpr *Got16RelocExpr =
MipsMCExpr::create(MipsMCExpr::MEK_GOT, JalExpr, getContext());
const MCExpr *Lo16RelocExpr =
MipsMCExpr::create(MipsMCExpr::MEK_LO, JalExpr, getContext());
TOut.emitRRX(Mips::LW, Mips::T9, GPReg,
MCOperand::createExpr(Got16RelocExpr), IDLoc, STI);
TOut.emitRRX(Mips::ADDiu, Mips::T9, Mips::T9,
MCOperand::createExpr(Lo16RelocExpr), IDLoc, STI);
} else if (isABI_N32() || isABI_N64()) {
// If it's a local symbol and the N32/N64 ABIs are being used,
// we expand to:
// lw/ld $25, 0($gp)
// R_(MICRO)MIPS_GOT_DISP label
// jalr $25
const MCExpr *GotDispRelocExpr =
MipsMCExpr::create(MipsMCExpr::MEK_GOT_DISP, JalExpr, getContext());
TOut.emitRRX(ABI.ArePtrs64bit() ? Mips::LD : Mips::LW, Mips::T9,
GPReg, MCOperand::createExpr(GotDispRelocExpr), IDLoc,
STI);
}
} else {
// If it's an external/weak symbol, we expand to:
// lw/ld $25, 0($gp)
// R_(MICRO)MIPS_CALL16 label
// jalr $25
const MCExpr *Call16RelocExpr =
MipsMCExpr::create(MipsMCExpr::MEK_GOT_CALL, JalExpr, getContext());
TOut.emitRRX(ABI.ArePtrs64bit() ? Mips::LD : Mips::LW, Mips::T9, GPReg,
MCOperand::createExpr(Call16RelocExpr), IDLoc, STI);
}
MCInst JalrInst;
if (IsCpRestoreSet && inMicroMipsMode())
JalrInst.setOpcode(Mips::JALRS_MM);
else
JalrInst.setOpcode(inMicroMipsMode() ? Mips::JALR_MM : Mips::JALR);
JalrInst.addOperand(MCOperand::createReg(Mips::RA));
JalrInst.addOperand(MCOperand::createReg(Mips::T9));
if (EmitJalrReloc) {
// As an optimization hint for the linker, before the JALR we add:
// .reloc tmplabel, R_{MICRO}MIPS_JALR, symbol
// tmplabel:
MCSymbol *TmpLabel = getContext().createTempSymbol();
const MCExpr *TmpExpr = MCSymbolRefExpr::create(TmpLabel, getContext());
const MCExpr *RelocJalrExpr =
MCSymbolRefExpr::create(JalSym, MCSymbolRefExpr::VK_None,
getContext(), IDLoc);
TOut.getStreamer().EmitRelocDirective(*TmpExpr,
inMicroMipsMode() ? "R_MICROMIPS_JALR" : "R_MIPS_JALR",
RelocJalrExpr, IDLoc, *STI);
TOut.getStreamer().EmitLabel(TmpLabel);
}
Inst = JalrInst;
ExpandedJalSym = true;
}
bool IsPCRelativeLoad = (MCID.TSFlags & MipsII::IsPCRelativeLoad) != 0;
if ((MCID.mayLoad() || MCID.mayStore()) && !IsPCRelativeLoad) {
// Check the offset of memory operand, if it is a symbol
// reference or immediate we may have to expand instructions.
for (unsigned i = 0; i < MCID.getNumOperands(); i++) {
const MCOperandInfo &OpInfo = MCID.OpInfo[i];
if ((OpInfo.OperandType == MCOI::OPERAND_MEMORY) ||
(OpInfo.OperandType == MCOI::OPERAND_UNKNOWN)) {
MCOperand &Op = Inst.getOperand(i);
if (Op.isImm()) {
int64_t MemOffset = Op.getImm();
if (MemOffset < -32768 || MemOffset > 32767) {
// Offset can't exceed 16bit value.
expandMemInst(Inst, IDLoc, Out, STI, MCID.mayLoad());
return getParser().hasPendingError();
}
} else if (Op.isExpr()) {
const MCExpr *Expr = Op.getExpr();
if (Expr->getKind() == MCExpr::SymbolRef) {
const MCSymbolRefExpr *SR =
static_cast<const MCSymbolRefExpr *>(Expr);
if (SR->getKind() == MCSymbolRefExpr::VK_None) {
// Expand symbol.
expandMemInst(Inst, IDLoc, Out, STI, MCID.mayLoad());
return getParser().hasPendingError();
}
} else if (!isEvaluated(Expr)) {
expandMemInst(Inst, IDLoc, Out, STI, MCID.mayLoad());
return getParser().hasPendingError();
}
}
}
} // for
} // if load/store
if (inMicroMipsMode()) {
if (MCID.mayLoad() && Inst.getOpcode() != Mips::LWP_MM) {
// Try to create 16-bit GP relative load instruction.
for (unsigned i = 0; i < MCID.getNumOperands(); i++) {
const MCOperandInfo &OpInfo = MCID.OpInfo[i];
if ((OpInfo.OperandType == MCOI::OPERAND_MEMORY) ||
(OpInfo.OperandType == MCOI::OPERAND_UNKNOWN)) {
MCOperand &Op = Inst.getOperand(i);
if (Op.isImm()) {
int MemOffset = Op.getImm();
MCOperand &DstReg = Inst.getOperand(0);
MCOperand &BaseReg = Inst.getOperand(1);
if (isInt<9>(MemOffset) && (MemOffset % 4 == 0) &&
getContext().getRegisterInfo()->getRegClass(
Mips::GPRMM16RegClassID).contains(DstReg.getReg()) &&
(BaseReg.getReg() == Mips::GP ||
BaseReg.getReg() == Mips::GP_64)) {
TOut.emitRRI(Mips::LWGP_MM, DstReg.getReg(), Mips::GP, MemOffset,
IDLoc, STI);
return false;
}
}
}
} // for
} // if load
// TODO: Handle this with the AsmOperandClass.PredicateMethod.
MCOperand Opnd;
int Imm;
switch (Inst.getOpcode()) {
default:
break;
case Mips::ADDIUSP_MM:
Opnd = Inst.getOperand(0);
if (!Opnd.isImm())
return Error(IDLoc, "expected immediate operand kind");
Imm = Opnd.getImm();
if (Imm < -1032 || Imm > 1028 || (Imm < 8 && Imm > -12) ||
Imm % 4 != 0)
return Error(IDLoc, "immediate operand value out of range");
break;
case Mips::SLL16_MM:
case Mips::SRL16_MM:
Opnd = Inst.getOperand(2);
if (!Opnd.isImm())
return Error(IDLoc, "expected immediate operand kind");
Imm = Opnd.getImm();
if (Imm < 1 || Imm > 8)
return Error(IDLoc, "immediate operand value out of range");
break;
case Mips::LI16_MM:
Opnd = Inst.getOperand(1);
if (!Opnd.isImm())
return Error(IDLoc, "expected immediate operand kind");
Imm = Opnd.getImm();
if (Imm < -1 || Imm > 126)
return Error(IDLoc, "immediate operand value out of range");
break;
case Mips::ADDIUR2_MM:
Opnd = Inst.getOperand(2);
if (!Opnd.isImm())
return Error(IDLoc, "expected immediate operand kind");
Imm = Opnd.getImm();
if (!(Imm == 1 || Imm == -1 ||
((Imm % 4 == 0) && Imm < 28 && Imm > 0)))
return Error(IDLoc, "immediate operand value out of range");
break;
case Mips::ANDI16_MM:
Opnd = Inst.getOperand(2);
if (!Opnd.isImm())
return Error(IDLoc, "expected immediate operand kind");
Imm = Opnd.getImm();
if (!(Imm == 128 || (Imm >= 1 && Imm <= 4) || Imm == 7 || Imm == 8 ||
Imm == 15 || Imm == 16 || Imm == 31 || Imm == 32 || Imm == 63 ||
Imm == 64 || Imm == 255 || Imm == 32768 || Imm == 65535))
return Error(IDLoc, "immediate operand value out of range");
break;
case Mips::LBU16_MM:
Opnd = Inst.getOperand(2);
if (!Opnd.isImm())
return Error(IDLoc, "expected immediate operand kind");
Imm = Opnd.getImm();
if (Imm < -1 || Imm > 14)
return Error(IDLoc, "immediate operand value out of range");
break;
case Mips::SB16_MM:
case Mips::SB16_MMR6:
Opnd = Inst.getOperand(2);
if (!Opnd.isImm())
return Error(IDLoc, "expected immediate operand kind");
Imm = Opnd.getImm();
if (Imm < 0 || Imm > 15)
return Error(IDLoc, "immediate operand value out of range");
break;
case Mips::LHU16_MM:
case Mips::SH16_MM:
case Mips::SH16_MMR6:
Opnd = Inst.getOperand(2);
if (!Opnd.isImm())
return Error(IDLoc, "expected immediate operand kind");
Imm = Opnd.getImm();
if (Imm < 0 || Imm > 30 || (Imm % 2 != 0))
return Error(IDLoc, "immediate operand value out of range");
break;
case Mips::LW16_MM:
case Mips::SW16_MM:
case Mips::SW16_MMR6:
Opnd = Inst.getOperand(2);
if (!Opnd.isImm())
return Error(IDLoc, "expected immediate operand kind");
Imm = Opnd.getImm();
if (Imm < 0 || Imm > 60 || (Imm % 4 != 0))
return Error(IDLoc, "immediate operand value out of range");
break;
case Mips::ADDIUPC_MM:
Opnd = Inst.getOperand(1);
if (!Opnd.isImm())
return Error(IDLoc, "expected immediate operand kind");
Imm = Opnd.getImm();
if ((Imm % 4 != 0) || !isInt<25>(Imm))
return Error(IDLoc, "immediate operand value out of range");
break;
case Mips::LWP_MM:
case Mips::SWP_MM:
if (Inst.getOperand(0).getReg() == Mips::RA)
return Error(IDLoc, "invalid operand for instruction");
break;
case Mips::MOVEP_MM:
case Mips::MOVEP_MMR6: {
unsigned R0 = Inst.getOperand(0).getReg();
unsigned R1 = Inst.getOperand(1).getReg();
bool RegPair = ((R0 == Mips::A1 && R1 == Mips::A2) ||
(R0 == Mips::A1 && R1 == Mips::A3) ||
(R0 == Mips::A2 && R1 == Mips::A3) ||
(R0 == Mips::A0 && R1 == Mips::S5) ||
(R0 == Mips::A0 && R1 == Mips::S6) ||
(R0 == Mips::A0 && R1 == Mips::A1) ||
(R0 == Mips::A0 && R1 == Mips::A2) ||
(R0 == Mips::A0 && R1 == Mips::A3));
if (!RegPair)
return Error(IDLoc, "invalid operand for instruction");
break;
}
}
}
bool FillDelaySlot =
MCID.hasDelaySlot() && AssemblerOptions.back()->isReorder();
if (FillDelaySlot)
TOut.emitDirectiveSetNoReorder();
MacroExpanderResultTy ExpandResult =
tryExpandInstruction(Inst, IDLoc, Out, STI);
switch (ExpandResult) {
case MER_NotAMacro:
Out.EmitInstruction(Inst, *STI);
break;
case MER_Success:
break;
case MER_Fail:
return true;
}
// We know we emitted an instruction on the MER_NotAMacro or MER_Success path.
// If we're in microMIPS mode then we must also set EF_MIPS_MICROMIPS.
if (inMicroMipsMode()) {
TOut.setUsesMicroMips();
TOut.updateABIInfo(*this);
}
// If this instruction has a delay slot and .set reorder is active,
// emit a NOP after it.
if (FillDelaySlot) {
TOut.emitEmptyDelaySlot(hasShortDelaySlot(Inst), IDLoc, STI);
TOut.emitDirectiveSetReorder();
}
if ((Inst.getOpcode() == Mips::JalOneReg ||
Inst.getOpcode() == Mips::JalTwoReg || ExpandedJalSym) &&
isPicAndNotNxxAbi()) {
if (IsCpRestoreSet) {
// We need a NOP between the JALR and the LW:
// If .set reorder has been used, we've already emitted a NOP.
// If .set noreorder has been used, we need to emit a NOP at this point.
if (!AssemblerOptions.back()->isReorder())
TOut.emitEmptyDelaySlot(hasShortDelaySlot(Inst), IDLoc,
STI);
// Load the $gp from the stack.
TOut.emitGPRestore(CpRestoreOffset, IDLoc, STI);
} else
Warning(IDLoc, "no .cprestore used in PIC mode");
}
return false;
}
MipsAsmParser::MacroExpanderResultTy
MipsAsmParser::tryExpandInstruction(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI) {
switch (Inst.getOpcode()) {
default:
return MER_NotAMacro;
case Mips::LoadImm32:
return expandLoadImm(Inst, true, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::LoadImm64:
return expandLoadImm(Inst, false, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::LoadAddrImm32:
case Mips::LoadAddrImm64:
assert(Inst.getOperand(0).isReg() && "expected register operand kind");
assert((Inst.getOperand(1).isImm() || Inst.getOperand(1).isExpr()) &&
"expected immediate operand kind");
return expandLoadAddress(Inst.getOperand(0).getReg(), Mips::NoRegister,
Inst.getOperand(1),
Inst.getOpcode() == Mips::LoadAddrImm32, IDLoc,
Out, STI)
? MER_Fail
: MER_Success;
case Mips::LoadAddrReg32:
case Mips::LoadAddrReg64:
assert(Inst.getOperand(0).isReg() && "expected register operand kind");
assert(Inst.getOperand(1).isReg() && "expected register operand kind");
assert((Inst.getOperand(2).isImm() || Inst.getOperand(2).isExpr()) &&
"expected immediate operand kind");
return expandLoadAddress(Inst.getOperand(0).getReg(),
Inst.getOperand(1).getReg(), Inst.getOperand(2),
Inst.getOpcode() == Mips::LoadAddrReg32, IDLoc,
Out, STI)
? MER_Fail
: MER_Success;
case Mips::B_MM_Pseudo:
case Mips::B_MMR6_Pseudo:
return expandUncondBranchMMPseudo(Inst, IDLoc, Out, STI) ? MER_Fail
: MER_Success;
case Mips::SWM_MM:
case Mips::LWM_MM:
return expandLoadStoreMultiple(Inst, IDLoc, Out, STI) ? MER_Fail
: MER_Success;
case Mips::JalOneReg:
case Mips::JalTwoReg:
return expandJalWithRegs(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::BneImm:
case Mips::BeqImm:
case Mips::BEQLImmMacro:
case Mips::BNELImmMacro:
return expandBranchImm(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::BLT:
case Mips::BLE:
case Mips::BGE:
case Mips::BGT:
case Mips::BLTU:
case Mips::BLEU:
case Mips::BGEU:
case Mips::BGTU:
case Mips::BLTL:
case Mips::BLEL:
case Mips::BGEL:
case Mips::BGTL:
case Mips::BLTUL:
case Mips::BLEUL:
case Mips::BGEUL:
case Mips::BGTUL:
case Mips::BLTImmMacro:
case Mips::BLEImmMacro:
case Mips::BGEImmMacro:
case Mips::BGTImmMacro:
case Mips::BLTUImmMacro:
case Mips::BLEUImmMacro:
case Mips::BGEUImmMacro:
case Mips::BGTUImmMacro:
case Mips::BLTLImmMacro:
case Mips::BLELImmMacro:
case Mips::BGELImmMacro:
case Mips::BGTLImmMacro:
case Mips::BLTULImmMacro:
case Mips::BLEULImmMacro:
case Mips::BGEULImmMacro:
case Mips::BGTULImmMacro:
return expandCondBranches(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::SDivMacro:
case Mips::SDivIMacro:
case Mips::SRemMacro:
case Mips::SRemIMacro:
return expandDivRem(Inst, IDLoc, Out, STI, false, true) ? MER_Fail
: MER_Success;
case Mips::DSDivMacro:
case Mips::DSDivIMacro:
case Mips::DSRemMacro:
case Mips::DSRemIMacro:
return expandDivRem(Inst, IDLoc, Out, STI, true, true) ? MER_Fail
: MER_Success;
case Mips::UDivMacro:
case Mips::UDivIMacro:
case Mips::URemMacro:
case Mips::URemIMacro:
return expandDivRem(Inst, IDLoc, Out, STI, false, false) ? MER_Fail
: MER_Success;
case Mips::DUDivMacro:
case Mips::DUDivIMacro:
case Mips::DURemMacro:
case Mips::DURemIMacro:
return expandDivRem(Inst, IDLoc, Out, STI, true, false) ? MER_Fail
: MER_Success;
case Mips::PseudoTRUNC_W_S:
return expandTrunc(Inst, false, false, IDLoc, Out, STI) ? MER_Fail
: MER_Success;
case Mips::PseudoTRUNC_W_D32:
return expandTrunc(Inst, true, false, IDLoc, Out, STI) ? MER_Fail
: MER_Success;
case Mips::PseudoTRUNC_W_D:
return expandTrunc(Inst, true, true, IDLoc, Out, STI) ? MER_Fail
: MER_Success;
case Mips::LoadImmSingleGPR:
- return expandLoadImmReal(Inst, true, true, false, IDLoc, Out, STI)
- ? MER_Fail
- : MER_Success;
+ return expandLoadSingleImmToGPR(Inst, IDLoc, Out, STI) ? MER_Fail
+ : MER_Success;
case Mips::LoadImmSingleFGR:
- return expandLoadImmReal(Inst, true, false, false, IDLoc, Out, STI)
- ? MER_Fail
- : MER_Success;
+ return expandLoadSingleImmToFPR(Inst, IDLoc, Out, STI) ? MER_Fail
+ : MER_Success;
case Mips::LoadImmDoubleGPR:
- return expandLoadImmReal(Inst, false, true, false, IDLoc, Out, STI)
- ? MER_Fail
- : MER_Success;
+ return expandLoadDoubleImmToGPR(Inst, IDLoc, Out, STI) ? MER_Fail
+ : MER_Success;
case Mips::LoadImmDoubleFGR:
- return expandLoadImmReal(Inst, false, false, true, IDLoc, Out, STI)
- ? MER_Fail
- : MER_Success;
+ return expandLoadDoubleImmToFPR(Inst, true, IDLoc, Out, STI) ? MER_Fail
+ : MER_Success;
case Mips::LoadImmDoubleFGR_32:
- return expandLoadImmReal(Inst, false, false, false, IDLoc, Out, STI)
- ? MER_Fail
- : MER_Success;
+ return expandLoadDoubleImmToFPR(Inst, false, IDLoc, Out, STI) ? MER_Fail
+ : MER_Success;
+
case Mips::Ulh:
return expandUlh(Inst, true, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::Ulhu:
return expandUlh(Inst, false, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::Ush:
return expandUsh(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::Ulw:
case Mips::Usw:
return expandUxw(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::NORImm:
case Mips::NORImm64:
return expandAliasImmediate(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::SGE:
case Mips::SGEU:
return expandSge(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::SGEImm:
case Mips::SGEUImm:
case Mips::SGEImm64:
case Mips::SGEUImm64:
return expandSgeImm(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::SGTImm:
case Mips::SGTUImm:
case Mips::SGTImm64:
case Mips::SGTUImm64:
return expandSgtImm(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::SLTImm64:
if (isInt<16>(Inst.getOperand(2).getImm())) {
Inst.setOpcode(Mips::SLTi64);
return MER_NotAMacro;
}
return expandAliasImmediate(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::SLTUImm64:
if (isInt<16>(Inst.getOperand(2).getImm())) {
Inst.setOpcode(Mips::SLTiu64);
return MER_NotAMacro;
}
return expandAliasImmediate(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::ADDi: case Mips::ADDi_MM:
case Mips::ADDiu: case Mips::ADDiu_MM:
case Mips::SLTi: case Mips::SLTi_MM:
case Mips::SLTiu: case Mips::SLTiu_MM:
if ((Inst.getNumOperands() == 3) && Inst.getOperand(0).isReg() &&
Inst.getOperand(1).isReg() && Inst.getOperand(2).isImm()) {
int64_t ImmValue = Inst.getOperand(2).getImm();
if (isInt<16>(ImmValue))
return MER_NotAMacro;
return expandAliasImmediate(Inst, IDLoc, Out, STI) ? MER_Fail
: MER_Success;
}
return MER_NotAMacro;
case Mips::ANDi: case Mips::ANDi_MM: case Mips::ANDi64:
case Mips::ORi: case Mips::ORi_MM: case Mips::ORi64:
case Mips::XORi: case Mips::XORi_MM: case Mips::XORi64:
if ((Inst.getNumOperands() == 3) && Inst.getOperand(0).isReg() &&
Inst.getOperand(1).isReg() && Inst.getOperand(2).isImm()) {
int64_t ImmValue = Inst.getOperand(2).getImm();
if (isUInt<16>(ImmValue))
return MER_NotAMacro;
return expandAliasImmediate(Inst, IDLoc, Out, STI) ? MER_Fail
: MER_Success;
}
return MER_NotAMacro;
case Mips::ROL:
case Mips::ROR:
return expandRotation(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::ROLImm:
case Mips::RORImm:
return expandRotationImm(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::DROL:
case Mips::DROR:
return expandDRotation(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::DROLImm:
case Mips::DRORImm:
return expandDRotationImm(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::ABSMacro:
return expandAbs(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::MULImmMacro:
case Mips::DMULImmMacro:
return expandMulImm(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::MULOMacro:
case Mips::DMULOMacro:
return expandMulO(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::MULOUMacro:
case Mips::DMULOUMacro:
return expandMulOU(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::DMULMacro:
return expandDMULMacro(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::LDMacro:
case Mips::SDMacro:
return expandLoadStoreDMacro(Inst, IDLoc, Out, STI,
Inst.getOpcode() == Mips::LDMacro)
? MER_Fail
: MER_Success;
case Mips::SDC1_M1:
return expandStoreDM1Macro(Inst, IDLoc, Out, STI)
? MER_Fail
: MER_Success;
case Mips::SEQMacro:
return expandSeq(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::SEQIMacro:
return expandSeqI(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::MFTC0: case Mips::MTTC0:
case Mips::MFTGPR: case Mips::MTTGPR:
case Mips::MFTLO: case Mips::MTTLO:
case Mips::MFTHI: case Mips::MTTHI:
case Mips::MFTACX: case Mips::MTTACX:
case Mips::MFTDSP: case Mips::MTTDSP:
case Mips::MFTC1: case Mips::MTTC1:
case Mips::MFTHC1: case Mips::MTTHC1:
case Mips::CFTC1: case Mips::CTTC1:
return expandMXTRAlias(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::SaaAddr:
case Mips::SaadAddr:
return expandSaaAddr(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
}
}
bool MipsAsmParser::expandJalWithRegs(MCInst &Inst, SMLoc IDLoc,
MCStreamer &Out,
const MCSubtargetInfo *STI) {
MipsTargetStreamer &TOut = getTargetStreamer();
// Create a JALR instruction which is going to replace the pseudo-JAL.
MCInst JalrInst;
JalrInst.setLoc(IDLoc);
const MCOperand FirstRegOp = Inst.getOperand(0);
const unsigned Opcode = Inst.getOpcode();
if (Opcode == Mips::JalOneReg) {
// jal $rs => jalr $rs
if (IsCpRestoreSet && inMicroMipsMode()) {
JalrInst.setOpcode(Mips::JALRS16_MM);
JalrInst.addOperand(FirstRegOp);
} else if (inMicroMipsMode()) {
JalrInst.setOpcode(hasMips32r6() ? Mips::JALRC16_MMR6 : Mips::JALR16_MM);
JalrInst.addOperand(FirstRegOp);
} else {
JalrInst.setOpcode(Mips::JALR);
JalrInst.addOperand(MCOperand::createReg(Mips::RA));
JalrInst.addOperand(FirstRegOp);
}
} else if (Opcode == Mips::JalTwoReg) {
// jal $rd, $rs => jalr $rd, $rs
if (IsCpRestoreSet && inMicroMipsMode())
JalrInst.setOpcode(Mips::JALRS_MM);
else
JalrInst.setOpcode(inMicroMipsMode() ? Mips::JALR_MM : Mips::JALR);
JalrInst.addOperand(FirstRegOp);
const MCOperand SecondRegOp = Inst.getOperand(1);
JalrInst.addOperand(SecondRegOp);
}
Out.EmitInstruction(JalrInst, *STI);
// If .set reorder is active and branch instruction has a delay slot,
// emit a NOP after it.
const MCInstrDesc &MCID = getInstDesc(JalrInst.getOpcode());
if (MCID.hasDelaySlot() && AssemblerOptions.back()->isReorder())
TOut.emitEmptyDelaySlot(hasShortDelaySlot(JalrInst), IDLoc,
STI);
return false;
}
/// Can the value be represented by an unsigned N-bit value and a shift left?
template <unsigned N> static bool isShiftedUIntAtAnyPosition(uint64_t x) {
unsigned BitNum = findFirstSet(x);
return (x == x >> BitNum << BitNum) && isUInt<N>(x >> BitNum);
}
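// For example (illustrative), isShiftedUIntAtAnyPosition<16>(0xabcd0000) is
// true, since 0xabcd0000 == 0xabcd << 16 and 0xabcd fits in 16 bits.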
/// Load (or add) an immediate into a register.
///
/// @param ImmValue The immediate to load.
/// @param DstReg The register that will hold the immediate.
/// @param SrcReg A register to add to the immediate or Mips::NoRegister
/// for a simple initialization.
/// @param Is32BitImm Is ImmValue 32-bit or 64-bit?
/// @param IsAddress True if the immediate represents an address. False if it
/// is an integer.
/// @param IDLoc Location of the immediate in the source file.
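///
/// For example (an illustrative sketch, register names assumed), loading the
/// 32-bit immediate 0x12345678 expands to:
///   lui $rd, 0x1234
///   ori $rd, $rd, 0x5678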
bool MipsAsmParser::loadImmediate(int64_t ImmValue, unsigned DstReg,
unsigned SrcReg, bool Is32BitImm,
bool IsAddress, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI) {
MipsTargetStreamer &TOut = getTargetStreamer();
if (!Is32BitImm && !isGP64bit()) {
Error(IDLoc, "instruction requires a 64-bit architecture");
return true;
}
if (Is32BitImm) {
if (isInt<32>(ImmValue) || isUInt<32>(ImmValue)) {
// Sign extend up to 64-bit so that the predicates match the hardware
// behaviour. In particular, isInt<16>(0xffff8000) and similar should be
// true.
ImmValue = SignExtend64<32>(ImmValue);
} else {
Error(IDLoc, "instruction requires a 32-bit immediate");
return true;
}
}
unsigned ZeroReg = IsAddress ? ABI.GetNullPtr() : ABI.GetZeroReg();
unsigned AdduOp = !Is32BitImm ? Mips::DADDu : Mips::ADDu;
bool UseSrcReg = false;
if (SrcReg != Mips::NoRegister)
UseSrcReg = true;
unsigned TmpReg = DstReg;
if (UseSrcReg &&
getContext().getRegisterInfo()->isSuperOrSubRegisterEq(DstReg, SrcReg)) {
// At this point we need AT to perform the expansions and we exit if it is
// not available.
unsigned ATReg = getATReg(IDLoc);
if (!ATReg)
return true;
TmpReg = ATReg;
}
if (isInt<16>(ImmValue)) {
if (!UseSrcReg)
SrcReg = ZeroReg;
// This doesn't quite follow the usual ABI expectations for N32 but matches
// traditional assembler behaviour. N32 would normally use addiu for both
// integers and addresses.
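// For example (illustrative), on N64 a small address constant expands to:
//   dla $4, 0x7fff => daddiu $4, $zero, 0x7fff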
if (IsAddress && !Is32BitImm) {
TOut.emitRRI(Mips::DADDiu, DstReg, SrcReg, ImmValue, IDLoc, STI);
return false;
}
TOut.emitRRI(Mips::ADDiu, DstReg, SrcReg, ImmValue, IDLoc, STI);
return false;
}
if (isUInt<16>(ImmValue)) {
unsigned TmpReg = DstReg;
if (SrcReg == DstReg) {
TmpReg = getATReg(IDLoc);
if (!TmpReg)
return true;
}
TOut.emitRRI(Mips::ORi, TmpReg, ZeroReg, ImmValue, IDLoc, STI);
if (UseSrcReg)
TOut.emitRRR(ABI.GetPtrAdduOp(), DstReg, TmpReg, SrcReg, IDLoc, STI);
return false;
}
if (isInt<32>(ImmValue) || isUInt<32>(ImmValue)) {
warnIfNoMacro(IDLoc);
uint16_t Bits31To16 = (ImmValue >> 16) & 0xffff;
uint16_t Bits15To0 = ImmValue & 0xffff;
if (!Is32BitImm && !isInt<32>(ImmValue)) {
// Traditional behaviour seems to special case this particular value. It's
// not clear why other masks are handled differently.
if (ImmValue == 0xffffffff) {
TOut.emitRI(Mips::LUi, TmpReg, 0xffff, IDLoc, STI);
TOut.emitRRI(Mips::DSRL32, TmpReg, TmpReg, 0, IDLoc, STI);
if (UseSrcReg)
TOut.emitRRR(AdduOp, DstReg, TmpReg, SrcReg, IDLoc, STI);
return false;
}
// Expand to an ORi instead of a LUi to avoid sign-extending into the
// upper 32 bits.
TOut.emitRRI(Mips::ORi, TmpReg, ZeroReg, Bits31To16, IDLoc, STI);
TOut.emitRRI(Mips::DSLL, TmpReg, TmpReg, 16, IDLoc, STI);
if (Bits15To0)
TOut.emitRRI(Mips::ORi, TmpReg, TmpReg, Bits15To0, IDLoc, STI);
if (UseSrcReg)
TOut.emitRRR(AdduOp, DstReg, TmpReg, SrcReg, IDLoc, STI);
return false;
}
TOut.emitRI(Mips::LUi, TmpReg, Bits31To16, IDLoc, STI);
if (Bits15To0)
TOut.emitRRI(Mips::ORi, TmpReg, TmpReg, Bits15To0, IDLoc, STI);
if (UseSrcReg)
TOut.emitRRR(AdduOp, DstReg, TmpReg, SrcReg, IDLoc, STI);
return false;
}
if (isShiftedUIntAtAnyPosition<16>(ImmValue)) {
if (Is32BitImm) {
Error(IDLoc, "instruction requires a 32-bit immediate");
return true;
}
// Traditionally, these immediates are shifted as little as possible and as
// such we align the most significant bit to bit 15 of our temporary.
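// For example (illustrative), dli $4, 0x100000000 expands to:
//   ori $4, $zero, 0x8000
//   dsll $4, $4, 17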
unsigned FirstSet = findFirstSet((uint64_t)ImmValue);
unsigned LastSet = findLastSet((uint64_t)ImmValue);
unsigned ShiftAmount = FirstSet - (15 - (LastSet - FirstSet));
uint16_t Bits = (ImmValue >> ShiftAmount) & 0xffff;
TOut.emitRRI(Mips::ORi, TmpReg, ZeroReg, Bits, IDLoc, STI);
TOut.emitRRI(Mips::DSLL, TmpReg, TmpReg, ShiftAmount, IDLoc, STI);
if (UseSrcReg)
TOut.emitRRR(AdduOp, DstReg, TmpReg, SrcReg, IDLoc, STI);
return false;
}
warnIfNoMacro(IDLoc);
// The remaining case is packed with a sequence of dsll and ori, with zeros
// being omitted and any neighbouring dsll's being coalesced.
// The highest 32 bits are equivalent to a 32-bit immediate load.
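// For example (illustrative), dli $4, 0x0000123400005678 becomes roughly:
//   addiu $4, $zero, 0x1234 # bits 32-63, loaded as a 32-bit immediate
//   dsll32 $4, $4, 0 # two zero chunks coalesced into one shift
//   ori $4, $4, 0x5678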
// Load bits 32-63 of ImmValue into bits 0-31 of the temporary register.
if (loadImmediate(ImmValue >> 32, TmpReg, Mips::NoRegister, true, false,
IDLoc, Out, STI))
return true;
// Shift and accumulate into the register. If a 16-bit chunk is zero, then
// skip it and defer the shift to the next chunk.
unsigned ShiftCarriedForwards = 16;
for (int BitNum = 16; BitNum >= 0; BitNum -= 16) {
uint16_t ImmChunk = (ImmValue >> BitNum) & 0xffff;
if (ImmChunk != 0) {
TOut.emitDSLL(TmpReg, TmpReg, ShiftCarriedForwards, IDLoc, STI);
TOut.emitRRI(Mips::ORi, TmpReg, TmpReg, ImmChunk, IDLoc, STI);
ShiftCarriedForwards = 0;
}
ShiftCarriedForwards += 16;
}
ShiftCarriedForwards -= 16;
// Finish any remaining shifts left by trailing zeros.
if (ShiftCarriedForwards)
TOut.emitDSLL(TmpReg, TmpReg, ShiftCarriedForwards, IDLoc, STI);
if (UseSrcReg)
TOut.emitRRR(AdduOp, DstReg, TmpReg, SrcReg, IDLoc, STI);
return false;
}
bool MipsAsmParser::expandLoadImm(MCInst &Inst, bool Is32BitImm, SMLoc IDLoc,
MCStreamer &Out, const MCSubtargetInfo *STI) {
const MCOperand &ImmOp = Inst.getOperand(1);
assert(ImmOp.isImm() && "expected immediate operand kind");
const MCOperand &DstRegOp = Inst.getOperand(0);
assert(DstRegOp.isReg() && "expected register operand kind");
if (loadImmediate(ImmOp.getImm(), DstRegOp.getReg(), Mips::NoRegister,
Is32BitImm, false, IDLoc, Out, STI))
return true;
return false;
}
bool MipsAsmParser::expandLoadAddress(unsigned DstReg, unsigned BaseReg,
const MCOperand &Offset,
bool Is32BitAddress, SMLoc IDLoc,
MCStreamer &Out,
const MCSubtargetInfo *STI) {
// la can't produce a usable address when addresses are 64-bit.
if (Is32BitAddress && ABI.ArePtrs64bit()) {
// FIXME: Demote this to a warning and continue as if we had 'dla' instead.
// We currently can't do this because we depend on the equality
// operator and N64 can end up with a GPR32/GPR64 mismatch.
Error(IDLoc, "la used to load 64-bit address");
// Continue as if we had 'dla' instead.
Is32BitAddress = false;
return true;
}
// dla requires 64-bit addresses.
if (!Is32BitAddress && !hasMips3()) {
Error(IDLoc, "instruction requires a 64-bit architecture");
return true;
}
if (!Offset.isImm())
return loadAndAddSymbolAddress(Offset.getExpr(), DstReg, BaseReg,
Is32BitAddress, IDLoc, Out, STI);
if (!ABI.ArePtrs64bit()) {
// Continue as if we had 'la' whether we had 'la' or 'dla'.
Is32BitAddress = true;
}
return loadImmediate(Offset.getImm(), DstReg, BaseReg, Is32BitAddress, true,
IDLoc, Out, STI);
}
bool MipsAsmParser::loadAndAddSymbolAddress(const MCExpr *SymExpr,
unsigned DstReg, unsigned SrcReg,
bool Is32BitSym, SMLoc IDLoc,
MCStreamer &Out,
const MCSubtargetInfo *STI) {
// FIXME: These expansions do not respect -mxgot.
MipsTargetStreamer &TOut = getTargetStreamer();
bool UseSrcReg = SrcReg != Mips::NoRegister;
warnIfNoMacro(IDLoc);
if (inPicMode() && ABI.IsO32()) {
MCValue Res;
if (!SymExpr->evaluateAsRelocatable(Res, nullptr, nullptr)) {
Error(IDLoc, "expected relocatable expression");
return true;
}
if (Res.getSymB() != nullptr) {
Error(IDLoc, "expected relocatable expression with only one symbol");
return true;
}
// The case where the result register is $25 is somewhat special. If the
// symbol in the final relocation is external and not modified with a
// constant then we must use R_MIPS_CALL16 instead of R_MIPS_GOT16.
if ((DstReg == Mips::T9 || DstReg == Mips::T9_64) && !UseSrcReg &&
Res.getConstant() == 0 &&
!(Res.getSymA()->getSymbol().isInSection() ||
Res.getSymA()->getSymbol().isTemporary() ||
(Res.getSymA()->getSymbol().isELF() &&
cast<MCSymbolELF>(Res.getSymA()->getSymbol()).getBinding() ==
ELF::STB_LOCAL))) {
const MCExpr *CallExpr =
MipsMCExpr::create(MipsMCExpr::MEK_GOT_CALL, SymExpr, getContext());
TOut.emitRRX(Mips::LW, DstReg, GPReg, MCOperand::createExpr(CallExpr),
IDLoc, STI);
return false;
}
// The remaining cases are:
// External GOT: lw $tmp, %got(symbol+offset)($gp)
// >addiu $tmp, $tmp, %lo(offset)
// >addiu $rd, $tmp, $rs
// Local GOT: lw $tmp, %got(symbol+offset)($gp)
// addiu $tmp, $tmp, %lo(symbol+offset)($gp)
// >addiu $rd, $tmp, $rs
// The addiu's marked with a '>' may be omitted if they are redundant. If
// this happens then the last instruction must use $rd as the result
// register.
const MipsMCExpr *GotExpr =
MipsMCExpr::create(MipsMCExpr::MEK_GOT, SymExpr, getContext());
const MCExpr *LoExpr = nullptr;
if (Res.getSymA()->getSymbol().isInSection() ||
Res.getSymA()->getSymbol().isTemporary())
LoExpr = MipsMCExpr::create(MipsMCExpr::MEK_LO, SymExpr, getContext());
else if (Res.getConstant() != 0) {
// External symbols fully resolve the symbol with just the %got(symbol)
// but we must still account for any offset to the symbol for expressions
// like symbol+8.
LoExpr = MCConstantExpr::create(Res.getConstant(), getContext());
}
unsigned TmpReg = DstReg;
if (UseSrcReg &&
getContext().getRegisterInfo()->isSuperOrSubRegisterEq(DstReg,
SrcReg)) {
// If $rs is the same as $rd, we need to use AT.
// If it is not available we exit.
unsigned ATReg = getATReg(IDLoc);
if (!ATReg)
return true;
TmpReg = ATReg;
}
TOut.emitRRX(Mips::LW, TmpReg, GPReg, MCOperand::createExpr(GotExpr), IDLoc,
STI);
if (LoExpr)
TOut.emitRRX(Mips::ADDiu, TmpReg, TmpReg, MCOperand::createExpr(LoExpr),
IDLoc, STI);
if (UseSrcReg)
TOut.emitRRR(Mips::ADDu, DstReg, TmpReg, SrcReg, IDLoc, STI);
return false;
}
if (inPicMode() && ABI.ArePtrs64bit()) {
MCValue Res;
if (!SymExpr->evaluateAsRelocatable(Res, nullptr, nullptr)) {
Error(IDLoc, "expected relocatable expression");
return true;
}
if (Res.getSymB() != nullptr) {
Error(IDLoc, "expected relocatable expression with only one symbol");
return true;
}
// The case where the result register is $25 is somewhat special. If the
// symbol in the final relocation is external and not modified with a
// constant then we must use R_MIPS_CALL16 instead of R_MIPS_GOT_DISP.
if ((DstReg == Mips::T9 || DstReg == Mips::T9_64) && !UseSrcReg &&
Res.getConstant() == 0 &&
!(Res.getSymA()->getSymbol().isInSection() ||
Res.getSymA()->getSymbol().isTemporary() ||
(Res.getSymA()->getSymbol().isELF() &&
cast<MCSymbolELF>(Res.getSymA()->getSymbol()).getBinding() ==
ELF::STB_LOCAL))) {
const MCExpr *CallExpr =
MipsMCExpr::create(MipsMCExpr::MEK_GOT_CALL, SymExpr, getContext());
TOut.emitRRX(Mips::LD, DstReg, GPReg, MCOperand::createExpr(CallExpr),
IDLoc, STI);
return false;
}
// The remaining cases are:
// Small offset: ld $tmp, %got_disp(symbol)($gp)
// >daddiu $tmp, $tmp, offset
// >daddu $rd, $tmp, $rs
// The daddiu's marked with a '>' may be omitted if they are redundant. If
// this happens then the last instruction must use $rd as the result
// register.
const MipsMCExpr *GotExpr = MipsMCExpr::create(MipsMCExpr::MEK_GOT_DISP,
Res.getSymA(),
getContext());
const MCExpr *LoExpr = nullptr;
if (Res.getConstant() != 0) {
// Symbols fully resolve with just the %got_disp(symbol) but we
// must still account for any offset to the symbol for
// expressions like symbol+8.
LoExpr = MCConstantExpr::create(Res.getConstant(), getContext());
// FIXME: Offsets greater than 16 bits are not yet implemented.
// FIXME: The correct range is a 32-bit sign-extended number.
if (Res.getConstant() < -0x8000 || Res.getConstant() > 0x7fff) {
Error(IDLoc, "macro instruction uses large offset, which is not "
"currently supported");
return true;
}
}
unsigned TmpReg = DstReg;
if (UseSrcReg &&
getContext().getRegisterInfo()->isSuperOrSubRegisterEq(DstReg,
SrcReg)) {
// If $rs is the same as $rd, we need to use AT.
// If it is not available we exit.
unsigned ATReg = getATReg(IDLoc);
if (!ATReg)
return true;
TmpReg = ATReg;
}
TOut.emitRRX(Mips::LD, TmpReg, GPReg, MCOperand::createExpr(GotExpr), IDLoc,
STI);
if (LoExpr)
TOut.emitRRX(Mips::DADDiu, TmpReg, TmpReg, MCOperand::createExpr(LoExpr),
IDLoc, STI);
if (UseSrcReg)
TOut.emitRRR(Mips::DADDu, DstReg, TmpReg, SrcReg, IDLoc, STI);
return false;
}
const MipsMCExpr *HiExpr =
MipsMCExpr::create(MipsMCExpr::MEK_HI, SymExpr, getContext());
const MipsMCExpr *LoExpr =
MipsMCExpr::create(MipsMCExpr::MEK_LO, SymExpr, getContext());
// This is the 64-bit symbol address expansion.
if (ABI.ArePtrs64bit() && isGP64bit()) {
// We need AT for the 64-bit expansion in the cases where the optional
// source register is the destination register and for the superscalar
// scheduled form.
//
// If it is not available we exit if the destination is the same as the
// source register.
const MipsMCExpr *HighestExpr =
MipsMCExpr::create(MipsMCExpr::MEK_HIGHEST, SymExpr, getContext());
const MipsMCExpr *HigherExpr =
MipsMCExpr::create(MipsMCExpr::MEK_HIGHER, SymExpr, getContext());
bool RdRegIsRsReg =
getContext().getRegisterInfo()->isSuperOrSubRegisterEq(DstReg, SrcReg);
if (canUseATReg() && UseSrcReg && RdRegIsRsReg) {
unsigned ATReg = getATReg(IDLoc);
// If $rs is the same as $rd:
// (d)la $rd, sym($rd) => lui $at, %highest(sym)
// daddiu $at, $at, %higher(sym)
// dsll $at, $at, 16
// daddiu $at, $at, %hi(sym)
// dsll $at, $at, 16
// daddiu $at, $at, %lo(sym)
// daddu $rd, $at, $rd
TOut.emitRX(Mips::LUi, ATReg, MCOperand::createExpr(HighestExpr), IDLoc,
STI);
TOut.emitRRX(Mips::DADDiu, ATReg, ATReg,
MCOperand::createExpr(HigherExpr), IDLoc, STI);
TOut.emitRRI(Mips::DSLL, ATReg, ATReg, 16, IDLoc, STI);
TOut.emitRRX(Mips::DADDiu, ATReg, ATReg, MCOperand::createExpr(HiExpr),
IDLoc, STI);
TOut.emitRRI(Mips::DSLL, ATReg, ATReg, 16, IDLoc, STI);
TOut.emitRRX(Mips::DADDiu, ATReg, ATReg, MCOperand::createExpr(LoExpr),
IDLoc, STI);
TOut.emitRRR(Mips::DADDu, DstReg, ATReg, SrcReg, IDLoc, STI);
return false;
} else if (canUseATReg() && !RdRegIsRsReg && DstReg != getATReg(IDLoc)) {
unsigned ATReg = getATReg(IDLoc);
// If the $rs is different from $rd or if $rs isn't specified and we
// have $at available:
// (d)la $rd, sym/sym($rs) => lui $rd, %highest(sym)
// lui $at, %hi(sym)
// daddiu $rd, $rd, %higher(sym)
// daddiu $at, $at, %lo(sym)
// dsll32 $rd, $rd, 0
// daddu $rd, $rd, $at
// (daddu $rd, $rd, $rs)
//
// Which is preferred for superscalar issue.
TOut.emitRX(Mips::LUi, DstReg, MCOperand::createExpr(HighestExpr), IDLoc,
STI);
TOut.emitRX(Mips::LUi, ATReg, MCOperand::createExpr(HiExpr), IDLoc, STI);
TOut.emitRRX(Mips::DADDiu, DstReg, DstReg,
MCOperand::createExpr(HigherExpr), IDLoc, STI);
TOut.emitRRX(Mips::DADDiu, ATReg, ATReg, MCOperand::createExpr(LoExpr),
IDLoc, STI);
TOut.emitRRI(Mips::DSLL32, DstReg, DstReg, 0, IDLoc, STI);
TOut.emitRRR(Mips::DADDu, DstReg, DstReg, ATReg, IDLoc, STI);
if (UseSrcReg)
TOut.emitRRR(Mips::DADDu, DstReg, DstReg, SrcReg, IDLoc, STI);
return false;
} else if ((!canUseATReg() && !RdRegIsRsReg) ||
(canUseATReg() && DstReg == getATReg(IDLoc))) {
// Otherwise, synthesize the address in the destination register
// serially:
// (d)la $rd, sym/sym($rs) => lui $rd, %highest(sym)
// daddiu $rd, $rd, %higher(sym)
// dsll $rd, $rd, 16
// daddiu $rd, $rd, %hi(sym)
// dsll $rd, $rd, 16
// daddiu $rd, $rd, %lo(sym)
TOut.emitRX(Mips::LUi, DstReg, MCOperand::createExpr(HighestExpr), IDLoc,
STI);
TOut.emitRRX(Mips::DADDiu, DstReg, DstReg,
MCOperand::createExpr(HigherExpr), IDLoc, STI);
TOut.emitRRI(Mips::DSLL, DstReg, DstReg, 16, IDLoc, STI);
TOut.emitRRX(Mips::DADDiu, DstReg, DstReg,
MCOperand::createExpr(HiExpr), IDLoc, STI);
TOut.emitRRI(Mips::DSLL, DstReg, DstReg, 16, IDLoc, STI);
TOut.emitRRX(Mips::DADDiu, DstReg, DstReg,
MCOperand::createExpr(LoExpr), IDLoc, STI);
if (UseSrcReg)
TOut.emitRRR(Mips::DADDu, DstReg, DstReg, SrcReg, IDLoc, STI);
return false;
} else {
// We have a case where SrcReg == DstReg and we don't have $at
// available. We can't expand this case, so error out appropriately.
assert(SrcReg == DstReg && !canUseATReg() &&
"Could have expanded dla but didn't?");
reportParseError(IDLoc,
"pseudo-instruction requires $at, which is not available");
return true;
}
}
// And now, the 32-bit symbol address expansion:
// If $rs is the same as $rd:
// (d)la $rd, sym($rd) => lui $at, %hi(sym)
// ori $at, $at, %lo(sym)
// addu $rd, $at, $rd
// Otherwise, if the $rs is different from $rd or if $rs isn't specified:
// (d)la $rd, sym/sym($rs) => lui $rd, %hi(sym)
// ori $rd, $rd, %lo(sym)
// (addu $rd, $rd, $rs)
unsigned TmpReg = DstReg;
if (UseSrcReg &&
getContext().getRegisterInfo()->isSuperOrSubRegisterEq(DstReg, SrcReg)) {
// If $rs is the same as $rd, we need to use AT.
// If it is not available we exit.
unsigned ATReg = getATReg(IDLoc);
if (!ATReg)
return true;
TmpReg = ATReg;
}
TOut.emitRX(Mips::LUi, TmpReg, MCOperand::createExpr(HiExpr), IDLoc, STI);
TOut.emitRRX(Mips::ADDiu, TmpReg, TmpReg, MCOperand::createExpr(LoExpr),
IDLoc, STI);
if (UseSrcReg)
TOut.emitRRR(Mips::ADDu, DstReg, TmpReg, SrcReg, IDLoc, STI);
else
assert(
getContext().getRegisterInfo()->isSuperOrSubRegisterEq(DstReg, TmpReg));
return false;
}
// Each double-precision register D0-D15 overlaps with two of the single
// precision registers F0-F31. As an example, all of the following hold true:
// D0 + 1 == F1, F1 + 1 == D1, F1 + 1 == F2, depending on the context.
static unsigned nextReg(unsigned Reg) {
if (MipsMCRegisterClasses[Mips::FGR32RegClassID].contains(Reg))
return Reg == (unsigned)Mips::F31 ? (unsigned)Mips::F0 : Reg + 1;
switch (Reg) {
default: llvm_unreachable("Unknown register in assembly macro expansion!");
case Mips::ZERO: return Mips::AT;
case Mips::AT: return Mips::V0;
case Mips::V0: return Mips::V1;
case Mips::V1: return Mips::A0;
case Mips::A0: return Mips::A1;
case Mips::A1: return Mips::A2;
case Mips::A2: return Mips::A3;
case Mips::A3: return Mips::T0;
case Mips::T0: return Mips::T1;
case Mips::T1: return Mips::T2;
case Mips::T2: return Mips::T3;
case Mips::T3: return Mips::T4;
case Mips::T4: return Mips::T5;
case Mips::T5: return Mips::T6;
case Mips::T6: return Mips::T7;
case Mips::T7: return Mips::S0;
case Mips::S0: return Mips::S1;
case Mips::S1: return Mips::S2;
case Mips::S2: return Mips::S3;
case Mips::S3: return Mips::S4;
case Mips::S4: return Mips::S5;
case Mips::S5: return Mips::S6;
case Mips::S6: return Mips::S7;
case Mips::S7: return Mips::T8;
case Mips::T8: return Mips::T9;
case Mips::T9: return Mips::K0;
case Mips::K0: return Mips::K1;
case Mips::K1: return Mips::GP;
case Mips::GP: return Mips::SP;
case Mips::SP: return Mips::FP;
case Mips::FP: return Mips::RA;
case Mips::RA: return Mips::ZERO;
case Mips::D0: return Mips::F1;
case Mips::D1: return Mips::F3;
case Mips::D2: return Mips::F5;
case Mips::D3: return Mips::F7;
case Mips::D4: return Mips::F9;
case Mips::D5: return Mips::F11;
case Mips::D6: return Mips::F13;
case Mips::D7: return Mips::F15;
case Mips::D8: return Mips::F17;
case Mips::D9: return Mips::F19;
case Mips::D10: return Mips::F21;
case Mips::D11: return Mips::F23;
case Mips::D12: return Mips::F25;
case Mips::D13: return Mips::F27;
case Mips::D14: return Mips::F29;
case Mips::D15: return Mips::F31;
}
}
// FIXME: This method is too general. In principle we should compute the number
// of instructions required to synthesize the immediate inline compared to
// synthesizing the address inline and relying on non .text sections.
// For static O32 and N32 this may yield a small benefit; for static N64 it is
// likely to yield a much larger benefit, as we have to synthesize a 64-bit
// address to load a 64-bit value.
bool MipsAsmParser::emitPartialAddress(MipsTargetStreamer &TOut, SMLoc IDLoc,
MCSymbol *Sym) {
unsigned ATReg = getATReg(IDLoc);
if (!ATReg)
return true;
if(IsPicEnabled) {
const MCExpr *GotSym =
MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, getContext());
const MipsMCExpr *GotExpr =
MipsMCExpr::create(MipsMCExpr::MEK_GOT, GotSym, getContext());
if(isABI_O32() || isABI_N32()) {
TOut.emitRRX(Mips::LW, ATReg, GPReg, MCOperand::createExpr(GotExpr),
IDLoc, STI);
} else { //isABI_N64()
TOut.emitRRX(Mips::LD, ATReg, GPReg, MCOperand::createExpr(GotExpr),
IDLoc, STI);
}
} else { //!IsPicEnabled
const MCExpr *HiSym =
MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, getContext());
const MipsMCExpr *HiExpr =
MipsMCExpr::create(MipsMCExpr::MEK_HI, HiSym, getContext());
// FIXME: This is technically correct but gives a different result from gas,
// which is incomplete there (it has a fixme noting it doesn't work with
// 64-bit addresses).
// FIXME: With -msym32 option, the address expansion for N64 should probably
// use the O32 / N32 case. It's safe to use the 64 address expansion as the
// symbol's value is considered sign extended.
if(isABI_O32() || isABI_N32()) {
TOut.emitRX(Mips::LUi, ATReg, MCOperand::createExpr(HiExpr), IDLoc, STI);
} else { //isABI_N64()
const MCExpr *HighestSym =
MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, getContext());
const MipsMCExpr *HighestExpr =
MipsMCExpr::create(MipsMCExpr::MEK_HIGHEST, HighestSym, getContext());
const MCExpr *HigherSym =
MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, getContext());
const MipsMCExpr *HigherExpr =
MipsMCExpr::create(MipsMCExpr::MEK_HIGHER, HigherSym, getContext());
TOut.emitRX(Mips::LUi, ATReg, MCOperand::createExpr(HighestExpr), IDLoc,
STI);
TOut.emitRRX(Mips::DADDiu, ATReg, ATReg,
MCOperand::createExpr(HigherExpr), IDLoc, STI);
TOut.emitRRI(Mips::DSLL, ATReg, ATReg, 16, IDLoc, STI);
TOut.emitRRX(Mips::DADDiu, ATReg, ATReg, MCOperand::createExpr(HiExpr),
IDLoc, STI);
TOut.emitRRI(Mips::DSLL, ATReg, ATReg, 16, IDLoc, STI);
}
}
return false;
}
-bool MipsAsmParser::expandLoadImmReal(MCInst &Inst, bool IsSingle, bool IsGPR,
- bool Is64FPU, SMLoc IDLoc,
- MCStreamer &Out,
- const MCSubtargetInfo *STI) {
+static uint64_t convertIntToDoubleImm(uint64_t ImmOp64) {
+ // If ImmOp64 is AsmToken::Integer type (all bits set to zero in the
+ // exponent field), convert it to double (e.g. 1 to 1.0)
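+ // For example (illustrative), 1 (0x0000000000000001) becomes
+ // 0x3ff0000000000000, the bit pattern of the double 1.0.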
+ if ((Hi_32(ImmOp64) & 0x7ff00000) == 0) {
+ APFloat RealVal(APFloat::IEEEdouble(), ImmOp64);
+ ImmOp64 = RealVal.bitcastToAPInt().getZExtValue();
+ }
+ return ImmOp64;
+}
+
+static uint32_t convertDoubleImmToSingleImm(uint64_t ImmOp64) {
+ // Conversion of a double in a uint64_t to a float in a uint32_t,
+ // retaining the bit pattern of a float.
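+ // For example (illustrative), 1.0 (0x3ff0000000000000) yields 0x3f800000.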
+ double DoubleImm = BitsToDouble(ImmOp64);
+ float TmpFloat = static_cast<float>(DoubleImm);
+ return FloatToBits(TmpFloat);
+}
+
+bool MipsAsmParser::expandLoadSingleImmToGPR(MCInst &Inst, SMLoc IDLoc,
+ MCStreamer &Out,
+ const MCSubtargetInfo *STI) {
+ assert(Inst.getNumOperands() == 2 && "Invalid operand count");
+ assert(Inst.getOperand(0).isReg() && Inst.getOperand(1).isImm() &&
+ "Invalid instruction operand.");
+
+ unsigned FirstReg = Inst.getOperand(0).getReg();
+ uint64_t ImmOp64 = Inst.getOperand(1).getImm();
+
+ ImmOp64 = convertIntToDoubleImm(ImmOp64);
+
+ uint32_t ImmOp32 = convertDoubleImmToSingleImm(ImmOp64);
+
+ return loadImmediate(ImmOp32, FirstReg, Mips::NoRegister, true, true, IDLoc,
+ Out, STI);
+}
+
+bool MipsAsmParser::expandLoadSingleImmToFPR(MCInst &Inst, SMLoc IDLoc,
+ MCStreamer &Out,
+ const MCSubtargetInfo *STI) {
MipsTargetStreamer &TOut = getTargetStreamer();
assert(Inst.getNumOperands() == 2 && "Invalid operand count");
assert(Inst.getOperand(0).isReg() && Inst.getOperand(1).isImm() &&
"Invalid instruction operand.");
unsigned FirstReg = Inst.getOperand(0).getReg();
uint64_t ImmOp64 = Inst.getOperand(1).getImm();
- uint32_t HiImmOp64 = (ImmOp64 & 0xffffffff00000000) >> 32;
- // If ImmOp64 is AsmToken::Integer type (all bits set to zero in the
- // exponent field), convert it to double (e.g. 1 to 1.0)
- if ((HiImmOp64 & 0x7ff00000) == 0) {
- APFloat RealVal(APFloat::IEEEdouble(), ImmOp64);
- ImmOp64 = RealVal.bitcastToAPInt().getZExtValue();
+ ImmOp64 = convertIntToDoubleImm(ImmOp64);
+
+ uint32_t ImmOp32 = convertDoubleImmToSingleImm(ImmOp64);
+
+ unsigned TmpReg = getATReg(IDLoc);
+ if (!TmpReg)
+ return true;
+
+ if (Lo_32(ImmOp64) == 0) {
+ if (loadImmediate(ImmOp32, TmpReg, Mips::NoRegister, true, true, IDLoc, Out,
+ STI))
+ return true;
+ TOut.emitRR(Mips::MTC1, FirstReg, TmpReg, IDLoc, STI);
+ return false;
}
- uint32_t LoImmOp64 = ImmOp64 & 0xffffffff;
- HiImmOp64 = (ImmOp64 & 0xffffffff00000000) >> 32;
+ MCSection *CS = getStreamer().getCurrentSectionOnly();
+ // FIXME: Enhance this expansion to use the .lit4 & .lit8 sections
+ // where appropriate.
+ MCSection *ReadOnlySection =
+ getContext().getELFSection(".rodata", ELF::SHT_PROGBITS, ELF::SHF_ALLOC);
- if (IsSingle) {
- // Conversion of a double in an uint64_t to a float in a uint32_t,
- // retaining the bit pattern of a float.
- uint32_t ImmOp32;
- double doubleImm = BitsToDouble(ImmOp64);
- float tmp_float = static_cast<float>(doubleImm);
- ImmOp32 = FloatToBits(tmp_float);
+ MCSymbol *Sym = getContext().createTempSymbol();
+ const MCExpr *LoSym =
+ MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, getContext());
+ const MipsMCExpr *LoExpr =
+ MipsMCExpr::create(MipsMCExpr::MEK_LO, LoSym, getContext());
- if (IsGPR) {
- if (loadImmediate(ImmOp32, FirstReg, Mips::NoRegister, true, true, IDLoc,
+ getStreamer().SwitchSection(ReadOnlySection);
+ getStreamer().EmitLabel(Sym, IDLoc);
+ getStreamer().EmitIntValue(ImmOp32, 4);
+ getStreamer().SwitchSection(CS);
+
+ if (emitPartialAddress(TOut, IDLoc, Sym))
+ return true;
+ TOut.emitRRX(Mips::LWC1, FirstReg, TmpReg, MCOperand::createExpr(LoExpr),
+ IDLoc, STI);
+ return false;
+}
+
+bool MipsAsmParser::expandLoadDoubleImmToGPR(MCInst &Inst, SMLoc IDLoc,
+ MCStreamer &Out,
+ const MCSubtargetInfo *STI) {
+ MipsTargetStreamer &TOut = getTargetStreamer();
+ assert(Inst.getNumOperands() == 2 && "Invalid operand count");
+ assert(Inst.getOperand(0).isReg() && Inst.getOperand(1).isImm() &&
+ "Invalid instruction operand.");
+
+ unsigned FirstReg = Inst.getOperand(0).getReg();
+ uint64_t ImmOp64 = Inst.getOperand(1).getImm();
+
+ ImmOp64 = convertIntToDoubleImm(ImmOp64);
+
+ uint32_t LoImmOp64 = Lo_32(ImmOp64);
+ uint32_t HiImmOp64 = Hi_32(ImmOp64);
+
+ unsigned TmpReg = getATReg(IDLoc);
+ if (!TmpReg)
+ return true;
+
+ if (LoImmOp64 == 0) {
+ if (isABI_N32() || isABI_N64()) {
+ if (loadImmediate(ImmOp64, FirstReg, Mips::NoRegister, false, true, IDLoc,
Out, STI))
return true;
- return false;
} else {
- unsigned ATReg = getATReg(IDLoc);
- if (!ATReg)
+ if (loadImmediate(HiImmOp64, FirstReg, Mips::NoRegister, true, true,
+ IDLoc, Out, STI))
return true;
- if (LoImmOp64 == 0) {
- if (loadImmediate(ImmOp32, ATReg, Mips::NoRegister, true, true, IDLoc,
- Out, STI))
- return true;
- TOut.emitRR(Mips::MTC1, FirstReg, ATReg, IDLoc, STI);
- return false;
- }
- MCSection *CS = getStreamer().getCurrentSectionOnly();
- // FIXME: Enhance this expansion to use the .lit4 & .lit8 sections
- // where appropriate.
- MCSection *ReadOnlySection = getContext().getELFSection(
- ".rodata", ELF::SHT_PROGBITS, ELF::SHF_ALLOC);
-
- MCSymbol *Sym = getContext().createTempSymbol();
- const MCExpr *LoSym =
- MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, getContext());
- const MipsMCExpr *LoExpr =
- MipsMCExpr::create(MipsMCExpr::MEK_LO, LoSym, getContext());
-
- getStreamer().SwitchSection(ReadOnlySection);
- getStreamer().EmitLabel(Sym, IDLoc);
- getStreamer().EmitIntValue(ImmOp32, 4);
- getStreamer().SwitchSection(CS);
-
- if(emitPartialAddress(TOut, IDLoc, Sym))
+ if (loadImmediate(0, nextReg(FirstReg), Mips::NoRegister, true, true,
+ IDLoc, Out, STI))
return true;
- TOut.emitRRX(Mips::LWC1, FirstReg, ATReg,
- MCOperand::createExpr(LoExpr), IDLoc, STI);
}
return false;
}
- // if(!IsSingle)
- unsigned ATReg = getATReg(IDLoc);
- if (!ATReg)
+ MCSection *CS = getStreamer().getCurrentSectionOnly();
+ MCSection *ReadOnlySection =
+ getContext().getELFSection(".rodata", ELF::SHT_PROGBITS, ELF::SHF_ALLOC);
+
+ MCSymbol *Sym = getContext().createTempSymbol();
+ const MCExpr *LoSym =
+ MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, getContext());
+ const MipsMCExpr *LoExpr =
+ MipsMCExpr::create(MipsMCExpr::MEK_LO, LoSym, getContext());
+
+ getStreamer().SwitchSection(ReadOnlySection);
+ getStreamer().EmitLabel(Sym, IDLoc);
+ getStreamer().EmitValueToAlignment(8);
+ getStreamer().EmitIntValue(ImmOp64, 8);
+ getStreamer().SwitchSection(CS);
+
+ if (emitPartialAddress(TOut, IDLoc, Sym))
return true;
- if (IsGPR) {
- if (LoImmOp64 == 0) {
- if(isABI_N32() || isABI_N64()) {
- if (loadImmediate(HiImmOp64, FirstReg, Mips::NoRegister, false, true,
- IDLoc, Out, STI))
- return true;
- return false;
- } else {
- if (loadImmediate(HiImmOp64, FirstReg, Mips::NoRegister, true, true,
- IDLoc, Out, STI))
- return true;
+ if (isABI_N64())
+ TOut.emitRRX(Mips::DADDiu, TmpReg, TmpReg, MCOperand::createExpr(LoExpr),
+ IDLoc, STI);
+ else
+ TOut.emitRRX(Mips::ADDiu, TmpReg, TmpReg, MCOperand::createExpr(LoExpr),
+ IDLoc, STI);
- if (loadImmediate(0, nextReg(FirstReg), Mips::NoRegister, true, true,
- IDLoc, Out, STI))
- return true;
- return false;
- }
- }
+ if (isABI_N32() || isABI_N64())
+ TOut.emitRRI(Mips::LD, FirstReg, TmpReg, 0, IDLoc, STI);
+ else {
+ TOut.emitRRI(Mips::LW, FirstReg, TmpReg, 0, IDLoc, STI);
+ TOut.emitRRI(Mips::LW, nextReg(FirstReg), TmpReg, 4, IDLoc, STI);
+ }
+ return false;
+}
- MCSection *CS = getStreamer().getCurrentSectionOnly();
- MCSection *ReadOnlySection = getContext().getELFSection(
- ".rodata", ELF::SHT_PROGBITS, ELF::SHF_ALLOC);
+bool MipsAsmParser::expandLoadDoubleImmToFPR(MCInst &Inst, bool Is64FPU,
+ SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI) {
+ MipsTargetStreamer &TOut = getTargetStreamer();
+ assert(Inst.getNumOperands() == 2 && "Invalid operand count");
+ assert(Inst.getOperand(0).isReg() && Inst.getOperand(1).isImm() &&
+ "Invalid instruction operand.");
- MCSymbol *Sym = getContext().createTempSymbol();
- const MCExpr *LoSym =
- MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, getContext());
- const MipsMCExpr *LoExpr =
- MipsMCExpr::create(MipsMCExpr::MEK_LO, LoSym, getContext());
+ unsigned FirstReg = Inst.getOperand(0).getReg();
+ uint64_t ImmOp64 = Inst.getOperand(1).getImm();
- getStreamer().SwitchSection(ReadOnlySection);
- getStreamer().EmitLabel(Sym, IDLoc);
- getStreamer().EmitIntValue(HiImmOp64, 4);
- getStreamer().EmitIntValue(LoImmOp64, 4);
- getStreamer().SwitchSection(CS);
+ ImmOp64 = convertIntToDoubleImm(ImmOp64);
- if(emitPartialAddress(TOut, IDLoc, Sym))
- return true;
- if(isABI_N64())
- TOut.emitRRX(Mips::DADDiu, ATReg, ATReg,
- MCOperand::createExpr(LoExpr), IDLoc, STI);
- else
- TOut.emitRRX(Mips::ADDiu, ATReg, ATReg,
- MCOperand::createExpr(LoExpr), IDLoc, STI);
+ uint32_t LoImmOp64 = Lo_32(ImmOp64);
+ uint32_t HiImmOp64 = Hi_32(ImmOp64);
- if(isABI_N32() || isABI_N64())
- TOut.emitRRI(Mips::LD, FirstReg, ATReg, 0, IDLoc, STI);
- else {
- TOut.emitRRI(Mips::LW, FirstReg, ATReg, 0, IDLoc, STI);
- TOut.emitRRI(Mips::LW, nextReg(FirstReg), ATReg, 4, IDLoc, STI);
- }
- return false;
- } else { // if(!IsGPR && !IsSingle)
- if ((LoImmOp64 == 0) &&
- !((HiImmOp64 & 0xffff0000) && (HiImmOp64 & 0x0000ffff))) {
- // FIXME: In the case where the constant is zero, we can load the
- // register directly from the zero register.
- if (loadImmediate(HiImmOp64, ATReg, Mips::NoRegister, true, true, IDLoc,
+ unsigned TmpReg = getATReg(IDLoc);
+ if (!TmpReg)
+ return true;
+
+ if ((LoImmOp64 == 0) &&
+ !((HiImmOp64 & 0xffff0000) && (HiImmOp64 & 0x0000ffff))) {
+ // FIXME: In the case where the constant is zero, we can load the
+ // register directly from the zero register.
+
+ if (isABI_N32() || isABI_N64()) {
+ if (loadImmediate(ImmOp64, TmpReg, Mips::NoRegister, false, false, IDLoc,
Out, STI))
return true;
- if (isABI_N32() || isABI_N64())
- TOut.emitRR(Mips::DMTC1, FirstReg, ATReg, IDLoc, STI);
- else if (hasMips32r2()) {
- TOut.emitRR(Mips::MTC1, FirstReg, Mips::ZERO, IDLoc, STI);
- TOut.emitRRR(Mips::MTHC1_D32, FirstReg, FirstReg, ATReg, IDLoc, STI);
- } else {
- TOut.emitRR(Mips::MTC1, nextReg(FirstReg), ATReg, IDLoc, STI);
- TOut.emitRR(Mips::MTC1, FirstReg, Mips::ZERO, IDLoc, STI);
- }
+ TOut.emitRR(Mips::DMTC1, FirstReg, TmpReg, IDLoc, STI);
return false;
}
- MCSection *CS = getStreamer().getCurrentSectionOnly();
- // FIXME: Enhance this expansion to use the .lit4 & .lit8 sections
- // where appropriate.
- MCSection *ReadOnlySection = getContext().getELFSection(
- ".rodata", ELF::SHT_PROGBITS, ELF::SHF_ALLOC);
+ if (loadImmediate(HiImmOp64, TmpReg, Mips::NoRegister, true, false, IDLoc,
+ Out, STI))
+ return true;
- MCSymbol *Sym = getContext().createTempSymbol();
- const MCExpr *LoSym =
- MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, getContext());
- const MipsMCExpr *LoExpr =
- MipsMCExpr::create(MipsMCExpr::MEK_LO, LoSym, getContext());
+ if (hasMips32r2()) {
+ TOut.emitRR(Mips::MTC1, FirstReg, Mips::ZERO, IDLoc, STI);
+ TOut.emitRRR(Mips::MTHC1_D32, FirstReg, FirstReg, TmpReg, IDLoc, STI);
+ } else {
+ TOut.emitRR(Mips::MTC1, nextReg(FirstReg), TmpReg, IDLoc, STI);
+ TOut.emitRR(Mips::MTC1, FirstReg, Mips::ZERO, IDLoc, STI);
+ }
+ return false;
+ }
- getStreamer().SwitchSection(ReadOnlySection);
- getStreamer().EmitLabel(Sym, IDLoc);
- getStreamer().EmitIntValue(HiImmOp64, 4);
- getStreamer().EmitIntValue(LoImmOp64, 4);
- getStreamer().SwitchSection(CS);
+ MCSection *CS = getStreamer().getCurrentSectionOnly();
+ // FIXME: Enhance this expansion to use the .lit4 & .lit8 sections
+ // where appropriate.
+ MCSection *ReadOnlySection =
+ getContext().getELFSection(".rodata", ELF::SHT_PROGBITS, ELF::SHF_ALLOC);
- if(emitPartialAddress(TOut, IDLoc, Sym))
- return true;
- TOut.emitRRX(Is64FPU ? Mips::LDC164 : Mips::LDC1, FirstReg, ATReg,
- MCOperand::createExpr(LoExpr), IDLoc, STI);
- }
+ MCSymbol *Sym = getContext().createTempSymbol();
+ const MCExpr *LoSym =
+ MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, getContext());
+ const MipsMCExpr *LoExpr =
+ MipsMCExpr::create(MipsMCExpr::MEK_LO, LoSym, getContext());
+
+ getStreamer().SwitchSection(ReadOnlySection);
+ getStreamer().EmitLabel(Sym, IDLoc);
+ getStreamer().EmitValueToAlignment(8);
+ getStreamer().EmitIntValue(ImmOp64, 8);
+ getStreamer().SwitchSection(CS);
+
+ if (emitPartialAddress(TOut, IDLoc, Sym))
+ return true;
+
+ TOut.emitRRX(Is64FPU ? Mips::LDC164 : Mips::LDC1, FirstReg, TmpReg,
+ MCOperand::createExpr(LoExpr), IDLoc, STI);
+
return false;
}
bool MipsAsmParser::expandUncondBranchMMPseudo(MCInst &Inst, SMLoc IDLoc,
MCStreamer &Out,
const MCSubtargetInfo *STI) {
MipsTargetStreamer &TOut = getTargetStreamer();
assert(getInstDesc(Inst.getOpcode()).getNumOperands() == 1 &&
"unexpected number of operands");
MCOperand Offset = Inst.getOperand(0);
if (Offset.isExpr()) {
Inst.clear();
Inst.setOpcode(Mips::BEQ_MM);
Inst.addOperand(MCOperand::createReg(Mips::ZERO));
Inst.addOperand(MCOperand::createReg(Mips::ZERO));
Inst.addOperand(MCOperand::createExpr(Offset.getExpr()));
} else {
assert(Offset.isImm() && "expected immediate operand kind");
if (isInt<11>(Offset.getImm())) {
// If the offset fits into 11 bits, this instruction becomes the microMIPS
// 16-bit unconditional branch instruction.
if (inMicroMipsMode())
Inst.setOpcode(hasMips32r6() ? Mips::BC16_MMR6 : Mips::B16_MM);
} else {
if (!isInt<17>(Offset.getImm()))
return Error(IDLoc, "branch target out of range");
if (OffsetToAlignment(Offset.getImm(), 1LL << 1))
return Error(IDLoc, "branch to misaligned address");
Inst.clear();
Inst.setOpcode(Mips::BEQ_MM);
Inst.addOperand(MCOperand::createReg(Mips::ZERO));
Inst.addOperand(MCOperand::createReg(Mips::ZERO));
Inst.addOperand(MCOperand::createImm(Offset.getImm()));
}
}
Out.EmitInstruction(Inst, *STI);
// If .set reorder is active and branch instruction has a delay slot,
// emit a NOP after it.
const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
if (MCID.hasDelaySlot() && AssemblerOptions.back()->isReorder())
TOut.emitEmptyDelaySlot(true, IDLoc, STI);
return false;
}
bool MipsAsmParser::expandBranchImm(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI) {
MipsTargetStreamer &TOut = getTargetStreamer();
const MCOperand &DstRegOp = Inst.getOperand(0);
assert(DstRegOp.isReg() && "expected register operand kind");
const MCOperand &ImmOp = Inst.getOperand(1);
assert(ImmOp.isImm() && "expected immediate operand kind");
const MCOperand &MemOffsetOp = Inst.getOperand(2);
assert((MemOffsetOp.isImm() || MemOffsetOp.isExpr()) &&
"expected immediate or expression operand");
bool IsLikely = false;
unsigned OpCode = 0;
switch(Inst.getOpcode()) {
case Mips::BneImm:
OpCode = Mips::BNE;
break;
case Mips::BeqImm:
OpCode = Mips::BEQ;
break;
case Mips::BEQLImmMacro:
OpCode = Mips::BEQL;
IsLikely = true;
break;
case Mips::BNELImmMacro:
OpCode = Mips::BNEL;
IsLikely = true;
break;
default:
llvm_unreachable("Unknown immediate branch pseudo-instruction.");
break;
}
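// For example (an illustrative sketch, register names assumed), on a 32-bit
// target with a non-zero immediate:
// bne $4, 100, label => addiu $at, $zero, 100
// bne $4, $at, label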
int64_t ImmValue = ImmOp.getImm();
if (ImmValue == 0) {
if (IsLikely) {
TOut.emitRRX(OpCode, DstRegOp.getReg(), Mips::ZERO,
MCOperand::createExpr(MemOffsetOp.getExpr()), IDLoc, STI);
TOut.emitRRI(Mips::SLL, Mips::ZERO, Mips::ZERO, 0, IDLoc, STI);
} else
TOut.emitRRX(OpCode, DstRegOp.getReg(), Mips::ZERO, MemOffsetOp, IDLoc,
STI);
} else {
warnIfNoMacro(IDLoc);
unsigned ATReg = getATReg(IDLoc);
if (!ATReg)
return true;
if (loadImmediate(ImmValue, ATReg, Mips::NoRegister, !isGP64bit(), true,
IDLoc, Out, STI))
return true;
if (IsLikely) {
TOut.emitRRX(OpCode, DstRegOp.getReg(), ATReg,
MCOperand::createExpr(MemOffsetOp.getExpr()), IDLoc, STI);
TOut.emitRRI(Mips::SLL, Mips::ZERO, Mips::ZERO, 0, IDLoc, STI);
} else
TOut.emitRRX(OpCode, DstRegOp.getReg(), ATReg, MemOffsetOp, IDLoc, STI);
}
return false;
}
void MipsAsmParser::expandMemInst(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI, bool IsLoad) {
const MCOperand &DstRegOp = Inst.getOperand(0);
assert(DstRegOp.isReg() && "expected register operand kind");
const MCOperand &BaseRegOp = Inst.getOperand(1);
assert(BaseRegOp.isReg() && "expected register operand kind");
const MCOperand &OffsetOp = Inst.getOperand(2);
MipsTargetStreamer &TOut = getTargetStreamer();
unsigned DstReg = DstRegOp.getReg();
unsigned BaseReg = BaseRegOp.getReg();
unsigned TmpReg = DstReg;
const MCInstrDesc &Desc = getInstDesc(Inst.getOpcode());
int16_t DstRegClass = Desc.OpInfo[0].RegClass;
unsigned DstRegClassID =
getContext().getRegisterInfo()->getRegClass(DstRegClass).getID();
bool IsGPR = (DstRegClassID == Mips::GPR32RegClassID) ||
(DstRegClassID == Mips::GPR64RegClassID);
if (!IsLoad || !IsGPR || (BaseReg == DstReg)) {
// At this point we need AT to perform the expansions
// and we exit if it is not available.
TmpReg = getATReg(IDLoc);
if (!TmpReg)
return;
}
if (OffsetOp.isImm()) {
int64_t LoOffset = OffsetOp.getImm() & 0xffff;
int64_t HiOffset = OffsetOp.getImm() & ~0xffff;
// If the MSB of LoOffset is 1 (a negative number), we must increment
// HiOffset to account for the sign-extension of the low part.
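// For example (illustrative), an offset of 0x18000 splits into
// HiOffset = 0x20000 and LoOffset = 0x8000: the memory instruction
// sign-extends 0x8000 to -0x8000, and 0x20000 - 0x8000 == 0x18000.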
if (LoOffset & 0x8000)
HiOffset += 0x10000;
bool IsLargeOffset = HiOffset != 0;
if (IsLargeOffset) {
bool Is32BitImm = (HiOffset >> 32) == 0;
if (loadImmediate(HiOffset, TmpReg, Mips::NoRegister, Is32BitImm, true,
IDLoc, Out, STI))
return;
}
if (BaseReg != Mips::ZERO && BaseReg != Mips::ZERO_64)
TOut.emitRRR(isGP64bit() ? Mips::DADDu : Mips::ADDu, TmpReg, TmpReg,
BaseReg, IDLoc, STI);
TOut.emitRRI(Inst.getOpcode(), DstReg, TmpReg, LoOffset, IDLoc, STI);
return;
}
assert(OffsetOp.isExpr() && "expected expression operand kind");
if (inPicMode()) {
// FIXME:
// a) Fix lw/sw $reg, symbol($reg) instruction expanding.
// b) If expression includes offset (sym + number), do not
// encode the offset into a relocation. Take it in account
// in the last load/store instruction.
// c) Check that immediates of R_MIPS_GOT16/R_MIPS_LO16 relocations
// do not exceed 16-bit.
// d) Use R_MIPS_GOT_PAGE/R_MIPS_GOT_OFST relocations instead
// of R_MIPS_GOT_DISP in appropriate cases to reduce number
// of GOT entries.
expandLoadAddress(TmpReg, Mips::NoRegister, OffsetOp, !ABI.ArePtrs64bit(),
IDLoc, Out, STI);
TOut.emitRRI(Inst.getOpcode(), DstReg, TmpReg, 0, IDLoc, STI);
} else {
const MCExpr *ExprOffset = OffsetOp.getExpr();
MCOperand LoOperand = MCOperand::createExpr(
MipsMCExpr::create(MipsMCExpr::MEK_LO, ExprOffset, getContext()));
MCOperand HiOperand = MCOperand::createExpr(
MipsMCExpr::create(MipsMCExpr::MEK_HI, ExprOffset, getContext()));
if (IsLoad)
TOut.emitLoadWithSymOffset(Inst.getOpcode(), DstReg, BaseReg, HiOperand,
LoOperand, TmpReg, IDLoc, STI);
else
TOut.emitStoreWithSymOffset(Inst.getOpcode(), DstReg, BaseReg, HiOperand,
LoOperand, TmpReg, IDLoc, STI);
}
}
bool MipsAsmParser::expandLoadStoreMultiple(MCInst &Inst, SMLoc IDLoc,
MCStreamer &Out,
const MCSubtargetInfo *STI) {
unsigned OpNum = Inst.getNumOperands();
unsigned Opcode = Inst.getOpcode();
unsigned NewOpcode = Opcode == Mips::SWM_MM ? Mips::SWM32_MM : Mips::LWM32_MM;
assert(Inst.getOperand(OpNum - 1).isImm() &&
Inst.getOperand(OpNum - 2).isReg() &&
Inst.getOperand(OpNum - 3).isReg() && "Invalid instruction operand.");
if (OpNum < 8 && Inst.getOperand(OpNum - 1).getImm() <= 60 &&
Inst.getOperand(OpNum - 1).getImm() >= 0 &&
(Inst.getOperand(OpNum - 2).getReg() == Mips::SP ||
Inst.getOperand(OpNum - 2).getReg() == Mips::SP_64) &&
(Inst.getOperand(OpNum - 3).getReg() == Mips::RA ||
Inst.getOperand(OpNum - 3).getReg() == Mips::RA_64)) {
// It can be implemented as an SWM16 or LWM16 instruction.
if (inMicroMipsMode() && hasMips32r6())
NewOpcode = Opcode == Mips::SWM_MM ? Mips::SWM16_MMR6 : Mips::LWM16_MMR6;
else
NewOpcode = Opcode == Mips::SWM_MM ? Mips::SWM16_MM : Mips::LWM16_MM;
}
Inst.setOpcode(NewOpcode);
Out.EmitInstruction(Inst, *STI);
return false;
}
bool MipsAsmParser::expandCondBranches(MCInst &Inst, SMLoc IDLoc,
MCStreamer &Out,
const MCSubtargetInfo *STI) {
MipsTargetStreamer &TOut = getTargetStreamer();
bool EmittedNoMacroWarning = false;
unsigned PseudoOpcode = Inst.getOpcode();
unsigned SrcReg = Inst.getOperand(0).getReg();
const MCOperand &TrgOp = Inst.getOperand(1);
const MCExpr *OffsetExpr = Inst.getOperand(2).getExpr();
unsigned ZeroSrcOpcode, ZeroTrgOpcode;
bool ReverseOrderSLT, IsUnsigned, IsLikely, AcceptsEquality;
unsigned TrgReg;
if (TrgOp.isReg())
TrgReg = TrgOp.getReg();
else if (TrgOp.isImm()) {
warnIfNoMacro(IDLoc);
EmittedNoMacroWarning = true;
TrgReg = getATReg(IDLoc);
if (!TrgReg)
return true;
switch(PseudoOpcode) {
default:
llvm_unreachable("unknown opcode for branch pseudo-instruction");
case Mips::BLTImmMacro:
PseudoOpcode = Mips::BLT;
break;
case Mips::BLEImmMacro:
PseudoOpcode = Mips::BLE;
break;
case Mips::BGEImmMacro:
PseudoOpcode = Mips::BGE;
break;
case Mips::BGTImmMacro:
PseudoOpcode = Mips::BGT;
break;
case Mips::BLTUImmMacro:
PseudoOpcode = Mips::BLTU;
break;
case Mips::BLEUImmMacro:
PseudoOpcode = Mips::BLEU;
break;
case Mips::BGEUImmMacro:
PseudoOpcode = Mips::BGEU;
break;
case Mips::BGTUImmMacro:
PseudoOpcode = Mips::BGTU;
break;
case Mips::BLTLImmMacro:
PseudoOpcode = Mips::BLTL;
break;
case Mips::BLELImmMacro:
PseudoOpcode = Mips::BLEL;
break;
case Mips::BGELImmMacro:
PseudoOpcode = Mips::BGEL;
break;
case Mips::BGTLImmMacro:
PseudoOpcode = Mips::BGTL;
break;
case Mips::BLTULImmMacro:
PseudoOpcode = Mips::BLTUL;
break;
case Mips::BLEULImmMacro:
PseudoOpcode = Mips::BLEUL;
break;
case Mips::BGEULImmMacro:
PseudoOpcode = Mips::BGEUL;
break;
case Mips::BGTULImmMacro:
PseudoOpcode = Mips::BGTUL;
break;
}
if (loadImmediate(TrgOp.getImm(), TrgReg, Mips::NoRegister, !isGP64bit(),
false, IDLoc, Out, STI))
return true;
}
switch (PseudoOpcode) {
case Mips::BLT:
case Mips::BLTU:
case Mips::BLTL:
case Mips::BLTUL:
AcceptsEquality = false;
ReverseOrderSLT = false;
IsUnsigned =
((PseudoOpcode == Mips::BLTU) || (PseudoOpcode == Mips::BLTUL));
IsLikely = ((PseudoOpcode == Mips::BLTL) || (PseudoOpcode == Mips::BLTUL));
ZeroSrcOpcode = Mips::BGTZ;
ZeroTrgOpcode = Mips::BLTZ;
break;
case Mips::BLE:
case Mips::BLEU:
case Mips::BLEL:
case Mips::BLEUL:
AcceptsEquality = true;
ReverseOrderSLT = true;
IsUnsigned =
((PseudoOpcode == Mips::BLEU) || (PseudoOpcode == Mips::BLEUL));
IsLikely = ((PseudoOpcode == Mips::BLEL) || (PseudoOpcode == Mips::BLEUL));
ZeroSrcOpcode = Mips::BGEZ;
ZeroTrgOpcode = Mips::BLEZ;
break;
case Mips::BGE:
case Mips::BGEU:
case Mips::BGEL:
case Mips::BGEUL:
AcceptsEquality = true;
ReverseOrderSLT = false;
IsUnsigned =
((PseudoOpcode == Mips::BGEU) || (PseudoOpcode == Mips::BGEUL));
IsLikely = ((PseudoOpcode == Mips::BGEL) || (PseudoOpcode == Mips::BGEUL));
ZeroSrcOpcode = Mips::BLEZ;
ZeroTrgOpcode = Mips::BGEZ;
break;
case Mips::BGT:
case Mips::BGTU:
case Mips::BGTL:
case Mips::BGTUL:
AcceptsEquality = false;
ReverseOrderSLT = true;
IsUnsigned =
((PseudoOpcode == Mips::BGTU) || (PseudoOpcode == Mips::BGTUL));
IsLikely = ((PseudoOpcode == Mips::BGTL) || (PseudoOpcode == Mips::BGTUL));
ZeroSrcOpcode = Mips::BLTZ;
ZeroTrgOpcode = Mips::BGTZ;
break;
default:
llvm_unreachable("unknown opcode for branch pseudo-instruction");
}
bool IsTrgRegZero = (TrgReg == Mips::ZERO);
bool IsSrcRegZero = (SrcReg == Mips::ZERO);
if (IsSrcRegZero && IsTrgRegZero) {
// FIXME: All of these Opcode-specific if's are needed for compatibility
// with GAS' behaviour. However, they may not generate the most efficient
// code in some circumstances.
if (PseudoOpcode == Mips::BLT) {
TOut.emitRX(Mips::BLTZ, Mips::ZERO, MCOperand::createExpr(OffsetExpr),
IDLoc, STI);
return false;
}
if (PseudoOpcode == Mips::BLE) {
TOut.emitRX(Mips::BLEZ, Mips::ZERO, MCOperand::createExpr(OffsetExpr),
IDLoc, STI);
Warning(IDLoc, "branch is always taken");
return false;
}
if (PseudoOpcode == Mips::BGE) {
TOut.emitRX(Mips::BGEZ, Mips::ZERO, MCOperand::createExpr(OffsetExpr),
IDLoc, STI);
Warning(IDLoc, "branch is always taken");
return false;
}
if (PseudoOpcode == Mips::BGT) {
TOut.emitRX(Mips::BGTZ, Mips::ZERO, MCOperand::createExpr(OffsetExpr),
IDLoc, STI);
return false;
}
if (PseudoOpcode == Mips::BGTU) {
TOut.emitRRX(Mips::BNE, Mips::ZERO, Mips::ZERO,
MCOperand::createExpr(OffsetExpr), IDLoc, STI);
return false;
}
if (AcceptsEquality) {
// If both registers are $0 and the pseudo-branch accepts equality, it
// will always be taken, so we emit an unconditional branch.
TOut.emitRRX(Mips::BEQ, Mips::ZERO, Mips::ZERO,
MCOperand::createExpr(OffsetExpr), IDLoc, STI);
Warning(IDLoc, "branch is always taken");
return false;
}
// If both registers are $0 and the pseudo-branch does not accept
// equality, it will never be taken, so we don't have to emit anything.
return false;
}
if (IsSrcRegZero || IsTrgRegZero) {
if ((IsSrcRegZero && PseudoOpcode == Mips::BGTU) ||
(IsTrgRegZero && PseudoOpcode == Mips::BLTU)) {
// If the $rs is $0 and the pseudo-branch is BGTU (0 > x) or
// if the $rt is $0 and the pseudo-branch is BLTU (x < 0),
// the pseudo-branch will never be taken, so we don't emit anything.
// This only applies to unsigned pseudo-branches.
return false;
}
if ((IsSrcRegZero && PseudoOpcode == Mips::BLEU) ||
(IsTrgRegZero && PseudoOpcode == Mips::BGEU)) {
// If the $rs is $0 and the pseudo-branch is BLEU (0 <= x) or
// if the $rt is $0 and the pseudo-branch is BGEU (x >= 0),
// the pseudo-branch will always be taken, so we emit an unconditional
// branch.
// This only applies to unsigned pseudo-branches.
TOut.emitRRX(Mips::BEQ, Mips::ZERO, Mips::ZERO,
MCOperand::createExpr(OffsetExpr), IDLoc, STI);
Warning(IDLoc, "branch is always taken");
return false;
}
if (IsUnsigned) {
// If the $rs is $0 and the pseudo-branch is BLTU (0 < x) or
// if the $rt is $0 and the pseudo-branch is BGTU (x > 0),
// the pseudo-branch will be taken only when the non-zero register is
// different from 0, so we emit a BNEZ.
//
// If the $rs is $0 and the pseudo-branch is BGEU (0 >= x) or
// if the $rt is $0 and the pseudo-branch is BLEU (x <= 0),
// the pseudo-branch will be taken only when the non-zero register is
// equal to 0, so we emit a BEQZ.
//
// Because only BLEU and BGEU branch on equality, we can use the
// AcceptsEquality variable to decide when to emit the BEQZ.
TOut.emitRRX(AcceptsEquality ? Mips::BEQ : Mips::BNE,
IsSrcRegZero ? TrgReg : SrcReg, Mips::ZERO,
MCOperand::createExpr(OffsetExpr), IDLoc, STI);
return false;
}
// If we have a signed pseudo-branch and one of the registers is $0,
// we can use an appropriate compare-to-zero branch. We select which one
// to use in the switch statement above.
TOut.emitRX(IsSrcRegZero ? ZeroSrcOpcode : ZeroTrgOpcode,
IsSrcRegZero ? TrgReg : SrcReg,
MCOperand::createExpr(OffsetExpr), IDLoc, STI);
return false;
}
// If neither SrcReg nor TrgReg is $0, we need AT to perform the
// expansions. If it is not available, we return.
unsigned ATRegNum = getATReg(IDLoc);
if (!ATRegNum)
return true;
if (!EmittedNoMacroWarning)
warnIfNoMacro(IDLoc);
// SLT fits well with 2 of our 4 pseudo-branches:
// BLT, where $rs < $rt, translates into "slt $at, $rs, $rt" and
// BGT, where $rs > $rt, translates into "slt $at, $rt, $rs".
// If the result of the SLT is 1, we branch, and if it's 0, we don't.
// This is accomplished by using a BNEZ with the result of the SLT.
//
// The other 2 pseudo-branches are opposites of the above 2 (BGE with BLT
// and BLE with BGT), so we change the BNEZ into a BEQZ.
// Because only BGE and BLE branch on equality, we can use the
// AcceptsEquality variable to decide when to emit the BEQZ.
// Note that the order of the SLT arguments doesn't change between
// opposites.
//
// The same applies to the unsigned variants, except that SLTu is used
// instead of SLT.
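//
// Illustrative expansions (a sketch only; register names are examples and
// $at is assumed to be available):
// blt $a0, $a1, L => slt $at, $a0, $a1
// bne $at, $zero, L
// ble $a0, $a1, L => slt $at, $a1, $a0
// beq $at, $zero, L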
TOut.emitRRR(IsUnsigned ? Mips::SLTu : Mips::SLT, ATRegNum,
ReverseOrderSLT ? TrgReg : SrcReg,
ReverseOrderSLT ? SrcReg : TrgReg, IDLoc, STI);
TOut.emitRRX(IsLikely ? (AcceptsEquality ? Mips::BEQL : Mips::BNEL)
: (AcceptsEquality ? Mips::BEQ : Mips::BNE),
ATRegNum, Mips::ZERO, MCOperand::createExpr(OffsetExpr), IDLoc,
STI);
return false;
}
// Expand an integer division macro.
//
// Notably we don't have to emit a warning when encountering $rt as the $zero
// register, or 0 as an immediate. processInstruction() has already done that.
//
// The destination register can only be $zero when expanding (S)DivIMacro or
// D(S)DivMacro.
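//
// As a sketch (illustrative; the ordering mirrors the emission below), a
// signed 32-bit "div $v0, $a0, $a1" without trap instructions expands to:
// bne $a1, $zero, 1f
// div $a0, $a1 # executes in the delay slot
// break 0x7 # divide by zero
// 1: addiu $at, $zero, -1
// bne $a1, $at, 2f
// lui $at, 0x8000 # INT_MIN, built in the delay slot
// bne $a0, $at, 2f
// nop
// break 0x6 # overflow: INT_MIN / -1
// 2: mflo $v0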
bool MipsAsmParser::expandDivRem(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI, const bool IsMips64,
const bool Signed) {
MipsTargetStreamer &TOut = getTargetStreamer();
warnIfNoMacro(IDLoc);
const MCOperand &RdRegOp = Inst.getOperand(0);
assert(RdRegOp.isReg() && "expected register operand kind");
unsigned RdReg = RdRegOp.getReg();
const MCOperand &RsRegOp = Inst.getOperand(1);
assert(RsRegOp.isReg() && "expected register operand kind");
unsigned RsReg = RsRegOp.getReg();
unsigned RtReg;
int64_t ImmValue;
const MCOperand &RtOp = Inst.getOperand(2);
assert((RtOp.isReg() || RtOp.isImm()) &&
"expected register or immediate operand kind");
if (RtOp.isReg())
RtReg = RtOp.getReg();
else
ImmValue = RtOp.getImm();
unsigned DivOp;
unsigned ZeroReg;
unsigned SubOp;
if (IsMips64) {
DivOp = Signed ? Mips::DSDIV : Mips::DUDIV;
ZeroReg = Mips::ZERO_64;
SubOp = Mips::DSUB;
} else {
DivOp = Signed ? Mips::SDIV : Mips::UDIV;
ZeroReg = Mips::ZERO;
SubOp = Mips::SUB;
}
bool UseTraps = useTraps();
unsigned Opcode = Inst.getOpcode();
bool isDiv = Opcode == Mips::SDivMacro || Opcode == Mips::SDivIMacro ||
Opcode == Mips::UDivMacro || Opcode == Mips::UDivIMacro ||
Opcode == Mips::DSDivMacro || Opcode == Mips::DSDivIMacro ||
Opcode == Mips::DUDivMacro || Opcode == Mips::DUDivIMacro;
bool isRem = Opcode == Mips::SRemMacro || Opcode == Mips::SRemIMacro ||
Opcode == Mips::URemMacro || Opcode == Mips::URemIMacro ||
Opcode == Mips::DSRemMacro || Opcode == Mips::DSRemIMacro ||
Opcode == Mips::DURemMacro || Opcode == Mips::DURemIMacro;
if (RtOp.isImm()) {
unsigned ATReg = getATReg(IDLoc);
if (!ATReg)
return true;
if (ImmValue == 0) {
if (UseTraps)
TOut.emitRRI(Mips::TEQ, ZeroReg, ZeroReg, 0x7, IDLoc, STI);
else
TOut.emitII(Mips::BREAK, 0x7, 0, IDLoc, STI);
return false;
}
if (isRem && (ImmValue == 1 || (Signed && (ImmValue == -1)))) {
TOut.emitRRR(Mips::OR, RdReg, ZeroReg, ZeroReg, IDLoc, STI);
return false;
} else if (isDiv && ImmValue == 1) {
TOut.emitRRR(Mips::OR, RdReg, RsReg, Mips::ZERO, IDLoc, STI);
return false;
} else if (isDiv && Signed && ImmValue == -1) {
TOut.emitRRR(SubOp, RdReg, ZeroReg, RsReg, IDLoc, STI);
return false;
} else {
if (loadImmediate(ImmValue, ATReg, Mips::NoRegister, isInt<32>(ImmValue),
false, Inst.getLoc(), Out, STI))
return true;
TOut.emitRR(DivOp, RsReg, ATReg, IDLoc, STI);
TOut.emitR(isDiv ? Mips::MFLO : Mips::MFHI, RdReg, IDLoc, STI);
return false;
}
return true;
}
// If the macro expansion of (d)div(u) or (d)rem(u) would always trap or
// break, insert the trap/break and exit. This gives a different result to
// GAS. GAS has an inconsistency/missed optimization in that not all cases
// are handled equivalently. As the observed behaviour is the same, we're ok.
if (RtReg == Mips::ZERO || RtReg == Mips::ZERO_64) {
if (UseTraps) {
TOut.emitRRI(Mips::TEQ, ZeroReg, ZeroReg, 0x7, IDLoc, STI);
return false;
}
TOut.emitII(Mips::BREAK, 0x7, 0, IDLoc, STI);
return false;
}
// (d)rem(u) $0, $X, $Y is a special case. Like div $zero, $X, $Y, it does
// not expand to a macro sequence.
if (isRem && (RdReg == Mips::ZERO || RdReg == Mips::ZERO_64)) {
TOut.emitRR(DivOp, RsReg, RtReg, IDLoc, STI);
return false;
}
// Temporary label for the first branch target.
MCContext &Context = TOut.getStreamer().getContext();
MCSymbol *BrTarget;
MCOperand LabelOp;
if (UseTraps) {
TOut.emitRRI(Mips::TEQ, RtReg, ZeroReg, 0x7, IDLoc, STI);
} else {
// Branch to the li instruction.
BrTarget = Context.createTempSymbol();
LabelOp = MCOperand::createExpr(MCSymbolRefExpr::create(BrTarget, Context));
TOut.emitRRX(Mips::BNE, RtReg, ZeroReg, LabelOp, IDLoc, STI);
}
TOut.emitRR(DivOp, RsReg, RtReg, IDLoc, STI);
if (!UseTraps)
TOut.emitII(Mips::BREAK, 0x7, 0, IDLoc, STI);
if (!Signed) {
if (!UseTraps)
TOut.getStreamer().EmitLabel(BrTarget);
TOut.emitR(isDiv ? Mips::MFLO : Mips::MFHI, RdReg, IDLoc, STI);
return false;
}
unsigned ATReg = getATReg(IDLoc);
if (!ATReg)
return true;
if (!UseTraps)
TOut.getStreamer().EmitLabel(BrTarget);
TOut.emitRRI(Mips::ADDiu, ATReg, ZeroReg, -1, IDLoc, STI);
// Temporary label for the second branch target.
MCSymbol *BrTargetEnd = Context.createTempSymbol();
MCOperand LabelOpEnd =
MCOperand::createExpr(MCSymbolRefExpr::create(BrTargetEnd, Context));
// Branch to the mflo instruction.
TOut.emitRRX(Mips::BNE, RtReg, ATReg, LabelOpEnd, IDLoc, STI);
if (IsMips64) {
TOut.emitRRI(Mips::ADDiu, ATReg, ZeroReg, 1, IDLoc, STI);
TOut.emitDSLL(ATReg, ATReg, 63, IDLoc, STI);
} else {
TOut.emitRI(Mips::LUi, ATReg, (uint16_t)0x8000, IDLoc, STI);
}
if (UseTraps)
TOut.emitRRI(Mips::TEQ, RsReg, ATReg, 0x6, IDLoc, STI);
else {
// Branch to the mflo instruction.
TOut.emitRRX(Mips::BNE, RsReg, ATReg, LabelOpEnd, IDLoc, STI);
TOut.emitNop(IDLoc, STI);
TOut.emitII(Mips::BREAK, 0x6, 0, IDLoc, STI);
}
TOut.getStreamer().EmitLabel(BrTargetEnd);
TOut.emitR(isDiv ? Mips::MFLO : Mips::MFHI, RdReg, IDLoc, STI);
return false;
}
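// Expand the trunc.w.s/trunc.w.d macros. On MIPS II and later this is a
// single TRUNC.W.fmt instruction. MIPS I lacks it, so the expansion below
// (following GAS) reads the FCSR with cfc1, forces the rounding mode to
// round-toward-zero (ori 0x3 then xori 0x2 leaves RM = 1), converts with
// cvt.w.fmt, and then restores the original FCSR.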
bool MipsAsmParser::expandTrunc(MCInst &Inst, bool IsDouble, bool Is64FPU,
SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI) {
MipsTargetStreamer &TOut = getTargetStreamer();
assert(Inst.getNumOperands() == 3 && "Invalid operand count");
assert(Inst.getOperand(0).isReg() && Inst.getOperand(1).isReg() &&
Inst.getOperand(2).isReg() && "Invalid instruction operand.");
unsigned FirstReg = Inst.getOperand(0).getReg();
unsigned SecondReg = Inst.getOperand(1).getReg();
unsigned ThirdReg = Inst.getOperand(2).getReg();
if (hasMips1() && !hasMips2()) {
unsigned ATReg = getATReg(IDLoc);
if (!ATReg)
return true;
TOut.emitRR(Mips::CFC1, ThirdReg, Mips::RA, IDLoc, STI);
TOut.emitRR(Mips::CFC1, ThirdReg, Mips::RA, IDLoc, STI);
TOut.emitNop(IDLoc, STI);
TOut.emitRRI(Mips::ORi, ATReg, ThirdReg, 0x3, IDLoc, STI);
TOut.emitRRI(Mips::XORi, ATReg, ATReg, 0x2, IDLoc, STI);
TOut.emitRR(Mips::CTC1, Mips::RA, ATReg, IDLoc, STI);
TOut.emitNop(IDLoc, STI);
TOut.emitRR(IsDouble ? (Is64FPU ? Mips::CVT_W_D64 : Mips::CVT_W_D32)
: Mips::CVT_W_S,
FirstReg, SecondReg, IDLoc, STI);
TOut.emitRR(Mips::CTC1, Mips::RA, ThirdReg, IDLoc, STI);
TOut.emitNop(IDLoc, STI);
return false;
}
TOut.emitRR(IsDouble ? (Is64FPU ? Mips::TRUNC_W_D64 : Mips::TRUNC_W_D32)
: Mips::TRUNC_W_S,
FirstReg, SecondReg, IDLoc, STI);
return false;
}
bool MipsAsmParser::expandUlh(MCInst &Inst, bool Signed, SMLoc IDLoc,
MCStreamer &Out, const MCSubtargetInfo *STI) {
if (hasMips32r6() || hasMips64r6()) {
return Error(IDLoc, "instruction not supported on mips32r6 or mips64r6");
}
const MCOperand &DstRegOp = Inst.getOperand(0);
assert(DstRegOp.isReg() && "expected register operand kind");
const MCOperand &SrcRegOp = Inst.getOperand(1);
assert(SrcRegOp.isReg() && "expected register operand kind");
const MCOperand &OffsetImmOp = Inst.getOperand(2);
assert(OffsetImmOp.isImm() && "expected immediate operand kind");
MipsTargetStreamer &TOut = getTargetStreamer();
unsigned DstReg = DstRegOp.getReg();
unsigned SrcReg = SrcRegOp.getReg();
int64_t OffsetValue = OffsetImmOp.getImm();
// NOTE: We always need AT for ULHU, as it is always used as the source
// register for one of the LBu's.
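// Illustrative little-endian, in-range expansion of "ulh $t0, off($s0)":
// lb $at, off+1($s0) # high byte, sign-extended
// lbu $t0, off($s0) # low byte
// sll $at, $at, 8
// or $t0, $t0, $at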
warnIfNoMacro(IDLoc);
unsigned ATReg = getATReg(IDLoc);
if (!ATReg)
return true;
bool IsLargeOffset = !(isInt<16>(OffsetValue + 1) && isInt<16>(OffsetValue));
if (IsLargeOffset) {
if (loadImmediate(OffsetValue, ATReg, SrcReg, !ABI.ArePtrs64bit(), true,
IDLoc, Out, STI))
return true;
}
int64_t FirstOffset = IsLargeOffset ? 0 : OffsetValue;
int64_t SecondOffset = IsLargeOffset ? 1 : (OffsetValue + 1);
if (isLittle())
std::swap(FirstOffset, SecondOffset);
unsigned FirstLbuDstReg = IsLargeOffset ? DstReg : ATReg;
unsigned SecondLbuDstReg = IsLargeOffset ? ATReg : DstReg;
unsigned LbuSrcReg = IsLargeOffset ? ATReg : SrcReg;
unsigned SllReg = IsLargeOffset ? DstReg : ATReg;
TOut.emitRRI(Signed ? Mips::LB : Mips::LBu, FirstLbuDstReg, LbuSrcReg,
FirstOffset, IDLoc, STI);
TOut.emitRRI(Mips::LBu, SecondLbuDstReg, LbuSrcReg, SecondOffset, IDLoc, STI);
TOut.emitRRI(Mips::SLL, SllReg, SllReg, 8, IDLoc, STI);
TOut.emitRRR(Mips::OR, DstReg, DstReg, ATReg, IDLoc, STI);
return false;
}
bool MipsAsmParser::expandUsh(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI) {
if (hasMips32r6() || hasMips64r6()) {
return Error(IDLoc, "instruction not supported on mips32r6 or mips64r6");
}
const MCOperand &DstRegOp = Inst.getOperand(0);
assert(DstRegOp.isReg() && "expected register operand kind");
const MCOperand &SrcRegOp = Inst.getOperand(1);
assert(SrcRegOp.isReg() && "expected register operand kind");
const MCOperand &OffsetImmOp = Inst.getOperand(2);
assert(OffsetImmOp.isImm() && "expected immediate operand kind");
MipsTargetStreamer &TOut = getTargetStreamer();
unsigned DstReg = DstRegOp.getReg();
unsigned SrcReg = SrcRegOp.getReg();
int64_t OffsetValue = OffsetImmOp.getImm();
warnIfNoMacro(IDLoc);
unsigned ATReg = getATReg(IDLoc);
if (!ATReg)
return true;
bool IsLargeOffset = !(isInt<16>(OffsetValue + 1) && isInt<16>(OffsetValue));
if (IsLargeOffset) {
if (loadImmediate(OffsetValue, ATReg, SrcReg, !ABI.ArePtrs64bit(), true,
IDLoc, Out, STI))
return true;
}
int64_t FirstOffset = IsLargeOffset ? 1 : (OffsetValue + 1);
int64_t SecondOffset = IsLargeOffset ? 0 : OffsetValue;
if (isLittle())
std::swap(FirstOffset, SecondOffset);
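// Illustrative little-endian, in-range expansion of "ush $t0, off($s0)":
// sb $t0, off($s0) # low byte
// srl $at, $t0, 8
// sb $at, off+1($s0) # high byte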
if (IsLargeOffset) {
TOut.emitRRI(Mips::SB, DstReg, ATReg, FirstOffset, IDLoc, STI);
TOut.emitRRI(Mips::SRL, DstReg, DstReg, 8, IDLoc, STI);
TOut.emitRRI(Mips::SB, DstReg, ATReg, SecondOffset, IDLoc, STI);
TOut.emitRRI(Mips::LBu, ATReg, ATReg, 0, IDLoc, STI);
TOut.emitRRI(Mips::SLL, DstReg, DstReg, 8, IDLoc, STI);
TOut.emitRRR(Mips::OR, DstReg, DstReg, ATReg, IDLoc, STI);
} else {
TOut.emitRRI(Mips::SB, DstReg, SrcReg, FirstOffset, IDLoc, STI);
TOut.emitRRI(Mips::SRL, ATReg, DstReg, 8, IDLoc, STI);
TOut.emitRRI(Mips::SB, ATReg, SrcReg, SecondOffset, IDLoc, STI);
}
return false;
}
bool MipsAsmParser::expandUxw(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI) {
if (hasMips32r6() || hasMips64r6()) {
return Error(IDLoc, "instruction not supported on mips32r6 or mips64r6");
}
const MCOperand &DstRegOp = Inst.getOperand(0);
assert(DstRegOp.isReg() && "expected register operand kind");
const MCOperand &SrcRegOp = Inst.getOperand(1);
assert(SrcRegOp.isReg() && "expected register operand kind");
const MCOperand &OffsetImmOp = Inst.getOperand(2);
assert(OffsetImmOp.isImm() && "expected immediate operand kind");
MipsTargetStreamer &TOut = getTargetStreamer();
unsigned DstReg = DstRegOp.getReg();
unsigned SrcReg = SrcRegOp.getReg();
int64_t OffsetValue = OffsetImmOp.getImm();
// Compute left/right load/store offsets.
bool IsLargeOffset = !(isInt<16>(OffsetValue + 3) && isInt<16>(OffsetValue));
int64_t LxlOffset = IsLargeOffset ? 0 : OffsetValue;
int64_t LxrOffset = IsLargeOffset ? 3 : (OffsetValue + 3);
if (isLittle())
std::swap(LxlOffset, LxrOffset);
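// Illustrative little-endian, in-range expansion of "ulw $t0, off($s0)":
// lwl $t0, off+3($s0)
// lwr $t0, off($s0)
// (usw is the same shape with swl/swr.)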
bool IsLoadInst = (Inst.getOpcode() == Mips::Ulw);
bool DoMove = IsLoadInst && (SrcReg == DstReg) && !IsLargeOffset;
unsigned TmpReg = SrcReg;
if (IsLargeOffset || DoMove) {
warnIfNoMacro(IDLoc);
TmpReg = getATReg(IDLoc);
if (!TmpReg)
return true;
}
if (IsLargeOffset) {
if (loadImmediate(OffsetValue, TmpReg, SrcReg, !ABI.ArePtrs64bit(), true,
IDLoc, Out, STI))
return true;
}
if (DoMove)
std::swap(DstReg, TmpReg);
unsigned XWL = IsLoadInst ? Mips::LWL : Mips::SWL;
unsigned XWR = IsLoadInst ? Mips::LWR : Mips::SWR;
TOut.emitRRI(XWL, DstReg, TmpReg, LxlOffset, IDLoc, STI);
TOut.emitRRI(XWR, DstReg, TmpReg, LxrOffset, IDLoc, STI);
if (DoMove)
TOut.emitRRR(Mips::OR, TmpReg, DstReg, Mips::ZERO, IDLoc, STI);
return false;
}
bool MipsAsmParser::expandSge(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI) {
MipsTargetStreamer &TOut = getTargetStreamer();
assert(Inst.getNumOperands() == 3 && "Invalid operand count");
assert(Inst.getOperand(0).isReg() &&
Inst.getOperand(1).isReg() &&
Inst.getOperand(2).isReg() && "Invalid instruction operand.");
unsigned DstReg = Inst.getOperand(0).getReg();
unsigned SrcReg = Inst.getOperand(1).getReg();
unsigned OpReg = Inst.getOperand(2).getReg();
unsigned OpCode;
warnIfNoMacro(IDLoc);
switch (Inst.getOpcode()) {
case Mips::SGE:
OpCode = Mips::SLT;
break;
case Mips::SGEU:
OpCode = Mips::SLTu;
break;
default:
llvm_unreachable("unexpected 'sge' opcode");
}
// $SrcReg >= $OpReg is equal to (not ($SrcReg < $OpReg))
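// e.g. "sge $v0, $a0, $a1" expands to "slt $v0, $a0, $a1" followed by
// "xori $v0, $v0, 1".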
TOut.emitRRR(OpCode, DstReg, SrcReg, OpReg, IDLoc, STI);
TOut.emitRRI(Mips::XORi, DstReg, DstReg, 1, IDLoc, STI);
return false;
}
bool MipsAsmParser::expandSgeImm(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI) {
MipsTargetStreamer &TOut = getTargetStreamer();
assert(Inst.getNumOperands() == 3 && "Invalid operand count");
assert(Inst.getOperand(0).isReg() &&
Inst.getOperand(1).isReg() &&
Inst.getOperand(2).isImm() && "Invalid instruction operand.");
unsigned DstReg = Inst.getOperand(0).getReg();
unsigned SrcReg = Inst.getOperand(1).getReg();
int64_t ImmValue = Inst.getOperand(2).getImm();
unsigned OpRegCode, OpImmCode;
warnIfNoMacro(IDLoc);
switch (Inst.getOpcode()) {
case Mips::SGEImm:
case Mips::SGEImm64:
OpRegCode = Mips::SLT;
OpImmCode = Mips::SLTi;
break;
case Mips::SGEUImm:
case Mips::SGEUImm64:
OpRegCode = Mips::SLTu;
OpImmCode = Mips::SLTiu;
break;
default:
llvm_unreachable("unexpected 'sge' opcode with immediate");
}
// $SrcReg >= Imm is equal to (not ($SrcReg < Imm))
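// e.g. with a 16-bit immediate, "sge $v0, $a0, 8" expands to
// "slti $v0, $a0, 8" followed by "xori $v0, $v0, 1".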
if (isInt<16>(ImmValue)) {
// Use the immediate version of SLT.
TOut.emitRRI(OpImmCode, DstReg, SrcReg, ImmValue, IDLoc, STI);
TOut.emitRRI(Mips::XORi, DstReg, DstReg, 1, IDLoc, STI);
} else {
unsigned ImmReg = DstReg;
if (DstReg == SrcReg) {
unsigned ATReg = getATReg(Inst.getLoc());
if (!ATReg)
return true;
ImmReg = ATReg;
}
if (loadImmediate(ImmValue, ImmReg, Mips::NoRegister, isInt<32>(ImmValue),
false, IDLoc, Out, STI))
return true;
TOut.emitRRR(OpRegCode, DstReg, SrcReg, ImmReg, IDLoc, STI);
TOut.emitRRI(Mips::XORi, DstReg, DstReg, 1, IDLoc, STI);
}
return false;
}
bool MipsAsmParser::expandSgtImm(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI) {
MipsTargetStreamer &TOut = getTargetStreamer();
assert(Inst.getNumOperands() == 3 && "Invalid operand count");
assert(Inst.getOperand(0).isReg() &&
Inst.getOperand(1).isReg() &&
Inst.getOperand(2).isImm() && "Invalid instruction operand.");
unsigned DstReg = Inst.getOperand(0).getReg();
unsigned SrcReg = Inst.getOperand(1).getReg();
unsigned ImmReg = DstReg;
int64_t ImmValue = Inst.getOperand(2).getImm();
unsigned OpCode;
warnIfNoMacro(IDLoc);
switch (Inst.getOpcode()) {
case Mips::SGTImm:
case Mips::SGTImm64:
OpCode = Mips::SLT;
break;
case Mips::SGTUImm:
case Mips::SGTUImm64:
OpCode = Mips::SLTu;
break;
default:
llvm_unreachable("unexpected 'sgt' opcode with immediate");
}
if (DstReg == SrcReg) {
unsigned ATReg = getATReg(Inst.getLoc());
if (!ATReg)
return true;
ImmReg = ATReg;
}
if (loadImmediate(ImmValue, ImmReg, Mips::NoRegister, isInt<32>(ImmValue),
false, IDLoc, Out, STI))
return true;
// $SrcReg > $ImmReg is equal to $ImmReg < $SrcReg
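// e.g. "sgt $v0, $a0, 8" expands to "addiu $v0, $zero, 8" followed by
// "slt $v0, $v0, $a0" (illustrative; a larger immediate needs a longer
// immediate load).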
TOut.emitRRR(OpCode, DstReg, ImmReg, SrcReg, IDLoc, STI);
return false;
}
bool MipsAsmParser::expandAliasImmediate(MCInst &Inst, SMLoc IDLoc,
MCStreamer &Out,
const MCSubtargetInfo *STI) {
MipsTargetStreamer &TOut = getTargetStreamer();
assert(Inst.getNumOperands() == 3 && "Invalid operand count");
assert(Inst.getOperand(0).isReg() &&
Inst.getOperand(1).isReg() &&
Inst.getOperand(2).isImm() && "Invalid instruction operand.");
unsigned ATReg = Mips::NoRegister;
unsigned FinalDstReg = Mips::NoRegister;
unsigned DstReg = Inst.getOperand(0).getReg();
unsigned SrcReg = Inst.getOperand(1).getReg();
int64_t ImmValue = Inst.getOperand(2).getImm();
bool Is32Bit = isInt<32>(ImmValue) || (!isGP64bit() && isUInt<32>(ImmValue));
unsigned FinalOpcode = Inst.getOpcode();
if (DstReg == SrcReg) {
ATReg = getATReg(Inst.getLoc());
if (!ATReg)
return true;
FinalDstReg = DstReg;
DstReg = ATReg;
}
if (!loadImmediate(ImmValue, DstReg, Mips::NoRegister, Is32Bit, false,
Inst.getLoc(), Out, STI)) {
switch (FinalOpcode) {
default:
llvm_unreachable("unimplemented expansion");
case Mips::ADDi:
FinalOpcode = Mips::ADD;
break;
case Mips::ADDiu:
FinalOpcode = Mips::ADDu;
break;
case Mips::ANDi:
FinalOpcode = Mips::AND;
break;
case Mips::NORImm:
FinalOpcode = Mips::NOR;
break;
case Mips::ORi:
FinalOpcode = Mips::OR;
break;
case Mips::SLTi:
FinalOpcode = Mips::SLT;
break;
case Mips::SLTiu:
FinalOpcode = Mips::SLTu;
break;
case Mips::XORi:
FinalOpcode = Mips::XOR;
break;
case Mips::ADDi_MM:
FinalOpcode = Mips::ADD_MM;
break;
case Mips::ADDiu_MM:
FinalOpcode = Mips::ADDu_MM;
break;
case Mips::ANDi_MM:
FinalOpcode = Mips::AND_MM;
break;
case Mips::ORi_MM:
FinalOpcode = Mips::OR_MM;
break;
case Mips::SLTi_MM:
FinalOpcode = Mips::SLT_MM;
break;
case Mips::SLTiu_MM:
FinalOpcode = Mips::SLTu_MM;
break;
case Mips::XORi_MM:
FinalOpcode = Mips::XOR_MM;
break;
case Mips::ANDi64:
FinalOpcode = Mips::AND64;
break;
case Mips::NORImm64:
FinalOpcode = Mips::NOR64;
break;
case Mips::ORi64:
FinalOpcode = Mips::OR64;
break;
case Mips::SLTImm64:
FinalOpcode = Mips::SLT64;
break;
case Mips::SLTUImm64:
FinalOpcode = Mips::SLTu64;
break;
case Mips::XORi64:
FinalOpcode = Mips::XOR64;
break;
}
if (FinalDstReg == Mips::NoRegister)
TOut.emitRRR(FinalOpcode, DstReg, DstReg, SrcReg, IDLoc, STI);
else
TOut.emitRRR(FinalOpcode, FinalDstReg, FinalDstReg, DstReg, IDLoc, STI);
return false;
}
return true;
}
bool MipsAsmParser::expandRotation(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI) {
MipsTargetStreamer &TOut = getTargetStreamer();
unsigned ATReg = Mips::NoRegister;
unsigned DReg = Inst.getOperand(0).getReg();
unsigned SReg = Inst.getOperand(1).getReg();
unsigned TReg = Inst.getOperand(2).getReg();
unsigned TmpReg = DReg;
unsigned FirstShift = Mips::NOP;
unsigned SecondShift = Mips::NOP;
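// Illustrative pre-R2 expansion of "rol $t0, $t1, $t2":
// subu $at, $zero, $t2 # 32 - amount, modulo 32
// srlv $at, $t1, $at
// sllv $t0, $t1, $t2
// or $t0, $t0, $at
// On MIPS32r2 a rotate is a single rotrv (with the amount negated first
// for rol).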
if (hasMips32r2()) {
if (DReg == SReg) {
TmpReg = getATReg(Inst.getLoc());
if (!TmpReg)
return true;
}
if (Inst.getOpcode() == Mips::ROL) {
TOut.emitRRR(Mips::SUBu, TmpReg, Mips::ZERO, TReg, Inst.getLoc(), STI);
TOut.emitRRR(Mips::ROTRV, DReg, SReg, TmpReg, Inst.getLoc(), STI);
return false;
}
if (Inst.getOpcode() == Mips::ROR) {
TOut.emitRRR(Mips::ROTRV, DReg, SReg, TReg, Inst.getLoc(), STI);
return false;
}
return true;
}
if (hasMips32()) {
switch (Inst.getOpcode()) {
default:
llvm_unreachable("unexpected instruction opcode");
case Mips::ROL:
FirstShift = Mips::SRLV;
SecondShift = Mips::SLLV;
break;
case Mips::ROR:
FirstShift = Mips::SLLV;
SecondShift = Mips::SRLV;
break;
}
ATReg = getATReg(Inst.getLoc());
if (!ATReg)
return true;
TOut.emitRRR(Mips::SUBu, ATReg, Mips::ZERO, TReg, Inst.getLoc(), STI);
TOut.emitRRR(FirstShift, ATReg, SReg, ATReg, Inst.getLoc(), STI);
TOut.emitRRR(SecondShift, DReg, SReg, TReg, Inst.getLoc(), STI);
TOut.emitRRR(Mips::OR, DReg, DReg, ATReg, Inst.getLoc(), STI);
return false;
}
return true;
}
bool MipsAsmParser::expandRotationImm(MCInst &Inst, SMLoc IDLoc,
MCStreamer &Out,
const MCSubtargetInfo *STI) {
MipsTargetStreamer &TOut = getTargetStreamer();
unsigned ATReg = Mips::NoRegister;
unsigned DReg = Inst.getOperand(0).getReg();
unsigned SReg = Inst.getOperand(1).getReg();
int64_t ImmValue = Inst.getOperand(2).getImm();
unsigned FirstShift = Mips::NOP;
unsigned SecondShift = Mips::NOP;
if (hasMips32r2()) {
if (Inst.getOpcode() == Mips::ROLImm) {
uint64_t MaxShift = 32;
uint64_t ShiftValue = ImmValue;
if (ImmValue != 0)
ShiftValue = MaxShift - ImmValue;
TOut.emitRRI(Mips::ROTR, DReg, SReg, ShiftValue, Inst.getLoc(), STI);
return false;
}
if (Inst.getOpcode() == Mips::RORImm) {
TOut.emitRRI(Mips::ROTR, DReg, SReg, ImmValue, Inst.getLoc(), STI);
return false;
}
return true;
}
if (hasMips32()) {
if (ImmValue == 0) {
TOut.emitRRI(Mips::SRL, DReg, SReg, 0, Inst.getLoc(), STI);
return false;
}
switch (Inst.getOpcode()) {
default:
llvm_unreachable("unexpected instruction opcode");
case Mips::ROLImm:
FirstShift = Mips::SLL;
SecondShift = Mips::SRL;
break;
case Mips::RORImm:
FirstShift = Mips::SRL;
SecondShift = Mips::SLL;
break;
}
ATReg = getATReg(Inst.getLoc());
if (!ATReg)
return true;
TOut.emitRRI(FirstShift, ATReg, SReg, ImmValue, Inst.getLoc(), STI);
TOut.emitRRI(SecondShift, DReg, SReg, 32 - ImmValue, Inst.getLoc(), STI);
TOut.emitRRR(Mips::OR, DReg, DReg, ATReg, Inst.getLoc(), STI);
return false;
}
return true;
}
bool MipsAsmParser::expandDRotation(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI) {
MipsTargetStreamer &TOut = getTargetStreamer();
unsigned ATReg = Mips::NoRegister;
unsigned DReg = Inst.getOperand(0).getReg();
unsigned SReg = Inst.getOperand(1).getReg();
unsigned TReg = Inst.getOperand(2).getReg();
unsigned TmpReg = DReg;
unsigned FirstShift = Mips::NOP;
unsigned SecondShift = Mips::NOP;
if (hasMips64r2()) {
if (TmpReg == SReg) {
TmpReg = getATReg(Inst.getLoc());
if (!TmpReg)
return true;
}
if (Inst.getOpcode() == Mips::DROL) {
TOut.emitRRR(Mips::DSUBu, TmpReg, Mips::ZERO, TReg, Inst.getLoc(), STI);
TOut.emitRRR(Mips::DROTRV, DReg, SReg, TmpReg, Inst.getLoc(), STI);
return false;
}
if (Inst.getOpcode() == Mips::DROR) {
TOut.emitRRR(Mips::DROTRV, DReg, SReg, TReg, Inst.getLoc(), STI);
return false;
}
return true;
}
if (hasMips64()) {
switch (Inst.getOpcode()) {
default:
llvm_unreachable("unexpected instruction opcode");
case Mips::DROL:
FirstShift = Mips::DSRLV;
SecondShift = Mips::DSLLV;
break;
case Mips::DROR:
FirstShift = Mips::DSLLV;
SecondShift = Mips::DSRLV;
break;
}
ATReg = getATReg(Inst.getLoc());
if (!ATReg)
return true;
TOut.emitRRR(Mips::DSUBu, ATReg, Mips::ZERO, TReg, Inst.getLoc(), STI);
TOut.emitRRR(FirstShift, ATReg, SReg, ATReg, Inst.getLoc(), STI);
TOut.emitRRR(SecondShift, DReg, SReg, TReg, Inst.getLoc(), STI);
TOut.emitRRR(Mips::OR, DReg, DReg, ATReg, Inst.getLoc(), STI);
return false;
}
return true;
}
bool MipsAsmParser::expandDRotationImm(MCInst &Inst, SMLoc IDLoc,
MCStreamer &Out,
const MCSubtargetInfo *STI) {
MipsTargetStreamer &TOut = getTargetStreamer();
unsigned ATReg = Mips::NoRegister;
unsigned DReg = Inst.getOperand(0).getReg();
unsigned SReg = Inst.getOperand(1).getReg();
int64_t ImmValue = Inst.getOperand(2).getImm() % 64;
unsigned FirstShift = Mips::NOP;
unsigned SecondShift = Mips::NOP;
MCInst TmpInst;
if (hasMips64r2()) {
unsigned FinalOpcode = Mips::NOP;
if (ImmValue == 0)
FinalOpcode = Mips::DROTR;
else if (ImmValue % 32 == 0)
FinalOpcode = Mips::DROTR32;
else if ((ImmValue >= 1) && (ImmValue <= 32)) {
if (Inst.getOpcode() == Mips::DROLImm)
FinalOpcode = Mips::DROTR32;
else
FinalOpcode = Mips::DROTR;
} else if (ImmValue >= 33) {
if (Inst.getOpcode() == Mips::DROLImm)
FinalOpcode = Mips::DROTR;
else
FinalOpcode = Mips::DROTR32;
}
uint64_t ShiftValue = ImmValue % 32;
if (Inst.getOpcode() == Mips::DROLImm)
ShiftValue = (32 - ImmValue % 32) % 32;
TOut.emitRRI(FinalOpcode, DReg, SReg, ShiftValue, Inst.getLoc(), STI);
return false;
}
if (hasMips64()) {
if (ImmValue == 0) {
TOut.emitRRI(Mips::DSRL, DReg, SReg, 0, Inst.getLoc(), STI);
return false;
}
switch (Inst.getOpcode()) {
default:
llvm_unreachable("unexpected instruction opcode");
case Mips::DROLImm:
if ((ImmValue >= 1) && (ImmValue <= 31)) {
FirstShift = Mips::DSLL;
SecondShift = Mips::DSRL32;
}
if (ImmValue == 32) {
FirstShift = Mips::DSLL32;
SecondShift = Mips::DSRL32;
}
if ((ImmValue >= 33) && (ImmValue <= 63)) {
FirstShift = Mips::DSLL32;
SecondShift = Mips::DSRL;
}
break;
case Mips::DRORImm:
if ((ImmValue >= 1) && (ImmValue <= 31)) {
FirstShift = Mips::DSRL;
SecondShift = Mips::DSLL32;
}
if (ImmValue == 32) {
FirstShift = Mips::DSRL32;
SecondShift = Mips::DSLL32;
}
if ((ImmValue >= 33) && (ImmValue <= 63)) {
FirstShift = Mips::DSRL32;
SecondShift = Mips::DSLL;
}
break;
}
ATReg = getATReg(Inst.getLoc());
if (!ATReg)
return true;
TOut.emitRRI(FirstShift, ATReg, SReg, ImmValue % 32, Inst.getLoc(), STI);
TOut.emitRRI(SecondShift, DReg, SReg, (32 - ImmValue % 32) % 32,
Inst.getLoc(), STI);
TOut.emitRRR(Mips::OR, DReg, DReg, ATReg, Inst.getLoc(), STI);
return false;
}
return true;
}
bool MipsAsmParser::expandAbs(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI) {
MipsTargetStreamer &TOut = getTargetStreamer();
unsigned FirstRegOp = Inst.getOperand(0).getReg();
unsigned SecondRegOp = Inst.getOperand(1).getReg();
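// Illustrative expansion of "abs $t0, $t1" (the branch skips the final
// sub when the input is already non-negative):
// bgez $t1, 1f
// addu $t0, $t1, $zero # delay slot: copy the value (a nop if $t0 == $t1)
// sub $t0, $zero, $t1
// 1: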
TOut.emitRI(Mips::BGEZ, SecondRegOp, 8, IDLoc, STI);
if (FirstRegOp != SecondRegOp)
TOut.emitRRR(Mips::ADDu, FirstRegOp, SecondRegOp, Mips::ZERO, IDLoc, STI);
else
TOut.emitEmptyDelaySlot(false, IDLoc, STI);
TOut.emitRRR(Mips::SUB, FirstRegOp, Mips::ZERO, SecondRegOp, IDLoc, STI);
return false;
}
bool MipsAsmParser::expandMulImm(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI) {
MipsTargetStreamer &TOut = getTargetStreamer();
unsigned ATReg = Mips::NoRegister;
unsigned DstReg = Inst.getOperand(0).getReg();
unsigned SrcReg = Inst.getOperand(1).getReg();
int32_t ImmValue = Inst.getOperand(2).getImm();
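// Illustrative expansion of "mul $t0, $t1, 1234" (16-bit immediate):
// addiu $at, $zero, 1234
// mult $t1, $at
// mflo $t0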
ATReg = getATReg(IDLoc);
if (!ATReg)
return true;
loadImmediate(ImmValue, ATReg, Mips::NoRegister, true, false, IDLoc, Out,
STI);
TOut.emitRR(Inst.getOpcode() == Mips::MULImmMacro ? Mips::MULT : Mips::DMULT,
SrcReg, ATReg, IDLoc, STI);
TOut.emitR(Mips::MFLO, DstReg, IDLoc, STI);
return false;
}
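// Expand mulo/dmulo, a multiply that traps or breaks on signed overflow.
// Sketch of the 32-bit non-trap expansion of "mulo $t0, $t1, $t2":
// mult $t1, $t2
// mflo $t0
// sra $t0, $t0, 31
// mfhi $at
// beq $t0, $at, 1f # HI must equal the sign extension of LO
// nop
// break 0x6
// 1: mflo $t0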
bool MipsAsmParser::expandMulO(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI) {
MipsTargetStreamer &TOut = getTargetStreamer();
unsigned ATReg = Mips::NoRegister;
unsigned DstReg = Inst.getOperand(0).getReg();
unsigned SrcReg = Inst.getOperand(1).getReg();
unsigned TmpReg = Inst.getOperand(2).getReg();
ATReg = getATReg(Inst.getLoc());
if (!ATReg)
return true;
TOut.emitRR(Inst.getOpcode() == Mips::MULOMacro ? Mips::MULT : Mips::DMULT,
SrcReg, TmpReg, IDLoc, STI);
TOut.emitR(Mips::MFLO, DstReg, IDLoc, STI);
TOut.emitRRI(Inst.getOpcode() == Mips::MULOMacro ? Mips::SRA : Mips::DSRA32,
DstReg, DstReg, 0x1F, IDLoc, STI);
TOut.emitR(Mips::MFHI, ATReg, IDLoc, STI);
if (useTraps()) {
TOut.emitRRI(Mips::TNE, DstReg, ATReg, 6, IDLoc, STI);
} else {
MCContext & Context = TOut.getStreamer().getContext();
MCSymbol * BrTarget = Context.createTempSymbol();
MCOperand LabelOp =
MCOperand::createExpr(MCSymbolRefExpr::create(BrTarget, Context));
TOut.emitRRX(Mips::BEQ, DstReg, ATReg, LabelOp, IDLoc, STI);
if (AssemblerOptions.back()->isReorder())
TOut.emitNop(IDLoc, STI);
TOut.emitII(Mips::BREAK, 6, 0, IDLoc, STI);
TOut.getStreamer().EmitLabel(BrTarget);
}
TOut.emitR(Mips::MFLO, DstReg, IDLoc, STI);
return false;
}
bool MipsAsmParser::expandMulOU(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI) {
MipsTargetStreamer &TOut = getTargetStreamer();
unsigned ATReg = Mips::NoRegister;
unsigned DstReg = Inst.getOperand(0).getReg();
unsigned SrcReg = Inst.getOperand(1).getReg();
unsigned TmpReg = Inst.getOperand(2).getReg();
ATReg = getATReg(IDLoc);
if (!ATReg)
return true;
TOut.emitRR(Inst.getOpcode() == Mips::MULOUMacro ? Mips::MULTu : Mips::DMULTu,
SrcReg, TmpReg, IDLoc, STI);
TOut.emitR(Mips::MFHI, ATReg, IDLoc, STI);
TOut.emitR(Mips::MFLO, DstReg, IDLoc, STI);
if (useTraps()) {
TOut.emitRRI(Mips::TNE, ATReg, Mips::ZERO, 6, IDLoc, STI);
} else {
MCContext & Context = TOut.getStreamer().getContext();
MCSymbol * BrTarget = Context.createTempSymbol();
MCOperand LabelOp =
MCOperand::createExpr(MCSymbolRefExpr::create(BrTarget, Context));
TOut.emitRRX(Mips::BEQ, ATReg, Mips::ZERO, LabelOp, IDLoc, STI);
if (AssemblerOptions.back()->isReorder())
TOut.emitNop(IDLoc, STI);
TOut.emitII(Mips::BREAK, 6, 0, IDLoc, STI);
TOut.getStreamer().EmitLabel(BrTarget);
}
return false;
}
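// Expand "dmul $rd, $rs, $rt" to "dmultu $rs, $rt; mflo $rd": the result is
// the low 64 bits of the product.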
bool MipsAsmParser::expandDMULMacro(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI) {
MipsTargetStreamer &TOut = getTargetStreamer();
unsigned DstReg = Inst.getOperand(0).getReg();
unsigned SrcReg = Inst.getOperand(1).getReg();
unsigned TmpReg = Inst.getOperand(2).getReg();
TOut.emitRR(Mips::DMULTu, SrcReg, TmpReg, IDLoc, STI);
TOut.emitR(Mips::MFLO, DstReg, IDLoc, STI);
return false;
}
// Expand 'ld $<reg> offset($reg2)' to 'lw $<reg>, offset($reg2);
// lw $<reg+1>, offset+4($reg2)'
// or expand 'sd $<reg> offset($reg2)' to 'sw $<reg>, offset($reg2);
// sw $<reg+1>, offset+4($reg2)'
// for O32.
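// e.g. "ld $a0, 8($sp)" becomes "lw $a0, 8($sp); lw $a1, 12($sp)".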
bool MipsAsmParser::expandLoadStoreDMacro(MCInst &Inst, SMLoc IDLoc,
MCStreamer &Out,
const MCSubtargetInfo *STI,
bool IsLoad) {
if (!isABI_O32())
return true;
warnIfNoMacro(IDLoc);
MipsTargetStreamer &TOut = getTargetStreamer();
unsigned Opcode = IsLoad ? Mips::LW : Mips::SW;
unsigned FirstReg = Inst.getOperand(0).getReg();
unsigned SecondReg = nextReg(FirstReg);
unsigned BaseReg = Inst.getOperand(1).getReg();
if (!SecondReg)
return true;
warnIfRegIndexIsAT(FirstReg, IDLoc);
assert(Inst.getOperand(2).isImm() &&
"Offset for load/store macro is not immediate!");
MCOperand &FirstOffset = Inst.getOperand(2);
signed NextOffset = FirstOffset.getImm() + 4;
MCOperand SecondOffset = MCOperand::createImm(NextOffset);
if (!isInt<16>(FirstOffset.getImm()) || !isInt<16>(NextOffset))
return true;
// For loads, clobber the base register with the second load instead of the
// first if the BaseReg == FirstReg.
if (FirstReg != BaseReg || !IsLoad) {
TOut.emitRRX(Opcode, FirstReg, BaseReg, FirstOffset, IDLoc, STI);
TOut.emitRRX(Opcode, SecondReg, BaseReg, SecondOffset, IDLoc, STI);
} else {
TOut.emitRRX(Opcode, SecondReg, BaseReg, SecondOffset, IDLoc, STI);
TOut.emitRRX(Opcode, FirstReg, BaseReg, FirstOffset, IDLoc, STI);
}
return false;
}
// Expand 's.d $<reg> offset($reg2)' to 'swc1 $<reg+1>, offset($reg2);
// swc1 $<reg>, offset+4($reg2)'
// or if little endian to 'swc1 $<reg>, offset($reg2);
// swc1 $<reg+1>, offset+4($reg2)'
// for Mips1.
bool MipsAsmParser::expandStoreDM1Macro(MCInst &Inst, SMLoc IDLoc,
MCStreamer &Out,
const MCSubtargetInfo *STI) {
if (!isABI_O32())
return true;
warnIfNoMacro(IDLoc);
MipsTargetStreamer &TOut = getTargetStreamer();
unsigned Opcode = Mips::SWC1;
unsigned FirstReg = Inst.getOperand(0).getReg();
unsigned SecondReg = nextReg(FirstReg);
unsigned BaseReg = Inst.getOperand(1).getReg();
if (!SecondReg)
return true;
warnIfRegIndexIsAT(FirstReg, IDLoc);
assert(Inst.getOperand(2).isImm() &&
"Offset for macro is not immediate!");
MCOperand &FirstOffset = Inst.getOperand(2);
signed NextOffset = FirstOffset.getImm() + 4;
MCOperand SecondOffset = MCOperand::createImm(NextOffset);
if (!isInt<16>(FirstOffset.getImm()) || !isInt<16>(NextOffset))
return true;
if (!IsLittleEndian)
std::swap(FirstReg, SecondReg);
TOut.emitRRX(Opcode, FirstReg, BaseReg, FirstOffset, IDLoc, STI);
TOut.emitRRX(Opcode, SecondReg, BaseReg, SecondOffset, IDLoc, STI);
return false;
}
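// Expand 'seq $rd, $rs, $rt' (set on equal): for two non-zero registers
// this is "xor $rd, $rs, $rt; sltiu $rd, $rd, 1"; when one operand is
// $zero, a single "sltiu $rd, $reg, 1" suffices.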
bool MipsAsmParser::expandSeq(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI) {
MipsTargetStreamer &TOut = getTargetStreamer();
assert(Inst.getNumOperands() == 3 && "Invalid operand count");
assert(Inst.getOperand(0).isReg() &&
Inst.getOperand(1).isReg() &&
Inst.getOperand(2).isReg() && "Invalid instruction operand.");
unsigned DstReg = Inst.getOperand(0).getReg();
unsigned SrcReg = Inst.getOperand(1).getReg();
unsigned OpReg = Inst.getOperand(2).getReg();
warnIfNoMacro(IDLoc);
if (SrcReg != Mips::ZERO && OpReg != Mips::ZERO) {
TOut.emitRRR(Mips::XOR, DstReg, SrcReg, OpReg, IDLoc, STI);
TOut.emitRRI(Mips::SLTiu, DstReg, DstReg, 1, IDLoc, STI);
return false;
}
unsigned Reg = SrcReg == Mips::ZERO ? OpReg : SrcReg;
TOut.emitRRI(Mips::SLTiu, DstReg, Reg, 1, IDLoc, STI);
return false;
}
bool MipsAsmParser::expandSeqI(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI) {
MipsTargetStreamer &TOut = getTargetStreamer();
assert(Inst.getNumOperands() == 3 && "Invalid operand count");
assert(Inst.getOperand(0).isReg() &&
Inst.getOperand(1).isReg() &&
Inst.getOperand(2).isImm() && "Invalid instruction operand.");
unsigned DstReg = Inst.getOperand(0).getReg();
unsigned SrcReg = Inst.getOperand(1).getReg();
int64_t Imm = Inst.getOperand(2).getImm();
warnIfNoMacro(IDLoc);
if (Imm == 0) {
TOut.emitRRI(Mips::SLTiu, DstReg, SrcReg, 1, IDLoc, STI);
return false;
}
if (SrcReg == Mips::ZERO) {
Warning(IDLoc, "comparison is always false");
TOut.emitRRR(isGP64bit() ? Mips::DADDu : Mips::ADDu,
DstReg, SrcReg, SrcReg, IDLoc, STI);
return false;
}
unsigned Opc;
if (Imm > -0x8000 && Imm < 0) {
Imm = -Imm;
Opc = isGP64bit() ? Mips::DADDiu : Mips::ADDiu;
} else {
Opc = Mips::XORi;
}
if (!isUInt<16>(Imm)) {
unsigned ATReg = getATReg(IDLoc);
if (!ATReg)
return true;
if (loadImmediate(Imm, ATReg, Mips::NoRegister, true, isGP64bit(), IDLoc,
Out, STI))
return true;
TOut.emitRRR(Mips::XOR, DstReg, SrcReg, ATReg, IDLoc, STI);
TOut.emitRRI(Mips::SLTiu, DstReg, DstReg, 1, IDLoc, STI);
return false;
}
TOut.emitRRI(Opc, DstReg, SrcReg, Imm, IDLoc, STI);
TOut.emitRRI(Mips::SLTiu, DstReg, DstReg, 1, IDLoc, STI);
return false;
}
// Map the DSP accumulator and control register to the corresponding gpr
// operand. Unlike the other aliases, the m(f|t)t(lo|hi|acx) instructions
// do not map the DSP registers contiguously to gpr registers.
static unsigned getRegisterForMxtrDSP(MCInst &Inst, bool IsMFDSP) {
switch (Inst.getOpcode()) {
case Mips::MFTLO:
case Mips::MTTLO:
switch (Inst.getOperand(IsMFDSP ? 1 : 0).getReg()) {
case Mips::AC0:
return Mips::ZERO;
case Mips::AC1:
return Mips::A0;
case Mips::AC2:
return Mips::T0;
case Mips::AC3:
return Mips::T4;
default:
llvm_unreachable("Unknown register for 'mttr' alias!");
}
case Mips::MFTHI:
case Mips::MTTHI:
switch (Inst.getOperand(IsMFDSP ? 1 : 0).getReg()) {
case Mips::AC0:
return Mips::AT;
case Mips::AC1:
return Mips::A1;
case Mips::AC2:
return Mips::T1;
case Mips::AC3:
return Mips::T5;
default:
llvm_unreachable("Unknown register for 'mttr' alias!");
}
case Mips::MFTACX:
case Mips::MTTACX:
switch (Inst.getOperand(IsMFDSP ? 1 : 0).getReg()) {
case Mips::AC0:
return Mips::V0;
case Mips::AC1:
return Mips::A2;
case Mips::AC2:
return Mips::T2;
case Mips::AC3:
return Mips::T6;
default:
llvm_unreachable("Unknown register for 'mttr' alias!");
}
case Mips::MFTDSP:
case Mips::MTTDSP:
return Mips::S0;
default:
llvm_unreachable("Unknown instruction for 'mttr' dsp alias!");
}
}
// Map the floating point register operand to the corresponding register
// operand.
static unsigned getRegisterForMxtrFP(MCInst &Inst, bool IsMFTC1) {
switch (Inst.getOperand(IsMFTC1 ? 1 : 0).getReg()) {
case Mips::F0: return Mips::ZERO;
case Mips::F1: return Mips::AT;
case Mips::F2: return Mips::V0;
case Mips::F3: return Mips::V1;
case Mips::F4: return Mips::A0;
case Mips::F5: return Mips::A1;
case Mips::F6: return Mips::A2;
case Mips::F7: return Mips::A3;
case Mips::F8: return Mips::T0;
case Mips::F9: return Mips::T1;
case Mips::F10: return Mips::T2;
case Mips::F11: return Mips::T3;
case Mips::F12: return Mips::T4;
case Mips::F13: return Mips::T5;
case Mips::F14: return Mips::T6;
case Mips::F15: return Mips::T7;
case Mips::F16: return Mips::S0;
case Mips::F17: return Mips::S1;
case Mips::F18: return Mips::S2;
case Mips::F19: return Mips::S3;
case Mips::F20: return Mips::S4;
case Mips::F21: return Mips::S5;
case Mips::F22: return Mips::S6;
case Mips::F23: return Mips::S7;
case Mips::F24: return Mips::T8;
case Mips::F25: return Mips::T9;
case Mips::F26: return Mips::K0;
case Mips::F27: return Mips::K1;
case Mips::F28: return Mips::GP;
case Mips::F29: return Mips::SP;
case Mips::F30: return Mips::FP;
case Mips::F31: return Mips::RA;
default: llvm_unreachable("Unknown register for mttc1 alias!");
}
}
// Map the coprocessor operand to the corresponding gpr register operand.
static unsigned getRegisterForMxtrC0(MCInst &Inst, bool IsMFTC0) {
switch (Inst.getOperand(IsMFTC0 ? 1 : 0).getReg()) {
case Mips::COP00: return Mips::ZERO;
case Mips::COP01: return Mips::AT;
case Mips::COP02: return Mips::V0;
case Mips::COP03: return Mips::V1;
case Mips::COP04: return Mips::A0;
case Mips::COP05: return Mips::A1;
case Mips::COP06: return Mips::A2;
case Mips::COP07: return Mips::A3;
case Mips::COP08: return Mips::T0;
case Mips::COP09: return Mips::T1;
case Mips::COP010: return Mips::T2;
case Mips::COP011: return Mips::T3;
case Mips::COP012: return Mips::T4;
case Mips::COP013: return Mips::T5;
case Mips::COP014: return Mips::T6;
case Mips::COP015: return Mips::T7;
case Mips::COP016: return Mips::S0;
case Mips::COP017: return Mips::S1;
case Mips::COP018: return Mips::S2;
case Mips::COP019: return Mips::S3;
case Mips::COP020: return Mips::S4;
case Mips::COP021: return Mips::S5;
case Mips::COP022: return Mips::S6;
case Mips::COP023: return Mips::S7;
case Mips::COP024: return Mips::T8;
case Mips::COP025: return Mips::T9;
case Mips::COP026: return Mips::K0;
case Mips::COP027: return Mips::K1;
case Mips::COP028: return Mips::GP;
case Mips::COP029: return Mips::SP;
case Mips::COP030: return Mips::FP;
case Mips::COP031: return Mips::RA;
default: llvm_unreachable("Unknown register for mttc0 alias!");
}
}
/// Expand an alias of 'mftr' or 'mttr' into the full instruction, by producing
/// an mftr or mttr with the correctly mapped gpr register, u, sel and h bits.
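/// For example (illustrative), "mftc1 $4, $f5" is emitted as
/// "mftr $4, $5, 1, 2, 0": u=1 with sel=2 selects the FPR file, and h=0
/// selects the low half (mfthc1 sets h=1).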
bool MipsAsmParser::expandMXTRAlias(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI) {
MipsTargetStreamer &TOut = getTargetStreamer();
unsigned rd = 0;
unsigned u = 1;
unsigned sel = 0;
unsigned h = 0;
bool IsMFTR = false;
switch (Inst.getOpcode()) {
case Mips::MFTC0:
IsMFTR = true;
LLVM_FALLTHROUGH;
case Mips::MTTC0:
u = 0;
rd = getRegisterForMxtrC0(Inst, IsMFTR);
sel = Inst.getOperand(2).getImm();
break;
case Mips::MFTGPR:
IsMFTR = true;
LLVM_FALLTHROUGH;
case Mips::MTTGPR:
rd = Inst.getOperand(IsMFTR ? 1 : 0).getReg();
break;
case Mips::MFTLO:
case Mips::MFTHI:
case Mips::MFTACX:
case Mips::MFTDSP:
IsMFTR = true;
LLVM_FALLTHROUGH;
case Mips::MTTLO:
case Mips::MTTHI:
case Mips::MTTACX:
case Mips::MTTDSP:
rd = getRegisterForMxtrDSP(Inst, IsMFTR);
sel = 1;
break;
case Mips::MFTHC1:
h = 1;
LLVM_FALLTHROUGH;
case Mips::MFTC1:
IsMFTR = true;
rd = getRegisterForMxtrFP(Inst, IsMFTR);
sel = 2;
break;
case Mips::MTTHC1:
h = 1;
LLVM_FALLTHROUGH;
case Mips::MTTC1:
rd = getRegisterForMxtrFP(Inst, IsMFTR);
sel = 2;
break;
case Mips::CFTC1:
IsMFTR = true;
LLVM_FALLTHROUGH;
case Mips::CTTC1:
rd = getRegisterForMxtrFP(Inst, IsMFTR);
sel = 3;
break;
}
unsigned Op0 = IsMFTR ? Inst.getOperand(0).getReg() : rd;
unsigned Op1 =
IsMFTR ? rd
: (Inst.getOpcode() != Mips::MTTDSP ? Inst.getOperand(1).getReg()
: Inst.getOperand(0).getReg());
TOut.emitRRIII(IsMFTR ? Mips::MFTR : Mips::MTTR, Op0, Op1, u, sel, h, IDLoc,
STI);
return false;
}
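// Expand the saa/saad (Octeon store atomic add) macros with an address
// operand. A zero offset is emitted directly; otherwise the effective
// address is built in $at first, e.g. (illustratively)
// "saa $t0, sym" -> "la $at, sym; saa $t0, ($at)".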
bool MipsAsmParser::expandSaaAddr(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI) {
assert(Inst.getNumOperands() == 3 && "expected three operands");
assert(Inst.getOperand(0).isReg() && "expected register operand kind");
assert(Inst.getOperand(1).isReg() && "expected register operand kind");
warnIfNoMacro(IDLoc);
MipsTargetStreamer &TOut = getTargetStreamer();
unsigned Opcode = Inst.getOpcode() == Mips::SaaAddr ? Mips::SAA : Mips::SAAD;
unsigned RtReg = Inst.getOperand(0).getReg();
unsigned BaseReg = Inst.getOperand(1).getReg();
const MCOperand &BaseOp = Inst.getOperand(2);
if (BaseOp.isImm()) {
int64_t ImmValue = BaseOp.getImm();
if (ImmValue == 0) {
TOut.emitRR(Opcode, RtReg, BaseReg, IDLoc, STI);
return false;
}
}
unsigned ATReg = getATReg(IDLoc);
if (!ATReg)
return true;
if (expandLoadAddress(ATReg, BaseReg, BaseOp, !isGP64bit(), IDLoc, Out, STI))
return true;
TOut.emitRR(Opcode, RtReg, ATReg, IDLoc, STI);
return false;
}
unsigned
MipsAsmParser::checkEarlyTargetMatchPredicate(MCInst &Inst,
const OperandVector &Operands) {
switch (Inst.getOpcode()) {
default:
return Match_Success;
case Mips::DATI:
case Mips::DAHI:
if (static_cast<MipsOperand &>(*Operands[1])
.isValidForTie(static_cast<MipsOperand &>(*Operands[2])))
return Match_Success;
return Match_RequiresSameSrcAndDst;
}
}
unsigned MipsAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
switch (Inst.getOpcode()) {
// As described by the MIPSR6 spec, daui must not use the zero register as
// its source operand.
case Mips::DAUI:
if (Inst.getOperand(1).getReg() == Mips::ZERO ||
Inst.getOperand(1).getReg() == Mips::ZERO_64)
return Match_RequiresNoZeroRegister;
return Match_Success;
// As described by the Mips32r2 spec, the registers Rd and Rs for
// jalr.hb must be different.
// The same applies to registers Rt and Rs of the microMIPSR6 jalrc.hb
// instruction, and to registers Rd and Base of the microMIPS lwp
// instruction.
case Mips::JALR_HB:
case Mips::JALR_HB64:
case Mips::JALRC_HB_MMR6:
case Mips::JALRC_MMR6:
if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg())
return Match_RequiresDifferentSrcAndDst;
return Match_Success;
case Mips::LWP_MM:
if (Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg())
return Match_RequiresDifferentSrcAndDst;
return Match_Success;
case Mips::SYNC:
if (Inst.getOperand(0).getImm() != 0 && !hasMips32())
return Match_NonZeroOperandForSync;
return Match_Success;
case Mips::MFC0:
case Mips::MTC0:
case Mips::MTC2:
case Mips::MFC2:
if (Inst.getOperand(2).getImm() != 0 && !hasMips32())
return Match_NonZeroOperandForMTCX;
return Match_Success;
// As described by the MIPSR6 spec, the compact branches that compare registers
// must:
// a) Not use the zero register.
// b) Not use the same register twice.
// c) rs < rt for bnec, beqc.
// NB: For this case, the encoding will swap the operands as their
// ordering doesn't matter. GAS performs this transformation too.
// Hence, that constraint does not have to be enforced.
//
// The compact branches that branch iff the signed addition of two registers
// would overflow must have rs >= rt. That can be handled like beqc/bnec with
// operand swapping. They are not restricted from using the zero register.
case Mips::BLEZC: case Mips::BLEZC_MMR6:
case Mips::BGEZC: case Mips::BGEZC_MMR6:
case Mips::BGTZC: case Mips::BGTZC_MMR6:
case Mips::BLTZC: case Mips::BLTZC_MMR6:
case Mips::BEQZC: case Mips::BEQZC_MMR6:
case Mips::BNEZC: case Mips::BNEZC_MMR6:
case Mips::BLEZC64:
case Mips::BGEZC64:
case Mips::BGTZC64:
case Mips::BLTZC64:
case Mips::BEQZC64:
case Mips::BNEZC64:
if (Inst.getOperand(0).getReg() == Mips::ZERO ||
Inst.getOperand(0).getReg() == Mips::ZERO_64)
return Match_RequiresNoZeroRegister;
return Match_Success;
case Mips::BGEC: case Mips::BGEC_MMR6:
case Mips::BLTC: case Mips::BLTC_MMR6:
case Mips::BGEUC: case Mips::BGEUC_MMR6:
case Mips::BLTUC: case Mips::BLTUC_MMR6:
case Mips::BEQC: case Mips::BEQC_MMR6:
case Mips::BNEC: case Mips::BNEC_MMR6:
case Mips::BGEC64:
case Mips::BLTC64:
case Mips::BGEUC64:
case Mips::BLTUC64:
case Mips::BEQC64:
case Mips::BNEC64:
if (Inst.getOperand(0).getReg() == Mips::ZERO ||
Inst.getOperand(0).getReg() == Mips::ZERO_64)
return Match_RequiresNoZeroRegister;
if (Inst.getOperand(1).getReg() == Mips::ZERO ||
Inst.getOperand(1).getReg() == Mips::ZERO_64)
return Match_RequiresNoZeroRegister;
if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg())
return Match_RequiresDifferentOperands;
return Match_Success;
case Mips::DINS: {
assert(Inst.getOperand(2).isImm() && Inst.getOperand(3).isImm() &&
"Operands must be immediates for dins!");
const signed Pos = Inst.getOperand(2).getImm();
const signed Size = Inst.getOperand(3).getImm();
if ((0 > (Pos + Size)) || ((Pos + Size) > 32))
return Match_RequiresPosSizeRange0_32;
return Match_Success;
}
case Mips::DINSM:
case Mips::DINSU: {
assert(Inst.getOperand(2).isImm() && Inst.getOperand(3).isImm() &&
"Operands must be immediates for dinsm/dinsu!");
const signed Pos = Inst.getOperand(2).getImm();
const signed Size = Inst.getOperand(3).getImm();
if ((32 >= (Pos + Size)) || ((Pos + Size) > 64))
return Match_RequiresPosSizeRange33_64;
return Match_Success;
}
case Mips::DEXT: {
assert(Inst.getOperand(2).isImm() && Inst.getOperand(3).isImm() &&
"Operands must be immediates for dext!");
const signed Pos = Inst.getOperand(2).getImm();
const signed Size = Inst.getOperand(3).getImm();
if ((1 > (Pos + Size)) || ((Pos + Size) > 63))
return Match_RequiresPosSizeUImm6;
return Match_Success;
}
case Mips::DEXTM:
case Mips::DEXTU: {
assert(Inst.getOperand(2).isImm() && Inst.getOperand(3).isImm() &&
"Operands must be immediates for dextm/dextu!");
const signed Pos = Inst.getOperand(2).getImm();
const signed Size = Inst.getOperand(3).getImm();
if ((32 > (Pos + Size)) || ((Pos + Size) > 64))
return Match_RequiresPosSizeRange33_64;
return Match_Success;
}
case Mips::CRC32B: case Mips::CRC32CB:
case Mips::CRC32H: case Mips::CRC32CH:
case Mips::CRC32W: case Mips::CRC32CW:
case Mips::CRC32D: case Mips::CRC32CD:
if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg())
return Match_RequiresSameSrcAndDst;
return Match_Success;
}
uint64_t TSFlags = getInstDesc(Inst.getOpcode()).TSFlags;
if ((TSFlags & MipsII::HasFCCRegOperand) &&
(Inst.getOperand(0).getReg() != Mips::FCC0) && !hasEightFccRegisters())
return Match_NoFCCRegisterForCurrentISA;
return Match_Success;
}
static SMLoc RefineErrorLoc(const SMLoc Loc, const OperandVector &Operands,
uint64_t ErrorInfo) {
if (ErrorInfo != ~0ULL && ErrorInfo < Operands.size()) {
SMLoc ErrorLoc = Operands[ErrorInfo]->getStartLoc();
if (ErrorLoc == SMLoc())
return Loc;
return ErrorLoc;
}
return Loc;
}
bool MipsAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
OperandVector &Operands,
MCStreamer &Out,
uint64_t &ErrorInfo,
bool MatchingInlineAsm) {
MCInst Inst;
unsigned MatchResult =
MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm);
switch (MatchResult) {
case Match_Success:
if (processInstruction(Inst, IDLoc, Out, STI))
return true;
return false;
case Match_MissingFeature:
Error(IDLoc, "instruction requires a CPU feature not currently enabled");
return true;
case Match_InvalidOperand: {
SMLoc ErrorLoc = IDLoc;
if (ErrorInfo != ~0ULL) {
if (ErrorInfo >= Operands.size())
return Error(IDLoc, "too few operands for instruction");
ErrorLoc = Operands[ErrorInfo]->getStartLoc();
if (ErrorLoc == SMLoc())
ErrorLoc = IDLoc;
}
return Error(ErrorLoc, "invalid operand for instruction");
}
case Match_NonZeroOperandForSync:
return Error(IDLoc,
"s-type must be zero or unspecified for pre-MIPS32 ISAs");
case Match_NonZeroOperandForMTCX:
return Error(IDLoc, "selector must be zero for pre-MIPS32 ISAs");
case Match_MnemonicFail:
return Error(IDLoc, "invalid instruction");
case Match_RequiresDifferentSrcAndDst:
return Error(IDLoc, "source and destination must be different");
case Match_RequiresDifferentOperands:
return Error(IDLoc, "registers must be different");
case Match_RequiresNoZeroRegister:
return Error(IDLoc, "invalid operand ($zero) for instruction");
case Match_RequiresSameSrcAndDst:
return Error(IDLoc, "source and destination must match");
case Match_NoFCCRegisterForCurrentISA:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"non-zero fcc register doesn't exist in current ISA level");
case Match_Immz:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), "expected '0'");
case Match_UImm1_0:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected 1-bit unsigned immediate");
case Match_UImm2_0:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected 2-bit unsigned immediate");
case Match_UImm2_1:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected immediate in range 1 .. 4");
case Match_UImm3_0:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected 3-bit unsigned immediate");
case Match_UImm4_0:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected 4-bit unsigned immediate");
case Match_SImm4_0:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected 4-bit signed immediate");
case Match_UImm5_0:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected 5-bit unsigned immediate");
case Match_SImm5_0:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected 5-bit signed immediate");
case Match_UImm5_1:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected immediate in range 1 .. 32");
case Match_UImm5_32:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected immediate in range 32 .. 63");
case Match_UImm5_33:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected immediate in range 33 .. 64");
case Match_UImm5_0_Report_UImm6:
// This is used on UImm5 operands that have a corresponding UImm5_32
// operand to avoid confusing the user.
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected 6-bit unsigned immediate");
case Match_UImm5_Lsl2:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected both 7-bit unsigned immediate and multiple of 4");
case Match_UImmRange2_64:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected immediate in range 2 .. 64");
case Match_UImm6_0:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected 6-bit unsigned immediate");
case Match_UImm6_Lsl2:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected both 8-bit unsigned immediate and multiple of 4");
case Match_SImm6_0:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected 6-bit signed immediate");
case Match_UImm7_0:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected 7-bit unsigned immediate");
case Match_UImm7_N1:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected immediate in range -1 .. 126");
case Match_SImm7_Lsl2:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected both 9-bit signed immediate and multiple of 4");
case Match_UImm8_0:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected 8-bit unsigned immediate");
case Match_UImm10_0:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected 10-bit unsigned immediate");
case Match_SImm10_0:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected 10-bit signed immediate");
case Match_SImm11_0:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected 11-bit signed immediate");
case Match_UImm16:
case Match_UImm16_Relaxed:
case Match_UImm16_AltRelaxed:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected 16-bit unsigned immediate");
case Match_SImm16:
case Match_SImm16_Relaxed:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected 16-bit signed immediate");
case Match_SImm19_Lsl2:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected both 19-bit signed immediate and multiple of 4");
case Match_UImm20_0:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected 20-bit unsigned immediate");
case Match_UImm26_0:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected 26-bit unsigned immediate");
case Match_SImm32:
case Match_SImm32_Relaxed:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected 32-bit signed immediate");
case Match_UImm32_Coerced:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected 32-bit immediate");
case Match_MemSImm9:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected memory with 9-bit signed offset");
case Match_MemSImm10:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected memory with 10-bit signed offset");
case Match_MemSImm10Lsl1:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected memory with 11-bit signed offset and multiple of 2");
case Match_MemSImm10Lsl2:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected memory with 12-bit signed offset and multiple of 4");
case Match_MemSImm10Lsl3:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected memory with 13-bit signed offset and multiple of 8");
case Match_MemSImm11:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected memory with 11-bit signed offset");
case Match_MemSImm12:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected memory with 12-bit signed offset");
case Match_MemSImm16:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected memory with 16-bit signed offset");
case Match_MemSImmPtr:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected memory with 32-bit signed offset");
case Match_RequiresPosSizeRange0_32: {
SMLoc ErrorStart = Operands[3]->getStartLoc();
SMLoc ErrorEnd = Operands[4]->getEndLoc();
return Error(ErrorStart, "size plus position are not in the range 0 .. 32",
SMRange(ErrorStart, ErrorEnd));
}
case Match_RequiresPosSizeUImm6: {
SMLoc ErrorStart = Operands[3]->getStartLoc();
SMLoc ErrorEnd = Operands[4]->getEndLoc();
return Error(ErrorStart, "size plus position are not in the range 1 .. 63",
SMRange(ErrorStart, ErrorEnd));
}
case Match_RequiresPosSizeRange33_64: {
SMLoc ErrorStart = Operands[3]->getStartLoc();
SMLoc ErrorEnd = Operands[4]->getEndLoc();
return Error(ErrorStart, "size plus position are not in the range 33 .. 64",
SMRange(ErrorStart, ErrorEnd));
}
}
llvm_unreachable("Implement any new match types added!");
}
void MipsAsmParser::warnIfRegIndexIsAT(unsigned RegIndex, SMLoc Loc) {
if (RegIndex != 0 && AssemblerOptions.back()->getATRegIndex() == RegIndex)
Warning(Loc, "used $at (currently $" + Twine(RegIndex) +
") without \".set noat\"");
}
void MipsAsmParser::warnIfNoMacro(SMLoc Loc) {
if (!AssemblerOptions.back()->isMacro())
Warning(Loc, "macro instruction expanded into multiple instructions");
}
void MipsAsmParser::ConvertXWPOperands(MCInst &Inst,
const OperandVector &Operands) {
assert(
(Inst.getOpcode() == Mips::LWP_MM || Inst.getOpcode() == Mips::SWP_MM) &&
"Unexpected instruction!");
((MipsOperand &)*Operands[1]).addGPR32ZeroAsmRegOperands(Inst, 1);
int NextReg = nextReg(((MipsOperand &)*Operands[1]).getGPR32Reg());
Inst.addOperand(MCOperand::createReg(NextReg));
((MipsOperand &)*Operands[2]).addMemOperands(Inst, 2);
}
void
MipsAsmParser::printWarningWithFixIt(const Twine &Msg, const Twine &FixMsg,
SMRange Range, bool ShowColors) {
getSourceManager().PrintMessage(Range.Start, SourceMgr::DK_Warning, Msg,
Range, SMFixIt(Range, FixMsg),
ShowColors);
}
int MipsAsmParser::matchCPURegisterName(StringRef Name) {
int CC;
CC = StringSwitch<unsigned>(Name)
.Case("zero", 0)
.Cases("at", "AT", 1)
.Case("a0", 4)
.Case("a1", 5)
.Case("a2", 6)
.Case("a3", 7)
.Case("v0", 2)
.Case("v1", 3)
.Case("s0", 16)
.Case("s1", 17)
.Case("s2", 18)
.Case("s3", 19)
.Case("s4", 20)
.Case("s5", 21)
.Case("s6", 22)
.Case("s7", 23)
.Case("k0", 26)
.Case("k1", 27)
.Case("gp", 28)
.Case("sp", 29)
.Case("fp", 30)
.Case("s8", 30)
.Case("ra", 31)
.Case("t0", 8)
.Case("t1", 9)
.Case("t2", 10)
.Case("t3", 11)
.Case("t4", 12)
.Case("t5", 13)
.Case("t6", 14)
.Case("t7", 15)
.Case("t8", 24)
.Case("t9", 25)
.Default(-1);
if (!(isABI_N32() || isABI_N64()))
return CC;
if (12 <= CC && CC <= 15) {
// Name is one of t4-t7
AsmToken RegTok = getLexer().peekTok();
SMRange RegRange = RegTok.getLocRange();
StringRef FixedName = StringSwitch<StringRef>(Name)
.Case("t4", "t0")
.Case("t5", "t1")
.Case("t6", "t2")
.Case("t7", "t3")
.Default("");
assert(FixedName != "" && "Register name is not one of t4-t7.");
printWarningWithFixIt("register names $t4-$t7 are only available in O32.",
"Did you mean $" + FixedName + "?", RegRange);
}
// Although the SGI documentation just cuts out t0-t3 for n32/n64, GNU
// pushes the values of t0-t3 to override the o32/o64 values for t4-t7.
// We support both cases, so for t0-t3 we'll just shift them up to t4-t7.
if (8 <= CC && CC <= 11)
CC += 4;
if (CC == -1)
CC = StringSwitch<unsigned>(Name)
.Case("a4", 8)
.Case("a5", 9)
.Case("a6", 10)
.Case("a7", 11)
.Case("kt0", 26)
.Case("kt1", 27)
.Default(-1);
return CC;
}
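// Illustrative mappings (not part of the original source), following the
// tables above:
//   O32:     "t4" -> 12, "a0" -> 4, "s8"/"fp" -> 30
//   N32/N64: "t4" -> 12 with a fix-it warning suggesting "$t0";
//            "t0" -> 12 (shifted from 8), "a4" -> 8, "kt0" -> 26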
int MipsAsmParser::matchHWRegsRegisterName(StringRef Name) {
int CC;
CC = StringSwitch<unsigned>(Name)
.Case("hwr_cpunum", 0)
.Case("hwr_synci_step", 1)
.Case("hwr_cc", 2)
.Case("hwr_ccres", 3)
.Case("hwr_ulr", 29)
.Default(-1);
return CC;
}
int MipsAsmParser::matchFPURegisterName(StringRef Name) {
if (Name[0] == 'f') {
StringRef NumString = Name.substr(1);
unsigned IntVal;
if (NumString.getAsInteger(10, IntVal))
return -1; // This is not an integer.
if (IntVal > 31) // Maximum index for fpu register.
return -1;
return IntVal;
}
return -1;
}
int MipsAsmParser::matchFCCRegisterName(StringRef Name) {
if (Name.startswith("fcc")) {
StringRef NumString = Name.substr(3);
unsigned IntVal;
if (NumString.getAsInteger(10, IntVal))
return -1; // This is not an integer.
if (IntVal > 7) // There are only 8 fcc registers.
return -1;
return IntVal;
}
return -1;
}
int MipsAsmParser::matchACRegisterName(StringRef Name) {
if (Name.startswith("ac")) {
StringRef NumString = Name.substr(2);
unsigned IntVal;
if (NumString.getAsInteger(10, IntVal))
return -1; // This is not an integer.
if (IntVal > 3) // There are only 4 accumulator registers (ac0-ac3).
return -1;
return IntVal;
}
return -1;
}
int MipsAsmParser::matchMSA128RegisterName(StringRef Name) {
unsigned IntVal;
if (Name.front() != 'w' || Name.drop_front(1).getAsInteger(10, IntVal))
return -1;
if (IntVal > 31)
return -1;
return IntVal;
}
int MipsAsmParser::matchMSA128CtrlRegisterName(StringRef Name) {
int CC;
CC = StringSwitch<unsigned>(Name)
.Case("msair", 0)
.Case("msacsr", 1)
.Case("msaaccess", 2)
.Case("msasave", 3)
.Case("msamodify", 4)
.Case("msarequest", 5)
.Case("msamap", 6)
.Case("msaunmap", 7)
.Default(-1);
return CC;
}
bool MipsAsmParser::canUseATReg() {
return AssemblerOptions.back()->getATRegIndex() != 0;
}
unsigned MipsAsmParser::getATReg(SMLoc Loc) {
unsigned ATIndex = AssemblerOptions.back()->getATRegIndex();
if (ATIndex == 0) {
reportParseError(Loc,
"pseudo-instruction requires $at, which is not available");
return 0;
}
unsigned AT = getReg(
(isGP64bit()) ? Mips::GPR64RegClassID : Mips::GPR32RegClassID, ATIndex);
return AT;
}
unsigned MipsAsmParser::getReg(int RC, int RegNo) {
return *(getContext().getRegisterInfo()->getRegClass(RC).begin() + RegNo);
}
bool MipsAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
MCAsmParser &Parser = getParser();
LLVM_DEBUG(dbgs() << "parseOperand\n");
// Check if the current operand has a custom associated parser; if so, try
// to custom parse the operand, otherwise fall back to the general approach.
OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
if (ResTy == MatchOperand_Success)
return false;
// If there was no custom match, try the generic matcher below. Otherwise,
// there was a match but an error occurred; in that case just report that
// operand parsing failed.
if (ResTy == MatchOperand_ParseFail)
return true;
LLVM_DEBUG(dbgs() << ".. Generic Parser\n");
switch (getLexer().getKind()) {
case AsmToken::Dollar: {
// Parse the register.
SMLoc S = Parser.getTok().getLoc();
// Almost all registers have been parsed by custom parsers. There is only
// one exception to this: $zero (and its alias $0) will reach this point
// for div, divu, and similar instructions because it is not an operand
// to the instruction definition but an explicit register. Special case
// this situation for now.
if (parseAnyRegister(Operands) != MatchOperand_NoMatch)
return false;
// Maybe it is a symbol reference.
StringRef Identifier;
if (Parser.parseIdentifier(Identifier))
return true;
SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
MCSymbol *Sym = getContext().getOrCreateSymbol("$" + Identifier);
// Otherwise create a symbol reference.
const MCExpr *Res =
MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, getContext());
Operands.push_back(MipsOperand::CreateImm(Res, S, E, *this));
return false;
}
default: {
LLVM_DEBUG(dbgs() << ".. generic integer expression\n");
const MCExpr *Expr;
SMLoc S = Parser.getTok().getLoc(); // Start location of the operand.
if (getParser().parseExpression(Expr))
return true;
SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
Operands.push_back(MipsOperand::CreateImm(Expr, S, E, *this));
return false;
}
} // switch(getLexer().getKind())
return true;
}
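// Illustrative note (not from the original source): a '$'-prefixed operand
// that no register parser claims, e.g. a hypothetical "$my_handler", falls
// through to the identifier path above and is pushed as an immediate
// symbol reference instead of being rejected.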
bool MipsAsmParser::isEvaluated(const MCExpr *Expr) {
switch (Expr->getKind()) {
case MCExpr::Constant:
return true;
case MCExpr::SymbolRef:
return (cast<MCSymbolRefExpr>(Expr)->getKind() != MCSymbolRefExpr::VK_None);
case MCExpr::Binary: {
const MCBinaryExpr *BE = cast<MCBinaryExpr>(Expr);
if (!isEvaluated(BE->getLHS()))
return false;
return isEvaluated(BE->getRHS());
}
case MCExpr::Unary:
return isEvaluated(cast<MCUnaryExpr>(Expr)->getSubExpr());
case MCExpr::Target:
return true;
}
return false;
}
bool MipsAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
SMLoc &EndLoc) {
SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;
OperandMatchResultTy ResTy = parseAnyRegister(Operands);
if (ResTy == MatchOperand_Success) {
assert(Operands.size() == 1);
MipsOperand &Operand = static_cast<MipsOperand &>(*Operands.front());
StartLoc = Operand.getStartLoc();
EndLoc = Operand.getEndLoc();
// AFAIK, we only support numeric registers and named GPRs in CFI
// directives.
// Don't worry about eating tokens before failing. Using an unrecognized
// register is a parse error.
if (Operand.isGPRAsmReg()) {
// Resolve to GPR32 or GPR64 appropriately.
RegNo = isGP64bit() ? Operand.getGPR64Reg() : Operand.getGPR32Reg();
}
return (RegNo == (unsigned)-1);
}
assert(Operands.size() == 0);
return (RegNo == (unsigned)-1);
}
bool MipsAsmParser::parseMemOffset(const MCExpr *&Res, bool isParenExpr) {
SMLoc S;
if (isParenExpr)
return getParser().parseParenExprOfDepth(0, Res, S);
return getParser().parseExpression(Res);
}
OperandMatchResultTy
MipsAsmParser::parseMemOperand(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
LLVM_DEBUG(dbgs() << "parseMemOperand\n");
const MCExpr *IdVal = nullptr;
SMLoc S;
bool isParenExpr = false;
OperandMatchResultTy Res = MatchOperand_NoMatch;
// First operand is the offset.
S = Parser.getTok().getLoc();
if (getLexer().getKind() == AsmToken::LParen) {
Parser.Lex();
isParenExpr = true;
}
if (getLexer().getKind() != AsmToken::Dollar) {
if (parseMemOffset(IdVal, isParenExpr))
return MatchOperand_ParseFail;
const AsmToken &Tok = Parser.getTok(); // Get the next token.
if (Tok.isNot(AsmToken::LParen)) {
MipsOperand &Mnemonic = static_cast<MipsOperand &>(*Operands[0]);
if (Mnemonic.getToken() == "la" || Mnemonic.getToken() == "dla") {
SMLoc E =
SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
Operands.push_back(MipsOperand::CreateImm(IdVal, S, E, *this));
return MatchOperand_Success;
}
if (Tok.is(AsmToken::EndOfStatement)) {
SMLoc E =
SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
// Zero register assumed, add a memory operand with ZERO as its base.
// "Base" will be managed by k_Memory.
auto Base = MipsOperand::createGPRReg(
0, "0", getContext().getRegisterInfo(), S, E, *this);
Operands.push_back(
MipsOperand::CreateMem(std::move(Base), IdVal, S, E, *this));
return MatchOperand_Success;
}
MCBinaryExpr::Opcode Opcode;
// GAS and LLVM treat comparison operators differently. GAS will generate
// -1 or 0, while LLVM will generate 0 or 1. Since a comparison operator
// is highly unlikely to be found in a memory offset expression, we don't
// handle them.
switch (Tok.getKind()) {
case AsmToken::Plus:
Opcode = MCBinaryExpr::Add;
Parser.Lex();
break;
case AsmToken::Minus:
Opcode = MCBinaryExpr::Sub;
Parser.Lex();
break;
case AsmToken::Star:
Opcode = MCBinaryExpr::Mul;
Parser.Lex();
break;
case AsmToken::Pipe:
Opcode = MCBinaryExpr::Or;
Parser.Lex();
break;
case AsmToken::Amp:
Opcode = MCBinaryExpr::And;
Parser.Lex();
break;
case AsmToken::LessLess:
Opcode = MCBinaryExpr::Shl;
Parser.Lex();
break;
case AsmToken::GreaterGreater:
Opcode = MCBinaryExpr::LShr;
Parser.Lex();
break;
case AsmToken::Caret:
Opcode = MCBinaryExpr::Xor;
Parser.Lex();
break;
case AsmToken::Slash:
Opcode = MCBinaryExpr::Div;
Parser.Lex();
break;
case AsmToken::Percent:
Opcode = MCBinaryExpr::Mod;
Parser.Lex();
break;
default:
Error(Parser.getTok().getLoc(), "'(' or expression expected");
return MatchOperand_ParseFail;
}
const MCExpr *NextExpr;
if (getParser().parseExpression(NextExpr))
return MatchOperand_ParseFail;
IdVal = MCBinaryExpr::create(Opcode, IdVal, NextExpr, getContext());
}
Parser.Lex(); // Eat the '(' token.
}
Res = parseAnyRegister(Operands);
if (Res != MatchOperand_Success)
return Res;
if (Parser.getTok().isNot(AsmToken::RParen)) {
Error(Parser.getTok().getLoc(), "')' expected");
return MatchOperand_ParseFail;
}
SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
Parser.Lex(); // Eat the ')' token.
if (!IdVal)
IdVal = MCConstantExpr::create(0, getContext());
// Replace the register operand with the memory operand.
std::unique_ptr<MipsOperand> op(
static_cast<MipsOperand *>(Operands.back().release()));
// Remove the register from the operands.
// "op" will be managed by k_Memory.
Operands.pop_back();
// Add the memory operand.
if (const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(IdVal)) {
int64_t Imm;
if (IdVal->evaluateAsAbsolute(Imm))
IdVal = MCConstantExpr::create(Imm, getContext());
else if (BE->getLHS()->getKind() != MCExpr::SymbolRef)
IdVal = MCBinaryExpr::create(BE->getOpcode(), BE->getRHS(), BE->getLHS(),
getContext());
}
Operands.push_back(MipsOperand::CreateMem(std::move(op), IdVal, S, E, *this));
return MatchOperand_Success;
}
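// Illustrative operand forms this routine accepts (hypothetical registers
// and symbols, not from the original source):
//   lw $4, 8($5)       // plain offset(base)
//   lw $4, sym+8($5)   // binary expression folded into the offset
//   lw $4, 8           // no '(': the base defaults to register $zero
//   la $4, sym+4       // la/dla keep the bare expression as an immediate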
bool MipsAsmParser::searchSymbolAlias(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
MCSymbol *Sym = getContext().lookupSymbol(Parser.getTok().getIdentifier());
if (!Sym)
return false;
SMLoc S = Parser.getTok().getLoc();
if (Sym->isVariable()) {
const MCExpr *Expr = Sym->getVariableValue();
if (Expr->getKind() == MCExpr::SymbolRef) {
const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr *>(Expr);
StringRef DefSymbol = Ref->getSymbol().getName();
if (DefSymbol.startswith("$")) {
OperandMatchResultTy ResTy =
matchAnyRegisterNameWithoutDollar(Operands, DefSymbol.substr(1), S);
if (ResTy == MatchOperand_Success) {
Parser.Lex();
return true;
}
if (ResTy == MatchOperand_ParseFail)
llvm_unreachable("Should never ParseFail");
}
}
} else if (Sym->isUnset()) {
// If the symbol is unset, it might have been created in the
// `parseSetAssignment` routine as an alias for a numeric register name.
// Look it up in the aliases list.
auto Entry = RegisterSets.find(Sym->getName());
if (Entry != RegisterSets.end()) {
OperandMatchResultTy ResTy =
matchAnyRegisterWithoutDollar(Operands, Entry->getValue(), S);
if (ResTy == MatchOperand_Success) {
Parser.Lex();
return true;
}
}
}
return false;
}
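// Illustrative use (not from the original source): after
//   .set mytmp, $1
// a later occurrence of "mytmp" as an operand resolves to register $1 via
// the RegisterSets table populated in parseSetAssignment().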
OperandMatchResultTy
MipsAsmParser::matchAnyRegisterNameWithoutDollar(OperandVector &Operands,
StringRef Identifier,
SMLoc S) {
int Index = matchCPURegisterName(Identifier);
if (Index != -1) {
Operands.push_back(MipsOperand::createGPRReg(
Index, Identifier, getContext().getRegisterInfo(), S,
getLexer().getLoc(), *this));
return MatchOperand_Success;
}
Index = matchHWRegsRegisterName(Identifier);
if (Index != -1) {
Operands.push_back(MipsOperand::createHWRegsReg(
Index, Identifier, getContext().getRegisterInfo(), S,
getLexer().getLoc(), *this));
return MatchOperand_Success;
}
Index = matchFPURegisterName(Identifier);
if (Index != -1) {
Operands.push_back(MipsOperand::createFGRReg(
Index, Identifier, getContext().getRegisterInfo(), S,
getLexer().getLoc(), *this));
return MatchOperand_Success;
}
Index = matchFCCRegisterName(Identifier);
if (Index != -1) {
Operands.push_back(MipsOperand::createFCCReg(
Index, Identifier, getContext().getRegisterInfo(), S,
getLexer().getLoc(), *this));
return MatchOperand_Success;
}
Index = matchACRegisterName(Identifier);
if (Index != -1) {
Operands.push_back(MipsOperand::createACCReg(
Index, Identifier, getContext().getRegisterInfo(), S,
getLexer().getLoc(), *this));
return MatchOperand_Success;
}
Index = matchMSA128RegisterName(Identifier);
if (Index != -1) {
Operands.push_back(MipsOperand::createMSA128Reg(
Index, Identifier, getContext().getRegisterInfo(), S,
getLexer().getLoc(), *this));
return MatchOperand_Success;
}
Index = matchMSA128CtrlRegisterName(Identifier);
if (Index != -1) {
Operands.push_back(MipsOperand::createMSACtrlReg(
Index, Identifier, getContext().getRegisterInfo(), S,
getLexer().getLoc(), *this));
return MatchOperand_Success;
}
return MatchOperand_NoMatch;
}
OperandMatchResultTy
MipsAsmParser::matchAnyRegisterWithoutDollar(OperandVector &Operands,
const AsmToken &Token, SMLoc S) {
if (Token.is(AsmToken::Identifier)) {
LLVM_DEBUG(dbgs() << ".. identifier\n");
StringRef Identifier = Token.getIdentifier();
OperandMatchResultTy ResTy =
matchAnyRegisterNameWithoutDollar(Operands, Identifier, S);
return ResTy;
} else if (Token.is(AsmToken::Integer)) {
LLVM_DEBUG(dbgs() << ".. integer\n");
int64_t RegNum = Token.getIntVal();
if (RegNum < 0 || RegNum > 31) {
// Show the error, but treat the invalid register number as a normal one
// to continue parsing and catch other possible errors.
Error(getLexer().getLoc(), "invalid register number");
}
Operands.push_back(MipsOperand::createNumericReg(
RegNum, Token.getString(), getContext().getRegisterInfo(), S,
Token.getLoc(), *this));
return MatchOperand_Success;
}
LLVM_DEBUG(dbgs() << Token.getKind() << "\n");
return MatchOperand_NoMatch;
}
OperandMatchResultTy
MipsAsmParser::matchAnyRegisterWithoutDollar(OperandVector &Operands, SMLoc S) {
auto Token = getLexer().peekTok(false);
return matchAnyRegisterWithoutDollar(Operands, Token, S);
}
OperandMatchResultTy
MipsAsmParser::parseAnyRegister(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
LLVM_DEBUG(dbgs() << "parseAnyRegister\n");
auto Token = Parser.getTok();
SMLoc S = Token.getLoc();
if (Token.isNot(AsmToken::Dollar)) {
LLVM_DEBUG(dbgs() << ".. !$ -> try sym aliasing\n");
if (Token.is(AsmToken::Identifier)) {
if (searchSymbolAlias(Operands))
return MatchOperand_Success;
}
LLVM_DEBUG(dbgs() << ".. !symalias -> NoMatch\n");
return MatchOperand_NoMatch;
}
LLVM_DEBUG(dbgs() << ".. $\n");
OperandMatchResultTy ResTy = matchAnyRegisterWithoutDollar(Operands, S);
if (ResTy == MatchOperand_Success) {
Parser.Lex(); // $
Parser.Lex(); // identifier
}
return ResTy;
}
OperandMatchResultTy
MipsAsmParser::parseJumpTarget(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
LLVM_DEBUG(dbgs() << "parseJumpTarget\n");
SMLoc S = getLexer().getLoc();
// Registers are a valid target and have priority over symbols.
OperandMatchResultTy ResTy = parseAnyRegister(Operands);
if (ResTy != MatchOperand_NoMatch)
return ResTy;
// Integers and expressions are acceptable
const MCExpr *Expr = nullptr;
if (Parser.parseExpression(Expr)) {
// We have no way of knowing if a symbol was consumed, so we must ParseFail.
return MatchOperand_ParseFail;
}
Operands.push_back(
MipsOperand::CreateImm(Expr, S, getLexer().getLoc(), *this));
return MatchOperand_Success;
}
OperandMatchResultTy
MipsAsmParser::parseInvNum(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
const MCExpr *IdVal;
// If the first token is '$' we may have a register operand. We have to
// reject cases where it is not a register. Complicating the matter is that
// register names are not reserved across all ABIs.
// Peek past the dollar to see if it's a register name for this ABI.
SMLoc S = Parser.getTok().getLoc();
if (Parser.getTok().is(AsmToken::Dollar)) {
return matchCPURegisterName(Parser.getLexer().peekTok().getString()) == -1
? MatchOperand_ParseFail
: MatchOperand_NoMatch;
}
if (getParser().parseExpression(IdVal))
return MatchOperand_ParseFail;
const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(IdVal);
if (!MCE)
return MatchOperand_NoMatch;
int64_t Val = MCE->getValue();
SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
Operands.push_back(MipsOperand::CreateImm(
MCConstantExpr::create(0 - Val, getContext()), S, E, *this));
return MatchOperand_Success;
}
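// Illustrative note (not from the original source): a constant operand
// written as 8 is pushed as the immediate -8; the negation above is what
// makes this an "inverted number" operand.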
OperandMatchResultTy
MipsAsmParser::parseRegisterList(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
SmallVector<unsigned, 10> Regs;
unsigned RegNo;
unsigned PrevReg = Mips::NoRegister;
bool RegRange = false;
SmallVector<std::unique_ptr<MCParsedAsmOperand>, 8> TmpOperands;
if (Parser.getTok().isNot(AsmToken::Dollar))
return MatchOperand_ParseFail;
SMLoc S = Parser.getTok().getLoc();
while (parseAnyRegister(TmpOperands) == MatchOperand_Success) {
SMLoc E = getLexer().getLoc();
MipsOperand &Reg = static_cast<MipsOperand &>(*TmpOperands.back());
RegNo = isGP64bit() ? Reg.getGPR64Reg() : Reg.getGPR32Reg();
if (RegRange) {
// Expand the register range. A range end of $ra is pushed directly;
// otherwise every register from PrevReg+1 up to RegNo must be one of
// $s0-$s7 and is inserted in order.
if ((isGP64bit() && RegNo == Mips::RA_64) ||
(!isGP64bit() && RegNo == Mips::RA)) {
Regs.push_back(RegNo);
} else {
unsigned TmpReg = PrevReg + 1;
while (TmpReg <= RegNo) {
if ((((TmpReg < Mips::S0) || (TmpReg > Mips::S7)) && !isGP64bit()) ||
(((TmpReg < Mips::S0_64) || (TmpReg > Mips::S7_64)) &&
isGP64bit())) {
Error(E, "invalid register operand");
return MatchOperand_ParseFail;
}
PrevReg = TmpReg;
Regs.push_back(TmpReg++);
}
}
RegRange = false;
} else {
if ((PrevReg == Mips::NoRegister) &&
((isGP64bit() && (RegNo != Mips::S0_64) && (RegNo != Mips::RA_64)) ||
(!isGP64bit() && (RegNo != Mips::S0) && (RegNo != Mips::RA)))) {
Error(E, "$16 or $31 expected");
return MatchOperand_ParseFail;
} else if (!(((RegNo == Mips::FP || RegNo == Mips::RA ||
(RegNo >= Mips::S0 && RegNo <= Mips::S7)) &&
!isGP64bit()) ||
((RegNo == Mips::FP_64 || RegNo == Mips::RA_64 ||
(RegNo >= Mips::S0_64 && RegNo <= Mips::S7_64)) &&
isGP64bit()))) {
Error(E, "invalid register operand");
return MatchOperand_ParseFail;
} else if ((PrevReg != Mips::NoRegister) && (RegNo != PrevReg + 1) &&
((RegNo != Mips::FP && RegNo != Mips::RA && !isGP64bit()) ||
(RegNo != Mips::FP_64 && RegNo != Mips::RA_64 &&
isGP64bit()))) {
Error(E, "consecutive register numbers expected");
return MatchOperand_ParseFail;
}
Regs.push_back(RegNo);
}
if (Parser.getTok().is(AsmToken::Minus))
RegRange = true;
if (Parser.getTok().isNot(AsmToken::Minus) &&
Parser.getTok().isNot(AsmToken::Comma)) {
Error(E, "',' or '-' expected");
return MatchOperand_ParseFail;
}
Lex(); // Consume comma or minus
if (Parser.getTok().isNot(AsmToken::Dollar))
break;
PrevReg = RegNo;
}
SMLoc E = Parser.getTok().getLoc();
Operands.push_back(MipsOperand::CreateRegList(Regs, S, E, *this));
parseMemOperand(Operands);
return MatchOperand_Success;
}
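// Illustrative list forms accepted above (not from the original source):
//   $16-$19, $31   // the '-' range expands to $16,$17,$18,$19, then $ra
//   $16, $17, $18  // explicitly consecutive registers
// The first register must be $16 or $31, and parseMemOperand() is called
// afterwards for the trailing offset(base) part.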
/// Sometimes (e.g. for load/stores) the operand may be followed immediately
/// by this suffix:
/// ::= '(', register, ')'
/// Handle it before we iterate so we don't get tripped up by the lack of
/// a comma.
bool MipsAsmParser::parseParenSuffix(StringRef Name, OperandVector &Operands) {
MCAsmParser &Parser = getParser();
if (getLexer().is(AsmToken::LParen)) {
Operands.push_back(
MipsOperand::CreateToken("(", getLexer().getLoc(), *this));
Parser.Lex();
if (parseOperand(Operands, Name)) {
SMLoc Loc = getLexer().getLoc();
return Error(Loc, "unexpected token in argument list");
}
if (Parser.getTok().isNot(AsmToken::RParen)) {
SMLoc Loc = getLexer().getLoc();
return Error(Loc, "unexpected token, expected ')'");
}
Operands.push_back(
MipsOperand::CreateToken(")", getLexer().getLoc(), *this));
Parser.Lex();
}
return false;
}
/// Sometimes (e.g. in MSA) the operand may be followed immediately by
/// either one of these suffixes:
/// ::= '[', register, ']'
/// ::= '[', integer, ']'
/// Handle it before we iterate so we don't get tripped up by the lack of
/// a comma.
bool MipsAsmParser::parseBracketSuffix(StringRef Name,
OperandVector &Operands) {
MCAsmParser &Parser = getParser();
if (getLexer().is(AsmToken::LBrac)) {
Operands.push_back(
MipsOperand::CreateToken("[", getLexer().getLoc(), *this));
Parser.Lex();
if (parseOperand(Operands, Name)) {
SMLoc Loc = getLexer().getLoc();
return Error(Loc, "unexpected token in argument list");
}
if (Parser.getTok().isNot(AsmToken::RBrac)) {
SMLoc Loc = getLexer().getLoc();
return Error(Loc, "unexpected token, expected ']'");
}
Operands.push_back(
MipsOperand::CreateToken("]", getLexer().getLoc(), *this));
Parser.Lex();
}
return false;
}
static std::string MipsMnemonicSpellCheck(StringRef S, const FeatureBitset &FBS,
unsigned VariantID = 0);
bool MipsAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
SMLoc NameLoc, OperandVector &Operands) {
MCAsmParser &Parser = getParser();
LLVM_DEBUG(dbgs() << "ParseInstruction\n");
// We have reached the first instruction; module directives are now forbidden.
getTargetStreamer().forbidModuleDirective();
// Check if we have a valid mnemonic.
if (!mnemonicIsValid(Name, 0)) {
FeatureBitset FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
std::string Suggestion = MipsMnemonicSpellCheck(Name, FBS);
return Error(NameLoc, "unknown instruction" + Suggestion);
}
// First operand in MCInst is instruction mnemonic.
Operands.push_back(MipsOperand::CreateToken(Name, NameLoc, *this));
// Read the remaining operands.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
// Read the first operand.
if (parseOperand(Operands, Name)) {
SMLoc Loc = getLexer().getLoc();
return Error(Loc, "unexpected token in argument list");
}
if (getLexer().is(AsmToken::LBrac) && parseBracketSuffix(Name, Operands))
return true;
// AFAIK, parenthesis suffixes are never on the first operand
while (getLexer().is(AsmToken::Comma)) {
Parser.Lex(); // Eat the comma.
// Parse and remember the operand.
if (parseOperand(Operands, Name)) {
SMLoc Loc = getLexer().getLoc();
return Error(Loc, "unexpected token in argument list");
}
// Parse bracket and parenthesis suffixes before we iterate
if (getLexer().is(AsmToken::LBrac)) {
if (parseBracketSuffix(Name, Operands))
return true;
} else if (getLexer().is(AsmToken::LParen) &&
parseParenSuffix(Name, Operands))
return true;
}
}
if (getLexer().isNot(AsmToken::EndOfStatement)) {
SMLoc Loc = getLexer().getLoc();
return Error(Loc, "unexpected token in argument list");
}
Parser.Lex(); // Consume the EndOfStatement.
return false;
}
// FIXME: Given that these have the same name, they should be consistent
// about how they affect the Parser.
bool MipsAsmParser::reportParseError(Twine ErrorMsg) {
SMLoc Loc = getLexer().getLoc();
return Error(Loc, ErrorMsg);
}
bool MipsAsmParser::reportParseError(SMLoc Loc, Twine ErrorMsg) {
return Error(Loc, ErrorMsg);
}
bool MipsAsmParser::parseSetNoAtDirective() {
MCAsmParser &Parser = getParser();
// Line should look like: ".set noat".
// Set the $at register to $0.
AssemblerOptions.back()->setATRegIndex(0);
Parser.Lex(); // Eat "noat".
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
getTargetStreamer().emitDirectiveSetNoAt();
Parser.Lex(); // Consume the EndOfStatement.
return false;
}
bool MipsAsmParser::parseSetAtDirective() {
// Line can be: ".set at", which sets $at to $1
// or ".set at=$reg", which sets $at to $reg.
MCAsmParser &Parser = getParser();
Parser.Lex(); // Eat "at".
if (getLexer().is(AsmToken::EndOfStatement)) {
// No register was specified, so we set $at to $1.
AssemblerOptions.back()->setATRegIndex(1);
getTargetStreamer().emitDirectiveSetAt();
Parser.Lex(); // Consume the EndOfStatement.
return false;
}
if (getLexer().isNot(AsmToken::Equal)) {
reportParseError("unexpected token, expected equals sign");
return false;
}
Parser.Lex(); // Eat "=".
if (getLexer().isNot(AsmToken::Dollar)) {
if (getLexer().is(AsmToken::EndOfStatement)) {
reportParseError("no register specified");
return false;
} else {
reportParseError("unexpected token, expected dollar sign '$'");
return false;
}
}
Parser.Lex(); // Eat "$".
// Find out what "reg" is.
unsigned AtRegNo;
const AsmToken &Reg = Parser.getTok();
if (Reg.is(AsmToken::Identifier)) {
AtRegNo = matchCPURegisterName(Reg.getIdentifier());
} else if (Reg.is(AsmToken::Integer)) {
AtRegNo = Reg.getIntVal();
} else {
reportParseError("unexpected token, expected identifier or integer");
return false;
}
// Check if $reg is a valid register. If it is, set $at to $reg.
if (!AssemblerOptions.back()->setATRegIndex(AtRegNo)) {
reportParseError("invalid register");
return false;
}
Parser.Lex(); // Eat "reg".
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
getTargetStreamer().emitDirectiveSetAtWithArg(AtRegNo);
Parser.Lex(); // Consume the EndOfStatement.
return false;
}
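// Illustrative directive forms handled here and in parseSetNoAtDirective()
// (not from the original source):
//   .set at       # restore the default; $at is $1
//   .set at=$2    # use $2 as the assembler temporary
//   .set noat     # no register is available as the assembler temporary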
bool MipsAsmParser::parseSetReorderDirective() {
MCAsmParser &Parser = getParser();
Parser.Lex();
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
AssemblerOptions.back()->setReorder();
getTargetStreamer().emitDirectiveSetReorder();
Parser.Lex(); // Consume the EndOfStatement.
return false;
}
bool MipsAsmParser::parseSetNoReorderDirective() {
MCAsmParser &Parser = getParser();
Parser.Lex();
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
AssemblerOptions.back()->setNoReorder();
getTargetStreamer().emitDirectiveSetNoReorder();
Parser.Lex(); // Consume the EndOfStatement.
return false;
}
bool MipsAsmParser::parseSetMacroDirective() {
MCAsmParser &Parser = getParser();
Parser.Lex();
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
AssemblerOptions.back()->setMacro();
getTargetStreamer().emitDirectiveSetMacro();
Parser.Lex(); // Consume the EndOfStatement.
return false;
}
bool MipsAsmParser::parseSetNoMacroDirective() {
MCAsmParser &Parser = getParser();
Parser.Lex();
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
if (AssemblerOptions.back()->isReorder()) {
reportParseError("`noreorder' must be set before `nomacro'");
return false;
}
AssemblerOptions.back()->setNoMacro();
getTargetStreamer().emitDirectiveSetNoMacro();
Parser.Lex(); // Consume the EndOfStatement.
return false;
}
bool MipsAsmParser::parseSetMsaDirective() {
MCAsmParser &Parser = getParser();
Parser.Lex();
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement))
return reportParseError("unexpected token, expected end of statement");
setFeatureBits(Mips::FeatureMSA, "msa");
getTargetStreamer().emitDirectiveSetMsa();
return false;
}
bool MipsAsmParser::parseSetNoMsaDirective() {
MCAsmParser &Parser = getParser();
Parser.Lex();
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement))
return reportParseError("unexpected token, expected end of statement");
clearFeatureBits(Mips::FeatureMSA, "msa");
getTargetStreamer().emitDirectiveSetNoMsa();
return false;
}
bool MipsAsmParser::parseSetNoDspDirective() {
MCAsmParser &Parser = getParser();
Parser.Lex(); // Eat "nodsp".
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
clearFeatureBits(Mips::FeatureDSP, "dsp");
getTargetStreamer().emitDirectiveSetNoDsp();
return false;
}
bool MipsAsmParser::parseSetMips16Directive() {
MCAsmParser &Parser = getParser();
Parser.Lex(); // Eat "mips16".
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
setFeatureBits(Mips::FeatureMips16, "mips16");
getTargetStreamer().emitDirectiveSetMips16();
Parser.Lex(); // Consume the EndOfStatement.
return false;
}
bool MipsAsmParser::parseSetNoMips16Directive() {
MCAsmParser &Parser = getParser();
Parser.Lex(); // Eat "nomips16".
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
clearFeatureBits(Mips::FeatureMips16, "mips16");
getTargetStreamer().emitDirectiveSetNoMips16();
Parser.Lex(); // Consume the EndOfStatement.
return false;
}
bool MipsAsmParser::parseSetFpDirective() {
MCAsmParser &Parser = getParser();
MipsABIFlagsSection::FpABIKind FpAbiVal;
// Line can be: .set fp=32
//              .set fp=xx
//              .set fp=64
Parser.Lex(); // Eat fp token
AsmToken Tok = Parser.getTok();
if (Tok.isNot(AsmToken::Equal)) {
reportParseError("unexpected token, expected equals sign '='");
return false;
}
Parser.Lex(); // Eat '=' token.
Tok = Parser.getTok();
if (!parseFpABIValue(FpAbiVal, ".set"))
return false;
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
getTargetStreamer().emitDirectiveSetFp(FpAbiVal);
Parser.Lex(); // Consume the EndOfStatement.
return false;
}
bool MipsAsmParser::parseSetOddSPRegDirective() {
MCAsmParser &Parser = getParser();
Parser.Lex(); // Eat "oddspreg".
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
clearFeatureBits(Mips::FeatureNoOddSPReg, "nooddspreg");
getTargetStreamer().emitDirectiveSetOddSPReg();
return false;
}
bool MipsAsmParser::parseSetNoOddSPRegDirective() {
MCAsmParser &Parser = getParser();
Parser.Lex(); // Eat "nooddspreg".
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
setFeatureBits(Mips::FeatureNoOddSPReg, "nooddspreg");
getTargetStreamer().emitDirectiveSetNoOddSPReg();
return false;
}
bool MipsAsmParser::parseSetMtDirective() {
MCAsmParser &Parser = getParser();
Parser.Lex(); // Eat "mt".
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
setFeatureBits(Mips::FeatureMT, "mt");
getTargetStreamer().emitDirectiveSetMt();
Parser.Lex(); // Consume the EndOfStatement.
return false;
}
bool MipsAsmParser::parseSetNoMtDirective() {
MCAsmParser &Parser = getParser();
Parser.Lex(); // Eat "nomt".
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
clearFeatureBits(Mips::FeatureMT, "mt");
getTargetStreamer().emitDirectiveSetNoMt();
Parser.Lex(); // Consume the EndOfStatement.
return false;
}
bool MipsAsmParser::parseSetNoCRCDirective() {
MCAsmParser &Parser = getParser();
Parser.Lex(); // Eat "nocrc".
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
clearFeatureBits(Mips::FeatureCRC, "crc");
getTargetStreamer().emitDirectiveSetNoCRC();
Parser.Lex(); // Consume the EndOfStatement.
return false;
}
bool MipsAsmParser::parseSetNoVirtDirective() {
MCAsmParser &Parser = getParser();
Parser.Lex(); // Eat "novirt".
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
clearFeatureBits(Mips::FeatureVirt, "virt");
getTargetStreamer().emitDirectiveSetNoVirt();
Parser.Lex(); // Consume the EndOfStatement.
return false;
}
bool MipsAsmParser::parseSetNoGINVDirective() {
MCAsmParser &Parser = getParser();
Parser.Lex(); // Eat "noginv".
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
clearFeatureBits(Mips::FeatureGINV, "ginv");
getTargetStreamer().emitDirectiveSetNoGINV();
Parser.Lex(); // Consume the EndOfStatement.
return false;
}
bool MipsAsmParser::parseSetPopDirective() {
MCAsmParser &Parser = getParser();
SMLoc Loc = getLexer().getLoc();
Parser.Lex();
if (getLexer().isNot(AsmToken::EndOfStatement))
return reportParseError("unexpected token, expected end of statement");
// Always keep an element on the options "stack" to prevent the user
// from changing the initial options. This is how we remember them.
if (AssemblerOptions.size() == 2)
return reportParseError(Loc, ".set pop with no .set push");
MCSubtargetInfo &STI = copySTI();
AssemblerOptions.pop_back();
setAvailableFeatures(
ComputeAvailableFeatures(AssemblerOptions.back()->getFeatures()));
STI.setFeatureBits(AssemblerOptions.back()->getFeatures());
getTargetStreamer().emitDirectiveSetPop();
return false;
}
bool MipsAsmParser::parseSetPushDirective() {
MCAsmParser &Parser = getParser();
Parser.Lex();
if (getLexer().isNot(AsmToken::EndOfStatement))
return reportParseError("unexpected token, expected end of statement");
// Create a copy of the current assembler options environment and push it.
AssemblerOptions.push_back(
llvm::make_unique<MipsAssemblerOptions>(AssemblerOptions.back().get()));
getTargetStreamer().emitDirectiveSetPush();
return false;
}
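// Illustrative pairing (not from the original source):
//   .set push        # snapshot the current assembler options/features
//   .set mips32r2
//   ...
//   .set pop         # restore the snapshot; popping the initial entry is
//                    # rejected above (".set pop with no .set push")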
bool MipsAsmParser::parseSetSoftFloatDirective() {
MCAsmParser &Parser = getParser();
Parser.Lex();
if (getLexer().isNot(AsmToken::EndOfStatement))
return reportParseError("unexpected token, expected end of statement");
setFeatureBits(Mips::FeatureSoftFloat, "soft-float");
getTargetStreamer().emitDirectiveSetSoftFloat();
return false;
}
bool MipsAsmParser::parseSetHardFloatDirective() {
MCAsmParser &Parser = getParser();
Parser.Lex();
if (getLexer().isNot(AsmToken::EndOfStatement))
return reportParseError("unexpected token, expected end of statement");
clearFeatureBits(Mips::FeatureSoftFloat, "soft-float");
getTargetStreamer().emitDirectiveSetHardFloat();
return false;
}
bool MipsAsmParser::parseSetAssignment() {
StringRef Name;
MCAsmParser &Parser = getParser();
if (Parser.parseIdentifier(Name))
return reportParseError("expected identifier after .set");
if (getLexer().isNot(AsmToken::Comma))
return reportParseError("unexpected token, expected comma");
Lex(); // Eat comma
if (getLexer().is(AsmToken::Dollar) &&
getLexer().peekTok().is(AsmToken::Integer)) {
// Parse assignment of a numeric register:
// .set r1,$1
Parser.Lex(); // Eat $.
RegisterSets[Name] = Parser.getTok();
Parser.Lex(); // Eat identifier.
getContext().getOrCreateSymbol(Name);
return false;
}
MCSymbol *Sym;
const MCExpr *Value;
if (MCParserUtils::parseAssignmentExpression(Name, /* allow_redef */ true,
Parser, Sym, Value))
return true;
Sym->setVariableValue(Value);
return false;
}
bool MipsAsmParser::parseSetMips0Directive() {
MCAsmParser &Parser = getParser();
Parser.Lex();
if (getLexer().isNot(AsmToken::EndOfStatement))
return reportParseError("unexpected token, expected end of statement");
// Reset assembler options to their initial values.
MCSubtargetInfo &STI = copySTI();
setAvailableFeatures(
ComputeAvailableFeatures(AssemblerOptions.front()->getFeatures()));
STI.setFeatureBits(AssemblerOptions.front()->getFeatures());
AssemblerOptions.back()->setFeatures(AssemblerOptions.front()->getFeatures());
getTargetStreamer().emitDirectiveSetMips0();
return false;
}
bool MipsAsmParser::parseSetArchDirective() {
MCAsmParser &Parser = getParser();
Parser.Lex();
if (getLexer().isNot(AsmToken::Equal))
return reportParseError("unexpected token, expected equals sign");
Parser.Lex();
StringRef Arch = getParser().parseStringToEndOfStatement().trim();
if (Arch.empty())
return reportParseError("expected arch identifier");
StringRef ArchFeatureName =
StringSwitch<StringRef>(Arch)
.Case("mips1", "mips1")
.Case("mips2", "mips2")
.Case("mips3", "mips3")
.Case("mips4", "mips4")
.Case("mips5", "mips5")
.Case("mips32", "mips32")
.Case("mips32r2", "mips32r2")
.Case("mips32r3", "mips32r3")
.Case("mips32r5", "mips32r5")
.Case("mips32r6", "mips32r6")
.Case("mips64", "mips64")
.Case("mips64r2", "mips64r2")
.Case("mips64r3", "mips64r3")
.Case("mips64r5", "mips64r5")
.Case("mips64r6", "mips64r6")
.Case("octeon", "cnmips")
.Case("octeon+", "cnmipsp")
.Case("r4000", "mips3") // This is an implementation of Mips3.
.Default("");
if (ArchFeatureName.empty())
return reportParseError("unsupported architecture");
if (ArchFeatureName == "mips64r6" && inMicroMipsMode())
return reportParseError("mips64r6 does not support microMIPS");
selectArch(ArchFeatureName);
getTargetStreamer().emitDirectiveSetArch(Arch);
return false;
}
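// Illustrative forms (not from the original source):
//   .set arch=mips32r2   # selects the "mips32r2" feature set
//   .set arch=r4000      # aliases to "mips3" per the table above
//   .set arch=octeon     # maps to the "cnmips" feature name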
bool MipsAsmParser::parseSetFeature(uint64_t Feature) {
MCAsmParser &Parser = getParser();
Parser.Lex();
if (getLexer().isNot(AsmToken::EndOfStatement))
return reportParseError("unexpected token, expected end of statement");
switch (Feature) {
default:
llvm_unreachable("Unimplemented feature");
case Mips::FeatureDSP:
setFeatureBits(Mips::FeatureDSP, "dsp");
getTargetStreamer().emitDirectiveSetDsp();
break;
case Mips::FeatureDSPR2:
setFeatureBits(Mips::FeatureDSPR2, "dspr2");
getTargetStreamer().emitDirectiveSetDspr2();
break;
case Mips::FeatureMicroMips:
setFeatureBits(Mips::FeatureMicroMips, "micromips");
getTargetStreamer().emitDirectiveSetMicroMips();
break;
case Mips::FeatureMips1:
selectArch("mips1");
getTargetStreamer().emitDirectiveSetMips1();
break;
case Mips::FeatureMips2:
selectArch("mips2");
getTargetStreamer().emitDirectiveSetMips2();
break;
case Mips::FeatureMips3:
selectArch("mips3");
getTargetStreamer().emitDirectiveSetMips3();
break;
case Mips::FeatureMips4:
selectArch("mips4");
getTargetStreamer().emitDirectiveSetMips4();
break;
case Mips::FeatureMips5:
selectArch("mips5");
getTargetStreamer().emitDirectiveSetMips5();
break;
case Mips::FeatureMips32:
selectArch("mips32");
getTargetStreamer().emitDirectiveSetMips32();
break;
case Mips::FeatureMips32r2:
selectArch("mips32r2");
getTargetStreamer().emitDirectiveSetMips32R2();
break;
case Mips::FeatureMips32r3:
selectArch("mips32r3");
getTargetStreamer().emitDirectiveSetMips32R3();
break;
case Mips::FeatureMips32r5:
selectArch("mips32r5");
getTargetStreamer().emitDirectiveSetMips32R5();
break;
case Mips::FeatureMips32r6:
selectArch("mips32r6");
getTargetStreamer().emitDirectiveSetMips32R6();
break;
case Mips::FeatureMips64:
selectArch("mips64");
getTargetStreamer().emitDirectiveSetMips64();
break;
case Mips::FeatureMips64r2:
selectArch("mips64r2");
getTargetStreamer().emitDirectiveSetMips64R2();
break;
case Mips::FeatureMips64r3:
selectArch("mips64r3");
getTargetStreamer().emitDirectiveSetMips64R3();
break;
case Mips::FeatureMips64r5:
selectArch("mips64r5");
getTargetStreamer().emitDirectiveSetMips64R5();
break;
case Mips::FeatureMips64r6:
selectArch("mips64r6");
getTargetStreamer().emitDirectiveSetMips64R6();
break;
case Mips::FeatureCRC:
setFeatureBits(Mips::FeatureCRC, "crc");
getTargetStreamer().emitDirectiveSetCRC();
break;
case Mips::FeatureVirt:
setFeatureBits(Mips::FeatureVirt, "virt");
getTargetStreamer().emitDirectiveSetVirt();
break;
case Mips::FeatureGINV:
setFeatureBits(Mips::FeatureGINV, "ginv");
getTargetStreamer().emitDirectiveSetGINV();
break;
}
return false;
}
bool MipsAsmParser::eatComma(StringRef ErrorStr) {
MCAsmParser &Parser = getParser();
if (getLexer().isNot(AsmToken::Comma)) {
SMLoc Loc = getLexer().getLoc();
return Error(Loc, ErrorStr);
}
Parser.Lex(); // Eat the comma.
return true;
}
// Used to determine if .cpload, .cprestore, and .cpsetup have any effect.
// In this class, it is only used for .cprestore.
// FIXME: Only keep track of IsPicEnabled in one place, instead of in both
// MipsTargetELFStreamer and MipsAsmParser.
bool MipsAsmParser::isPicAndNotNxxAbi() {
return inPicMode() && !(isABI_N32() || isABI_N64());
}
bool MipsAsmParser::parseDirectiveCpLoad(SMLoc Loc) {
if (AssemblerOptions.back()->isReorder())
Warning(Loc, ".cpload should be inside a noreorder section");
if (inMips16Mode()) {
reportParseError(".cpload is not supported in Mips16 mode");
return false;
}
SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Reg;
OperandMatchResultTy ResTy = parseAnyRegister(Reg);
if (ResTy == MatchOperand_NoMatch || ResTy == MatchOperand_ParseFail) {
reportParseError("expected register containing function address");
return false;
}
MipsOperand &RegOpnd = static_cast<MipsOperand &>(*Reg[0]);
if (!RegOpnd.isGPRAsmReg()) {
reportParseError(RegOpnd.getStartLoc(), "invalid register");
return false;
}
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
getTargetStreamer().emitDirectiveCpLoad(RegOpnd.getGPR32Reg());
return false;
}
bool MipsAsmParser::parseDirectiveCpLocal(SMLoc Loc) {
if (!isABI_N32() && !isABI_N64()) {
reportParseError(".cplocal is allowed only in N32 or N64 mode");
return false;
}
SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Reg;
OperandMatchResultTy ResTy = parseAnyRegister(Reg);
if (ResTy == MatchOperand_NoMatch || ResTy == MatchOperand_ParseFail) {
reportParseError("expected register containing global pointer");
return false;
}
MipsOperand &RegOpnd = static_cast<MipsOperand &>(*Reg[0]);
if (!RegOpnd.isGPRAsmReg()) {
reportParseError(RegOpnd.getStartLoc(), "invalid register");
return false;
}
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
getParser().Lex(); // Consume the EndOfStatement.
unsigned NewReg = RegOpnd.getGPR32Reg();
if (IsPicEnabled)
GPReg = NewReg;
getTargetStreamer().emitDirectiveCpLocal(NewReg);
return false;
}
bool MipsAsmParser::parseDirectiveCpRestore(SMLoc Loc) {
MCAsmParser &Parser = getParser();
// Note that .cprestore is ignored if used with the N32 and N64 ABIs or if it
// is used in non-PIC mode.
if (inMips16Mode()) {
reportParseError(".cprestore is not supported in Mips16 mode");
return false;
}
// Get the stack offset value.
const MCExpr *StackOffset;
int64_t StackOffsetVal;
if (Parser.parseExpression(StackOffset)) {
reportParseError("expected stack offset value");
return false;
}
if (!StackOffset->evaluateAsAbsolute(StackOffsetVal)) {
reportParseError("stack offset is not an absolute expression");
return false;
}
if (StackOffsetVal < 0) {
Warning(Loc, ".cprestore with negative stack offset has no effect");
IsCpRestoreSet = false;
} else {
IsCpRestoreSet = true;
CpRestoreOffset = StackOffsetVal;
}
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
if (!getTargetStreamer().emitDirectiveCpRestore(
CpRestoreOffset, [&]() { return getATReg(Loc); }, Loc, STI))
return true;
Parser.Lex(); // Consume the EndOfStatement.
return false;
}
bool MipsAsmParser::parseDirectiveCPSetup() {
MCAsmParser &Parser = getParser();
unsigned FuncReg;
unsigned Save;
bool SaveIsReg = true;
SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> TmpReg;
OperandMatchResultTy ResTy = parseAnyRegister(TmpReg);
if (ResTy == MatchOperand_NoMatch) {
reportParseError("expected register containing function address");
return false;
}
MipsOperand &FuncRegOpnd = static_cast<MipsOperand &>(*TmpReg[0]);
if (!FuncRegOpnd.isGPRAsmReg()) {
reportParseError(FuncRegOpnd.getStartLoc(), "invalid register");
return false;
}
FuncReg = FuncRegOpnd.getGPR32Reg();
TmpReg.clear();
if (!eatComma("unexpected token, expected comma"))
return true;
ResTy = parseAnyRegister(TmpReg);
if (ResTy == MatchOperand_NoMatch) {
const MCExpr *OffsetExpr;
int64_t OffsetVal;
SMLoc ExprLoc = getLexer().getLoc();
if (Parser.parseExpression(OffsetExpr) ||
!OffsetExpr->evaluateAsAbsolute(OffsetVal)) {
reportParseError(ExprLoc, "expected save register or stack offset");
return false;
}
Save = OffsetVal;
SaveIsReg = false;
} else {
MipsOperand &SaveOpnd = static_cast<MipsOperand &>(*TmpReg[0]);
if (!SaveOpnd.isGPRAsmReg()) {
reportParseError(SaveOpnd.getStartLoc(), "invalid register");
return false;
}
Save = SaveOpnd.getGPR32Reg();
}
if (!eatComma("unexpected token, expected comma"))
return true;
const MCExpr *Expr;
if (Parser.parseExpression(Expr)) {
reportParseError("expected expression");
return false;
}
if (Expr->getKind() != MCExpr::SymbolRef) {
reportParseError("expected symbol");
return false;
}
const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr *>(Expr);
CpSaveLocation = Save;
CpSaveLocationIsRegister = SaveIsReg;
getTargetStreamer().emitDirectiveCpsetup(FuncReg, Save, Ref->getSymbol(),
SaveIsReg);
return false;
}
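// Illustrative forms (hypothetical symbol name, not from the original
// source):
//   .cpsetup $25, $17, __some_func   # save the old $gp in register $17
//   .cpsetup $25, 16, __some_func    # save the old $gp at stack offset 16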
bool MipsAsmParser::parseDirectiveCPReturn() {
getTargetStreamer().emitDirectiveCpreturn(CpSaveLocation,
CpSaveLocationIsRegister);
return false;
}
bool MipsAsmParser::parseDirectiveNaN() {
MCAsmParser &Parser = getParser();
if (getLexer().isNot(AsmToken::EndOfStatement)) {
const AsmToken &Tok = Parser.getTok();
if (Tok.getString() == "2008") {
Parser.Lex();
getTargetStreamer().emitDirectiveNaN2008();
return false;
} else if (Tok.getString() == "legacy") {
Parser.Lex();
getTargetStreamer().emitDirectiveNaNLegacy();
return false;
}
}
// If we don't recognize the option passed to the .nan
// directive (e.g. no option or unknown option), emit an error.
reportParseError("invalid option in .nan directive");
return false;
}
bool MipsAsmParser::parseDirectiveSet() {
const AsmToken &Tok = getParser().getTok();
StringRef IdVal = Tok.getString();
SMLoc Loc = Tok.getLoc();
if (IdVal == "noat")
return parseSetNoAtDirective();
if (IdVal == "at")
return parseSetAtDirective();
if (IdVal == "arch")
return parseSetArchDirective();
if (IdVal == "bopt") {
Warning(Loc, "'bopt' feature is unsupported");
getParser().Lex();
return false;
}
if (IdVal == "nobopt") {
// We're already running in nobopt mode, so nothing to do.
getParser().Lex();
return false;
}
if (IdVal == "fp")
return parseSetFpDirective();
if (IdVal == "oddspreg")
return parseSetOddSPRegDirective();
if (IdVal == "nooddspreg")
return parseSetNoOddSPRegDirective();
if (IdVal == "pop")
return parseSetPopDirective();
if (IdVal == "push")
return parseSetPushDirective();
if (IdVal == "reorder")
return parseSetReorderDirective();
if (IdVal == "noreorder")
return parseSetNoReorderDirective();
if (IdVal == "macro")
return parseSetMacroDirective();
if (IdVal == "nomacro")
return parseSetNoMacroDirective();
if (IdVal == "mips16")
return parseSetMips16Directive();
if (IdVal == "nomips16")
return parseSetNoMips16Directive();
if (IdVal == "nomicromips") {
clearFeatureBits(Mips::FeatureMicroMips, "micromips");
getTargetStreamer().emitDirectiveSetNoMicroMips();
getParser().eatToEndOfStatement();
return false;
}
if (IdVal == "micromips") {
if (hasMips64r6()) {
Error(Loc, ".set micromips directive is not supported with MIPS64R6");
return false;
}
return parseSetFeature(Mips::FeatureMicroMips);
}
if (IdVal == "mips0")
return parseSetMips0Directive();
if (IdVal == "mips1")
return parseSetFeature(Mips::FeatureMips1);
if (IdVal == "mips2")
return parseSetFeature(Mips::FeatureMips2);
if (IdVal == "mips3")
return parseSetFeature(Mips::FeatureMips3);
if (IdVal == "mips4")
return parseSetFeature(Mips::FeatureMips4);
if (IdVal == "mips5")
return parseSetFeature(Mips::FeatureMips5);
if (IdVal == "mips32")
return parseSetFeature(Mips::FeatureMips32);
if (IdVal == "mips32r2")
return parseSetFeature(Mips::FeatureMips32r2);
if (IdVal == "mips32r3")
return parseSetFeature(Mips::FeatureMips32r3);
if (IdVal == "mips32r5")
return parseSetFeature(Mips::FeatureMips32r5);
if (IdVal == "mips32r6")
return parseSetFeature(Mips::FeatureMips32r6);
if (IdVal == "mips64")
return parseSetFeature(Mips::FeatureMips64);
if (IdVal == "mips64r2")
return parseSetFeature(Mips::FeatureMips64r2);
if (IdVal == "mips64r3")
return parseSetFeature(Mips::FeatureMips64r3);
if (IdVal == "mips64r5")
return parseSetFeature(Mips::FeatureMips64r5);
if (IdVal == "mips64r6") {
if (inMicroMipsMode()) {
Error(Loc, "MIPS64R6 is not supported with microMIPS");
return false;
}
return parseSetFeature(Mips::FeatureMips64r6);
}
if (IdVal == "dsp")
return parseSetFeature(Mips::FeatureDSP);
if (IdVal == "dspr2")
return parseSetFeature(Mips::FeatureDSPR2);
if (IdVal == "nodsp")
return parseSetNoDspDirective();
if (IdVal == "msa")
return parseSetMsaDirective();
if (IdVal == "nomsa")
return parseSetNoMsaDirective();
if (IdVal == "mt")
return parseSetMtDirective();
if (IdVal == "nomt")
return parseSetNoMtDirective();
if (IdVal == "softfloat")
return parseSetSoftFloatDirective();
if (IdVal == "hardfloat")
return parseSetHardFloatDirective();
if (IdVal == "crc")
return parseSetFeature(Mips::FeatureCRC);
if (IdVal == "nocrc")
return parseSetNoCRCDirective();
if (IdVal == "virt")
return parseSetFeature(Mips::FeatureVirt);
if (IdVal == "novirt")
return parseSetNoVirtDirective();
if (IdVal == "ginv")
return parseSetFeature(Mips::FeatureGINV);
if (IdVal == "noginv")
return parseSetNoGINVDirective();
// It is just an identifier; look for an assignment.
return parseSetAssignment();
}
/// parseDirectiveGpWord
/// ::= .gpword local_sym
bool MipsAsmParser::parseDirectiveGpWord() {
MCAsmParser &Parser = getParser();
const MCExpr *Value;
// EmitGPRel32Value requires an expression, so we use the base class
// method to parse the expression.
if (getParser().parseExpression(Value))
return true;
getParser().getStreamer().EmitGPRel32Value(Value);
if (getLexer().isNot(AsmToken::EndOfStatement))
return Error(getLexer().getLoc(),
"unexpected token, expected end of statement");
Parser.Lex(); // Eat EndOfStatement token.
return false;
}
/// parseDirectiveGpDWord
/// ::= .gpdword local_sym
bool MipsAsmParser::parseDirectiveGpDWord() {
MCAsmParser &Parser = getParser();
const MCExpr *Value;
// EmitGPRel64Value requires an expression, so we use the base class
// method to parse the expression.
if (getParser().parseExpression(Value))
return true;
getParser().getStreamer().EmitGPRel64Value(Value);
if (getLexer().isNot(AsmToken::EndOfStatement))
return Error(getLexer().getLoc(),
"unexpected token, expected end of statement");
Parser.Lex(); // Eat EndOfStatement token.
return false;
}
/// parseDirectiveDtpRelWord
/// ::= .dtprelword tls_sym
bool MipsAsmParser::parseDirectiveDtpRelWord() {
MCAsmParser &Parser = getParser();
const MCExpr *Value;
// EmitDTPRel32Value requires an expression, so we use the base class
// method to parse the expression.
if (getParser().parseExpression(Value))
return true;
getParser().getStreamer().EmitDTPRel32Value(Value);
if (getLexer().isNot(AsmToken::EndOfStatement))
return Error(getLexer().getLoc(),
"unexpected token, expected end of statement");
Parser.Lex(); // Eat EndOfStatement token.
return false;
}
/// parseDirectiveDtpRelDWord
/// ::= .dtpreldword tls_sym
bool MipsAsmParser::parseDirectiveDtpRelDWord() {
MCAsmParser &Parser = getParser();
const MCExpr *Value;
// EmitDTPRel64Value requires an expression, so we use the base class
// method to parse the expression.
if (getParser().parseExpression(Value))
return true;
getParser().getStreamer().EmitDTPRel64Value(Value);
if (getLexer().isNot(AsmToken::EndOfStatement))
return Error(getLexer().getLoc(),
"unexpected token, expected end of statement");
Parser.Lex(); // Eat EndOfStatement token.
return false;
}
/// parseDirectiveTpRelWord
/// ::= .tprelword tls_sym
bool MipsAsmParser::parseDirectiveTpRelWord() {
MCAsmParser &Parser = getParser();
const MCExpr *Value;
// EmitTPRel32Value requires an expression, so we use the base class
// method to parse the expression.
if (getParser().parseExpression(Value))
return true;
getParser().getStreamer().EmitTPRel32Value(Value);
if (getLexer().isNot(AsmToken::EndOfStatement))
return Error(getLexer().getLoc(),
"unexpected token, expected end of statement");
Parser.Lex(); // Eat EndOfStatement token.
return false;
}
/// parseDirectiveTpRelDWord
/// ::= .tpreldword tls_sym
bool MipsAsmParser::parseDirectiveTpRelDWord() {
MCAsmParser &Parser = getParser();
const MCExpr *Value;
// EmitTPRel64Value requires an expression, so we use the base class
// method to parse the expression.
if (getParser().parseExpression(Value))
return true;
getParser().getStreamer().EmitTPRel64Value(Value);
if (getLexer().isNot(AsmToken::EndOfStatement))
return Error(getLexer().getLoc(),
"unexpected token, expected end of statement");
Parser.Lex(); // Eat EndOfStatement token.
return false;
}
bool MipsAsmParser::parseDirectiveOption() {
MCAsmParser &Parser = getParser();
// Get the option token.
AsmToken Tok = Parser.getTok();
// At the moment only identifiers are supported.
if (Tok.isNot(AsmToken::Identifier)) {
return Error(Parser.getTok().getLoc(),
"unexpected token, expected identifier");
}
StringRef Option = Tok.getIdentifier();
if (Option == "pic0") {
// MipsAsmParser needs to know if the current PIC mode changes.
IsPicEnabled = false;
getTargetStreamer().emitDirectiveOptionPic0();
Parser.Lex();
if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
return Error(Parser.getTok().getLoc(),
"unexpected token, expected end of statement");
}
return false;
}
if (Option == "pic2") {
// MipsAsmParser needs to know if the current PIC mode changes.
IsPicEnabled = true;
getTargetStreamer().emitDirectiveOptionPic2();
Parser.Lex();
if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
return Error(Parser.getTok().getLoc(),
"unexpected token, expected end of statement");
}
return false;
}
// Unknown option.
Warning(Parser.getTok().getLoc(),
"unknown option, expected 'pic0' or 'pic2'");
Parser.eatToEndOfStatement();
return false;
}
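// Illustrative forms (not from the original source):
//   .option pic0   # PIC mode off
//   .option pic2   # PIC mode on
// Any other option only produces the warning above and is skipped.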
/// parseInsnDirective
/// ::= .insn
bool MipsAsmParser::parseInsnDirective() {
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
// The actual label marking happens in
// MipsELFStreamer::createPendingLabelRelocs().
getTargetStreamer().emitDirectiveInsn();
getParser().Lex(); // Eat EndOfStatement token.
return false;
}
/// parseRSectionDirective
/// ::= .rdata
bool MipsAsmParser::parseRSectionDirective(StringRef Section) {
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
MCSection *ELFSection = getContext().getELFSection(
Section, ELF::SHT_PROGBITS, ELF::SHF_ALLOC);
getParser().getStreamer().SwitchSection(ELFSection);
getParser().Lex(); // Eat EndOfStatement token.
return false;
}
/// parseSSectionDirective
/// ::= .sbss
/// ::= .sdata
bool MipsAsmParser::parseSSectionDirective(StringRef Section, unsigned Type) {
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
MCSection *ELFSection = getContext().getELFSection(
Section, Type, ELF::SHF_WRITE | ELF::SHF_ALLOC | ELF::SHF_MIPS_GPREL);
getParser().getStreamer().SwitchSection(ELFSection);
getParser().Lex(); // Eat EndOfStatement token.
return false;
}
/// parseDirectiveModule
/// ::= .module oddspreg
/// ::= .module nooddspreg
/// ::= .module fp=value
/// ::= .module softfloat
/// ::= .module hardfloat
/// ::= .module mt
/// ::= .module crc
/// ::= .module nocrc
/// ::= .module virt
/// ::= .module novirt
/// ::= .module ginv
/// ::= .module noginv
bool MipsAsmParser::parseDirectiveModule() {
MCAsmParser &Parser = getParser();
MCAsmLexer &Lexer = getLexer();
SMLoc L = Lexer.getLoc();
if (!getTargetStreamer().isModuleDirectiveAllowed()) {
// TODO: get a better message.
reportParseError(".module directive must appear before any code");
return false;
}
StringRef Option;
if (Parser.parseIdentifier(Option)) {
reportParseError("expected .module option identifier");
return false;
}
if (Option == "oddspreg") {
clearModuleFeatureBits(Mips::FeatureNoOddSPReg, "nooddspreg");
// Synchronize the abiflags information with the FeatureBits information we
// changed above.
getTargetStreamer().updateABIInfo(*this);
// If printing assembly, use the recently updated abiflags information.
// If generating ELF, don't do anything (the .MIPS.abiflags section gets
// emitted at the end).
getTargetStreamer().emitDirectiveModuleOddSPReg();
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
return false; // parseDirectiveModule has finished successfully.
} else if (Option == "nooddspreg") {
if (!isABI_O32()) {
return Error(L, "'.module nooddspreg' requires the O32 ABI");
}
setModuleFeatureBits(Mips::FeatureNoOddSPReg, "nooddspreg");
// Synchronize the abiflags information with the FeatureBits information we
// changed above.
getTargetStreamer().updateABIInfo(*this);
// If printing assembly, use the recently updated abiflags information.
// If generating ELF, don't do anything (the .MIPS.abiflags section gets
// emitted at the end).
getTargetStreamer().emitDirectiveModuleOddSPReg();
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
return false; // parseDirectiveModule has finished successfully.
} else if (Option == "fp") {
return parseDirectiveModuleFP();
} else if (Option == "softfloat") {
setModuleFeatureBits(Mips::FeatureSoftFloat, "soft-float");
// Synchronize the ABI Flags information with the FeatureBits information we
// updated above.
getTargetStreamer().updateABIInfo(*this);
// If printing assembly, use the recently updated ABI Flags information.
// If generating ELF, don't do anything (the .MIPS.abiflags section gets
// emitted later).
getTargetStreamer().emitDirectiveModuleSoftFloat();
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
return false; // parseDirectiveModule has finished successfully.
} else if (Option == "hardfloat") {
clearModuleFeatureBits(Mips::FeatureSoftFloat, "soft-float");
// Synchronize the ABI Flags information with the FeatureBits information we
// updated above.
getTargetStreamer().updateABIInfo(*this);
// If printing assembly, use the recently updated ABI Flags information.
// If generating ELF, don't do anything (the .MIPS.abiflags section gets
// emitted later).
getTargetStreamer().emitDirectiveModuleHardFloat();
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
return false; // parseDirectiveModule has finished successfully.
} else if (Option == "mt") {
setModuleFeatureBits(Mips::FeatureMT, "mt");
// Synchronize the ABI Flags information with the FeatureBits information we
// updated above.
getTargetStreamer().updateABIInfo(*this);
// If printing assembly, use the recently updated ABI Flags information.
// If generating ELF, don't do anything (the .MIPS.abiflags section gets
// emitted later).
getTargetStreamer().emitDirectiveModuleMT();
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
return false; // parseDirectiveModule has finished successfully.
} else if (Option == "crc") {
setModuleFeatureBits(Mips::FeatureCRC, "crc");
// Synchronize the ABI Flags information with the FeatureBits information we
// updated above.
getTargetStreamer().updateABIInfo(*this);
// If printing assembly, use the recently updated ABI Flags information.
// If generating ELF, don't do anything (the .MIPS.abiflags section gets
// emitted later).
getTargetStreamer().emitDirectiveModuleCRC();
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
return false; // parseDirectiveModule has finished successfully.
} else if (Option == "nocrc") {
clearModuleFeatureBits(Mips::FeatureCRC, "crc");
// Synchronize the ABI Flags information with the FeatureBits information we
// updated above.
getTargetStreamer().updateABIInfo(*this);
// If printing assembly, use the recently updated ABI Flags information.
// If generating ELF, don't do anything (the .MIPS.abiflags section gets
// emitted later).
getTargetStreamer().emitDirectiveModuleNoCRC();
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
return false; // parseDirectiveModule has finished successfully.
} else if (Option == "virt") {
setModuleFeatureBits(Mips::FeatureVirt, "virt");
// Synchronize the ABI Flags information with the FeatureBits information we
// updated above.
getTargetStreamer().updateABIInfo(*this);
// If printing assembly, use the recently updated ABI Flags information.
// If generating ELF, don't do anything (the .MIPS.abiflags section gets
// emitted later).
getTargetStreamer().emitDirectiveModuleVirt();
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
return false; // parseDirectiveModule has finished successfully.
} else if (Option == "novirt") {
clearModuleFeatureBits(Mips::FeatureVirt, "virt");
// Synchronize the ABI Flags information with the FeatureBits information we
// updated above.
getTargetStreamer().updateABIInfo(*this);
// If printing assembly, use the recently updated ABI Flags information.
// If generating ELF, don't do anything (the .MIPS.abiflags section gets
// emitted later).
getTargetStreamer().emitDirectiveModuleNoVirt();
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
return false; // parseDirectiveModule has finished successfully.
} else if (Option == "ginv") {
setModuleFeatureBits(Mips::FeatureGINV, "ginv");
// Synchronize the ABI Flags information with the FeatureBits information we
// updated above.
getTargetStreamer().updateABIInfo(*this);
// If printing assembly, use the recently updated ABI Flags information.
// If generating ELF, don't do anything (the .MIPS.abiflags section gets
// emitted later).
getTargetStreamer().emitDirectiveModuleGINV();
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
return false; // parseDirectiveModule has finished successfully.
} else if (Option == "noginv") {
clearModuleFeatureBits(Mips::FeatureGINV, "ginv");
// Synchronize the ABI Flags information with the FeatureBits information we
// updated above.
getTargetStreamer().updateABIInfo(*this);
// If printing assembly, use the recently updated ABI Flags information.
// If generating ELF, don't do anything (the .MIPS.abiflags section gets
// emitted later).
getTargetStreamer().emitDirectiveModuleNoGINV();
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
return false; // parseDirectiveModule has finished successfully.
} else {
return Error(L, "'" + Twine(Option) + "' is not a valid .module option.");
}
}
/// parseDirectiveModuleFP
/// ::= =32
/// ::= =xx
/// ::= =64
bool MipsAsmParser::parseDirectiveModuleFP() {
MCAsmParser &Parser = getParser();
MCAsmLexer &Lexer = getLexer();
if (Lexer.isNot(AsmToken::Equal)) {
reportParseError("unexpected token, expected equals sign '='");
return false;
}
Parser.Lex(); // Eat '=' token.
MipsABIFlagsSection::FpABIKind FpABI;
if (!parseFpABIValue(FpABI, ".module"))
return false;
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
// Synchronize the abiflags information with the FeatureBits information we
// changed above.
getTargetStreamer().updateABIInfo(*this);
// If printing assembly, use the recently updated abiflags information.
// If generating ELF, don't do anything (the .MIPS.abiflags section gets
// emitted at the end).
getTargetStreamer().emitDirectiveModuleFP();
Parser.Lex(); // Consume the EndOfStatement.
return false;
}
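/// parseFpABIValue
///  ::= xx
///  ::= 32
///  ::= 64
/// Parses the value of an "fp=" option for the given directive (e.g.
/// ".module") and updates the (module-level) feature bits to match.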
bool MipsAsmParser::parseFpABIValue(MipsABIFlagsSection::FpABIKind &FpABI,
StringRef Directive) {
MCAsmParser &Parser = getParser();
MCAsmLexer &Lexer = getLexer();
bool ModuleLevelOptions = Directive == ".module";
if (Lexer.is(AsmToken::Identifier)) {
StringRef Value = Parser.getTok().getString();
Parser.Lex();
if (Value != "xx") {
reportParseError("unsupported value, expected 'xx', '32' or '64'");
return false;
}
if (!isABI_O32()) {
reportParseError("'" + Directive + " fp=xx' requires the O32 ABI");
return false;
}
FpABI = MipsABIFlagsSection::FpABIKind::XX;
if (ModuleLevelOptions) {
setModuleFeatureBits(Mips::FeatureFPXX, "fpxx");
clearModuleFeatureBits(Mips::FeatureFP64Bit, "fp64");
} else {
setFeatureBits(Mips::FeatureFPXX, "fpxx");
clearFeatureBits(Mips::FeatureFP64Bit, "fp64");
}
return true;
}
if (Lexer.is(AsmToken::Integer)) {
unsigned Value = Parser.getTok().getIntVal();
Parser.Lex();
if (Value != 32 && Value != 64) {
reportParseError("unsupported value, expected 'xx', '32' or '64'");
return false;
}
if (Value == 32) {
if (!isABI_O32()) {
reportParseError("'" + Directive + " fp=32' requires the O32 ABI");
return false;
}
FpABI = MipsABIFlagsSection::FpABIKind::S32;
if (ModuleLevelOptions) {
clearModuleFeatureBits(Mips::FeatureFPXX, "fpxx");
clearModuleFeatureBits(Mips::FeatureFP64Bit, "fp64");
} else {
clearFeatureBits(Mips::FeatureFPXX, "fpxx");
clearFeatureBits(Mips::FeatureFP64Bit, "fp64");
}
} else {
FpABI = MipsABIFlagsSection::FpABIKind::S64;
if (ModuleLevelOptions) {
clearModuleFeatureBits(Mips::FeatureFPXX, "fpxx");
setModuleFeatureBits(Mips::FeatureFP64Bit, "fp64");
} else {
clearFeatureBits(Mips::FeatureFPXX, "fpxx");
setFeatureBits(Mips::FeatureFP64Bit, "fp64");
}
}
return true;
}
return false;
}
bool MipsAsmParser::ParseDirective(AsmToken DirectiveID) {
// This returns false if this function recognizes the directive
// regardless of whether it successfully handles it or reports an
// error. Otherwise it returns true to give the generic parser a
// chance at recognizing it.
MCAsmParser &Parser = getParser();
StringRef IDVal = DirectiveID.getString();
if (IDVal == ".cpload") {
parseDirectiveCpLoad(DirectiveID.getLoc());
return false;
}
if (IDVal == ".cprestore") {
parseDirectiveCpRestore(DirectiveID.getLoc());
return false;
}
if (IDVal == ".cplocal") {
parseDirectiveCpLocal(DirectiveID.getLoc());
return false;
}
if (IDVal == ".ent") {
StringRef SymbolName;
if (Parser.parseIdentifier(SymbolName)) {
reportParseError("expected identifier after .ent");
return false;
}
// There's an undocumented extension that allows an integer to
// follow the name of the procedure which AFAICS is ignored by GAS.
// Example: .ent foo,2
if (getLexer().isNot(AsmToken::EndOfStatement)) {
if (getLexer().isNot(AsmToken::Comma)) {
// Even though we accept this undocumented extension for compatibility
// reasons, the additional integer argument does not actually change
// the behaviour of the '.ent' directive, so we would like to discourage
// its use. We do this by not referring to the extended version in
// error messages which are not directly related to its use.
reportParseError("unexpected token, expected end of statement");
return false;
}
Parser.Lex(); // Eat the comma.
const MCExpr *DummyNumber;
int64_t DummyNumberVal;
// If the user was explicitly trying to use the extended version,
// we still give helpful extension-related error messages.
if (Parser.parseExpression(DummyNumber)) {
reportParseError("expected number after comma");
return false;
}
if (!DummyNumber->evaluateAsAbsolute(DummyNumberVal)) {
reportParseError("expected an absolute expression after comma");
return false;
}
}
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
MCSymbol *Sym = getContext().getOrCreateSymbol(SymbolName);
getTargetStreamer().emitDirectiveEnt(*Sym);
CurrentFn = Sym;
IsCpRestoreSet = false;
return false;
}
if (IDVal == ".end") {
StringRef SymbolName;
if (Parser.parseIdentifier(SymbolName)) {
reportParseError("expected identifier after .end");
return false;
}
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
if (CurrentFn == nullptr) {
reportParseError(".end used without .ent");
return false;
}
if ((SymbolName != CurrentFn->getName())) {
reportParseError(".end symbol does not match .ent symbol");
return false;
}
getTargetStreamer().emitDirectiveEnd(SymbolName);
CurrentFn = nullptr;
IsCpRestoreSet = false;
return false;
}
if (IDVal == ".frame") {
// .frame $stack_reg, frame_size_in_bytes, $return_reg
SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> TmpReg;
OperandMatchResultTy ResTy = parseAnyRegister(TmpReg);
if (ResTy == MatchOperand_NoMatch || ResTy == MatchOperand_ParseFail) {
reportParseError("expected stack register");
return false;
}
MipsOperand &StackRegOpnd = static_cast<MipsOperand &>(*TmpReg[0]);
if (!StackRegOpnd.isGPRAsmReg()) {
reportParseError(StackRegOpnd.getStartLoc(),
"expected general purpose register");
return false;
}
unsigned StackReg = StackRegOpnd.getGPR32Reg();
if (Parser.getTok().is(AsmToken::Comma))
Parser.Lex();
else {
reportParseError("unexpected token, expected comma");
return false;
}
// Parse the frame size.
const MCExpr *FrameSize;
int64_t FrameSizeVal;
if (Parser.parseExpression(FrameSize)) {
reportParseError("expected frame size value");
return false;
}
if (!FrameSize->evaluateAsAbsolute(FrameSizeVal)) {
reportParseError("frame size not an absolute expression");
return false;
}
if (Parser.getTok().is(AsmToken::Comma))
Parser.Lex();
else {
reportParseError("unexpected token, expected comma");
return false;
}
// Parse the return register.
TmpReg.clear();
ResTy = parseAnyRegister(TmpReg);
if (ResTy == MatchOperand_NoMatch || ResTy == MatchOperand_ParseFail) {
reportParseError("expected return register");
return false;
}
MipsOperand &ReturnRegOpnd = static_cast<MipsOperand &>(*TmpReg[0]);
if (!ReturnRegOpnd.isGPRAsmReg()) {
reportParseError(ReturnRegOpnd.getStartLoc(),
"expected general purpose register");
return false;
}
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
getTargetStreamer().emitFrame(StackReg, FrameSizeVal,
ReturnRegOpnd.getGPR32Reg());
IsCpRestoreSet = false;
return false;
}
if (IDVal == ".set") {
parseDirectiveSet();
return false;
}
if (IDVal == ".mask" || IDVal == ".fmask") {
// .mask bitmask, frame_offset
// bitmask: One bit for each register used.
// frame_offset: Offset from Canonical Frame Address ($sp on entry) where
// first register is expected to be saved.
// Examples:
// .mask 0x80000000, -4
// .fmask 0x80000000, -4
//
// Parse the bitmask
const MCExpr *BitMask;
int64_t BitMaskVal;
if (Parser.parseExpression(BitMask)) {
reportParseError("expected bitmask value");
return false;
}
if (!BitMask->evaluateAsAbsolute(BitMaskVal)) {
reportParseError("bitmask not an absolute expression");
return false;
}
if (Parser.getTok().is(AsmToken::Comma))
Parser.Lex();
else {
reportParseError("unexpected token, expected comma");
return false;
}
// Parse the frame_offset
const MCExpr *FrameOffset;
int64_t FrameOffsetVal;
if (Parser.parseExpression(FrameOffset)) {
reportParseError("expected frame offset value");
return false;
}
if (!FrameOffset->evaluateAsAbsolute(FrameOffsetVal)) {
reportParseError("frame offset not an absolute expression");
return false;
}
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
if (IDVal == ".mask")
getTargetStreamer().emitMask(BitMaskVal, FrameOffsetVal);
else
getTargetStreamer().emitFMask(BitMaskVal, FrameOffsetVal);
return false;
}
if (IDVal == ".nan")
return parseDirectiveNaN();
if (IDVal == ".gpword") {
parseDirectiveGpWord();
return false;
}
if (IDVal == ".gpdword") {
parseDirectiveGpDWord();
return false;
}
if (IDVal == ".dtprelword") {
parseDirectiveDtpRelWord();
return false;
}
if (IDVal == ".dtpreldword") {
parseDirectiveDtpRelDWord();
return false;
}
if (IDVal == ".tprelword") {
parseDirectiveTpRelWord();
return false;
}
if (IDVal == ".tpreldword") {
parseDirectiveTpRelDWord();
return false;
}
if (IDVal == ".option") {
parseDirectiveOption();
return false;
}
if (IDVal == ".abicalls") {
getTargetStreamer().emitDirectiveAbiCalls();
if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
Error(Parser.getTok().getLoc(),
"unexpected token, expected end of statement");
}
return false;
}
if (IDVal == ".cpsetup") {
parseDirectiveCPSetup();
return false;
}
if (IDVal == ".cpreturn") {
parseDirectiveCPReturn();
return false;
}
if (IDVal == ".module") {
parseDirectiveModule();
return false;
}
if (IDVal == ".llvm_internal_mips_reallow_module_directive") {
parseInternalDirectiveReallowModule();
return false;
}
if (IDVal == ".insn") {
parseInsnDirective();
return false;
}
if (IDVal == ".rdata") {
parseRSectionDirective(".rodata");
return false;
}
if (IDVal == ".sbss") {
parseSSectionDirective(IDVal, ELF::SHT_NOBITS);
return false;
}
if (IDVal == ".sdata") {
parseSSectionDirective(IDVal, ELF::SHT_PROGBITS);
return false;
}
return true;
}
bool MipsAsmParser::parseInternalDirectiveReallowModule() {
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
getTargetStreamer().reallowModuleDirective();
getParser().Lex(); // Eat EndOfStatement token.
return false;
}
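// Register the MIPS asm parser for the 32- and 64-bit, big- and little-endian
// MIPS targets.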
extern "C" void LLVMInitializeMipsAsmParser() {
RegisterMCAsmParser<MipsAsmParser> X(getTheMipsTarget());
RegisterMCAsmParser<MipsAsmParser> Y(getTheMipselTarget());
RegisterMCAsmParser<MipsAsmParser> A(getTheMips64Target());
RegisterMCAsmParser<MipsAsmParser> B(getTheMips64elTarget());
}
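// Pull in the TableGen-generated register matcher, matcher implementation and
// mnemonic spell checker.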
#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#define GET_MNEMONIC_SPELL_CHECKER
#include "MipsGenAsmMatcher.inc"
bool MipsAsmParser::mnemonicIsValid(StringRef Mnemonic, unsigned VariantID) {
// Find the appropriate table for this asm variant.
const MatchEntry *Start, *End;
switch (VariantID) {
default: llvm_unreachable("invalid variant!");
case 0: Start = std::begin(MatchTable0); End = std::end(MatchTable0); break;
}
// Search the table.
auto MnemonicRange = std::equal_range(Start, End, Mnemonic, LessOpcode());
return MnemonicRange.first != MnemonicRange.second;
}
Index: stable/12/contrib/llvm-project/llvm/lib/Target/PowerPC/P9InstrResources.td
===================================================================
--- stable/12/contrib/llvm-project/llvm/lib/Target/PowerPC/P9InstrResources.td (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/lib/Target/PowerPC/P9InstrResources.td (revision 356465)
@@ -1,1413 +1,1427 @@
//===- P9InstrResources.td - P9 Instruction Resource Defs -*- tablegen -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the resources required by P9 instructions. This is part of
// the P9 processor model used for instruction scheduling. This file should
// contain all the instructions that may be used on Power 9. This is not
// just instructions that are new on Power 9 but also instructions that were
// available on earlier architectures and are still used in Power 9.
//
// The makeup of the P9 CPU is modeled as follows:
// - Each CPU is made up of two superslices.
// - Each superslice is made up of two slices. Therefore, there are 4 slices
// for each CPU.
// - Up to 6 instructions can be dispatched to each CPU. Three per superslice.
// - Each CPU has:
// - One CY (Crypto) unit P9_CY_*
// - One DFU (Decimal Floating Point and Quad Precision) unit P9_DFU_*
// - Two PM (Permute) units. One on each superslice. P9_PM_*
// - Two DIV (Fixed Point Divide) units. One on each superslice. P9_DIV_*
// - Four ALU (Fixed Point Arithmetic) units. One on each slice. P9_ALU_*
// - Four DP (Floating Point) units. One on each slice. P9_DP_*
// This also includes fixed point multiply add.
// - Four AGEN (Address Generation) units. One for each slice. P9_AGEN_*
// - Four Load/Store Queues. P9_LS_*
// - Each set of instructions will require a number of these resources.
//===----------------------------------------------------------------------===//
// Two cycle ALU vector operation that uses an entire superslice.
// Uses both ALU units (the even ALUE and odd ALUO units), two pipelines
// (EXECE, EXECO) and 1 dispatch (DISP) to the given superslice.
def : InstRW<[P9_ALUE_2C, P9_ALUO_2C, IP_EXECE_1C, IP_EXECO_1C, DISP_1C],
(instrs
(instregex "VADDU(B|H|W|D)M$"),
(instregex "VAND(C)?$"),
(instregex "VEXTS(B|H|W)2(D|W)(s)?$"),
(instregex "V_SET0(B|H)?$"),
(instregex "VS(R|L)(B|H|W|D)$"),
(instregex "VSUBU(B|H|W|D)M$"),
(instregex "VPOPCNT(B|H)$"),
(instregex "VRL(B|H|W|D)$"),
(instregex "VSRA(B|H|W|D)$"),
(instregex "XV(N)?ABS(D|S)P$"),
(instregex "XVCPSGN(D|S)P$"),
(instregex "XV(I|X)EXP(D|S)P$"),
(instregex "VRL(D|W)(MI|NM)$"),
(instregex "VMRG(E|O)W$"),
MTVSRDD,
VEQV,
VNAND,
VNEGD,
VNEGW,
VNOR,
VOR,
VORC,
VSEL,
VXOR,
XVNEGDP,
XVNEGSP,
XXLAND,
XXLANDC,
XXLEQV,
XXLNAND,
XXLNOR,
XXLOR,
XXLORf,
XXLORC,
XXLXOR,
XXLXORdpz,
XXLXORspz,
XXLXORz,
XXSEL,
XSABSQP,
XSCPSGNQP,
XSIEXPQP,
XSNABSQP,
XSNEGQP,
XSXEXPQP
)>;
// Restricted Dispatch ALU operation for 3 cycles. The operation runs on a
// single slice. However, since it is Restricted, it requires all 3 dispatches
// (DISP) for that superslice.
def : InstRW<[P9_ALU_3C, IP_EXEC_1C, DISP_3SLOTS_1C],
(instrs
(instregex "TABORT(D|W)C(I)?$"),
(instregex "MTFSB(0|1)$"),
(instregex "MFFSC(D)?RN(I)?$"),
(instregex "CMPRB(8)?$"),
(instregex "TD(I)?$"),
(instregex "TW(I)?$"),
(instregex "FCMPU(S|D)$"),
(instregex "XSTSTDC(S|D)P$"),
FTDIV,
FTSQRT,
CMPEQB
)>;
// Standard Dispatch ALU operation for 3 cycles. Only one slice used.
def : InstRW<[P9_ALU_3C, IP_EXEC_1C, DISP_1C],
(instrs
(instregex "XSMAX(C|J)?DP$"),
(instregex "XSMIN(C|J)?DP$"),
(instregex "XSCMP(EQ|EXP|GE|GT|O|U)DP$"),
(instregex "CNT(L|T)Z(D|W)(8)?(o)?$"),
(instregex "POPCNT(D|W)$"),
(instregex "CMPB(8)?$"),
(instregex "SETB(8)?$"),
XSTDIVDP,
XSTSQRTDP,
XSXSIGDP,
XSCVSPDPN,
BPERMD
)>;
// Standard Dispatch ALU operation for 2 cycles. Only one slice used.
def : InstRW<[P9_ALU_2C, IP_EXEC_1C, DISP_1C],
(instrs
(instregex "S(L|R)D$"),
(instregex "SRAD(I)?$"),
(instregex "EXTSWSLI_32_64$"),
(instregex "MFV(S)?RD$"),
(instregex "MTVSRD$"),
(instregex "MTVSRW(A|Z)$"),
(instregex "CMP(WI|LWI|W|LW)(8)?$"),
(instregex "CMP(L)?D(I)?$"),
- (instregex "SUBF(I)?C(8)?$"),
+ (instregex "SUBF(I)?C(8)?(O)?$"),
(instregex "ANDI(S)?o(8)?$"),
- (instregex "ADDC(8)?$"),
+ (instregex "ADDC(8)?(O)?$"),
(instregex "ADDIC(8)?(o)?$"),
- (instregex "ADD(8|4)(o)?$"),
- (instregex "ADD(E|ME|ZE)(8)?(o)?$"),
- (instregex "SUBF(E|ME|ZE)?(8)?(o)?$"),
- (instregex "NEG(8)?(o)?$"),
+ (instregex "ADD(8|4)(O)?(o)?$"),
+ (instregex "ADD(E|ME|ZE)(8)?(O)?(o)?$"),
+ (instregex "SUBF(E|ME|ZE)?(8)?(O)?(o)?$"),
+ (instregex "NEG(8)?(O)?(o)?$"),
(instregex "POPCNTB$"),
(instregex "ADD(I|IS)?(8)?$"),
(instregex "LI(S)?(8)?$"),
(instregex "(X)?OR(I|IS)?(8)?(o)?$"),
(instregex "NAND(8)?(o)?$"),
(instregex "AND(C)?(8)?(o)?$"),
(instregex "NOR(8)?(o)?$"),
(instregex "OR(C)?(8)?(o)?$"),
(instregex "EQV(8)?(o)?$"),
(instregex "EXTS(B|H|W)(8)?(_32)?(_64)?(o)?$"),
(instregex "ADD(4|8)(TLS)?(_)?$"),
- (instregex "NEG(8)?$"),
+ (instregex "NEG(8)?(O)?$"),
(instregex "ADDI(S)?toc(HA|L)$"),
COPY,
MCRF,
MCRXRX,
XSNABSDP,
XSXEXPDP,
XSABSDP,
XSNEGDP,
XSCPSGNDP,
MFVSRWZ,
EXTSWSLI,
SRADI_32,
RLDIC,
RFEBB,
LA,
TBEGIN,
TRECHKPT,
NOP,
WAIT
)>;
// Restricted Dispatch ALU operation for 2 cycles. The operation runs on a
// single slice. However, since it is Restricted, it requires all 3 dispatches
// (DISP) for that superslice.
def : InstRW<[P9_ALU_2C, IP_EXEC_1C, DISP_3SLOTS_1C],
(instrs
(instregex "RLDC(L|R)$"),
(instregex "RLWIMI(8)?$"),
(instregex "RLDIC(L|R)(_32)?(_64)?$"),
(instregex "M(F|T)OCRF(8)?$"),
(instregex "CR(6)?(UN)?SET$"),
(instregex "CR(N)?(OR|AND)(C)?$"),
(instregex "S(L|R)W(8)?$"),
(instregex "RLW(INM|NM)(8)?$"),
(instregex "F(N)?ABS(D|S)$"),
(instregex "FNEG(D|S)$"),
(instregex "FCPSGN(D|S)$"),
(instregex "SRAW(I)?$"),
(instregex "ISEL(8)?$"),
RLDIMI,
XSIEXPDP,
FMR,
CREQV,
CRXOR,
TRECLAIM,
TSR,
TABORT
)>;
// Three cycle ALU vector operation that uses an entire superslice.
// Uses both ALU units (the even ALUE and odd ALUO units), two pipelines
// (EXECE, EXECO) and 1 dispatch (DISP) to the given superslice.
def : InstRW<[P9_ALUE_3C, P9_ALUO_3C, IP_EXECE_1C, IP_EXECO_1C, DISP_1C],
(instrs
(instregex "M(T|F)VSCR$"),
(instregex "VCMPNEZ(B|H|W)$"),
(instregex "VCMPEQU(B|H|W|D)$"),
(instregex "VCMPNE(B|H|W)$"),
(instregex "VABSDU(B|H|W)$"),
(instregex "VADDU(B|H|W)S$"),
(instregex "VAVG(S|U)(B|H|W)$"),
(instregex "VCMP(EQ|GE|GT)FP(o)?$"),
(instregex "VCMPBFP(o)?$"),
(instregex "VC(L|T)Z(B|H|W|D)$"),
(instregex "VADDS(B|H|W)S$"),
(instregex "V(MIN|MAX)FP$"),
(instregex "V(MIN|MAX)(S|U)(B|H|W|D)$"),
VBPERMD,
VADDCUW,
VPOPCNTW,
VPOPCNTD,
VPRTYBD,
VPRTYBW,
VSHASIGMAD,
VSHASIGMAW,
VSUBSBS,
VSUBSHS,
VSUBSWS,
VSUBUBS,
VSUBUHS,
VSUBUWS,
VSUBCUW,
VCMPGTSB,
VCMPGTSBo,
VCMPGTSD,
VCMPGTSDo,
VCMPGTSH,
VCMPGTSHo,
VCMPGTSW,
VCMPGTSWo,
VCMPGTUB,
VCMPGTUBo,
VCMPGTUD,
VCMPGTUDo,
VCMPGTUH,
VCMPGTUHo,
VCMPGTUW,
VCMPGTUWo,
VCMPNEBo,
VCMPNEHo,
VCMPNEWo,
VCMPNEZBo,
VCMPNEZHo,
VCMPNEZWo,
VCMPEQUBo,
VCMPEQUDo,
VCMPEQUHo,
VCMPEQUWo,
XVCMPEQDP,
XVCMPEQDPo,
XVCMPEQSP,
XVCMPEQSPo,
XVCMPGEDP,
XVCMPGEDPo,
XVCMPGESP,
XVCMPGESPo,
XVCMPGTDP,
XVCMPGTDPo,
XVCMPGTSP,
XVCMPGTSPo,
XVMAXDP,
XVMAXSP,
XVMINDP,
XVMINSP,
XVTDIVDP,
XVTDIVSP,
XVTSQRTDP,
XVTSQRTSP,
XVTSTDCDP,
XVTSTDCSP,
XVXSIGDP,
XVXSIGSP
)>;
// 7 cycle DP vector operation that uses an entire superslice.
// Uses both DP units (the even DPE and odd DPO units), two pipelines (EXECE,
// EXECO) and 1 dispatch (DISP) to the given superslice.
def : InstRW<[P9_DPE_7C, P9_DPO_7C, IP_EXECE_1C, IP_EXECO_1C, DISP_1C],
(instrs
VADDFP,
VCTSXS,
VCTSXS_0,
VCTUXS,
VCTUXS_0,
VEXPTEFP,
VLOGEFP,
VMADDFP,
VMHADDSHS,
VNMSUBFP,
VREFP,
VRFIM,
VRFIN,
VRFIP,
VRFIZ,
VRSQRTEFP,
VSUBFP,
XVADDDP,
XVADDSP,
XVCVDPSP,
XVCVDPSXDS,
XVCVDPSXWS,
XVCVDPUXDS,
XVCVDPUXWS,
XVCVHPSP,
XVCVSPDP,
XVCVSPHP,
XVCVSPSXDS,
XVCVSPSXWS,
XVCVSPUXDS,
XVCVSPUXWS,
XVCVSXDDP,
XVCVSXDSP,
XVCVSXWDP,
XVCVSXWSP,
XVCVUXDDP,
XVCVUXDSP,
XVCVUXWDP,
XVCVUXWSP,
XVMADDADP,
XVMADDASP,
XVMADDMDP,
XVMADDMSP,
XVMSUBADP,
XVMSUBASP,
XVMSUBMDP,
XVMSUBMSP,
XVMULDP,
XVMULSP,
XVNMADDADP,
XVNMADDASP,
XVNMADDMDP,
XVNMADDMSP,
XVNMSUBADP,
XVNMSUBASP,
XVNMSUBMDP,
XVNMSUBMSP,
XVRDPI,
XVRDPIC,
XVRDPIM,
XVRDPIP,
XVRDPIZ,
XVREDP,
XVRESP,
XVRSPI,
XVRSPIC,
XVRSPIM,
XVRSPIP,
XVRSPIZ,
XVRSQRTEDP,
XVRSQRTESP,
XVSUBDP,
XVSUBSP,
VCFSX,
VCFSX_0,
VCFUX,
VCFUX_0,
VMHRADDSHS,
VMLADDUHM,
VMSUMMBM,
VMSUMSHM,
VMSUMSHS,
VMSUMUBM,
VMSUMUHM,
VMSUMUHS,
VMULESB,
VMULESH,
VMULESW,
VMULEUB,
VMULEUH,
VMULEUW,
VMULOSB,
VMULOSH,
VMULOSW,
VMULOUB,
VMULOUH,
VMULOUW,
VMULUWM,
VSUM2SWS,
VSUM4SBS,
VSUM4SHS,
VSUM4UBS,
VSUMSWS
)>;
// 5 cycle Restricted DP operation. One DP unit, one EXEC pipeline and all three
// dispatch units for the superslice.
def : InstRW<[P9_DP_5C, IP_EXEC_1C, DISP_3SLOTS_1C],
(instrs
(instregex "MADD(HD|HDU|LD|LD8)$"),
- (instregex "MUL(HD|HW|LD|LI|LI8|LW)(U)?$")
+ (instregex "MUL(HD|HW|LD|LI|LI8|LW)(U)?(O)?$")
)>;
// 7 cycle Restricted DP operation. One DP unit, one EXEC pipeline and all three
// dispatch units for the superslice.
def : InstRW<[P9_DP_7C, IP_EXEC_1C, DISP_3SLOTS_1C],
(instrs
FRSP,
(instregex "FRI(N|P|Z|M)(D|S)$"),
(instregex "FRE(S)?$"),
(instregex "FADD(S)?$"),
(instregex "FMSUB(S)?$"),
(instregex "FMADD(S)?$"),
(instregex "FSUB(S)?$"),
(instregex "FCFID(U)?(S)?$"),
(instregex "FCTID(U)?(Z)?$"),
(instregex "FCTIW(U)?(Z)?$"),
(instregex "FRSQRTE(S)?$"),
FNMADDS,
FNMADD,
FNMSUBS,
FNMSUB,
FSELD,
FSELS,
FMULS,
FMUL,
XSMADDADP,
XSMADDASP,
XSMADDMDP,
XSMADDMSP,
XSMSUBADP,
XSMSUBASP,
XSMSUBMDP,
XSMSUBMSP,
XSMULDP,
XSMULSP,
XSNMADDADP,
XSNMADDASP,
XSNMADDMDP,
XSNMADDMSP,
XSNMSUBADP,
XSNMSUBASP,
XSNMSUBMDP,
XSNMSUBMSP
)>;
// 7 cycle Restricted DP operation and one 3 cycle ALU operation.
// These operations can be done in parallel. The DP is restricted so we need a
// full 4 dispatches.
def : InstRW<[P9_DP_7C, P9_ALU_3C, IP_EXEC_1C, IP_EXEC_1C,
DISP_3SLOTS_1C, DISP_1C],
(instrs
(instregex "FSEL(D|S)o$")
)>;
// 5 Cycle Restricted DP operation and one 2 cycle ALU operation.
def : InstRW<[P9_DPOpAndALUOp_7C, IP_EXEC_1C, IP_EXEC_1C,
DISP_3SLOTS_1C, DISP_1C],
(instrs
- (instregex "MUL(H|L)(D|W)(U)?o$")
+ (instregex "MUL(H|L)(D|W)(U)?(O)?o$")
)>;
// 7 cycle Restricted DP operation and one 3 cycle ALU operation.
// These operations must be done sequentially. The DP is restricted so we need a
// full 4 dispatches.
def : InstRW<[P9_DPOpAndALU2Op_10C, IP_EXEC_1C, IP_EXEC_1C,
DISP_3SLOTS_1C, DISP_1C],
(instrs
(instregex "FRI(N|P|Z|M)(D|S)o$"),
(instregex "FRE(S)?o$"),
(instregex "FADD(S)?o$"),
(instregex "FSUB(S)?o$"),
(instregex "F(N)?MSUB(S)?o$"),
(instregex "F(N)?MADD(S)?o$"),
(instregex "FCFID(U)?(S)?o$"),
(instregex "FCTID(U)?(Z)?o$"),
(instregex "FCTIW(U)?(Z)?o$"),
(instregex "FMUL(S)?o$"),
(instregex "FRSQRTE(S)?o$"),
FRSPo
)>;
// 7 cycle DP operation. One DP unit, one EXEC pipeline and 1 dispatch units.
def : InstRW<[P9_DP_7C, IP_EXEC_1C, DISP_1C],
(instrs
XSADDDP,
XSADDSP,
XSCVDPHP,
XSCVDPSP,
XSCVDPSXDS,
XSCVDPSXDSs,
XSCVDPSXWS,
XSCVDPUXDS,
XSCVDPUXDSs,
XSCVDPUXWS,
XSCVDPSXWSs,
XSCVDPUXWSs,
XSCVHPDP,
XSCVSPDP,
XSCVSXDDP,
XSCVSXDSP,
XSCVUXDDP,
XSCVUXDSP,
XSRDPI,
XSRDPIC,
XSRDPIM,
XSRDPIP,
XSRDPIZ,
XSREDP,
XSRESP,
XSRSQRTEDP,
XSRSQRTESP,
XSSUBDP,
XSSUBSP,
XSCVDPSPN,
XSRSP
)>;
// Three Cycle PM operation. Only one PM unit per superslice so we use the whole
// superslice. That includes both exec pipelines (EXECO, EXECE) and one
// dispatch.
def : InstRW<[P9_PM_3C, IP_EXECO_1C, IP_EXECE_1C, DISP_1C],
(instrs
(instregex "LVS(L|R)$"),
(instregex "VSPLTIS(W|H|B)$"),
(instregex "VSPLT(W|H|B)(s)?$"),
(instregex "V_SETALLONES(B|H)?$"),
(instregex "VEXTRACTU(B|H|W)$"),
(instregex "VINSERT(B|H|W|D)$"),
MFVSRLD,
MTVSRWS,
VBPERMQ,
VCLZLSBB,
VCTZLSBB,
VEXTRACTD,
VEXTUBLX,
VEXTUBRX,
VEXTUHLX,
VEXTUHRX,
VEXTUWLX,
VEXTUWRX,
VGBBD,
VMRGHB,
VMRGHH,
VMRGHW,
VMRGLB,
VMRGLH,
VMRGLW,
VPERM,
VPERMR,
VPERMXOR,
VPKPX,
VPKSDSS,
VPKSDUS,
VPKSHSS,
VPKSHUS,
VPKSWSS,
VPKSWUS,
VPKUDUM,
VPKUDUS,
VPKUHUM,
VPKUHUS,
VPKUWUM,
VPKUWUS,
VPRTYBQ,
VSL,
VSLDOI,
VSLO,
VSLV,
VSR,
VSRO,
VSRV,
VUPKHPX,
VUPKHSB,
VUPKHSH,
VUPKHSW,
VUPKLPX,
VUPKLSB,
VUPKLSH,
VUPKLSW,
XXBRD,
XXBRH,
XXBRQ,
XXBRW,
XXEXTRACTUW,
XXINSERTW,
XXMRGHW,
XXMRGLW,
XXPERM,
XXPERMR,
XXSLDWI,
XXSLDWIs,
XXSPLTIB,
XXSPLTW,
XXSPLTWs,
XXPERMDI,
XXPERMDIs,
VADDCUQ,
VADDECUQ,
VADDEUQM,
VADDUQM,
VMUL10CUQ,
VMUL10ECUQ,
VMUL10EUQ,
VMUL10UQ,
VSUBCUQ,
VSUBECUQ,
VSUBEUQM,
VSUBUQM,
XSCMPEXPQP,
XSCMPOQP,
XSCMPUQP,
XSTSTDCQP,
XSXSIGQP,
BCDCFNo,
BCDCFZo,
BCDCPSGNo,
BCDCTNo,
BCDCTZo,
BCDSETSGNo,
BCDSo,
BCDTRUNCo,
BCDUSo,
BCDUTRUNCo
)>;
// 12 Cycle DFU operation. Only one DFU unit per CPU so we use a whole
// superslice. That includes both exec pipelines (EXECO, EXECE) and one
// dispatch.
def : InstRW<[P9_DFU_12C, IP_EXECE_1C, IP_EXECO_1C, DISP_1C],
(instrs
BCDSRo,
XSADDQP,
XSADDQPO,
XSCVDPQP,
XSCVQPDP,
XSCVQPDPO,
XSCVQPSDZ,
XSCVQPSWZ,
XSCVQPUDZ,
XSCVQPUWZ,
XSCVSDQP,
XSCVUDQP,
XSRQPI,
XSRQPIX,
XSRQPXP,
XSSUBQP,
XSSUBQPO
)>;
// 23 Cycle DFU operation. Only one DFU unit per CPU so we use a whole
// superslice. That includes both exec pipelines (EXECO, EXECE) and one
// dispatch.
def : InstRW<[P9_DFU_23C, IP_EXECE_1C, IP_EXECO_1C, DISP_1C],
(instrs
BCDCTSQo
)>;
// 24 Cycle DFU operation. Only one DFU unit per CPU so we use a whole
// superslice. That includes both exec pipelines (EXECO, EXECE) and one
// dispatch.
def : InstRW<[P9_DFU_24C, IP_EXECE_1C, IP_EXECO_1C, DISP_1C],
(instrs
XSMADDQP,
XSMADDQPO,
XSMSUBQP,
XSMSUBQPO,
XSMULQP,
XSMULQPO,
XSNMADDQP,
XSNMADDQPO,
XSNMSUBQP,
XSNMSUBQPO
)>;
// 37 Cycle DFU operation. Only one DFU unit per CPU so we use a whole
// superslice. That includes both exec pipelines (EXECO, EXECE) and one
// dispatch.
def : InstRW<[P9_DFU_37C, IP_EXECE_1C, IP_EXECO_1C, DISP_1C],
(instrs
BCDCFSQo
)>;
// 58 Cycle DFU operation. Only one DFU unit per CPU so we use a whole
// superslice. That includes both exec pipelines (EXECO, EXECE) and one
// dispatch.
def : InstRW<[P9_DFU_58C, IP_EXECE_1C, IP_EXECO_1C, DISP_1C],
(instrs
XSDIVQP,
XSDIVQPO
)>;
// 76 Cycle DFU operation. Only one DFU unit per CPU so we use a whole
// superslice. That includes both exec pipelines (EXECO, EXECE) and one
// dispatch.
def : InstRW<[P9_DFU_76C, IP_EXECE_1C, IP_EXECO_1C, DISP_1C],
(instrs
XSSQRTQP,
XSSQRTQPO
)>;
// 6 Cycle Load uses a single slice.
def : InstRW<[P9_LS_6C, IP_AGEN_1C, DISP_1C],
(instrs
(instregex "LXVL(L)?")
)>;
// 5 Cycle Load uses a single slice.
def : InstRW<[P9_LS_5C, IP_AGEN_1C, DISP_1C],
(instrs
(instregex "LVE(B|H|W)X$"),
(instregex "LVX(L)?"),
(instregex "LXSI(B|H)ZX$"),
LXSDX,
LXVB16X,
LXVD2X,
LXVWSX,
LXSIWZX,
LXV,
LXVX,
LXSD,
DFLOADf64,
XFLOADf64,
LIWZX
)>;
// 4 Cycle Load uses a single slice.
def : InstRW<[P9_LS_4C, IP_AGEN_1C, DISP_1C],
(instrs
(instregex "DCB(F|T|ST)(EP)?$"),
(instregex "DCBZ(L)?(EP)?$"),
(instregex "DCBTST(EP)?$"),
(instregex "CP_COPY(8)?$"),
(instregex "CP_PASTE(8)?$"),
(instregex "ICBI(EP)?$"),
(instregex "ICBT(LS)?$"),
(instregex "LBARX(L)?$"),
(instregex "LBZ(CIX|8|X|X8|XTLS|XTLS_32)?(_)?$"),
(instregex "LD(ARX|ARXL|BRX|CIX|X|XTLS)?(_)?$"),
(instregex "LH(A|B)RX(L)?(8)?$"),
(instregex "LHZ(8|CIX|X|X8|XTLS|XTLS_32)?(_)?$"),
(instregex "LWARX(L)?$"),
(instregex "LWBRX(8)?$"),
(instregex "LWZ(8|CIX|X|X8|XTLS|XTLS_32)?(_)?$"),
CP_ABORT,
DARN,
EnforceIEIO,
ISYNC,
MSGSYNC,
TLBSYNC,
SYNC,
LMW,
LSWI
)>;
// 4 Cycle Restricted load uses a single slice but the dispatch for the whole
// superslice.
def : InstRW<[P9_LS_4C, IP_AGEN_1C, DISP_3SLOTS_1C],
(instrs
LFIWZX,
LFDX,
LFD
)>;
// Cracked Load Instructions.
// Load instructions that can be done in parallel.
def : InstRW<[P9_LS_4C, P9_LS_4C, IP_AGEN_1C, IP_AGEN_1C,
DISP_PAIR_1C],
(instrs
SLBIA,
SLBIE,
SLBMFEE,
SLBMFEV,
SLBMTE,
TLBIEL
)>;
// Cracked Load Instruction.
// Requires Load and ALU pieces totaling 6 cycles. The Load and ALU
// operations can be run in parallel.
def : InstRW<[P9_LS_4C, P9_ALU_2C, IP_EXEC_1C, IP_AGEN_1C,
DISP_PAIR_1C, DISP_PAIR_1C],
(instrs
(instregex "L(W|H)ZU(X)?(8)?$")
)>;
// Cracked TEND Instruction.
// Requires Load and ALU pieces totaling 6 cycles. The Load and ALU
// operations can be run in parallel.
def : InstRW<[P9_LS_4C, P9_ALU_2C, IP_EXEC_1C, IP_AGEN_1C,
DISP_1C, DISP_1C],
(instrs
TEND
)>;
// Cracked Store Instruction
// Consecutive Store and ALU instructions. The store is restricted and requires
// three dispatches.
def : InstRW<[P9_StoreAndALUOp_3C, IP_EXEC_1C, IP_EXEC_1C, IP_AGEN_1C,
DISP_3SLOTS_1C, DISP_1C],
(instrs
(instregex "ST(B|H|W|D)CX$")
)>;
// Cracked Load Instruction.
// Two consecutive load operations for a total of 8 cycles.
def : InstRW<[P9_LoadAndLoadOp_8C, IP_AGEN_1C, IP_AGEN_1C,
DISP_1C, DISP_1C],
(instrs
LDMX
)>;
// Cracked Load instruction.
// Requires consecutive Load and ALU pieces totaling 6 cycles. The Load and ALU
// operations cannot be done at the same time and so their latencies are added.
def : InstRW<[P9_LoadAndALUOp_6C, IP_EXEC_1C, IP_AGEN_1C,
DISP_1C, DISP_1C],
(instrs
(instregex "LHA(X)?(8)?$"),
(instregex "CP_PASTE(8)?o$"),
(instregex "LWA(X)?(_32)?$"),
TCHECK
)>;
// Cracked Restricted Load instruction.
// Requires consecutive Load and ALU pieces totaling 6 cycles. The Load and ALU
// operations cannot be done at the same time and so their latencies are added.
// Full 6 dispatches are required as this is both cracked and restricted.
def : InstRW<[P9_LoadAndALUOp_6C, IP_EXEC_1C, IP_AGEN_1C,
DISP_3SLOTS_1C, DISP_3SLOTS_1C],
(instrs
LFIWAX
)>;
// Cracked Load instruction.
// Requires consecutive Load and ALU pieces totaling 7 cycles. The Load and ALU
// operations cannot be done at the same time and so their latencies are added.
// Full 4 dispatches are required as this is a cracked instruction.
def : InstRW<[P9_LoadAndALUOp_7C, IP_AGEN_1C, IP_EXEC_1C, DISP_1C, DISP_1C],
(instrs
LXSIWAX,
LIWAX
)>;
// Cracked Load instruction.
// Requires consecutive Load (4 cycles) and ALU (3 cycles) pieces totaling 7
// cycles. The Load and ALU operations cannot be done at the same time and so
// their latencies are added.
// Full 6 dispatches are required as this is a restricted instruction.
def : InstRW<[P9_LoadAndALU2Op_7C, IP_AGEN_1C, IP_EXEC_1C,
DISP_3SLOTS_1C, DISP_3SLOTS_1C],
(instrs
LFSX,
LFS
)>;
// Cracked Load instruction.
// Requires consecutive Load and ALU pieces totaling 8 cycles. The Load and ALU
// operations cannot be done at the same time and so their latencies are added.
// Full 4 dispatches are required as this is a cracked instruction.
def : InstRW<[P9_LoadAndALU2Op_8C, IP_AGEN_1C, IP_EXEC_1C, DISP_1C, DISP_1C],
(instrs
LXSSP,
LXSSPX,
XFLOADf32,
DFLOADf32
)>;
// Cracked 3-Way Load Instruction
// Load with two ALU operations that depend on each other
def : InstRW<[P9_LoadAndALUOp_6C, P9_ALU_2C, IP_AGEN_1C, IP_EXEC_1C, IP_EXEC_1C,
DISP_PAIR_1C, DISP_PAIR_1C, DISP_1C],
(instrs
(instregex "LHAU(X)?(8)?$"),
LWAUX
)>;
// Cracked Load that requires the PM resource.
// Since the Load and the PM cannot be done at the same time the latencies are
// added. Requires 8 cycles. Since the PM requires the full superslice we need
// both EXECE, EXECO pipelines as well as 1 dispatch for the PM. The Load
// requires the remaining 1 dispatch.
def : InstRW<[P9_LoadAndPMOp_8C, IP_AGEN_1C, IP_EXECE_1C, IP_EXECO_1C,
DISP_1C, DISP_1C],
(instrs
LXVH8X,
LXVDSX,
LXVW4X
)>;
// Single slice Restricted store operation. The restricted operation requires
// all three dispatches for the superslice.
def : InstRW<[P9_LS_1C, IP_EXEC_1C, IP_AGEN_1C, DISP_3SLOTS_1C],
(instrs
(instregex "STF(S|D|IWX|SX|DX)$"),
(instregex "STXS(D|DX|SPX|IWX|IBX|IHX|SP)(v)?$"),
(instregex "STW(8)?$"),
(instregex "(D|X)FSTORE(f32|f64)$"),
(instregex "ST(W|H|D)BRX$"),
(instregex "ST(B|H|D)(8)?$"),
(instregex "ST(B|W|H|D)(CI)?X(TLS|TLS_32)?(8)?(_)?$"),
STIWX,
SLBIEG,
STMW,
STSWI,
TLBIE
)>;
// Vector Store Instruction
// Requires the whole superslice and therefore requires one dispatch
// as well as both the Even and Odd exec pipelines.
def : InstRW<[P9_LS_1C, IP_EXECE_1C, IP_EXECO_1C, IP_AGEN_1C, DISP_1C],
(instrs
(instregex "STVE(B|H|W)X$"),
(instregex "STVX(L)?$"),
(instregex "STXV(B16X|H8X|W4X|D2X|L|LL|X)?$")
)>;
// 5 Cycle DIV operation. Only one DIV unit per superslice so we use the whole
// superslice. That includes both exec pipelines (EXECO, EXECE) and two
// dispatches.
def : InstRW<[P9_DIV_5C, IP_EXECE_1C, IP_EXECO_1C, DISP_EVEN_1C],
(instrs
(instregex "MTCTR(8)?(loop)?$"),
(instregex "MTLR(8)?$")
)>;
// 12 Cycle DIV operation. Only one DIV unit per superslice so we use the whole
// superslice. That includes both exec pipelines (EXECO, EXECE) and two
// dispatches.
def : InstRW<[P9_DIV_12C, IP_EXECE_1C, IP_EXECO_1C, DISP_EVEN_1C],
(instrs
(instregex "M(T|F)VRSAVE(v)?$"),
(instregex "M(T|F)PMR$"),
(instregex "M(T|F)TB(8)?$"),
(instregex "MF(SPR|CTR|LR)(8)?$"),
(instregex "M(T|F)MSR(D)?$"),
(instregex "MTSPR(8)?$")
)>;
// 16 Cycle DIV operation. Only one DIV unit per superslice so we use the whole
// superslice. That includes both exec pipelines (EXECO, EXECE) and two
// dispatches.
def : InstRW<[P9_DIV_16C_8, IP_EXECO_1C, IP_EXECE_1C, DISP_EVEN_1C],
(instrs
DIVW,
+ DIVWO,
DIVWU,
+ DIVWUO,
MODSW
)>;
// 24 Cycle DIV operation. Only one DIV unit per superslice so we use the whole
// superslice. That includes both exec pipelines (EXECO, EXECE) and two
// dispatches.
def : InstRW<[P9_DIV_24C_8, IP_EXECO_1C, IP_EXECE_1C, DISP_EVEN_1C],
(instrs
DIVWE,
+ DIVWEO,
DIVD,
+ DIVDO,
DIVWEU,
+ DIVWEUO,
DIVDU,
+ DIVDUO,
MODSD,
MODUD,
MODUW
)>;
// 40 Cycle DIV operation. Only one DIV unit per superslice so we use the whole
// superslice. That includes both exec pipelines (EXECO, EXECE) and all three
// dispatches.
def : InstRW<[P9_DIV_40C_8, IP_EXECO_1C, IP_EXECE_1C, DISP_EVEN_1C],
(instrs
DIVDE,
- DIVDEU
+ DIVDEO,
+ DIVDEU,
+ DIVDEUO
)>;
// Cracked DIV and ALU operation. Requires one full slice for the ALU operation
// and one full superslice for the DIV operation since there is only one DIV per
// superslice. Latency of DIV plus ALU is 18.
def : InstRW<[P9_IntDivAndALUOp_18C_8, IP_EXECE_1C, IP_EXECO_1C, IP_EXEC_1C,
DISP_EVEN_1C, DISP_1C],
(instrs
(instregex "DIVW(U)?(O)?o$")
)>;
// Cracked DIV and ALU operation. Requires one full slice for the ALU operation
// and one full superslice for the DIV operation since there is only one DIV per
// superslice. Latency of DIV plus ALU is 26.
def : InstRW<[P9_IntDivAndALUOp_26C_8, IP_EXECE_1C, IP_EXECO_1C, IP_EXEC_1C,
DISP_EVEN_1C, DISP_1C],
(instrs
DIVDo,
+ DIVDOo,
DIVDUo,
+ DIVDUOo,
DIVWEo,
- DIVWEUo
+ DIVWEOo,
+ DIVWEUo,
+ DIVWEUOo
)>;
// Cracked DIV and ALU operation. Requires one full slice for the ALU operation
// and one full superslice for the DIV operation since there is only one DIV per
// superslice. Latency of DIV plus ALU is 42.
def : InstRW<[P9_IntDivAndALUOp_42C_8, IP_EXECE_1C, IP_EXECO_1C, IP_EXEC_1C,
DISP_EVEN_1C, DISP_1C],
(instrs
DIVDEo,
- DIVDEUo
+ DIVDEOo,
+ DIVDEUo,
+ DIVDEUOo
)>;
// CR access instructions in _BrMCR, IIC_BrMCRX.
// Cracked, restricted, ALU operations.
// Here the two ALU ops can actually be done in parallel and therefore the
// latencies are not added together. Otherwise this is like having two
// instructions running together on two pipelines and 6 dispatches. ALU ops are
// 2 cycles each.
def : InstRW<[P9_ALU_2C, P9_ALU_2C, IP_EXEC_1C, IP_EXEC_1C,
DISP_3SLOTS_1C, DISP_3SLOTS_1C],
(instrs
MTCRF,
MTCRF8
)>;
// Cracked ALU operations.
// Here the two ALU ops can actually be done in parallel and therefore the
// latencies are not added together. Otherwise this is like having two
// instructions running together on two pipelines and 2 dispatches. ALU ops are
// 2 cycles each.
def : InstRW<[P9_ALU_2C, P9_ALU_2C, IP_EXEC_1C, IP_EXEC_1C,
DISP_1C, DISP_1C],
(instrs
- (instregex "ADDC(8)?o$"),
- (instregex "SUBFC(8)?o$")
+ (instregex "ADDC(8)?(O)?o$"),
+ (instregex "SUBFC(8)?(O)?o$")
)>;
// Cracked ALU operations.
// Two ALU ops can be done in parallel.
// One is a three cycle ALU, the other is a two cycle ALU.
// One of the ALU ops is restricted, the other is not, so we have a total of
// 5 dispatches.
def : InstRW<[P9_ALU_2C, P9_ALU_3C, IP_EXEC_1C, IP_EXEC_1C,
DISP_3SLOTS_1C, DISP_1C],
(instrs
(instregex "F(N)?ABS(D|S)o$"),
(instregex "FCPSGN(D|S)o$"),
(instregex "FNEG(D|S)o$"),
FMRo
)>;
// Cracked ALU operations.
// Here the two ALU ops can actually be done in parallel and therefore the
// latencies are not added together. Otherwise this is like having two
// instructions running together on two pipelines and 2 dispatches.
// ALU ops are 3 cycles each.
def : InstRW<[P9_ALU_3C, P9_ALU_3C, IP_EXEC_1C, IP_EXEC_1C,
DISP_1C, DISP_1C],
(instrs
MCRFS
)>;
// Cracked Restricted ALU operations.
// Here the two ALU ops can actually be done in parallel and therefore the
// latencies are not added together. Otherwise this is like having two
// instructions running together on two pipelines and 6 dispatches.
// ALU ops are 3 cycles each.
def : InstRW<[P9_ALU_3C, P9_ALU_3C, IP_EXEC_1C, IP_EXEC_1C,
DISP_3SLOTS_1C, DISP_3SLOTS_1C],
(instrs
(instregex "MTFSF(b|o)?$"),
(instregex "MTFSFI(o)?$")
)>;
// Cracked instruction made of two ALU ops.
// The two ops cannot be done in parallel.
// One of the ALU ops is restricted and takes 3 dispatches.
def : InstRW<[P9_ALUOpAndALUOp_4C, IP_EXEC_1C, IP_EXEC_1C,
DISP_3SLOTS_1C, DISP_1C],
(instrs
(instregex "RLD(I)?C(R|L)o$"),
(instregex "RLW(IMI|INM|NM)(8)?o$"),
(instregex "SLW(8)?o$"),
(instregex "SRAW(I)?o$"),
(instregex "SRW(8)?o$"),
RLDICL_32o,
RLDIMIo
)>;
// Cracked instruction made of two ALU ops.
// The two ops cannot be done in parallel.
// Both of the ALU ops are restricted and take 3 dispatches.
def : InstRW<[P9_ALU2OpAndALU2Op_6C, IP_EXEC_1C, IP_EXEC_1C,
DISP_3SLOTS_1C, DISP_3SLOTS_1C],
(instrs
(instregex "MFFS(L|CE|o)?$")
)>;
// Cracked ALU instruction composed of three consecutive 2 cycle ALU ops for a
// total of 6 cycles. All of the ALU operations are also restricted so each
// takes 3 dispatches for a total of 9.
def : InstRW<[P9_ALUOpAndALUOpAndALUOp_6C, IP_EXEC_1C, IP_EXEC_1C, IP_EXEC_1C,
DISP_3SLOTS_1C, DISP_3SLOTS_1C, DISP_3SLOTS_1C],
(instrs
(instregex "MFCR(8)?$")
)>;
// Cracked instruction made of two ALU ops.
// The two ops cannot be done in parallel.
def : InstRW<[P9_ALUOpAndALUOp_4C, IP_EXEC_1C, IP_EXEC_1C, DISP_1C, DISP_1C],
(instrs
(instregex "EXTSWSLI_32_64o$"),
(instregex "SRAD(I)?o$"),
EXTSWSLIo,
SLDo,
SRDo,
RLDICo
)>;
// 33 Cycle DP Instruction Restricted. Takes one slice and 3 dispatches.
def : InstRW<[P9_DP_33C_8, IP_EXEC_1C, DISP_3SLOTS_1C],
(instrs
FDIV
)>;
// 33 Cycle DP Instruction Restricted and Cracked with 3 Cycle ALU.
def : InstRW<[P9_DPOpAndALU2Op_36C_8, IP_EXEC_1C, IP_EXEC_1C,
DISP_3SLOTS_1C, DISP_1C],
(instrs
FDIVo
)>;
// 36 Cycle DP Instruction.
// Instruction can be done on a single slice.
def : InstRW<[P9_DP_36C_10, IP_EXEC_1C, DISP_1C],
(instrs
XSSQRTDP
)>;
// 36 Cycle DP Instruction Restricted. Takes one slice and 3 dispatches.
def : InstRW<[P9_DP_36C_10, IP_EXEC_1C, DISP_3SLOTS_1C],
(instrs
FSQRT
)>;
// 36 Cycle DP Vector Instruction.
def : InstRW<[P9_DPE_36C_10, P9_DPO_36C_10, IP_EXECE_1C, IP_EXECO_1C,
DISP_1C],
(instrs
XVSQRTDP
)>;
// 27 Cycle DP Vector Instruction.
def : InstRW<[P9_DPE_27C_10, P9_DPO_27C_10, IP_EXECE_1C, IP_EXECO_1C,
DISP_1C],
(instrs
XVSQRTSP
)>;
// 36 Cycle DP Instruction Restricted and Cracked with 3 Cycle ALU.
def : InstRW<[P9_DPOpAndALU2Op_39C_10, IP_EXEC_1C, IP_EXEC_1C,
DISP_3SLOTS_1C, DISP_1C],
(instrs
FSQRTo
)>;
// 26 Cycle DP Instruction.
def : InstRW<[P9_DP_26C_5, IP_EXEC_1C, DISP_1C],
(instrs
XSSQRTSP
)>;
// 26 Cycle DP Instruction Restricted. Takes one slice and 3 dispatches.
def : InstRW<[P9_DP_26C_5, IP_EXEC_1C, DISP_3SLOTS_1C],
(instrs
FSQRTS
)>;
// 26 Cycle DP Instruction Restricted and Cracked with 3 Cycle ALU.
def : InstRW<[P9_DPOpAndALU2Op_29C_5, IP_EXEC_1C, IP_EXEC_1C,
DISP_3SLOTS_1C, DISP_1C],
(instrs
FSQRTSo
)>;
// 33 Cycle DP Instruction. Takes one slice and 1 dispatch.
def : InstRW<[P9_DP_33C_8, IP_EXEC_1C, DISP_1C],
(instrs
XSDIVDP
)>;
// 22 Cycle DP Instruction Restricted. Takes one slice and 3 dispatches.
def : InstRW<[P9_DP_22C_5, IP_EXEC_1C, DISP_3SLOTS_1C],
(instrs
FDIVS
)>;
// 22 Cycle DP Instruction Restricted and Cracked with 2 Cycle ALU.
def : InstRW<[P9_DPOpAndALU2Op_25C_5, IP_EXEC_1C, IP_EXEC_1C,
DISP_3SLOTS_1C, DISP_1C],
(instrs
FDIVSo
)>;
// 22 Cycle DP Instruction. Takes one slice and 1 dispatch.
def : InstRW<[P9_DP_22C_5, IP_EXEC_1C, DISP_1C],
(instrs
XSDIVSP
)>;
// 24 Cycle DP Vector Instruction. Takes one full superslice.
// Includes both EXECE, EXECO pipelines and 1 dispatch for the given
// superslice.
def : InstRW<[P9_DPE_24C_8, P9_DPO_24C_8, IP_EXECE_1C, IP_EXECO_1C,
DISP_1C],
(instrs
XVDIVSP
)>;
// 33 Cycle DP Vector Instruction. Takes one full superslice.
// Includes both EXECE, EXECO pipelines and 1 dispatch for the given
// superslice.
def : InstRW<[P9_DPE_33C_8, P9_DPO_33C_8, IP_EXECE_1C, IP_EXECO_1C,
DISP_1C],
(instrs
XVDIVDP
)>;
// Instruction cracked into three pieces. One Load and two ALU operations.
// The Load and one of the ALU ops cannot be run at the same time and so the
// latencies are added together for 6 cycles. The remaining ALU is 2 cycles.
// Both the load and the ALU that depends on it are restricted and so they take
// a total of 7 dispatches. The final 2 dispatches come from the second ALU op.
// The two EXEC pipelines are for the 2 ALUs while the AGEN is for the load.
def : InstRW<[P9_LoadAndALU2Op_7C, P9_ALU_2C,
IP_AGEN_1C, IP_EXEC_1C, IP_EXEC_1C,
DISP_3SLOTS_1C, DISP_3SLOTS_1C, DISP_1C],
(instrs
(instregex "LF(SU|SUX)$")
)>;
// Cracked instruction made up of a Store and an ALU. The ALU does not depend on
// the store and so it can be run at the same time as the store. The store is
// also restricted.
def : InstRW<[P9_LS_1C, P9_ALU_2C, IP_AGEN_1C, IP_EXEC_1C, IP_EXEC_1C,
DISP_3SLOTS_1C, DISP_1C],
(instrs
(instregex "STF(S|D)U(X)?$"),
(instregex "ST(B|H|W|D)U(X)?(8)?$")
)>;
// Cracked instruction made up of a Load and an ALU. The ALU does not depend on
// the load and so it can be run at the same time as the load.
def : InstRW<[P9_LS_4C, P9_ALU_2C, IP_AGEN_1C, IP_EXEC_1C,
DISP_PAIR_1C, DISP_PAIR_1C],
(instrs
(instregex "LBZU(X)?(8)?$"),
(instregex "LDU(X)?$")
)>;
// Cracked instruction made up of a Load and an ALU. The ALU does not depend on
// the load and so it can be run at the same time as the load. The load is also
// restricted. 3 dispatches are from the restricted load while the other two
// are from the ALU. The AGEN pipeline is from the load and the EXEC pipeline
// is required for the ALU.
def : InstRW<[P9_LS_4C, P9_ALU_2C, IP_AGEN_1C, IP_EXEC_1C,
DISP_3SLOTS_1C, DISP_1C],
(instrs
(instregex "LF(DU|DUX)$")
)>;
// Crypto Instructions
// 6 Cycle CY operation. Only one CY unit per CPU so we use a whole
// superslice. That includes both exec pipelines (EXECO, EXECE) and one
// dispatch.
def : InstRW<[P9_CY_6C, IP_EXECO_1C, IP_EXECE_1C, DISP_1C],
(instrs
(instregex "VPMSUM(B|H|W|D)$"),
(instregex "V(N)?CIPHER(LAST)?$"),
VSBOX
)>;
// Branch Instructions
// Two Cycle Branch
def : InstRW<[P9_BR_2C, DISP_BR_1C],
(instrs
(instregex "BCCCTR(L)?(8)?$"),
(instregex "BCCL(A|R|RL)?$"),
(instregex "BCCTR(L)?(8)?(n)?$"),
(instregex "BD(N)?Z(8|A|Am|Ap|m|p)?$"),
(instregex "BD(N)?ZL(A|Am|Ap|R|R8|RL|RLm|RLp|Rm|Rp|m|p)?$"),
(instregex "BL(_TLS|_NOP)?$"),
(instregex "BL8(_TLS|_NOP|_NOP_TLS|_TLS_)?$"),
(instregex "BLA(8|8_NOP)?$"),
(instregex "BLR(8|L)?$"),
(instregex "TAILB(A)?(8)?$"),
(instregex "TAILBCTR(8)?$"),
(instregex "gBC(A|Aat|CTR|CTRL|L|LA|LAat|LR|LRL|Lat|at)?$"),
(instregex "BCLR(L)?(n)?$"),
(instregex "BCTR(L)?(8)?$"),
B,
BA,
BC,
BCC,
BCCA,
BCL,
BCLalways,
BCLn,
BCTRL8_LDinto_toc,
BCn,
CTRL_DEP
)>;
// Five Cycle Branch with a 2 Cycle ALU Op
// Operations must be done consecutively and not in parallel.
def : InstRW<[P9_BROpAndALUOp_7C, IP_EXEC_1C, DISP_BR_1C, DISP_1C],
(instrs
ADDPCIS
)>;
// Special Extracted Instructions For Atomics
// Atomic Load
def : InstRW<[P9_LS_1C, P9_LS_1C, P9_LS_4C, P9_LS_4C, P9_LS_4C,
IP_EXEC_1C, IP_EXEC_1C, IP_AGEN_1C, IP_AGEN_1C, IP_AGEN_1C,
IP_AGEN_1C, IP_AGEN_1C, DISP_1C, DISP_3SLOTS_1C,
DISP_3SLOTS_1C, DISP_1C, DISP_1C, DISP_1C],
(instrs
(instregex "L(D|W)AT$")
)>;
// Atomic Store
def : InstRW<[P9_LS_1C, P9_LS_4C, P9_LS_4C, IP_EXEC_1C, IP_AGEN_1C, IP_AGEN_1C,
IP_AGEN_1C, DISP_1C, DISP_3SLOTS_1C, DISP_1C],
(instrs
(instregex "ST(D|W)AT$")
)>;
// Signal Processing Engine (SPE) Instructions
// These instructions are not supported on Power 9
def : InstRW<[],
(instrs
BRINC,
EVABS,
EVEQV,
EVMRA,
EVNAND,
EVNEG,
(instregex "EVADD(I)?W$"),
(instregex "EVADD(SM|SS|UM|US)IAAW$"),
(instregex "EVAND(C)?$"),
(instregex "EVCMP(EQ|GTS|GTU|LTS|LTU)$"),
(instregex "EVCNTL(S|Z)W$"),
(instregex "EVDIVW(S|U)$"),
(instregex "EVEXTS(B|H)$"),
(instregex "EVLD(H|W|D)(X)?$"),
(instregex "EVLHH(E|OS|OU)SPLAT(X)?$"),
(instregex "EVLWHE(X)?$"),
(instregex "EVLWHO(S|U)(X)?$"),
(instregex "EVLW(H|W)SPLAT(X)?$"),
(instregex "EVMERGE(HI|LO|HILO|LOHI)$"),
(instregex "EVMHEG(S|U)M(F|I)A(A|N)$"),
(instregex "EVMHES(M|S)(F|I)(A|AA|AAW|ANW)?$"),
(instregex "EVMHEU(M|S)I(A|AA|AAW|ANW)?$"),
(instregex "EVMHOG(U|S)M(F|I)A(A|N)$"),
(instregex "EVMHOS(M|S)(F|I)(A|AA|AAW|ANW)?$"),
(instregex "EVMHOU(M|S)I(A|AA|ANW|AAW)?$"),
(instregex "EVMWHS(M|S)(F|FA|I|IA)$"),
(instregex "EVMWHUMI(A)?$"),
(instregex "EVMWLS(M|S)IA(A|N)W$"),
(instregex "EVMWLU(M|S)I(A|AA|AAW|ANW)?$"),
(instregex "EVMWSM(F|I)(A|AA|AN)?$"),
(instregex "EVMWSSF(A|AA|AN)?$"),
(instregex "EVMWUMI(A|AA|AN)?$"),
(instregex "EV(N|X)?OR(C)?$"),
(instregex "EVR(LW|LWI|NDW)$"),
(instregex "EVSLW(I)?$"),
(instregex "EVSPLAT(F)?I$"),
(instregex "EVSRW(I)?(S|U)$"),
(instregex "EVST(DD|DH|DW|WHE|WHO|WWE|WWO)(X)?$"),
(instregex "EVSUBF(S|U)(M|S)IAAW$"),
(instregex "EVSUB(I)?FW$")
)> { let Unsupported = 1; }
// General Instructions without scheduling support.
def : InstRW<[],
(instrs
(instregex "(H)?RFI(D)?$"),
(instregex "DSS(ALL)?$"),
(instregex "DST(ST)?(T)?(64)?$"),
(instregex "ICBL(C|Q)$"),
(instregex "L(W|H|B)EPX$"),
(instregex "ST(W|H|B)EPX$"),
(instregex "(L|ST)FDEPX$"),
(instregex "M(T|F)SR(IN)?$"),
(instregex "M(T|F)DCR$"),
(instregex "NOP_GT_PWR(6|7)$"),
(instregex "TLB(IA|IVAX|SX|SX2|SX2D|LD|LI|RE|RE2|WE|WE2)$"),
(instregex "WRTEE(I)?$"),
ATTN,
CLRBHRB,
MFBHRBE,
MBAR,
MSYNC,
SLBSYNC,
SLBFEEo,
NAP,
STOP,
TRAP,
RFCI,
RFDI,
RFMCI,
SC,
DCBA,
DCBI,
DCCCI,
ICCCI
)> { let Unsupported = 1; }
Index: stable/12/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCInstr64Bit.td
===================================================================
--- stable/12/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCInstr64Bit.td (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCInstr64Bit.td (revision 356465)
@@ -1,1481 +1,1471 @@
//===-- PPCInstr64Bit.td - The PowerPC 64-bit Support ------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the PowerPC 64-bit instructions. These patterns are used
// both when in ppc64 mode and when in "use 64-bit extensions in 32-bit" mode.
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// 64-bit operands.
//
def s16imm64 : Operand<i64> {
let PrintMethod = "printS16ImmOperand";
let EncoderMethod = "getImm16Encoding";
let ParserMatchClass = PPCS16ImmAsmOperand;
let DecoderMethod = "decodeSImmOperand<16>";
}
def u16imm64 : Operand<i64> {
let PrintMethod = "printU16ImmOperand";
let EncoderMethod = "getImm16Encoding";
let ParserMatchClass = PPCU16ImmAsmOperand;
let DecoderMethod = "decodeUImmOperand<16>";
}
def s17imm64 : Operand<i64> {
// This operand type is used for addis/lis to allow the assembly parser
// to accept immediates in the range -65536..65535 for compatibility with
// the GNU assembler. The operand is treated as 16-bit otherwise.
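// For example, "lis 3, 65535" and "lis 3, -1" both place 0xFFFF in the
// 16-bit immediate field and assemble to the same encoding.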
let PrintMethod = "printS16ImmOperand";
let EncoderMethod = "getImm16Encoding";
let ParserMatchClass = PPCS17ImmAsmOperand;
let DecoderMethod = "decodeSImmOperand<16>";
}
def tocentry : Operand<iPTR> {
let MIOperandInfo = (ops i64imm:$imm);
}
def tlsreg : Operand<i64> {
let EncoderMethod = "getTLSRegEncoding";
let ParserMatchClass = PPCTLSRegOperand;
}
def tlsgd : Operand<i64> {}
def tlscall : Operand<i64> {
let PrintMethod = "printTLSCall";
let MIOperandInfo = (ops calltarget:$func, tlsgd:$sym);
let EncoderMethod = "getTLSCallEncoding";
}
//===----------------------------------------------------------------------===//
// 64-bit transformation functions.
//
def SHL64 : SDNodeXForm<imm, [{
// Transformation function: 63 - imm
return getI32Imm(63 - N->getZExtValue(), SDLoc(N));
}]>;
def SRL64 : SDNodeXForm<imm, [{
// Transformation function: 64 - imm
return N->getZExtValue() ? getI32Imm(64 - N->getZExtValue(), SDLoc(N))
: getI32Imm(0, SDLoc(N));
}]>;
//===----------------------------------------------------------------------===//
// Calls.
//
let Interpretation64Bit = 1, isCodeGenOnly = 1 in {
let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7 in {
let isReturn = 1, Uses = [LR8, RM] in
def BLR8 : XLForm_2_ext<19, 16, 20, 0, 0, (outs), (ins), "blr", IIC_BrB,
[(retflag)]>, Requires<[In64BitMode]>;
let isBranch = 1, isIndirectBranch = 1, Uses = [CTR8] in {
def BCTR8 : XLForm_2_ext<19, 528, 20, 0, 0, (outs), (ins), "bctr", IIC_BrB,
[]>,
Requires<[In64BitMode]>;
def BCCCTR8 : XLForm_2_br<19, 528, 0, (outs), (ins pred:$cond),
"b${cond:cc}ctr${cond:pm} ${cond:reg}", IIC_BrB,
[]>,
Requires<[In64BitMode]>;
def BCCTR8 : XLForm_2_br2<19, 528, 12, 0, (outs), (ins crbitrc:$bi),
"bcctr 12, $bi, 0", IIC_BrB, []>,
Requires<[In64BitMode]>;
def BCCTR8n : XLForm_2_br2<19, 528, 4, 0, (outs), (ins crbitrc:$bi),
"bcctr 4, $bi, 0", IIC_BrB, []>,
Requires<[In64BitMode]>;
}
}
let Defs = [LR8] in
def MovePCtoLR8 : PPCEmitTimePseudo<(outs), (ins), "#MovePCtoLR8", []>,
PPC970_Unit_BRU;
let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7 in {
let Defs = [CTR8], Uses = [CTR8] in {
def BDZ8 : BForm_1<16, 18, 0, 0, (outs), (ins condbrtarget:$dst),
"bdz $dst">;
def BDNZ8 : BForm_1<16, 16, 0, 0, (outs), (ins condbrtarget:$dst),
"bdnz $dst">;
}
let isReturn = 1, Defs = [CTR8], Uses = [CTR8, LR8, RM] in {
def BDZLR8 : XLForm_2_ext<19, 16, 18, 0, 0, (outs), (ins),
"bdzlr", IIC_BrB, []>;
def BDNZLR8 : XLForm_2_ext<19, 16, 16, 0, 0, (outs), (ins),
"bdnzlr", IIC_BrB, []>;
}
}
let isCall = 1, PPC970_Unit = 7, Defs = [LR8] in {
// Convenient aliases for call instructions
let Uses = [RM] in {
def BL8 : IForm<18, 0, 1, (outs), (ins calltarget:$func),
"bl $func", IIC_BrB, []>; // See Pat patterns below.
def BL8_TLS : IForm<18, 0, 1, (outs), (ins tlscall:$func),
"bl $func", IIC_BrB, []>;
def BLA8 : IForm<18, 1, 1, (outs), (ins abscalltarget:$func),
"bla $func", IIC_BrB, [(PPCcall (i64 imm:$func))]>;
}
let Uses = [RM], isCodeGenOnly = 1 in {
def BL8_NOP : IForm_and_DForm_4_zero<18, 0, 1, 24,
(outs), (ins calltarget:$func),
"bl $func\n\tnop", IIC_BrB, []>;
def BL8_NOP_TLS : IForm_and_DForm_4_zero<18, 0, 1, 24,
(outs), (ins tlscall:$func),
"bl $func\n\tnop", IIC_BrB, []>;
def BLA8_NOP : IForm_and_DForm_4_zero<18, 1, 1, 24,
(outs), (ins abscalltarget:$func),
"bla $func\n\tnop", IIC_BrB,
[(PPCcall_nop (i64 imm:$func))]>;
}
let Uses = [CTR8, RM] in {
def BCTRL8 : XLForm_2_ext<19, 528, 20, 0, 1, (outs), (ins),
"bctrl", IIC_BrB, [(PPCbctrl)]>,
Requires<[In64BitMode]>;
let isCodeGenOnly = 1 in {
def BCCCTRL8 : XLForm_2_br<19, 528, 1, (outs), (ins pred:$cond),
"b${cond:cc}ctrl${cond:pm} ${cond:reg}", IIC_BrB,
[]>,
Requires<[In64BitMode]>;
def BCCTRL8 : XLForm_2_br2<19, 528, 12, 1, (outs), (ins crbitrc:$bi),
"bcctrl 12, $bi, 0", IIC_BrB, []>,
Requires<[In64BitMode]>;
def BCCTRL8n : XLForm_2_br2<19, 528, 4, 1, (outs), (ins crbitrc:$bi),
"bcctrl 4, $bi, 0", IIC_BrB, []>,
Requires<[In64BitMode]>;
}
}
}
let isCall = 1, PPC970_Unit = 7, isCodeGenOnly = 1,
Defs = [LR8, X2], Uses = [CTR8, RM], RST = 2 in {
def BCTRL8_LDinto_toc :
XLForm_2_ext_and_DSForm_1<19, 528, 20, 0, 1, 58, 0, (outs),
(ins memrix:$src),
"bctrl\n\tld 2, $src", IIC_BrB,
[(PPCbctrl_load_toc iaddrX4:$src)]>,
Requires<[In64BitMode]>;
}
} // Interpretation64Bit
// FIXME: Duplicating this for the asm parser should be unnecessary, but the
// previous definition must be marked as CodeGen only to prevent decoding
// conflicts.
let Interpretation64Bit = 1, isAsmParserOnly = 1 in
let isCall = 1, PPC970_Unit = 7, Defs = [LR8], Uses = [RM] in
def BL8_TLS_ : IForm<18, 0, 1, (outs), (ins tlscall:$func),
"bl $func", IIC_BrB, []>;
// Calls
def : Pat<(PPCcall (i64 tglobaladdr:$dst)),
(BL8 tglobaladdr:$dst)>;
def : Pat<(PPCcall_nop (i64 tglobaladdr:$dst)),
(BL8_NOP tglobaladdr:$dst)>;
def : Pat<(PPCcall (i64 texternalsym:$dst)),
(BL8 texternalsym:$dst)>;
def : Pat<(PPCcall_nop (i64 texternalsym:$dst)),
(BL8_NOP texternalsym:$dst)>;
// Calls for AIX
def : Pat<(PPCcall (i64 mcsym:$dst)),
(BL8 mcsym:$dst)>;
def : Pat<(PPCcall_nop (i64 mcsym:$dst)),
(BL8_NOP mcsym:$dst)>;
// Atomic operations
// FIXME: some of these might be used with constant operands. This will result
// in constant materialization instructions that may be redundant. We currently
// clean this up in PPCMIPeephole with calls to
// PPCInstrInfo::convertToImmediateForm() but we should probably not emit them
// in the first place.
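// For example, an atomicrmw add of a small constant currently materializes
// the constant with li and uses the register form inside the ldarx/stdcx. loop.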
let Defs = [CR0] in {
def ATOMIC_LOAD_ADD_I64 : PPCCustomInserterPseudo<
(outs g8rc:$dst), (ins memrr:$ptr, g8rc:$incr), "#ATOMIC_LOAD_ADD_I64",
[(set i64:$dst, (atomic_load_add_64 xoaddr:$ptr, i64:$incr))]>;
def ATOMIC_LOAD_SUB_I64 : PPCCustomInserterPseudo<
(outs g8rc:$dst), (ins memrr:$ptr, g8rc:$incr), "#ATOMIC_LOAD_SUB_I64",
[(set i64:$dst, (atomic_load_sub_64 xoaddr:$ptr, i64:$incr))]>;
def ATOMIC_LOAD_OR_I64 : PPCCustomInserterPseudo<
(outs g8rc:$dst), (ins memrr:$ptr, g8rc:$incr), "#ATOMIC_LOAD_OR_I64",
[(set i64:$dst, (atomic_load_or_64 xoaddr:$ptr, i64:$incr))]>;
def ATOMIC_LOAD_XOR_I64 : PPCCustomInserterPseudo<
(outs g8rc:$dst), (ins memrr:$ptr, g8rc:$incr), "#ATOMIC_LOAD_XOR_I64",
[(set i64:$dst, (atomic_load_xor_64 xoaddr:$ptr, i64:$incr))]>;
def ATOMIC_LOAD_AND_I64 : PPCCustomInserterPseudo<
(outs g8rc:$dst), (ins memrr:$ptr, g8rc:$incr), "#ATOMIC_LOAD_AND_i64",
[(set i64:$dst, (atomic_load_and_64 xoaddr:$ptr, i64:$incr))]>;
def ATOMIC_LOAD_NAND_I64 : PPCCustomInserterPseudo<
(outs g8rc:$dst), (ins memrr:$ptr, g8rc:$incr), "#ATOMIC_LOAD_NAND_I64",
[(set i64:$dst, (atomic_load_nand_64 xoaddr:$ptr, i64:$incr))]>;
def ATOMIC_LOAD_MIN_I64 : PPCCustomInserterPseudo<
(outs g8rc:$dst), (ins memrr:$ptr, g8rc:$incr), "#ATOMIC_LOAD_MIN_I64",
[(set i64:$dst, (atomic_load_min_64 xoaddr:$ptr, i64:$incr))]>;
def ATOMIC_LOAD_MAX_I64 : PPCCustomInserterPseudo<
(outs g8rc:$dst), (ins memrr:$ptr, g8rc:$incr), "#ATOMIC_LOAD_MAX_I64",
[(set i64:$dst, (atomic_load_max_64 xoaddr:$ptr, i64:$incr))]>;
def ATOMIC_LOAD_UMIN_I64 : PPCCustomInserterPseudo<
(outs g8rc:$dst), (ins memrr:$ptr, g8rc:$incr), "#ATOMIC_LOAD_UMIN_I64",
[(set i64:$dst, (atomic_load_umin_64 xoaddr:$ptr, i64:$incr))]>;
def ATOMIC_LOAD_UMAX_I64 : PPCCustomInserterPseudo<
(outs g8rc:$dst), (ins memrr:$ptr, g8rc:$incr), "#ATOMIC_LOAD_UMAX_I64",
[(set i64:$dst, (atomic_load_umax_64 xoaddr:$ptr, i64:$incr))]>;
def ATOMIC_CMP_SWAP_I64 : PPCCustomInserterPseudo<
(outs g8rc:$dst), (ins memrr:$ptr, g8rc:$old, g8rc:$new), "#ATOMIC_CMP_SWAP_I64",
[(set i64:$dst, (atomic_cmp_swap_64 xoaddr:$ptr, i64:$old, i64:$new))]>;
def ATOMIC_SWAP_I64 : PPCCustomInserterPseudo<
(outs g8rc:$dst), (ins memrr:$ptr, g8rc:$new), "#ATOMIC_SWAP_I64",
[(set i64:$dst, (atomic_swap_64 xoaddr:$ptr, i64:$new))]>;
}
// Instructions to support atomic operations
let mayLoad = 1, hasSideEffects = 0 in {
def LDARX : XForm_1_memOp<31, 84, (outs g8rc:$rD), (ins memrr:$ptr),
"ldarx $rD, $ptr", IIC_LdStLDARX, []>;
// Instruction to support lock versions of atomics
// (EH=1 - see Power ISA 2.07 Book II 4.4.2)
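// EH=1 (the trailing operand in the asm string below) hints that the
// reservation is being obtained in order to acquire a lock.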
def LDARXL : XForm_1<31, 84, (outs g8rc:$rD), (ins memrr:$ptr),
"ldarx $rD, $ptr, 1", IIC_LdStLDARX, []>, isDOT;
let hasExtraDefRegAllocReq = 1 in
def LDAT : X_RD5_RS5_IM5<31, 614, (outs g8rc:$rD), (ins g8rc:$rA, u5imm:$FC),
"ldat $rD, $rA, $FC", IIC_LdStLoad>, isPPC64,
Requires<[IsISA3_0]>;
}
let Defs = [CR0], mayStore = 1, mayLoad = 0, hasSideEffects = 0 in
def STDCX : XForm_1_memOp<31, 214, (outs), (ins g8rc:$rS, memrr:$dst),
"stdcx. $rS, $dst", IIC_LdStSTDCX, []>, isDOT;
let mayStore = 1, mayLoad = 0, hasSideEffects = 0 in
def STDAT : X_RD5_RS5_IM5<31, 742, (outs), (ins g8rc:$rS, g8rc:$rA, u5imm:$FC),
"stdat $rS, $rA, $FC", IIC_LdStStore>, isPPC64,
Requires<[IsISA3_0]>;
let Interpretation64Bit = 1, isCodeGenOnly = 1 in {
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [RM] in
def TCRETURNdi8 :PPCEmitTimePseudo< (outs),
(ins calltarget:$dst, i32imm:$offset),
"#TC_RETURNd8 $dst $offset",
[]>;
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [RM] in
def TCRETURNai8 :PPCEmitTimePseudo<(outs), (ins abscalltarget:$func, i32imm:$offset),
"#TC_RETURNa8 $func $offset",
[(PPCtc_return (i64 imm:$func), imm:$offset)]>;
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [RM] in
def TCRETURNri8 : PPCEmitTimePseudo<(outs), (ins CTRRC8:$dst, i32imm:$offset),
"#TC_RETURNr8 $dst $offset",
[]>;
let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7, isBranch = 1,
isIndirectBranch = 1, isCall = 1, isReturn = 1, Uses = [CTR8, RM] in
def TAILBCTR8 : XLForm_2_ext<19, 528, 20, 0, 0, (outs), (ins), "bctr", IIC_BrB,
[]>,
Requires<[In64BitMode]>;
let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7,
isBarrier = 1, isCall = 1, isReturn = 1, Uses = [RM] in
def TAILB8 : IForm<18, 0, 0, (outs), (ins calltarget:$dst),
"b $dst", IIC_BrB,
[]>;
let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7,
isBarrier = 1, isCall = 1, isReturn = 1, Uses = [RM] in
def TAILBA8 : IForm<18, 0, 0, (outs), (ins abscalltarget:$dst),
"ba $dst", IIC_BrB,
[]>;
} // Interpretation64Bit
def : Pat<(PPCtc_return (i64 tglobaladdr:$dst), imm:$imm),
(TCRETURNdi8 tglobaladdr:$dst, imm:$imm)>;
def : Pat<(PPCtc_return (i64 texternalsym:$dst), imm:$imm),
(TCRETURNdi8 texternalsym:$dst, imm:$imm)>;
def : Pat<(PPCtc_return CTRRC8:$dst, imm:$imm),
(TCRETURNri8 CTRRC8:$dst, imm:$imm)>;
// 64-bit CR instructions
let Interpretation64Bit = 1, isCodeGenOnly = 1 in {
let hasSideEffects = 0 in {
// mtocrf's input needs to be prepared by shifting by an amount dependent
// on the cr register selected. Thus, post-ra anti-dep breaking must not
// later change that register assignment.
let hasExtraDefRegAllocReq = 1 in {
def MTOCRF8: XFXForm_5a<31, 144, (outs crbitm:$FXM), (ins g8rc:$ST),
"mtocrf $FXM, $ST", IIC_BrMCRX>,
PPC970_DGroup_First, PPC970_Unit_CRU;
// Similarly to mtocrf, the mask for mtcrf must be prepared in a way that
// is dependent on the cr fields being set.
def MTCRF8 : XFXForm_5<31, 144, (outs), (ins i32imm:$FXM, g8rc:$rS),
"mtcrf $FXM, $rS", IIC_BrMCRX>,
PPC970_MicroCode, PPC970_Unit_CRU;
} // hasExtraDefRegAllocReq = 1
// mfocrf's input needs to be prepared by shifting by an amount dependent
// on the cr register selected. Thus, post-ra anti-dep breaking must not
// later change that register assignment.
let hasExtraSrcRegAllocReq = 1 in {
def MFOCRF8: XFXForm_5a<31, 19, (outs g8rc:$rT), (ins crbitm:$FXM),
"mfocrf $rT, $FXM", IIC_SprMFCRF>,
PPC970_DGroup_First, PPC970_Unit_CRU;
// Similarly to mfocrf, the mask for mfcrf must be prepared in a way that
// is dependent on the cr fields being copied.
def MFCR8 : XFXForm_3<31, 19, (outs g8rc:$rT), (ins),
"mfcr $rT", IIC_SprMFCR>,
PPC970_MicroCode, PPC970_Unit_CRU;
} // hasExtraSrcRegAllocReq = 1
} // hasSideEffects = 0
// While longjmp is a control-flow barrier (fallthrough isn't allowed), setjmp
// is not.
let hasSideEffects = 1 in {
let Defs = [CTR8] in
def EH_SjLj_SetJmp64 : PPCCustomInserterPseudo<(outs gprc:$dst), (ins memr:$buf),
"#EH_SJLJ_SETJMP64",
[(set i32:$dst, (PPCeh_sjlj_setjmp addr:$buf))]>,
Requires<[In64BitMode]>;
}
let hasSideEffects = 1, isBarrier = 1 in {
let isTerminator = 1 in
def EH_SjLj_LongJmp64 : PPCCustomInserterPseudo<(outs), (ins memr:$buf),
"#EH_SJLJ_LONGJMP64",
[(PPCeh_sjlj_longjmp addr:$buf)]>,
Requires<[In64BitMode]>;
}
def MFSPR8 : XFXForm_1<31, 339, (outs g8rc:$RT), (ins i32imm:$SPR),
"mfspr $RT, $SPR", IIC_SprMFSPR>;
def MTSPR8 : XFXForm_1<31, 467, (outs), (ins i32imm:$SPR, g8rc:$RT),
"mtspr $SPR, $RT", IIC_SprMTSPR>;
//===----------------------------------------------------------------------===//
// 64-bit SPR manipulation instrs.
let Uses = [CTR8] in {
def MFCTR8 : XFXForm_1_ext<31, 339, 9, (outs g8rc:$rT), (ins),
"mfctr $rT", IIC_SprMFSPR>,
PPC970_DGroup_First, PPC970_Unit_FXU;
}
let Pattern = [(PPCmtctr i64:$rS)], Defs = [CTR8] in {
def MTCTR8 : XFXForm_7_ext<31, 467, 9, (outs), (ins g8rc:$rS),
"mtctr $rS", IIC_SprMTSPR>,
PPC970_DGroup_First, PPC970_Unit_FXU;
}
let hasSideEffects = 1, Defs = [CTR8] in {
let Pattern = [(int_set_loop_iterations i64:$rS)] in
def MTCTR8loop : XFXForm_7_ext<31, 467, 9, (outs), (ins g8rc:$rS),
"mtctr $rS", IIC_SprMTSPR>,
PPC970_DGroup_First, PPC970_Unit_FXU;
}
let Pattern = [(set i64:$rT, readcyclecounter)] in
def MFTB8 : XFXForm_1_ext<31, 339, 268, (outs g8rc:$rT), (ins),
"mfspr $rT, 268", IIC_SprMFTB>,
PPC970_DGroup_First, PPC970_Unit_FXU;
// Note that encoding mftb using mfspr is now the preferred form, and has
// been since at least ISA v2.03. The mftb instruction has since been phased
// out. Using mfspr, however, is known not to work on the POWER3.
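// (SPR 268 is the time base; on 64-bit implementations a single mfspr from
// it returns the full 64-bit TB value, which is what readcyclecounter wants.)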
let Defs = [X1], Uses = [X1] in
def DYNALLOC8 : PPCEmitTimePseudo<(outs g8rc:$result), (ins g8rc:$negsize, memri:$fpsi),"#DYNALLOC8",
[(set i64:$result,
(PPCdynalloc i64:$negsize, iaddr:$fpsi))]>;
def DYNAREAOFFSET8 : PPCEmitTimePseudo<(outs i64imm:$result), (ins memri:$fpsi), "#DYNAREAOFFSET8",
[(set i64:$result, (PPCdynareaoffset iaddr:$fpsi))]>;
let Defs = [LR8] in {
def MTLR8 : XFXForm_7_ext<31, 467, 8, (outs), (ins g8rc:$rS),
"mtlr $rS", IIC_SprMTSPR>,
PPC970_DGroup_First, PPC970_Unit_FXU;
}
let Uses = [LR8] in {
def MFLR8 : XFXForm_1_ext<31, 339, 8, (outs g8rc:$rT), (ins),
"mflr $rT", IIC_SprMFSPR>,
PPC970_DGroup_First, PPC970_Unit_FXU;
}
} // Interpretation64Bit
//===----------------------------------------------------------------------===//
// Fixed point instructions.
//
let PPC970_Unit = 1 in { // FXU Operations.
let Interpretation64Bit = 1 in {
let hasSideEffects = 0 in {
let isCodeGenOnly = 1 in {
let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in {
def LI8 : DForm_2_r0<14, (outs g8rc:$rD), (ins s16imm64:$imm),
"li $rD, $imm", IIC_IntSimple,
[(set i64:$rD, imm64SExt16:$imm)]>;
def LIS8 : DForm_2_r0<15, (outs g8rc:$rD), (ins s17imm64:$imm),
"lis $rD, $imm", IIC_IntSimple,
[(set i64:$rD, imm16ShiftedSExt:$imm)]>;
}
// Logical ops.
let isCommutable = 1 in {
defm NAND8: XForm_6r<31, 476, (outs g8rc:$rA), (ins g8rc:$rS, g8rc:$rB),
"nand", "$rA, $rS, $rB", IIC_IntSimple,
[(set i64:$rA, (not (and i64:$rS, i64:$rB)))]>;
defm AND8 : XForm_6r<31, 28, (outs g8rc:$rA), (ins g8rc:$rS, g8rc:$rB),
"and", "$rA, $rS, $rB", IIC_IntSimple,
[(set i64:$rA, (and i64:$rS, i64:$rB))]>;
} // isCommutable
defm ANDC8: XForm_6r<31, 60, (outs g8rc:$rA), (ins g8rc:$rS, g8rc:$rB),
"andc", "$rA, $rS, $rB", IIC_IntSimple,
[(set i64:$rA, (and i64:$rS, (not i64:$rB)))]>;
let isCommutable = 1 in {
defm OR8 : XForm_6r<31, 444, (outs g8rc:$rA), (ins g8rc:$rS, g8rc:$rB),
"or", "$rA, $rS, $rB", IIC_IntSimple,
[(set i64:$rA, (or i64:$rS, i64:$rB))]>;
defm NOR8 : XForm_6r<31, 124, (outs g8rc:$rA), (ins g8rc:$rS, g8rc:$rB),
"nor", "$rA, $rS, $rB", IIC_IntSimple,
[(set i64:$rA, (not (or i64:$rS, i64:$rB)))]>;
} // isCommutable
defm ORC8 : XForm_6r<31, 412, (outs g8rc:$rA), (ins g8rc:$rS, g8rc:$rB),
"orc", "$rA, $rS, $rB", IIC_IntSimple,
[(set i64:$rA, (or i64:$rS, (not i64:$rB)))]>;
let isCommutable = 1 in {
defm EQV8 : XForm_6r<31, 284, (outs g8rc:$rA), (ins g8rc:$rS, g8rc:$rB),
"eqv", "$rA, $rS, $rB", IIC_IntSimple,
[(set i64:$rA, (not (xor i64:$rS, i64:$rB)))]>;
defm XOR8 : XForm_6r<31, 316, (outs g8rc:$rA), (ins g8rc:$rS, g8rc:$rB),
"xor", "$rA, $rS, $rB", IIC_IntSimple,
[(set i64:$rA, (xor i64:$rS, i64:$rB))]>;
} // let isCommutable = 1
// Logical ops with immediate.
let Defs = [CR0] in {
def ANDIo8 : DForm_4<28, (outs g8rc:$dst), (ins g8rc:$src1, u16imm64:$src2),
"andi. $dst, $src1, $src2", IIC_IntGeneral,
[(set i64:$dst, (and i64:$src1, immZExt16:$src2))]>,
isDOT;
def ANDISo8 : DForm_4<29, (outs g8rc:$dst), (ins g8rc:$src1, u16imm64:$src2),
"andis. $dst, $src1, $src2", IIC_IntGeneral,
[(set i64:$dst, (and i64:$src1, imm16ShiftedZExt:$src2))]>,
isDOT;
}
def ORI8 : DForm_4<24, (outs g8rc:$dst), (ins g8rc:$src1, u16imm64:$src2),
"ori $dst, $src1, $src2", IIC_IntSimple,
[(set i64:$dst, (or i64:$src1, immZExt16:$src2))]>;
def ORIS8 : DForm_4<25, (outs g8rc:$dst), (ins g8rc:$src1, u16imm64:$src2),
"oris $dst, $src1, $src2", IIC_IntSimple,
[(set i64:$dst, (or i64:$src1, imm16ShiftedZExt:$src2))]>;
def XORI8 : DForm_4<26, (outs g8rc:$dst), (ins g8rc:$src1, u16imm64:$src2),
"xori $dst, $src1, $src2", IIC_IntSimple,
[(set i64:$dst, (xor i64:$src1, immZExt16:$src2))]>;
def XORIS8 : DForm_4<27, (outs g8rc:$dst), (ins g8rc:$src1, u16imm64:$src2),
"xoris $dst, $src1, $src2", IIC_IntSimple,
[(set i64:$dst, (xor i64:$src1, imm16ShiftedZExt:$src2))]>;
let isCommutable = 1 in
-defm ADD8 : XOForm_1r<31, 266, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
- "add", "$rT, $rA, $rB", IIC_IntSimple,
- [(set i64:$rT, (add i64:$rA, i64:$rB))]>;
+defm ADD8 : XOForm_1rx<31, 266, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
+ "add", "$rT, $rA, $rB", IIC_IntSimple,
+ [(set i64:$rT, (add i64:$rA, i64:$rB))]>;
// ADD8 has a special form: reg = ADD8(reg, sym@tls) for use by the
// initial-exec thread-local storage model. We need to forbid r0 here;
// while it works for add just fine, the linker can relax this to a
// local-exec addi, which won't work for r0.
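// (In addi, a base-register field of 0 denotes the literal value zero, not
// r0, so a relaxed addi using r0 would produce just the immediate rather
// than r0 plus the immediate.)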
def ADD8TLS : XOForm_1<31, 266, 0, (outs g8rc:$rT), (ins g8rc_nox0:$rA, tlsreg:$rB),
"add $rT, $rA, $rB", IIC_IntSimple,
[(set i64:$rT, (add i64:$rA, tglobaltlsaddr:$rB))]>;
let mayLoad = 1 in {
def LBZXTLS : XForm_1<31, 87, (outs g8rc:$rD), (ins ptr_rc_nor0:$rA, tlsreg:$rB),
"lbzx $rD, $rA, $rB", IIC_LdStLoad, []>;
def LHZXTLS : XForm_1<31, 279, (outs g8rc:$rD), (ins ptr_rc_nor0:$rA, tlsreg:$rB),
"lhzx $rD, $rA, $rB", IIC_LdStLoad, []>;
def LWZXTLS : XForm_1<31, 23, (outs g8rc:$rD), (ins ptr_rc_nor0:$rA, tlsreg:$rB),
"lwzx $rD, $rA, $rB", IIC_LdStLoad, []>;
def LDXTLS : XForm_1<31, 21, (outs g8rc:$rD), (ins ptr_rc_nor0:$rA, tlsreg:$rB),
"ldx $rD, $rA, $rB", IIC_LdStLD, []>, isPPC64;
def LBZXTLS_32 : XForm_1<31, 87, (outs gprc:$rD), (ins ptr_rc_nor0:$rA, tlsreg:$rB),
"lbzx $rD, $rA, $rB", IIC_LdStLoad, []>;
def LHZXTLS_32 : XForm_1<31, 279, (outs gprc:$rD), (ins ptr_rc_nor0:$rA, tlsreg:$rB),
"lhzx $rD, $rA, $rB", IIC_LdStLoad, []>;
def LWZXTLS_32 : XForm_1<31, 23, (outs gprc:$rD), (ins ptr_rc_nor0:$rA, tlsreg:$rB),
"lwzx $rD, $rA, $rB", IIC_LdStLoad, []>;
}
let mayStore = 1 in {
def STBXTLS : XForm_8<31, 215, (outs), (ins g8rc:$rS, ptr_rc_nor0:$rA, tlsreg:$rB),
"stbx $rS, $rA, $rB", IIC_LdStStore, []>,
PPC970_DGroup_Cracked;
def STHXTLS : XForm_8<31, 407, (outs), (ins g8rc:$rS, ptr_rc_nor0:$rA, tlsreg:$rB),
"sthx $rS, $rA, $rB", IIC_LdStStore, []>,
PPC970_DGroup_Cracked;
def STWXTLS : XForm_8<31, 151, (outs), (ins g8rc:$rS, ptr_rc_nor0:$rA, tlsreg:$rB),
"stwx $rS, $rA, $rB", IIC_LdStStore, []>,
PPC970_DGroup_Cracked;
def STDXTLS : XForm_8<31, 149, (outs), (ins g8rc:$rS, ptr_rc_nor0:$rA, tlsreg:$rB),
"stdx $rS, $rA, $rB", IIC_LdStSTD, []>, isPPC64,
PPC970_DGroup_Cracked;
def STBXTLS_32 : XForm_8<31, 215, (outs), (ins gprc:$rS, ptr_rc_nor0:$rA, tlsreg:$rB),
"stbx $rS, $rA, $rB", IIC_LdStStore, []>,
PPC970_DGroup_Cracked;
def STHXTLS_32 : XForm_8<31, 407, (outs), (ins gprc:$rS, ptr_rc_nor0:$rA, tlsreg:$rB),
"sthx $rS, $rA, $rB", IIC_LdStStore, []>,
PPC970_DGroup_Cracked;
def STWXTLS_32 : XForm_8<31, 151, (outs), (ins gprc:$rS, ptr_rc_nor0:$rA, tlsreg:$rB),
"stwx $rS, $rA, $rB", IIC_LdStStore, []>,
PPC970_DGroup_Cracked;
}
let isCommutable = 1 in
defm ADDC8 : XOForm_1rc<31, 10, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
"addc", "$rT, $rA, $rB", IIC_IntGeneral,
[(set i64:$rT, (addc i64:$rA, i64:$rB))]>,
PPC970_DGroup_Cracked;
let Defs = [CARRY] in
def ADDIC8 : DForm_2<12, (outs g8rc:$rD), (ins g8rc:$rA, s16imm64:$imm),
"addic $rD, $rA, $imm", IIC_IntGeneral,
[(set i64:$rD, (addc i64:$rA, imm64SExt16:$imm))]>;
def ADDI8 : DForm_2<14, (outs g8rc:$rD), (ins g8rc_nox0:$rA, s16imm64:$imm),
"addi $rD, $rA, $imm", IIC_IntSimple,
[(set i64:$rD, (add i64:$rA, imm64SExt16:$imm))]>;
def ADDIS8 : DForm_2<15, (outs g8rc:$rD), (ins g8rc_nox0:$rA, s17imm64:$imm),
"addis $rD, $rA, $imm", IIC_IntSimple,
[(set i64:$rD, (add i64:$rA, imm16ShiftedSExt:$imm))]>;
let Defs = [CARRY] in {
def SUBFIC8: DForm_2< 8, (outs g8rc:$rD), (ins g8rc:$rA, s16imm64:$imm),
"subfic $rD, $rA, $imm", IIC_IntGeneral,
[(set i64:$rD, (subc imm64SExt16:$imm, i64:$rA))]>;
}
defm SUBFC8 : XOForm_1rc<31, 8, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
"subfc", "$rT, $rA, $rB", IIC_IntGeneral,
[(set i64:$rT, (subc i64:$rB, i64:$rA))]>,
PPC970_DGroup_Cracked;
-defm SUBF8 : XOForm_1r<31, 40, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
- "subf", "$rT, $rA, $rB", IIC_IntGeneral,
- [(set i64:$rT, (sub i64:$rB, i64:$rA))]>;
+defm SUBF8 : XOForm_1rx<31, 40, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
+ "subf", "$rT, $rA, $rB", IIC_IntGeneral,
+ [(set i64:$rT, (sub i64:$rB, i64:$rA))]>;
defm NEG8 : XOForm_3r<31, 104, 0, (outs g8rc:$rT), (ins g8rc:$rA),
"neg", "$rT, $rA", IIC_IntSimple,
[(set i64:$rT, (ineg i64:$rA))]>;
let Uses = [CARRY] in {
let isCommutable = 1 in
defm ADDE8 : XOForm_1rc<31, 138, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
"adde", "$rT, $rA, $rB", IIC_IntGeneral,
[(set i64:$rT, (adde i64:$rA, i64:$rB))]>;
defm ADDME8 : XOForm_3rc<31, 234, 0, (outs g8rc:$rT), (ins g8rc:$rA),
"addme", "$rT, $rA", IIC_IntGeneral,
[(set i64:$rT, (adde i64:$rA, -1))]>;
defm ADDZE8 : XOForm_3rc<31, 202, 0, (outs g8rc:$rT), (ins g8rc:$rA),
"addze", "$rT, $rA", IIC_IntGeneral,
[(set i64:$rT, (adde i64:$rA, 0))]>;
defm SUBFE8 : XOForm_1rc<31, 136, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
"subfe", "$rT, $rA, $rB", IIC_IntGeneral,
[(set i64:$rT, (sube i64:$rB, i64:$rA))]>;
defm SUBFME8 : XOForm_3rc<31, 232, 0, (outs g8rc:$rT), (ins g8rc:$rA),
"subfme", "$rT, $rA", IIC_IntGeneral,
[(set i64:$rT, (sube -1, i64:$rA))]>;
defm SUBFZE8 : XOForm_3rc<31, 200, 0, (outs g8rc:$rT), (ins g8rc:$rA),
"subfze", "$rT, $rA", IIC_IntGeneral,
[(set i64:$rT, (sube 0, i64:$rA))]>;
}
} // isCodeGenOnly
// FIXME: Duplicating this for the asm parser should be unnecessary, but the
// previous definition must be marked as CodeGen only to prevent decoding
// conflicts.
let isAsmParserOnly = 1 in {
def ADD8TLS_ : XOForm_1<31, 266, 0, (outs g8rc:$rT), (ins g8rc:$rA, tlsreg:$rB),
"add $rT, $rA, $rB", IIC_IntSimple, []>;
let mayLoad = 1 in {
def LBZXTLS_ : XForm_1<31, 87, (outs g8rc:$rD), (ins ptr_rc_nor0:$rA, tlsreg:$rB),
"lbzx $rD, $rA, $rB", IIC_LdStLoad, []>;
def LHZXTLS_ : XForm_1<31, 279, (outs g8rc:$rD), (ins ptr_rc_nor0:$rA, tlsreg:$rB),
"lhzx $rD, $rA, $rB", IIC_LdStLoad, []>;
def LWZXTLS_ : XForm_1<31, 23, (outs g8rc:$rD), (ins ptr_rc_nor0:$rA, tlsreg:$rB),
"lwzx $rD, $rA, $rB", IIC_LdStLoad, []>;
def LDXTLS_ : XForm_1<31, 21, (outs g8rc:$rD), (ins ptr_rc_nor0:$rA, tlsreg:$rB),
"ldx $rD, $rA, $rB", IIC_LdStLD, []>, isPPC64;
}
let mayStore = 1 in {
def STBXTLS_ : XForm_8<31, 215, (outs), (ins g8rc:$rS, ptr_rc_nor0:$rA, tlsreg:$rB),
"stbx $rS, $rA, $rB", IIC_LdStStore, []>,
PPC970_DGroup_Cracked;
def STHXTLS_ : XForm_8<31, 407, (outs), (ins g8rc:$rS, ptr_rc_nor0:$rA, tlsreg:$rB),
"sthx $rS, $rA, $rB", IIC_LdStStore, []>,
PPC970_DGroup_Cracked;
def STWXTLS_ : XForm_8<31, 151, (outs), (ins g8rc:$rS, ptr_rc_nor0:$rA, tlsreg:$rB),
"stwx $rS, $rA, $rB", IIC_LdStStore, []>,
PPC970_DGroup_Cracked;
def STDXTLS_ : XForm_8<31, 149, (outs), (ins g8rc:$rS, ptr_rc_nor0:$rA, tlsreg:$rB),
"stdx $rS, $rA, $rB", IIC_LdStSTD, []>, isPPC64,
PPC970_DGroup_Cracked;
}
}
let isCommutable = 1 in {
defm MULHD : XOForm_1r<31, 73, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
"mulhd", "$rT, $rA, $rB", IIC_IntMulHW,
[(set i64:$rT, (mulhs i64:$rA, i64:$rB))]>;
defm MULHDU : XOForm_1r<31, 9, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
"mulhdu", "$rT, $rA, $rB", IIC_IntMulHWU,
[(set i64:$rT, (mulhu i64:$rA, i64:$rB))]>;
} // isCommutable
}
} // Interpretation64Bit
let isCompare = 1, hasSideEffects = 0 in {
def CMPD : XForm_16_ext<31, 0, (outs crrc:$crD), (ins g8rc:$rA, g8rc:$rB),
"cmpd $crD, $rA, $rB", IIC_IntCompare>, isPPC64;
def CMPLD : XForm_16_ext<31, 32, (outs crrc:$crD), (ins g8rc:$rA, g8rc:$rB),
"cmpld $crD, $rA, $rB", IIC_IntCompare>, isPPC64;
def CMPDI : DForm_5_ext<11, (outs crrc:$crD), (ins g8rc:$rA, s16imm64:$imm),
"cmpdi $crD, $rA, $imm", IIC_IntCompare>, isPPC64;
def CMPLDI : DForm_6_ext<10, (outs crrc:$dst), (ins g8rc:$src1, u16imm64:$src2),
"cmpldi $dst, $src1, $src2",
IIC_IntCompare>, isPPC64;
let Interpretation64Bit = 1, isCodeGenOnly = 1 in
def CMPRB8 : X_BF3_L1_RS5_RS5<31, 192, (outs crbitrc:$BF),
(ins u1imm:$L, g8rc:$rA, g8rc:$rB),
"cmprb $BF, $L, $rA, $rB", IIC_IntCompare, []>,
Requires<[IsISA3_0]>;
def CMPEQB : X_BF3_RS5_RS5<31, 224, (outs crbitrc:$BF),
(ins g8rc:$rA, g8rc:$rB), "cmpeqb $BF, $rA, $rB",
IIC_IntCompare, []>, Requires<[IsISA3_0]>;
}
let hasSideEffects = 0 in {
defm SLD : XForm_6r<31, 27, (outs g8rc:$rA), (ins g8rc:$rS, gprc:$rB),
"sld", "$rA, $rS, $rB", IIC_IntRotateD,
[(set i64:$rA, (PPCshl i64:$rS, i32:$rB))]>, isPPC64;
defm SRD : XForm_6r<31, 539, (outs g8rc:$rA), (ins g8rc:$rS, gprc:$rB),
"srd", "$rA, $rS, $rB", IIC_IntRotateD,
[(set i64:$rA, (PPCsrl i64:$rS, i32:$rB))]>, isPPC64;
defm SRAD : XForm_6rc<31, 794, (outs g8rc:$rA), (ins g8rc:$rS, gprc:$rB),
"srad", "$rA, $rS, $rB", IIC_IntRotateD,
[(set i64:$rA, (PPCsra i64:$rS, i32:$rB))]>, isPPC64;
let Interpretation64Bit = 1, isCodeGenOnly = 1 in {
defm CNTLZW8 : XForm_11r<31, 26, (outs g8rc:$rA), (ins g8rc:$rS),
"cntlzw", "$rA, $rS", IIC_IntGeneral, []>;
defm CNTTZW8 : XForm_11r<31, 538, (outs g8rc:$rA), (ins g8rc:$rS),
"cnttzw", "$rA, $rS", IIC_IntGeneral, []>,
Requires<[IsISA3_0]>;
defm EXTSB8 : XForm_11r<31, 954, (outs g8rc:$rA), (ins g8rc:$rS),
"extsb", "$rA, $rS", IIC_IntSimple,
[(set i64:$rA, (sext_inreg i64:$rS, i8))]>;
defm EXTSH8 : XForm_11r<31, 922, (outs g8rc:$rA), (ins g8rc:$rS),
"extsh", "$rA, $rS", IIC_IntSimple,
[(set i64:$rA, (sext_inreg i64:$rS, i16))]>;
defm SLW8 : XForm_6r<31, 24, (outs g8rc:$rA), (ins g8rc:$rS, g8rc:$rB),
"slw", "$rA, $rS, $rB", IIC_IntGeneral, []>;
defm SRW8 : XForm_6r<31, 536, (outs g8rc:$rA), (ins g8rc:$rS, g8rc:$rB),
"srw", "$rA, $rS, $rB", IIC_IntGeneral, []>;
} // Interpretation64Bit
// For fast-isel:
let isCodeGenOnly = 1 in {
def EXTSB8_32_64 : XForm_11<31, 954, (outs g8rc:$rA), (ins gprc:$rS),
"extsb $rA, $rS", IIC_IntSimple, []>, isPPC64;
def EXTSH8_32_64 : XForm_11<31, 922, (outs g8rc:$rA), (ins gprc:$rS),
"extsh $rA, $rS", IIC_IntSimple, []>, isPPC64;
} // isCodeGenOnly for fast-isel
defm EXTSW : XForm_11r<31, 986, (outs g8rc:$rA), (ins g8rc:$rS),
"extsw", "$rA, $rS", IIC_IntSimple,
[(set i64:$rA, (sext_inreg i64:$rS, i32))]>, isPPC64;
let Interpretation64Bit = 1, isCodeGenOnly = 1 in
defm EXTSW_32_64 : XForm_11r<31, 986, (outs g8rc:$rA), (ins gprc:$rS),
"extsw", "$rA, $rS", IIC_IntSimple,
[(set i64:$rA, (sext i32:$rS))]>, isPPC64;
let isCodeGenOnly = 1 in
def EXTSW_32 : XForm_11<31, 986, (outs gprc:$rA), (ins gprc:$rS),
"extsw $rA, $rS", IIC_IntSimple,
[]>, isPPC64;
defm SRADI : XSForm_1rc<31, 413, (outs g8rc:$rA), (ins g8rc:$rS, u6imm:$SH),
"sradi", "$rA, $rS, $SH", IIC_IntRotateDI,
[(set i64:$rA, (sra i64:$rS, (i32 imm:$SH)))]>, isPPC64;
let Interpretation64Bit = 1, isCodeGenOnly = 1 in
defm EXTSWSLI_32_64 : XSForm_1r<31, 445, (outs g8rc:$rA),
(ins gprc:$rS, u6imm:$SH),
"extswsli", "$rA, $rS, $SH", IIC_IntRotateDI,
[(set i64:$rA,
(PPCextswsli i32:$rS, (i32 imm:$SH)))]>,
isPPC64, Requires<[IsISA3_0]>;
defm EXTSWSLI : XSForm_1rc<31, 445, (outs g8rc:$rA), (ins g8rc:$rS, u6imm:$SH),
"extswsli", "$rA, $rS, $SH", IIC_IntRotateDI,
[]>, isPPC64, Requires<[IsISA3_0]>;
// For fast-isel:
let isCodeGenOnly = 1, Defs = [CARRY] in
def SRADI_32 : XSForm_1<31, 413, (outs gprc:$rA), (ins gprc:$rS, u6imm:$SH),
"sradi $rA, $rS, $SH", IIC_IntRotateDI, []>, isPPC64;
defm CNTLZD : XForm_11r<31, 58, (outs g8rc:$rA), (ins g8rc:$rS),
"cntlzd", "$rA, $rS", IIC_IntGeneral,
[(set i64:$rA, (ctlz i64:$rS))]>;
defm CNTTZD : XForm_11r<31, 570, (outs g8rc:$rA), (ins g8rc:$rS),
"cnttzd", "$rA, $rS", IIC_IntGeneral,
[(set i64:$rA, (cttz i64:$rS))]>, Requires<[IsISA3_0]>;
def POPCNTD : XForm_11<31, 506, (outs g8rc:$rA), (ins g8rc:$rS),
"popcntd $rA, $rS", IIC_IntGeneral,
[(set i64:$rA, (ctpop i64:$rS))]>;
def BPERMD : XForm_6<31, 252, (outs g8rc:$rA), (ins g8rc:$rS, g8rc:$rB),
"bpermd $rA, $rS, $rB", IIC_IntGeneral,
[(set i64:$rA, (int_ppc_bpermd g8rc:$rS, g8rc:$rB))]>,
isPPC64, Requires<[HasBPERMD]>;
let isCodeGenOnly = 1, isCommutable = 1 in
def CMPB8 : XForm_6<31, 508, (outs g8rc:$rA), (ins g8rc:$rS, g8rc:$rB),
"cmpb $rA, $rS, $rB", IIC_IntGeneral,
[(set i64:$rA, (PPCcmpb i64:$rS, i64:$rB))]>;
// popcntw also does a population count on the high 32 bits (storing the
// result in the high 32 bits of the output). We ignore that here, which is
// safe because we never separately use the high part of the 64-bit registers.
def POPCNTW : XForm_11<31, 378, (outs gprc:$rA), (ins gprc:$rS),
"popcntw $rA, $rS", IIC_IntGeneral,
[(set i32:$rA, (ctpop i32:$rS))]>;
def POPCNTB : XForm_11<31, 122, (outs gprc:$rA), (ins gprc:$rS),
"popcntb $rA, $rS", IIC_IntGeneral, []>;
defm DIVD : XOForm_1rcr<31, 489, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
"divd", "$rT, $rA, $rB", IIC_IntDivD,
[(set i64:$rT, (sdiv i64:$rA, i64:$rB))]>, isPPC64;
defm DIVDU : XOForm_1rcr<31, 457, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
"divdu", "$rT, $rA, $rB", IIC_IntDivD,
[(set i64:$rT, (udiv i64:$rA, i64:$rB))]>, isPPC64;
-def DIVDE : XOForm_1<31, 425, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
- "divde $rT, $rA, $rB", IIC_IntDivD,
- [(set i64:$rT, (int_ppc_divde g8rc:$rA, g8rc:$rB))]>,
- isPPC64, Requires<[HasExtDiv]>;
+defm DIVDE : XOForm_1rcr<31, 425, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
+ "divde", "$rT, $rA, $rB", IIC_IntDivD,
+ [(set i64:$rT, (int_ppc_divde g8rc:$rA, g8rc:$rB))]>,
+ isPPC64, Requires<[HasExtDiv]>;
let Predicates = [IsISA3_0] in {
def MADDHD : VAForm_1a<48, (outs g8rc :$RT), (ins g8rc:$RA, g8rc:$RB, g8rc:$RC),
"maddhd $RT, $RA, $RB, $RC", IIC_IntMulHD, []>, isPPC64;
def MADDHDU : VAForm_1a<49,
(outs g8rc :$RT), (ins g8rc:$RA, g8rc:$RB, g8rc:$RC),
"maddhdu $RT, $RA, $RB, $RC", IIC_IntMulHD, []>, isPPC64;
def MADDLD : VAForm_1a<51, (outs gprc :$RT), (ins gprc:$RA, gprc:$RB, gprc:$RC),
"maddld $RT, $RA, $RB, $RC", IIC_IntMulHD,
[(set i32:$RT, (add_without_simm16 (mul_without_simm16 i32:$RA, i32:$RB), i32:$RC))]>,
isPPC64;
def SETB : XForm_44<31, 128, (outs gprc:$RT), (ins crrc:$BFA),
"setb $RT, $BFA", IIC_IntGeneral>, isPPC64;
let Interpretation64Bit = 1, isCodeGenOnly = 1 in {
def MADDLD8 : VAForm_1a<51,
(outs g8rc :$RT), (ins g8rc:$RA, g8rc:$RB, g8rc:$RC),
"maddld $RT, $RA, $RB, $RC", IIC_IntMulHD,
[(set i64:$RT, (add_without_simm16 (mul_without_simm16 i64:$RA, i64:$RB), i64:$RC))]>,
isPPC64;
def SETB8 : XForm_44<31, 128, (outs g8rc:$RT), (ins crrc:$BFA),
"setb $RT, $BFA", IIC_IntGeneral>, isPPC64;
}
def DARN : XForm_45<31, 755, (outs g8rc:$RT), (ins i32imm:$L),
"darn $RT, $L", IIC_LdStLD>, isPPC64;
def ADDPCIS : DXForm<19, 2, (outs g8rc:$RT), (ins i32imm:$D),
"addpcis $RT, $D", IIC_BrB, []>, isPPC64;
def MODSD : XForm_8<31, 777, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
"modsd $rT, $rA, $rB", IIC_IntDivW,
[(set i64:$rT, (srem i64:$rA, i64:$rB))]>;
def MODUD : XForm_8<31, 265, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
"modud $rT, $rA, $rB", IIC_IntDivW,
[(set i64:$rT, (urem i64:$rA, i64:$rB))]>;
}
-let Defs = [CR0] in
-def DIVDEo : XOForm_1<31, 425, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
- "divde. $rT, $rA, $rB", IIC_IntDivD,
- []>, isDOT, PPC970_DGroup_Cracked, PPC970_DGroup_First,
- isPPC64, Requires<[HasExtDiv]>;
-def DIVDEU : XOForm_1<31, 393, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
- "divdeu $rT, $rA, $rB", IIC_IntDivD,
- [(set i64:$rT, (int_ppc_divdeu g8rc:$rA, g8rc:$rB))]>,
- isPPC64, Requires<[HasExtDiv]>;
-let Defs = [CR0] in
-def DIVDEUo : XOForm_1<31, 393, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
- "divdeu. $rT, $rA, $rB", IIC_IntDivD,
- []>, isDOT, PPC970_DGroup_Cracked, PPC970_DGroup_First,
- isPPC64, Requires<[HasExtDiv]>;
+defm DIVDEU : XOForm_1rcr<31, 393, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
+ "divdeu", "$rT, $rA, $rB", IIC_IntDivD,
+ [(set i64:$rT, (int_ppc_divdeu g8rc:$rA, g8rc:$rB))]>,
+ isPPC64, Requires<[HasExtDiv]>;
let isCommutable = 1 in
-defm MULLD : XOForm_1r<31, 233, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
- "mulld", "$rT, $rA, $rB", IIC_IntMulHD,
- [(set i64:$rT, (mul i64:$rA, i64:$rB))]>, isPPC64;
+defm MULLD : XOForm_1rx<31, 233, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
+ "mulld", "$rT, $rA, $rB", IIC_IntMulHD,
+ [(set i64:$rT, (mul i64:$rA, i64:$rB))]>, isPPC64;
let Interpretation64Bit = 1, isCodeGenOnly = 1 in
def MULLI8 : DForm_2<7, (outs g8rc:$rD), (ins g8rc:$rA, s16imm64:$imm),
"mulli $rD, $rA, $imm", IIC_IntMulLI,
[(set i64:$rD, (mul i64:$rA, imm64SExt16:$imm))]>;
}
let hasSideEffects = 0 in {
defm RLDIMI : MDForm_1r<30, 3, (outs g8rc:$rA),
(ins g8rc:$rSi, g8rc:$rS, u6imm:$SH, u6imm:$MBE),
"rldimi", "$rA, $rS, $SH, $MBE", IIC_IntRotateDI,
[]>, isPPC64, RegConstraint<"$rSi = $rA">,
NoEncode<"$rSi">;
// Rotate instructions.
defm RLDCL : MDSForm_1r<30, 8,
(outs g8rc:$rA), (ins g8rc:$rS, gprc:$rB, u6imm:$MBE),
"rldcl", "$rA, $rS, $rB, $MBE", IIC_IntRotateD,
[]>, isPPC64;
defm RLDCR : MDSForm_1r<30, 9,
(outs g8rc:$rA), (ins g8rc:$rS, gprc:$rB, u6imm:$MBE),
"rldcr", "$rA, $rS, $rB, $MBE", IIC_IntRotateD,
[]>, isPPC64;
defm RLDICL : MDForm_1r<30, 0,
(outs g8rc:$rA), (ins g8rc:$rS, u6imm:$SH, u6imm:$MBE),
"rldicl", "$rA, $rS, $SH, $MBE", IIC_IntRotateDI,
[]>, isPPC64;
// For fast-isel:
let isCodeGenOnly = 1 in
def RLDICL_32_64 : MDForm_1<30, 0,
(outs g8rc:$rA),
(ins gprc:$rS, u6imm:$SH, u6imm:$MBE),
"rldicl $rA, $rS, $SH, $MBE", IIC_IntRotateDI,
[]>, isPPC64;
// End fast-isel.
let Interpretation64Bit = 1, isCodeGenOnly = 1 in
defm RLDICL_32 : MDForm_1r<30, 0,
(outs gprc:$rA),
(ins gprc:$rS, u6imm:$SH, u6imm:$MBE),
"rldicl", "$rA, $rS, $SH, $MBE", IIC_IntRotateDI,
[]>, isPPC64;
defm RLDICR : MDForm_1r<30, 1,
(outs g8rc:$rA), (ins g8rc:$rS, u6imm:$SH, u6imm:$MBE),
"rldicr", "$rA, $rS, $SH, $MBE", IIC_IntRotateDI,
[]>, isPPC64;
let isCodeGenOnly = 1 in
def RLDICR_32 : MDForm_1<30, 1,
(outs gprc:$rA), (ins gprc:$rS, u6imm:$SH, u6imm:$MBE),
"rldicr $rA, $rS, $SH, $MBE", IIC_IntRotateDI,
[]>, isPPC64;
defm RLDIC : MDForm_1r<30, 2,
(outs g8rc:$rA), (ins g8rc:$rS, u6imm:$SH, u6imm:$MBE),
"rldic", "$rA, $rS, $SH, $MBE", IIC_IntRotateDI,
[]>, isPPC64;
let Interpretation64Bit = 1, isCodeGenOnly = 1 in {
defm RLWINM8 : MForm_2r<21, (outs g8rc:$rA),
(ins g8rc:$rS, u5imm:$SH, u5imm:$MB, u5imm:$ME),
"rlwinm", "$rA, $rS, $SH, $MB, $ME", IIC_IntGeneral,
[]>;
defm RLWNM8 : MForm_2r<23, (outs g8rc:$rA),
(ins g8rc:$rS, g8rc:$rB, u5imm:$MB, u5imm:$ME),
"rlwnm", "$rA, $rS, $rB, $MB, $ME", IIC_IntGeneral,
[]>;
// RLWIMI can be commuted if the rotate amount is zero.
let Interpretation64Bit = 1, isCodeGenOnly = 1 in
defm RLWIMI8 : MForm_2r<20, (outs g8rc:$rA),
(ins g8rc:$rSi, g8rc:$rS, u5imm:$SH, u5imm:$MB,
u5imm:$ME), "rlwimi", "$rA, $rS, $SH, $MB, $ME",
IIC_IntRotate, []>, PPC970_DGroup_Cracked,
RegConstraint<"$rSi = $rA">, NoEncode<"$rSi">;
let isSelect = 1 in
def ISEL8 : AForm_4<31, 15,
(outs g8rc:$rT), (ins g8rc_nox0:$rA, g8rc:$rB, crbitrc:$cond),
"isel $rT, $rA, $rB, $cond", IIC_IntISEL,
[]>;
} // Interpretation64Bit
} // hasSideEffects = 0
} // End FXU Operations.
//===----------------------------------------------------------------------===//
// Load/Store instructions.
//
// Sign extending loads.
let PPC970_Unit = 2 in {
let Interpretation64Bit = 1, isCodeGenOnly = 1 in
def LHA8: DForm_1<42, (outs g8rc:$rD), (ins memri:$src),
"lha $rD, $src", IIC_LdStLHA,
[(set i64:$rD, (sextloadi16 iaddr:$src))]>,
PPC970_DGroup_Cracked;
def LWA : DSForm_1<58, 2, (outs g8rc:$rD), (ins memrix:$src),
"lwa $rD, $src", IIC_LdStLWA,
[(set i64:$rD,
(aligned4sextloadi32 iaddrX4:$src))]>, isPPC64,
PPC970_DGroup_Cracked;
let Interpretation64Bit = 1, isCodeGenOnly = 1 in
def LHAX8: XForm_1_memOp<31, 343, (outs g8rc:$rD), (ins memrr:$src),
"lhax $rD, $src", IIC_LdStLHA,
[(set i64:$rD, (sextloadi16 xaddr:$src))]>,
PPC970_DGroup_Cracked;
def LWAX : XForm_1_memOp<31, 341, (outs g8rc:$rD), (ins memrr:$src),
"lwax $rD, $src", IIC_LdStLHA,
[(set i64:$rD, (sextloadi32 xaddrX4:$src))]>, isPPC64,
PPC970_DGroup_Cracked;
// For fast-isel:
let isCodeGenOnly = 1, mayLoad = 1 in {
def LWA_32 : DSForm_1<58, 2, (outs gprc:$rD), (ins memrix:$src),
"lwa $rD, $src", IIC_LdStLWA, []>, isPPC64,
PPC970_DGroup_Cracked;
def LWAX_32 : XForm_1_memOp<31, 341, (outs gprc:$rD), (ins memrr:$src),
"lwax $rD, $src", IIC_LdStLHA, []>, isPPC64,
PPC970_DGroup_Cracked;
} // end fast-isel isCodeGenOnly
// Update forms.
let mayLoad = 1, hasSideEffects = 0 in {
let Interpretation64Bit = 1, isCodeGenOnly = 1 in
def LHAU8 : DForm_1<43, (outs g8rc:$rD, ptr_rc_nor0:$ea_result),
(ins memri:$addr),
"lhau $rD, $addr", IIC_LdStLHAU,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
// NO LWAU!
let Interpretation64Bit = 1, isCodeGenOnly = 1 in
def LHAUX8 : XForm_1_memOp<31, 375, (outs g8rc:$rD, ptr_rc_nor0:$ea_result),
(ins memrr:$addr),
"lhaux $rD, $addr", IIC_LdStLHAUX,
[]>, RegConstraint<"$addr.ptrreg = $ea_result">,
NoEncode<"$ea_result">;
def LWAUX : XForm_1_memOp<31, 373, (outs g8rc:$rD, ptr_rc_nor0:$ea_result),
(ins memrr:$addr),
"lwaux $rD, $addr", IIC_LdStLHAUX,
[]>, RegConstraint<"$addr.ptrreg = $ea_result">,
NoEncode<"$ea_result">, isPPC64;
}
}
let Interpretation64Bit = 1, isCodeGenOnly = 1 in {
// Zero extending loads.
let PPC970_Unit = 2 in {
def LBZ8 : DForm_1<34, (outs g8rc:$rD), (ins memri:$src),
"lbz $rD, $src", IIC_LdStLoad,
[(set i64:$rD, (zextloadi8 iaddr:$src))]>;
def LHZ8 : DForm_1<40, (outs g8rc:$rD), (ins memri:$src),
"lhz $rD, $src", IIC_LdStLoad,
[(set i64:$rD, (zextloadi16 iaddr:$src))]>;
def LWZ8 : DForm_1<32, (outs g8rc:$rD), (ins memri:$src),
"lwz $rD, $src", IIC_LdStLoad,
[(set i64:$rD, (zextloadi32 iaddr:$src))]>, isPPC64;
def LBZX8 : XForm_1_memOp<31, 87, (outs g8rc:$rD), (ins memrr:$src),
"lbzx $rD, $src", IIC_LdStLoad,
[(set i64:$rD, (zextloadi8 xaddr:$src))]>;
def LHZX8 : XForm_1_memOp<31, 279, (outs g8rc:$rD), (ins memrr:$src),
"lhzx $rD, $src", IIC_LdStLoad,
[(set i64:$rD, (zextloadi16 xaddr:$src))]>;
def LWZX8 : XForm_1_memOp<31, 23, (outs g8rc:$rD), (ins memrr:$src),
"lwzx $rD, $src", IIC_LdStLoad,
[(set i64:$rD, (zextloadi32 xaddr:$src))]>;
// Update forms.
let mayLoad = 1, hasSideEffects = 0 in {
def LBZU8 : DForm_1<35, (outs g8rc:$rD, ptr_rc_nor0:$ea_result),
(ins memri:$addr),
"lbzu $rD, $addr", IIC_LdStLoadUpd,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
def LHZU8 : DForm_1<41, (outs g8rc:$rD, ptr_rc_nor0:$ea_result),
(ins memri:$addr),
"lhzu $rD, $addr", IIC_LdStLoadUpd,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
def LWZU8 : DForm_1<33, (outs g8rc:$rD, ptr_rc_nor0:$ea_result),
(ins memri:$addr),
"lwzu $rD, $addr", IIC_LdStLoadUpd,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
def LBZUX8 : XForm_1_memOp<31, 119, (outs g8rc:$rD, ptr_rc_nor0:$ea_result),
(ins memrr:$addr),
"lbzux $rD, $addr", IIC_LdStLoadUpdX,
[]>, RegConstraint<"$addr.ptrreg = $ea_result">,
NoEncode<"$ea_result">;
def LHZUX8 : XForm_1_memOp<31, 311, (outs g8rc:$rD, ptr_rc_nor0:$ea_result),
(ins memrr:$addr),
"lhzux $rD, $addr", IIC_LdStLoadUpdX,
[]>, RegConstraint<"$addr.ptrreg = $ea_result">,
NoEncode<"$ea_result">;
def LWZUX8 : XForm_1_memOp<31, 55, (outs g8rc:$rD, ptr_rc_nor0:$ea_result),
(ins memrr:$addr),
"lwzux $rD, $addr", IIC_LdStLoadUpdX,
[]>, RegConstraint<"$addr.ptrreg = $ea_result">,
NoEncode<"$ea_result">;
}
}
} // Interpretation64Bit
// Full 8-byte loads.
let PPC970_Unit = 2 in {
def LD : DSForm_1<58, 0, (outs g8rc:$rD), (ins memrix:$src),
"ld $rD, $src", IIC_LdStLD,
[(set i64:$rD, (aligned4load iaddrX4:$src))]>, isPPC64;
// The following four definitions are selected for small code model only.
// Otherwise, we need to create two instructions to form a 32-bit offset,
// so we have a custom matcher for TOC_ENTRY in PPCDAGToDAGIsel::Select().
def LDtoc: PPCEmitTimePseudo<(outs g8rc:$rD), (ins tocentry:$disp, g8rc:$reg),
"#LDtoc",
[(set i64:$rD,
(PPCtoc_entry tglobaladdr:$disp, i64:$reg))]>, isPPC64;
def LDtocJTI: PPCEmitTimePseudo<(outs g8rc:$rD), (ins tocentry:$disp, g8rc:$reg),
"#LDtocJTI",
[(set i64:$rD,
(PPCtoc_entry tjumptable:$disp, i64:$reg))]>, isPPC64;
def LDtocCPT: PPCEmitTimePseudo<(outs g8rc:$rD), (ins tocentry:$disp, g8rc:$reg),
"#LDtocCPT",
[(set i64:$rD,
(PPCtoc_entry tconstpool:$disp, i64:$reg))]>, isPPC64;
def LDtocBA: PPCEmitTimePseudo<(outs g8rc:$rD), (ins tocentry:$disp, g8rc:$reg),
"#LDtocCPT",
[(set i64:$rD,
(PPCtoc_entry tblockaddress:$disp, i64:$reg))]>, isPPC64;
def LDX : XForm_1_memOp<31, 21, (outs g8rc:$rD), (ins memrr:$src),
"ldx $rD, $src", IIC_LdStLD,
[(set i64:$rD, (load xaddrX4:$src))]>, isPPC64;
def LDBRX : XForm_1_memOp<31, 532, (outs g8rc:$rD), (ins memrr:$src),
"ldbrx $rD, $src", IIC_LdStLoad,
[(set i64:$rD, (PPClbrx xoaddr:$src, i64))]>, isPPC64;
let mayLoad = 1, hasSideEffects = 0, isCodeGenOnly = 1 in {
def LHBRX8 : XForm_1_memOp<31, 790, (outs g8rc:$rD), (ins memrr:$src),
"lhbrx $rD, $src", IIC_LdStLoad, []>;
def LWBRX8 : XForm_1_memOp<31, 534, (outs g8rc:$rD), (ins memrr:$src),
"lwbrx $rD, $src", IIC_LdStLoad, []>;
}
let mayLoad = 1, hasSideEffects = 0 in {
def LDU : DSForm_1<58, 1, (outs g8rc:$rD, ptr_rc_nor0:$ea_result),
(ins memrix:$addr),
"ldu $rD, $addr", IIC_LdStLDU,
[]>, RegConstraint<"$addr.reg = $ea_result">, isPPC64,
NoEncode<"$ea_result">;
def LDUX : XForm_1_memOp<31, 53, (outs g8rc:$rD, ptr_rc_nor0:$ea_result),
(ins memrr:$addr),
"ldux $rD, $addr", IIC_LdStLDUX,
[]>, RegConstraint<"$addr.ptrreg = $ea_result">,
NoEncode<"$ea_result">, isPPC64;
def LDMX : XForm_1<31, 309, (outs g8rc:$rD), (ins memrr:$src),
"ldmx $rD, $src", IIC_LdStLD, []>, isPPC64,
Requires<[IsISA3_0]>;
}
}
// Support for medium and large code model.
let hasSideEffects = 0 in {
let isReMaterializable = 1 in {
def ADDIStocHA: PPCEmitTimePseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, tocentry:$disp),
"#ADDIStocHA", []>, isPPC64;
def ADDItocL: PPCEmitTimePseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, tocentry:$disp),
"#ADDItocL", []>, isPPC64;
}
let mayLoad = 1 in
def LDtocL: PPCEmitTimePseudo<(outs g8rc:$rD), (ins tocentry:$disp, g8rc_nox0:$reg),
"#LDtocL", []>, isPPC64;
}
// Support for thread-local storage.
def ADDISgotTprelHA: PPCEmitTimePseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, s16imm64:$disp),
"#ADDISgotTprelHA",
[(set i64:$rD,
(PPCaddisGotTprelHA i64:$reg,
tglobaltlsaddr:$disp))]>,
isPPC64;
def LDgotTprelL: PPCEmitTimePseudo<(outs g8rc:$rD), (ins s16imm64:$disp, g8rc_nox0:$reg),
"#LDgotTprelL",
[(set i64:$rD,
(PPCldGotTprelL tglobaltlsaddr:$disp, i64:$reg))]>,
isPPC64;
let Defs = [CR7], Itinerary = IIC_LdStSync in
def CFENCE8 : PPCPostRAExpPseudo<(outs), (ins g8rc:$cr), "#CFENCE8", []>;
def : Pat<(PPCaddTls i64:$in, tglobaltlsaddr:$g),
(ADD8TLS $in, tglobaltlsaddr:$g)>;
def ADDIStlsgdHA: PPCEmitTimePseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, s16imm64:$disp),
"#ADDIStlsgdHA",
[(set i64:$rD,
(PPCaddisTlsgdHA i64:$reg, tglobaltlsaddr:$disp))]>,
isPPC64;
def ADDItlsgdL : PPCEmitTimePseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, s16imm64:$disp),
"#ADDItlsgdL",
[(set i64:$rD,
(PPCaddiTlsgdL i64:$reg, tglobaltlsaddr:$disp))]>,
isPPC64;
// LR8 is a true define, while the rest of the Defs are clobbers. X3 is
// explicitly defined when this op is created, so not mentioned here.
// This is lowered to BL8_NOP_TLS by the assembly printer, so the size must be
// correct because the branch select pass relies on it.
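// (BL8_NOP_TLS expands to a bl followed by a nop, i.e. two 4-byte
// instructions, which is why Size = 8 below.)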
let hasExtraSrcRegAllocReq = 1, hasExtraDefRegAllocReq = 1, Size = 8,
Defs = [X0,X4,X5,X6,X7,X8,X9,X10,X11,X12,LR8,CTR8,CR0,CR1,CR5,CR6,CR7] in
def GETtlsADDR : PPCEmitTimePseudo<(outs g8rc:$rD), (ins g8rc:$reg, tlsgd:$sym),
"#GETtlsADDR",
[(set i64:$rD,
(PPCgetTlsAddr i64:$reg, tglobaltlsaddr:$sym))]>,
isPPC64;
// Combined op for ADDItlsgdL and GETtlsADDR, late expanded. X3 and LR8
// are true defines while the rest of the Defs are clobbers.
let hasExtraSrcRegAllocReq = 1, hasExtraDefRegAllocReq = 1,
Defs = [X0,X3,X4,X5,X6,X7,X8,X9,X10,X11,X12,LR8,CTR8,CR0,CR1,CR5,CR6,CR7]
in
def ADDItlsgdLADDR : PPCEmitTimePseudo<(outs g8rc:$rD),
(ins g8rc_nox0:$reg, s16imm64:$disp, tlsgd:$sym),
"#ADDItlsgdLADDR",
[(set i64:$rD,
(PPCaddiTlsgdLAddr i64:$reg,
tglobaltlsaddr:$disp,
tglobaltlsaddr:$sym))]>,
isPPC64;
def ADDIStlsldHA: PPCEmitTimePseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, s16imm64:$disp),
"#ADDIStlsldHA",
[(set i64:$rD,
(PPCaddisTlsldHA i64:$reg, tglobaltlsaddr:$disp))]>,
isPPC64;
def ADDItlsldL : PPCEmitTimePseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, s16imm64:$disp),
"#ADDItlsldL",
[(set i64:$rD,
(PPCaddiTlsldL i64:$reg, tglobaltlsaddr:$disp))]>,
isPPC64;
// LR8 is a true define, while the rest of the Defs are clobbers. X3 is
// explicitly defined when this op is created, so not mentioned here.
let hasExtraSrcRegAllocReq = 1, hasExtraDefRegAllocReq = 1,
Defs = [X0,X4,X5,X6,X7,X8,X9,X10,X11,X12,LR8,CTR8,CR0,CR1,CR5,CR6,CR7] in
def GETtlsldADDR : PPCEmitTimePseudo<(outs g8rc:$rD), (ins g8rc:$reg, tlsgd:$sym),
"#GETtlsldADDR",
[(set i64:$rD,
(PPCgetTlsldAddr i64:$reg, tglobaltlsaddr:$sym))]>,
isPPC64;
// Combined op for ADDItlsldL and GETtlsADDR, late expanded. X3 and LR8
// are true defines, while the rest of the Defs are clobbers.
let hasExtraSrcRegAllocReq = 1, hasExtraDefRegAllocReq = 1,
Defs = [X0,X3,X4,X5,X6,X7,X8,X9,X10,X11,X12,LR8,CTR8,CR0,CR1,CR5,CR6,CR7]
in
def ADDItlsldLADDR : PPCEmitTimePseudo<(outs g8rc:$rD),
(ins g8rc_nox0:$reg, s16imm64:$disp, tlsgd:$sym),
"#ADDItlsldLADDR",
[(set i64:$rD,
(PPCaddiTlsldLAddr i64:$reg,
tglobaltlsaddr:$disp,
tglobaltlsaddr:$sym))]>,
isPPC64;
def ADDISdtprelHA: PPCEmitTimePseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, s16imm64:$disp),
"#ADDISdtprelHA",
[(set i64:$rD,
(PPCaddisDtprelHA i64:$reg,
tglobaltlsaddr:$disp))]>,
isPPC64;
def ADDIdtprelL : PPCEmitTimePseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, s16imm64:$disp),
"#ADDIdtprelL",
[(set i64:$rD,
(PPCaddiDtprelL i64:$reg, tglobaltlsaddr:$disp))]>,
isPPC64;
let PPC970_Unit = 2 in {
let Interpretation64Bit = 1, isCodeGenOnly = 1 in {
// Truncating stores.
def STB8 : DForm_1<38, (outs), (ins g8rc:$rS, memri:$src),
"stb $rS, $src", IIC_LdStStore,
[(truncstorei8 i64:$rS, iaddr:$src)]>;
def STH8 : DForm_1<44, (outs), (ins g8rc:$rS, memri:$src),
"sth $rS, $src", IIC_LdStStore,
[(truncstorei16 i64:$rS, iaddr:$src)]>;
def STW8 : DForm_1<36, (outs), (ins g8rc:$rS, memri:$src),
"stw $rS, $src", IIC_LdStStore,
[(truncstorei32 i64:$rS, iaddr:$src)]>;
def STBX8 : XForm_8_memOp<31, 215, (outs), (ins g8rc:$rS, memrr:$dst),
"stbx $rS, $dst", IIC_LdStStore,
[(truncstorei8 i64:$rS, xaddr:$dst)]>,
PPC970_DGroup_Cracked;
def STHX8 : XForm_8_memOp<31, 407, (outs), (ins g8rc:$rS, memrr:$dst),
"sthx $rS, $dst", IIC_LdStStore,
[(truncstorei16 i64:$rS, xaddr:$dst)]>,
PPC970_DGroup_Cracked;
def STWX8 : XForm_8_memOp<31, 151, (outs), (ins g8rc:$rS, memrr:$dst),
"stwx $rS, $dst", IIC_LdStStore,
[(truncstorei32 i64:$rS, xaddr:$dst)]>,
PPC970_DGroup_Cracked;
} // Interpretation64Bit
// Normal 8-byte stores.
def STD : DSForm_1<62, 0, (outs), (ins g8rc:$rS, memrix:$dst),
"std $rS, $dst", IIC_LdStSTD,
[(aligned4store i64:$rS, iaddrX4:$dst)]>, isPPC64;
def STDX : XForm_8_memOp<31, 149, (outs), (ins g8rc:$rS, memrr:$dst),
"stdx $rS, $dst", IIC_LdStSTD,
[(store i64:$rS, xaddrX4:$dst)]>, isPPC64,
PPC970_DGroup_Cracked;
def STDBRX: XForm_8_memOp<31, 660, (outs), (ins g8rc:$rS, memrr:$dst),
"stdbrx $rS, $dst", IIC_LdStStore,
[(PPCstbrx i64:$rS, xoaddr:$dst, i64)]>, isPPC64,
PPC970_DGroup_Cracked;
}
// Stores with Update (pre-inc).
let PPC970_Unit = 2, mayStore = 1, mayLoad = 0 in {
let Interpretation64Bit = 1, isCodeGenOnly = 1 in {
def STBU8 : DForm_1<39, (outs ptr_rc_nor0:$ea_res), (ins g8rc:$rS, memri:$dst),
"stbu $rS, $dst", IIC_LdStSTU, []>,
RegConstraint<"$dst.reg = $ea_res">, NoEncode<"$ea_res">;
def STHU8 : DForm_1<45, (outs ptr_rc_nor0:$ea_res), (ins g8rc:$rS, memri:$dst),
"sthu $rS, $dst", IIC_LdStSTU, []>,
RegConstraint<"$dst.reg = $ea_res">, NoEncode<"$ea_res">;
def STWU8 : DForm_1<37, (outs ptr_rc_nor0:$ea_res), (ins g8rc:$rS, memri:$dst),
"stwu $rS, $dst", IIC_LdStSTU, []>,
RegConstraint<"$dst.reg = $ea_res">, NoEncode<"$ea_res">;
def STBUX8: XForm_8_memOp<31, 247, (outs ptr_rc_nor0:$ea_res),
(ins g8rc:$rS, memrr:$dst),
"stbux $rS, $dst", IIC_LdStSTUX, []>,
RegConstraint<"$dst.ptrreg = $ea_res">,
NoEncode<"$ea_res">,
PPC970_DGroup_Cracked;
def STHUX8: XForm_8_memOp<31, 439, (outs ptr_rc_nor0:$ea_res),
(ins g8rc:$rS, memrr:$dst),
"sthux $rS, $dst", IIC_LdStSTUX, []>,
RegConstraint<"$dst.ptrreg = $ea_res">,
NoEncode<"$ea_res">,
PPC970_DGroup_Cracked;
def STWUX8: XForm_8_memOp<31, 183, (outs ptr_rc_nor0:$ea_res),
(ins g8rc:$rS, memrr:$dst),
"stwux $rS, $dst", IIC_LdStSTUX, []>,
RegConstraint<"$dst.ptrreg = $ea_res">,
NoEncode<"$ea_res">,
PPC970_DGroup_Cracked;
} // Interpretation64Bit
def STDU : DSForm_1<62, 1, (outs ptr_rc_nor0:$ea_res),
(ins g8rc:$rS, memrix:$dst),
"stdu $rS, $dst", IIC_LdStSTU, []>,
RegConstraint<"$dst.reg = $ea_res">, NoEncode<"$ea_res">,
isPPC64;
def STDUX : XForm_8_memOp<31, 181, (outs ptr_rc_nor0:$ea_res),
(ins g8rc:$rS, memrr:$dst),
"stdux $rS, $dst", IIC_LdStSTUX, []>,
RegConstraint<"$dst.ptrreg = $ea_res">,
NoEncode<"$ea_res">,
PPC970_DGroup_Cracked, isPPC64;
}
// Patterns to match the pre-inc stores. We can't put the patterns on
// the instruction definitions directly as ISel wants the address base
// and offset to be separate operands, not a single complex operand.
def : Pat<(pre_truncsti8 i64:$rS, iPTR:$ptrreg, iaddroff:$ptroff),
(STBU8 $rS, iaddroff:$ptroff, $ptrreg)>;
def : Pat<(pre_truncsti16 i64:$rS, iPTR:$ptrreg, iaddroff:$ptroff),
(STHU8 $rS, iaddroff:$ptroff, $ptrreg)>;
def : Pat<(pre_truncsti32 i64:$rS, iPTR:$ptrreg, iaddroff:$ptroff),
(STWU8 $rS, iaddroff:$ptroff, $ptrreg)>;
def : Pat<(aligned4pre_store i64:$rS, iPTR:$ptrreg, iaddroff:$ptroff),
(STDU $rS, iaddroff:$ptroff, $ptrreg)>;
def : Pat<(pre_truncsti8 i64:$rS, iPTR:$ptrreg, iPTR:$ptroff),
(STBUX8 $rS, $ptrreg, $ptroff)>;
def : Pat<(pre_truncsti16 i64:$rS, iPTR:$ptrreg, iPTR:$ptroff),
(STHUX8 $rS, $ptrreg, $ptroff)>;
def : Pat<(pre_truncsti32 i64:$rS, iPTR:$ptrreg, iPTR:$ptroff),
(STWUX8 $rS, $ptrreg, $ptroff)>;
def : Pat<(pre_store i64:$rS, iPTR:$ptrreg, iPTR:$ptroff),
(STDUX $rS, $ptrreg, $ptroff)>;
//===----------------------------------------------------------------------===//
// Floating point instructions.
//
let PPC970_Unit = 3, hasSideEffects = 0,
Uses = [RM] in { // FPU Operations.
defm FCFID : XForm_26r<63, 846, (outs f8rc:$frD), (ins f8rc:$frB),
"fcfid", "$frD, $frB", IIC_FPGeneral,
[(set f64:$frD, (PPCfcfid f64:$frB))]>, isPPC64;
defm FCTID : XForm_26r<63, 814, (outs f8rc:$frD), (ins f8rc:$frB),
"fctid", "$frD, $frB", IIC_FPGeneral,
[]>, isPPC64;
defm FCTIDU : XForm_26r<63, 942, (outs f8rc:$frD), (ins f8rc:$frB),
"fctidu", "$frD, $frB", IIC_FPGeneral,
[]>, isPPC64;
defm FCTIDZ : XForm_26r<63, 815, (outs f8rc:$frD), (ins f8rc:$frB),
"fctidz", "$frD, $frB", IIC_FPGeneral,
[(set f64:$frD, (PPCfctidz f64:$frB))]>, isPPC64;
defm FCFIDU : XForm_26r<63, 974, (outs f8rc:$frD), (ins f8rc:$frB),
"fcfidu", "$frD, $frB", IIC_FPGeneral,
[(set f64:$frD, (PPCfcfidu f64:$frB))]>, isPPC64;
defm FCFIDS : XForm_26r<59, 846, (outs f4rc:$frD), (ins f8rc:$frB),
"fcfids", "$frD, $frB", IIC_FPGeneral,
[(set f32:$frD, (PPCfcfids f64:$frB))]>, isPPC64;
defm FCFIDUS : XForm_26r<59, 974, (outs f4rc:$frD), (ins f8rc:$frB),
"fcfidus", "$frD, $frB", IIC_FPGeneral,
[(set f32:$frD, (PPCfcfidus f64:$frB))]>, isPPC64;
defm FCTIDUZ : XForm_26r<63, 943, (outs f8rc:$frD), (ins f8rc:$frB),
"fctiduz", "$frD, $frB", IIC_FPGeneral,
[(set f64:$frD, (PPCfctiduz f64:$frB))]>, isPPC64;
defm FCTIWUZ : XForm_26r<63, 143, (outs f8rc:$frD), (ins f8rc:$frB),
"fctiwuz", "$frD, $frB", IIC_FPGeneral,
[(set f64:$frD, (PPCfctiwuz f64:$frB))]>, isPPC64;
}
//===----------------------------------------------------------------------===//
// Instruction Patterns
//
// Extensions and truncates to/from 32-bit regs.
def : Pat<(i64 (zext i32:$in)),
(RLDICL (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $in, sub_32),
0, 32)>;
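// (RLDICL with SH = 0 and MB = 32 rotates by zero and masks from bit 32,
// clearing the upper 32 bits; this is the canonical clrldi form of zext.)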
def : Pat<(i64 (anyext i32:$in)),
(INSERT_SUBREG (i64 (IMPLICIT_DEF)), $in, sub_32)>;
def : Pat<(i32 (trunc i64:$in)),
(EXTRACT_SUBREG $in, sub_32)>;
// Implement the 'not' operation with the NOR instruction.
// (We could use the default xori pattern, but nor has lower latency on some
// cores, such as the A2.)
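// A single-register nor suffices because nor(x, x) = ~(x | x) = ~x.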
def i64not : OutPatFrag<(ops node:$in),
(NOR8 $in, $in)>;
def : Pat<(not i64:$in),
(i64not $in)>;
// Extending loads with i64 targets.
def : Pat<(zextloadi1 iaddr:$src),
(LBZ8 iaddr:$src)>;
def : Pat<(zextloadi1 xaddr:$src),
(LBZX8 xaddr:$src)>;
def : Pat<(extloadi1 iaddr:$src),
(LBZ8 iaddr:$src)>;
def : Pat<(extloadi1 xaddr:$src),
(LBZX8 xaddr:$src)>;
def : Pat<(extloadi8 iaddr:$src),
(LBZ8 iaddr:$src)>;
def : Pat<(extloadi8 xaddr:$src),
(LBZX8 xaddr:$src)>;
def : Pat<(extloadi16 iaddr:$src),
(LHZ8 iaddr:$src)>;
def : Pat<(extloadi16 xaddr:$src),
(LHZX8 xaddr:$src)>;
def : Pat<(extloadi32 iaddr:$src),
(LWZ8 iaddr:$src)>;
def : Pat<(extloadi32 xaddr:$src),
(LWZX8 xaddr:$src)>;
// Standard shifts. These are represented separately from the real shifts above
// so that we can distinguish between shifts that allow 6-bit and 7-bit shift
// amounts.
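// (The variable sld/srd forms take a 7-bit amount from RB: amounts of
// 64..127 produce zero rather than wrapping, and srad produces all sign
// bits for such amounts.)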
def : Pat<(sra i64:$rS, i32:$rB),
(SRAD $rS, $rB)>;
def : Pat<(srl i64:$rS, i32:$rB),
(SRD $rS, $rB)>;
def : Pat<(shl i64:$rS, i32:$rB),
(SLD $rS, $rB)>;
// SUBFIC
def : Pat<(sub imm64SExt16:$imm, i64:$in),
(SUBFIC8 $in, imm:$imm)>;
// SHL/SRL
def : Pat<(shl i64:$in, (i32 imm:$imm)),
(RLDICR $in, imm:$imm, (SHL64 imm:$imm))>;
def : Pat<(srl i64:$in, (i32 imm:$imm)),
(RLDICL $in, (SRL64 imm:$imm), imm:$imm)>;
// ROTL
def : Pat<(rotl i64:$in, i32:$sh),
(RLDCL $in, $sh, 0)>;
def : Pat<(rotl i64:$in, (i32 imm:$imm)),
(RLDICL $in, imm:$imm, 0)>;
// Hi and Lo for Darwin Global Addresses.
def : Pat<(PPChi tglobaladdr:$in, 0), (LIS8 tglobaladdr:$in)>;
def : Pat<(PPClo tglobaladdr:$in, 0), (LI8 tglobaladdr:$in)>;
def : Pat<(PPChi tconstpool:$in , 0), (LIS8 tconstpool:$in)>;
def : Pat<(PPClo tconstpool:$in , 0), (LI8 tconstpool:$in)>;
def : Pat<(PPChi tjumptable:$in , 0), (LIS8 tjumptable:$in)>;
def : Pat<(PPClo tjumptable:$in , 0), (LI8 tjumptable:$in)>;
def : Pat<(PPChi tblockaddress:$in, 0), (LIS8 tblockaddress:$in)>;
def : Pat<(PPClo tblockaddress:$in, 0), (LI8 tblockaddress:$in)>;
def : Pat<(PPChi tglobaltlsaddr:$g, i64:$in),
(ADDIS8 $in, tglobaltlsaddr:$g)>;
def : Pat<(PPClo tglobaltlsaddr:$g, i64:$in),
(ADDI8 $in, tglobaltlsaddr:$g)>;
def : Pat<(add i64:$in, (PPChi tglobaladdr:$g, 0)),
(ADDIS8 $in, tglobaladdr:$g)>;
def : Pat<(add i64:$in, (PPChi tconstpool:$g, 0)),
(ADDIS8 $in, tconstpool:$g)>;
def : Pat<(add i64:$in, (PPChi tjumptable:$g, 0)),
(ADDIS8 $in, tjumptable:$g)>;
def : Pat<(add i64:$in, (PPChi tblockaddress:$g, 0)),
(ADDIS8 $in, tblockaddress:$g)>;
// Patterns to match r+r indexed loads and stores for
// addresses without at least 4-byte alignment.
def : Pat<(i64 (unaligned4sextloadi32 xoaddr:$src)),
(LWAX xoaddr:$src)>;
def : Pat<(i64 (unaligned4load xoaddr:$src)),
(LDX xoaddr:$src)>;
def : Pat<(unaligned4store i64:$rS, xoaddr:$dst),
(STDX $rS, xoaddr:$dst)>;
// 64-bits atomic loads and stores
def : Pat<(atomic_load_64 iaddrX4:$src), (LD memrix:$src)>;
def : Pat<(atomic_load_64 xaddrX4:$src), (LDX memrr:$src)>;
def : Pat<(atomic_store_64 iaddrX4:$ptr, i64:$val), (STD g8rc:$val, memrix:$ptr)>;
def : Pat<(atomic_store_64 xaddrX4:$ptr, i64:$val), (STDX g8rc:$val, memrr:$ptr)>;
let Predicates = [IsISA3_0] in {
class X_L1_RA5_RB5<bits<6> opcode, bits<10> xo, string opc, RegisterOperand ty,
InstrItinClass itin, list<dag> pattern>
: X_L1_RS5_RS5<opcode, xo, (outs), (ins ty:$rA, ty:$rB, u1imm:$L),
!strconcat(opc, " $rA, $rB, $L"), itin, pattern>;
let Interpretation64Bit = 1, isCodeGenOnly = 1 in {
def CP_COPY8 : X_L1_RA5_RB5<31, 774, "copy" , g8rc, IIC_LdStCOPY, []>;
def CP_PASTE8 : X_L1_RA5_RB5<31, 902, "paste" , g8rc, IIC_LdStPASTE, []>;
def CP_PASTE8o : X_L1_RA5_RB5<31, 902, "paste.", g8rc, IIC_LdStPASTE, []>, isDOT;
}
// SLB Invalidate Entry Global
def SLBIEG : XForm_26<31, 466, (outs), (ins gprc:$RS, gprc:$RB),
"slbieg $RS, $RB", IIC_SprSLBIEG, []>;
// SLB Synchronize
def SLBSYNC : XForm_0<31, 338, (outs), (ins), "slbsync", IIC_SprSLBSYNC, []>;
} // IsISA3_0
Index: stable/12/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCInstrInfo.td
===================================================================
--- stable/12/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCInstrInfo.td (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCInstrInfo.td (revision 356465)
@@ -1,5016 +1,5072 @@
//===-- PPCInstrInfo.td - The PowerPC Instruction Set ------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the subset of the 32-bit PowerPC instruction set, as used
// by the PowerPC instruction selector.
//
//===----------------------------------------------------------------------===//
include "PPCInstrFormats.td"
//===----------------------------------------------------------------------===//
// PowerPC specific type constraints.
//
def SDT_PPCstfiwx : SDTypeProfile<0, 2, [ // stfiwx
SDTCisVT<0, f64>, SDTCisPtrTy<1>
]>;
def SDT_PPClfiwx : SDTypeProfile<1, 1, [ // lfiw[az]x
SDTCisVT<0, f64>, SDTCisPtrTy<1>
]>;
def SDT_PPCLxsizx : SDTypeProfile<1, 2, [
SDTCisVT<0, f64>, SDTCisPtrTy<1>, SDTCisPtrTy<2>
]>;
def SDT_PPCstxsix : SDTypeProfile<0, 3, [
SDTCisVT<0, f64>, SDTCisPtrTy<1>, SDTCisPtrTy<2>
]>;
def SDT_PPCcv_fp_to_int : SDTypeProfile<1, 1, [
SDTCisFP<0>, SDTCisFP<1>
]>;
def SDT_PPCstore_scal_int_from_vsr : SDTypeProfile<0, 3, [
SDTCisVT<0, f64>, SDTCisPtrTy<1>, SDTCisPtrTy<2>
]>;
def SDT_PPCVexts : SDTypeProfile<1, 2, [
SDTCisVT<0, f64>, SDTCisVT<1, f64>, SDTCisPtrTy<2>
]>;
def SDT_PPCSExtVElems : SDTypeProfile<1, 1, [
SDTCisVec<0>, SDTCisVec<1>
]>;
def SDT_PPCCallSeqStart : SDCallSeqStart<[ SDTCisVT<0, i32>,
SDTCisVT<1, i32> ]>;
def SDT_PPCCallSeqEnd : SDCallSeqEnd<[ SDTCisVT<0, i32>,
SDTCisVT<1, i32> ]>;
def SDT_PPCvperm : SDTypeProfile<1, 3, [
SDTCisVT<3, v16i8>, SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>
]>;
def SDT_PPCVecSplat : SDTypeProfile<1, 2, [ SDTCisVec<0>,
SDTCisVec<1>, SDTCisInt<2>
]>;
def SDT_PPCVecShift : SDTypeProfile<1, 3, [ SDTCisVec<0>,
SDTCisVec<1>, SDTCisVec<2>, SDTCisPtrTy<3>
]>;
def SDT_PPCVecInsert : SDTypeProfile<1, 3, [ SDTCisVec<0>,
SDTCisVec<1>, SDTCisVec<2>, SDTCisInt<3>
]>;
def SDT_PPCVecReverse: SDTypeProfile<1, 1, [ SDTCisVec<0>,
SDTCisVec<1>
]>;
def SDT_PPCxxpermdi: SDTypeProfile<1, 3, [ SDTCisVec<0>,
SDTCisVec<1>, SDTCisVec<2>, SDTCisInt<3>
]>;
def SDT_PPCvcmp : SDTypeProfile<1, 3, [
SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>, SDTCisVT<3, i32>
]>;
def SDT_PPCcondbr : SDTypeProfile<0, 3, [
SDTCisVT<0, i32>, SDTCisVT<2, OtherVT>
]>;
def SDT_PPClbrx : SDTypeProfile<1, 2, [
SDTCisInt<0>, SDTCisPtrTy<1>, SDTCisVT<2, OtherVT>
]>;
def SDT_PPCstbrx : SDTypeProfile<0, 3, [
SDTCisInt<0>, SDTCisPtrTy<1>, SDTCisVT<2, OtherVT>
]>;
def SDT_PPCTC_ret : SDTypeProfile<0, 2, [
SDTCisPtrTy<0>, SDTCisVT<1, i32>
]>;
def tocentry32 : Operand<iPTR> {
let MIOperandInfo = (ops i32imm:$imm);
}
def SDT_PPCqvfperm : SDTypeProfile<1, 3, [
SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisVec<3>
]>;
def SDT_PPCqvgpci : SDTypeProfile<1, 1, [
SDTCisVec<0>, SDTCisInt<1>
]>;
def SDT_PPCqvaligni : SDTypeProfile<1, 3, [
SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisInt<3>
]>;
def SDT_PPCqvesplati : SDTypeProfile<1, 2, [
SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisInt<2>
]>;
def SDT_PPCqbflt : SDTypeProfile<1, 1, [
SDTCisVec<0>, SDTCisVec<1>
]>;
def SDT_PPCqvlfsb : SDTypeProfile<1, 1, [
SDTCisVec<0>, SDTCisPtrTy<1>
]>;
def SDT_PPCextswsli : SDTypeProfile<1, 2, [ // extswsli
SDTCisInt<0>, SDTCisInt<1>, SDTCisOpSmallerThanOp<1, 0>, SDTCisInt<2>
]>;
//===----------------------------------------------------------------------===//
// PowerPC specific DAG Nodes.
//
def PPCfre : SDNode<"PPCISD::FRE", SDTFPUnaryOp, []>;
def PPCfrsqrte: SDNode<"PPCISD::FRSQRTE", SDTFPUnaryOp, []>;
def PPCfcfid : SDNode<"PPCISD::FCFID", SDTFPUnaryOp, []>;
def PPCfcfidu : SDNode<"PPCISD::FCFIDU", SDTFPUnaryOp, []>;
def PPCfcfids : SDNode<"PPCISD::FCFIDS", SDTFPRoundOp, []>;
def PPCfcfidus: SDNode<"PPCISD::FCFIDUS", SDTFPRoundOp, []>;
def PPCfctidz : SDNode<"PPCISD::FCTIDZ", SDTFPUnaryOp, []>;
def PPCfctiwz : SDNode<"PPCISD::FCTIWZ", SDTFPUnaryOp, []>;
def PPCfctiduz: SDNode<"PPCISD::FCTIDUZ",SDTFPUnaryOp, []>;
def PPCfctiwuz: SDNode<"PPCISD::FCTIWUZ",SDTFPUnaryOp, []>;
def PPCcv_fp_to_uint_in_vsr:
SDNode<"PPCISD::FP_TO_UINT_IN_VSR", SDT_PPCcv_fp_to_int, []>;
def PPCcv_fp_to_sint_in_vsr:
SDNode<"PPCISD::FP_TO_SINT_IN_VSR", SDT_PPCcv_fp_to_int, []>;
def PPCstore_scal_int_from_vsr:
SDNode<"PPCISD::ST_VSR_SCAL_INT", SDT_PPCstore_scal_int_from_vsr,
[SDNPHasChain, SDNPMayStore]>;
def PPCstfiwx : SDNode<"PPCISD::STFIWX", SDT_PPCstfiwx,
[SDNPHasChain, SDNPMayStore]>;
def PPClfiwax : SDNode<"PPCISD::LFIWAX", SDT_PPClfiwx,
[SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def PPClfiwzx : SDNode<"PPCISD::LFIWZX", SDT_PPClfiwx,
[SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def PPClxsizx : SDNode<"PPCISD::LXSIZX", SDT_PPCLxsizx,
[SDNPHasChain, SDNPMayLoad]>;
def PPCstxsix : SDNode<"PPCISD::STXSIX", SDT_PPCstxsix,
[SDNPHasChain, SDNPMayStore]>;
def PPCVexts : SDNode<"PPCISD::VEXTS", SDT_PPCVexts, []>;
def PPCSExtVElems : SDNode<"PPCISD::SExtVElems", SDT_PPCSExtVElems, []>;
// Extract FPSCR (not modeled at the DAG level).
def PPCmffs : SDNode<"PPCISD::MFFS",
SDTypeProfile<1, 0, [SDTCisVT<0, f64>]>, []>;
// Perform FADD in round-to-zero mode.
def PPCfaddrtz: SDNode<"PPCISD::FADDRTZ", SDTFPBinOp, []>;
def PPCfsel : SDNode<"PPCISD::FSEL",
// Type constraint for fsel.
SDTypeProfile<1, 3, [SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>,
SDTCisFP<0>, SDTCisVT<1, f64>]>, []>;
def PPChi : SDNode<"PPCISD::Hi", SDTIntBinOp, []>;
def PPClo : SDNode<"PPCISD::Lo", SDTIntBinOp, []>;
def PPCtoc_entry: SDNode<"PPCISD::TOC_ENTRY", SDTIntBinOp,
[SDNPMayLoad, SDNPMemOperand]>;
def PPCvmaddfp : SDNode<"PPCISD::VMADDFP", SDTFPTernaryOp, []>;
def PPCvnmsubfp : SDNode<"PPCISD::VNMSUBFP", SDTFPTernaryOp, []>;
def PPCppc32GOT : SDNode<"PPCISD::PPC32_GOT", SDTIntLeaf, []>;
def PPCaddisGotTprelHA : SDNode<"PPCISD::ADDIS_GOT_TPREL_HA", SDTIntBinOp>;
def PPCldGotTprelL : SDNode<"PPCISD::LD_GOT_TPREL_L", SDTIntBinOp,
[SDNPMayLoad]>;
def PPCaddTls : SDNode<"PPCISD::ADD_TLS", SDTIntBinOp, []>;
def PPCaddisTlsgdHA : SDNode<"PPCISD::ADDIS_TLSGD_HA", SDTIntBinOp>;
def PPCaddiTlsgdL : SDNode<"PPCISD::ADDI_TLSGD_L", SDTIntBinOp>;
def PPCgetTlsAddr : SDNode<"PPCISD::GET_TLS_ADDR", SDTIntBinOp>;
def PPCaddiTlsgdLAddr : SDNode<"PPCISD::ADDI_TLSGD_L_ADDR",
SDTypeProfile<1, 3, [
SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
SDTCisSameAs<0, 3>, SDTCisInt<0> ]>>;
def PPCaddisTlsldHA : SDNode<"PPCISD::ADDIS_TLSLD_HA", SDTIntBinOp>;
def PPCaddiTlsldL : SDNode<"PPCISD::ADDI_TLSLD_L", SDTIntBinOp>;
def PPCgetTlsldAddr : SDNode<"PPCISD::GET_TLSLD_ADDR", SDTIntBinOp>;
def PPCaddiTlsldLAddr : SDNode<"PPCISD::ADDI_TLSLD_L_ADDR",
SDTypeProfile<1, 3, [
SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
SDTCisSameAs<0, 3>, SDTCisInt<0> ]>>;
def PPCaddisDtprelHA : SDNode<"PPCISD::ADDIS_DTPREL_HA", SDTIntBinOp>;
def PPCaddiDtprelL : SDNode<"PPCISD::ADDI_DTPREL_L", SDTIntBinOp>;
def PPCvperm : SDNode<"PPCISD::VPERM", SDT_PPCvperm, []>;
def PPCxxsplt : SDNode<"PPCISD::XXSPLT", SDT_PPCVecSplat, []>;
def PPCvecinsert : SDNode<"PPCISD::VECINSERT", SDT_PPCVecInsert, []>;
def PPCxxreverse : SDNode<"PPCISD::XXREVERSE", SDT_PPCVecReverse, []>;
def PPCxxpermdi : SDNode<"PPCISD::XXPERMDI", SDT_PPCxxpermdi, []>;
def PPCvecshl : SDNode<"PPCISD::VECSHL", SDT_PPCVecShift, []>;
def PPCqvfperm : SDNode<"PPCISD::QVFPERM", SDT_PPCqvfperm, []>;
def PPCqvgpci : SDNode<"PPCISD::QVGPCI", SDT_PPCqvgpci, []>;
def PPCqvaligni : SDNode<"PPCISD::QVALIGNI", SDT_PPCqvaligni, []>;
def PPCqvesplati : SDNode<"PPCISD::QVESPLATI", SDT_PPCqvesplati, []>;
def PPCqbflt : SDNode<"PPCISD::QBFLT", SDT_PPCqbflt, []>;
def PPCqvlfsb : SDNode<"PPCISD::QVLFSb", SDT_PPCqvlfsb,
[SDNPHasChain, SDNPMayLoad]>;
def PPCcmpb : SDNode<"PPCISD::CMPB", SDTIntBinOp, []>;
// These nodes represent the 32-bit PPC shifts that operate on 6-bit shift
// amounts. These nodes are generated by the multi-precision shift code.
def PPCsrl : SDNode<"PPCISD::SRL" , SDTIntShiftOp>;
def PPCsra : SDNode<"PPCISD::SRA" , SDTIntShiftOp>;
def PPCshl : SDNode<"PPCISD::SHL" , SDTIntShiftOp>;
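// What the 6-bit amounts buy (an illustrative C++ model, not LLVM code):
// slw/srw interpret the low 6 bits of rB and yield 0 for amounts 32..63,
// which is exactly what the multi-precision (64-bit-on-32-bit) shift
// sequences exploit to avoid branching on large shift amounts.
#include <cassert>
#include <cstdint>

static uint32_t slwModel(uint32_t V, unsigned Amt) {
  Amt &= 63;                       // the hardware looks at 6 bits of rB
  return Amt >= 32 ? 0 : V << Amt;
}

int main() {
  assert(slwModel(1, 31) == 0x80000000u);
  assert(slwModel(1, 33) == 0);    // a plain C++ "<<" would be UB here
}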
def PPCextswsli : SDNode<"PPCISD::EXTSWSLI" , SDT_PPCextswsli>;
// Move 2 i64 values into a VSX register
def PPCbuild_fp128: SDNode<"PPCISD::BUILD_FP128",
SDTypeProfile<1, 2,
[SDTCisFP<0>, SDTCisSameSizeAs<1,2>,
SDTCisSameAs<1,2>]>,
[]>;
def PPCbuild_spe64: SDNode<"PPCISD::BUILD_SPE64",
SDTypeProfile<1, 2,
[SDTCisVT<0, f64>, SDTCisVT<1,i32>,
SDTCisVT<1,i32>]>,
[]>;
def PPCextract_spe : SDNode<"PPCISD::EXTRACT_SPE",
SDTypeProfile<1, 2,
[SDTCisVT<0, i32>, SDTCisVT<1, f64>,
SDTCisPtrTy<2>]>,
[]>;
// These are target-independent nodes, but have target-specific formats.
def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_PPCCallSeqStart,
[SDNPHasChain, SDNPOutGlue]>;
def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_PPCCallSeqEnd,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def SDT_PPCCall : SDTypeProfile<0, -1, [SDTCisInt<0>]>;
def PPCcall : SDNode<"PPCISD::CALL", SDT_PPCCall,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
SDNPVariadic]>;
def PPCcall_nop : SDNode<"PPCISD::CALL_NOP", SDT_PPCCall,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
SDNPVariadic]>;
def PPCmtctr : SDNode<"PPCISD::MTCTR", SDT_PPCCall,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def PPCbctrl : SDNode<"PPCISD::BCTRL", SDTNone,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
SDNPVariadic]>;
def PPCbctrl_load_toc : SDNode<"PPCISD::BCTRL_LOAD_TOC",
SDTypeProfile<0, 1, []>,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
SDNPVariadic]>;
def retflag : SDNode<"PPCISD::RET_FLAG", SDTNone,
[SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def PPCtc_return : SDNode<"PPCISD::TC_RETURN", SDT_PPCTC_ret,
[SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def PPCeh_sjlj_setjmp : SDNode<"PPCISD::EH_SJLJ_SETJMP",
SDTypeProfile<1, 1, [SDTCisInt<0>,
SDTCisPtrTy<1>]>,
[SDNPHasChain, SDNPSideEffect]>;
def PPCeh_sjlj_longjmp : SDNode<"PPCISD::EH_SJLJ_LONGJMP",
SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>,
[SDNPHasChain, SDNPSideEffect]>;
def SDT_PPCsc : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
def PPCsc : SDNode<"PPCISD::SC", SDT_PPCsc,
[SDNPHasChain, SDNPSideEffect]>;
def PPCclrbhrb : SDNode<"PPCISD::CLRBHRB", SDTNone,
[SDNPHasChain, SDNPSideEffect]>;
def PPCmfbhrbe : SDNode<"PPCISD::MFBHRBE", SDTIntBinOp, [SDNPHasChain]>;
def PPCrfebb : SDNode<"PPCISD::RFEBB", SDT_PPCsc,
[SDNPHasChain, SDNPSideEffect]>;
def PPCvcmp : SDNode<"PPCISD::VCMP" , SDT_PPCvcmp, []>;
def PPCvcmp_o : SDNode<"PPCISD::VCMPo", SDT_PPCvcmp, [SDNPOutGlue]>;
def PPCcondbranch : SDNode<"PPCISD::COND_BRANCH", SDT_PPCcondbr,
[SDNPHasChain, SDNPOptInGlue]>;
// PPC-specific atomic operations.
def PPCatomicCmpSwap_8 :
SDNode<"PPCISD::ATOMIC_CMP_SWAP_8", SDTAtomic3,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def PPCatomicCmpSwap_16 :
SDNode<"PPCISD::ATOMIC_CMP_SWAP_16", SDTAtomic3,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def PPClbrx : SDNode<"PPCISD::LBRX", SDT_PPClbrx,
[SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def PPCstbrx : SDNode<"PPCISD::STBRX", SDT_PPCstbrx,
[SDNPHasChain, SDNPMayStore]>;
// Instructions to set/unset CR bit 6 for SVR4 vararg calls
def PPCcr6set : SDNode<"PPCISD::CR6SET", SDTNone,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def PPCcr6unset : SDNode<"PPCISD::CR6UNSET", SDTNone,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
// Instructions to support dynamic alloca.
def SDTDynOp : SDTypeProfile<1, 2, []>;
def SDTDynAreaOp : SDTypeProfile<1, 1, []>;
def PPCdynalloc : SDNode<"PPCISD::DYNALLOC", SDTDynOp, [SDNPHasChain]>;
def PPCdynareaoffset : SDNode<"PPCISD::DYNAREAOFFSET", SDTDynAreaOp, [SDNPHasChain]>;
//===----------------------------------------------------------------------===//
// PowerPC specific transformation functions and pattern fragments.
//
def SHL32 : SDNodeXForm<imm, [{
// Transformation function: 31 - imm
return getI32Imm(31 - N->getZExtValue(), SDLoc(N));
}]>;
def SRL32 : SDNodeXForm<imm, [{
// Transformation function: 32 - imm
return N->getZExtValue() ? getI32Imm(32 - N->getZExtValue(), SDLoc(N))
: getI32Imm(0, SDLoc(N));
}]>;
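// Where 31 - imm and 32 - imm come from (an illustrative C++ model of
// rlwinm, not LLVM code): this file's rlwinm-based patterns select shl by
// n as "rlwinm SH=n, MB=0, ME=31-n" and srl by n as "rlwinm SH=32-n,
// MB=n, ME=31"; SRL32 special-cases imm=0 because SH is only a 5-bit field.
#include <cassert>
#include <cstdint>

static uint32_t rlwinmModel(uint32_t V, unsigned SH, unsigned MB,
                            unsigned ME) {
  uint32_t Rot = (V << SH) | (V >> ((32 - SH) & 31));
  // Keep big-endian bits MB..ME (assumes MB <= ME, true for both uses).
  uint32_t Mask = (0xFFFFFFFFu >> MB) & (0xFFFFFFFFu << (31 - ME));
  return Rot & Mask;
}

int main() {
  uint32_t X = 0xDEADBEEF;
  unsigned N = 5;
  assert(rlwinmModel(X, N, 0, 31 - N) == X << N);
  assert(rlwinmModel(X, (32 - N) & 31, N, 31) == X >> N);
}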
def LO16 : SDNodeXForm<imm, [{
// Transformation function: get the low 16 bits.
return getI32Imm((unsigned short)N->getZExtValue(), SDLoc(N));
}]>;
def HI16 : SDNodeXForm<imm, [{
// Transformation function: shift the immediate value down into the low bits.
return getI32Imm((unsigned)N->getZExtValue() >> 16, SDLoc(N));
}]>;
def HA16 : SDNodeXForm<imm, [{
// Transformation function: return the high 16 bits adjusted for the sign
// of the low 16 bits (the "high-adjusted" value consumed by addis).
long Val = N->getZExtValue();
return getI32Imm((Val - (signed short)Val) >> 16, SDLoc(N));
}]>;
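// Why HA16 subtracts the sign-extended low half (a worked C++ example,
// not LLVM code): consumers such as addi/lwz sign-extend their 16-bit
// displacement, so the value loaded by lis/addis must be "high-adjusted"
// so that (ha16(V) << 16) + (int16_t)V reconstructs V exactly.
#include <cassert>
#include <cstdint>

static int32_t lo16(int32_t V) { return (int16_t)V; }
static int32_t ha16(int32_t V) { return (V - (int16_t)V) >> 16; }

int main() {
  int32_t V = 0x12348000;                 // low half reads as negative
  assert(ha16(V) == 0x1235);              // plain high half, plus one
  assert((ha16(V) << 16) + lo16(V) == V);
}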
def MB : SDNodeXForm<imm, [{
// Transformation function: get the start bit of a mask
unsigned mb = 0, me;
(void)isRunOfOnes((unsigned)N->getZExtValue(), mb, me);
return getI32Imm(mb, SDLoc(N));
}]>;
def ME : SDNodeXForm<imm, [{
// Transformation function: get the end bit of a mask
unsigned mb, me = 0;
(void)isRunOfOnes((unsigned)N->getZExtValue(), mb, me);
return getI32Imm(me, SDLoc(N));
}]>;
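// What isRunOfOnes reports (a simplified C++ sketch, not LLVM's helper;
// the real function also accepts masks that wrap around the word): MB is
// the first and ME the last one-bit in big-endian numbering (bit 0 = MSB).
#include <cassert>
#include <cstdint>

static bool runOfOnes(uint32_t Val, unsigned &MB, unsigned &ME) {
  if (Val == 0)
    return false;
  unsigned LZ = __builtin_clz(Val); // GCC/Clang builtins
  unsigned TZ = __builtin_ctz(Val);
  if ((Val >> TZ) != (0xFFFFFFFFu >> (LZ + TZ)))
    return false;                   // more than one run of ones
  MB = LZ;
  ME = 31 - TZ;
  return true;
}

int main() {
  unsigned MB, ME;
  assert(runOfOnes(0x00FF0000, MB, ME) && MB == 8 && ME == 15);
  assert(!runOfOnes(0x00F0F000, MB, ME)); // two runs, rejected
}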
def maskimm32 : PatLeaf<(imm), [{
// maskimm32 predicate - True if the immediate is a run of ones.
unsigned mb, me;
if (N->getValueType(0) == MVT::i32)
return isRunOfOnes((unsigned)N->getZExtValue(), mb, me);
else
return false;
}]>;
def imm32SExt16 : Operand<i32>, ImmLeaf<i32, [{
// imm32SExt16 predicate - True if the i32 immediate fits in a 16-bit
// sign extended field. Used by instructions like 'addi'.
return (int32_t)Imm == (short)Imm;
}]>;
def imm64SExt16 : Operand<i64>, ImmLeaf<i64, [{
// imm64SExt16 predicate - True if the i64 immediate fits in a 16-bit
// sign extended field. Used by instructions like 'addi'.
return (int64_t)Imm == (short)Imm;
}]>;
def immZExt16 : PatLeaf<(imm), [{
// immZExt16 predicate - True if the immediate fits in a 16-bit zero extended
// field. Used by instructions like 'ori'.
return (uint64_t)N->getZExtValue() == (unsigned short)N->getZExtValue();
}], LO16>;
def immAnyExt8 : ImmLeaf<i32, [{ return isInt<8>(Imm) || isUInt<8>(Imm); }]>;
def immSExt5NonZero : ImmLeaf<i32, [{ return Imm && isInt<5>(Imm); }]>;
// imm16Shifted* - These match immediates where the low 16-bits are zero. There
// are two forms: imm16ShiftedSExt and imm16ShiftedZExt. These two forms are
// identical in 32-bit mode, but in 64-bit mode, they return true if the
// immediate fits into a sign/zero extended 32-bit immediate (with the low bits
// clear).
def imm16ShiftedZExt : PatLeaf<(imm), [{
// imm16ShiftedZExt predicate - True if only bits in the top 16-bits of the
// immediate are set. Used by instructions like 'xoris'.
return (N->getZExtValue() & ~uint64_t(0xFFFF0000)) == 0;
}], HI16>;
def imm16ShiftedSExt : PatLeaf<(imm), [{
// imm16ShiftedSExt predicate - True if only bits in the top 16-bits of the
// immediate are set. Used by instructions like 'addis'. Identical to
// imm16ShiftedZExt in 32-bit mode.
if (N->getZExtValue() & 0xFFFF) return false;
if (N->getValueType(0) == MVT::i32)
return true;
// For 64-bit, make sure the immediate sign-extends correctly.
return N->getZExtValue() == (uint64_t)(int)N->getZExtValue();
}], HI16>;
def imm64ZExt32 : Operand<i64>, ImmLeaf<i64, [{
// imm64ZExt32 predicate - True if the i64 immediate fits in a 32-bit
// zero extended field.
return isUInt<32>(Imm);
}]>;
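// The same fit tests as plain C++ (illustrative; the real checks live in
// the ImmLeaf bodies above): addi wants a sign-extended 16-bit field, ori
// a zero-extended one, and addis a 16-bit field shifted left by 16.
#include <cassert>
#include <cstdint>

static bool fitsSExt16(int64_t Imm) { return Imm == (int16_t)Imm; }
static bool fitsZExt16(uint64_t Imm) { return Imm == (uint16_t)Imm; }
static bool fitsShiftedSExt16(int64_t Imm) {
  return (Imm & 0xFFFF) == 0 && Imm == (int32_t)Imm;
}

int main() {
  assert(fitsSExt16(-1) && !fitsSExt16(40000)); // 40000 needs ori/oris
  assert(fitsZExt16(40000));
  assert(fitsShiftedSExt16(-65536));            // i.e. lis rD, -1
}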
// Some r+i load/store instructions (such as LD, STD, LDU, etc.) that require
// restricted memrix (4-aligned) constants are alignment sensitive. If these
// offsets are hidden behind TOC entries then the values of the lower-order
// bits cannot be checked directly. As a result, we need to also incorporate
// an alignment check into the relevant patterns.
def aligned4load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
return cast<LoadSDNode>(N)->getAlignment() >= 4;
}]>;
def aligned4store : PatFrag<(ops node:$val, node:$ptr),
(store node:$val, node:$ptr), [{
return cast<StoreSDNode>(N)->getAlignment() >= 4;
}]>;
def aligned4sextloadi32 : PatFrag<(ops node:$ptr), (sextloadi32 node:$ptr), [{
return cast<LoadSDNode>(N)->getAlignment() >= 4;
}]>;
def aligned4pre_store : PatFrag<
(ops node:$val, node:$base, node:$offset),
(pre_store node:$val, node:$base, node:$offset), [{
return cast<StoreSDNode>(N)->getAlignment() >= 4;
}]>;
def unaligned4load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
return cast<LoadSDNode>(N)->getAlignment() < 4;
}]>;
def unaligned4store : PatFrag<(ops node:$val, node:$ptr),
(store node:$val, node:$ptr), [{
return cast<StoreSDNode>(N)->getAlignment() < 4;
}]>;
def unaligned4sextloadi32 : PatFrag<(ops node:$ptr), (sextloadi32 node:$ptr), [{
return cast<LoadSDNode>(N)->getAlignment() < 4;
}]>;
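// Why alignment stands in for the displacement's low bits (an illustrative
// C++ sketch, not LLVM's logic): DS-form instructions such as LWA scale a
// 14-bit displacement by 4, so its low two bits must be zero; for offsets
// hidden behind TOC entries we cannot inspect those bits, so alignment >= 4
// is the proxy, and anything weaker falls back to the X-form (e.g. LWAX).
#include <cassert>

static bool canUseDSForm(unsigned AlignInBytes) {
  return AlignInBytes >= 4; // low 2 bits of every reachable offset are 0
}

int main() {
  assert(canUseDSForm(8));  // e.g. lwa rD, disp(rA)
  assert(!canUseDSForm(2)); // select lwax rD, rA, rB instead
}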
// This is a somewhat weaker condition than actually checking for 16-byte
// alignment. It is simply checking that the displacement can be represented
// as an immediate that is a multiple of 16 (i.e. the requirements for DQ-Form
// instructions).
def quadwOffsetLoad : PatFrag<(ops node:$ptr), (load node:$ptr), [{
return isOffsetMultipleOf(N, 16);
}]>;
def quadwOffsetStore : PatFrag<(ops node:$val, node:$ptr),
(store node:$val, node:$ptr), [{
return isOffsetMultipleOf(N, 16);
}]>;
def nonQuadwOffsetLoad : PatFrag<(ops node:$ptr), (load node:$ptr), [{
return !isOffsetMultipleOf(N, 16);
}]>;
def nonQuadwOffsetStore : PatFrag<(ops node:$val, node:$ptr),
(store node:$val, node:$ptr), [{
return !isOffsetMultipleOf(N, 16);
}]>;
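// The DQ-form counterpart (illustrative C++, not LLVM's isOffsetMultipleOf):
// a 12-bit displacement scaled by 16, so the test is representability of
// the offset, not true 16-byte alignment of the address.
#include <cassert>
#include <cstdint>

static bool fitsDQForm(int64_t Offset) {
  return (Offset & 15) == 0 && Offset >= -32768 && Offset <= 32752;
}

int main() {
  assert(fitsDQForm(48) && fitsDQForm(-16));
  assert(!fitsDQForm(40)); // not a multiple of 16
}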
// PatFrag for a binary operation whose operands are both non-constant.
class BinOpWithoutSImm16Operand<SDNode opcode> :
PatFrag<(ops node:$left, node:$right), (opcode node:$left, node:$right), [{
int16_t Imm;
return !isIntS16Immediate(N->getOperand(0), Imm)
&& !isIntS16Immediate(N->getOperand(1), Imm);
}]>;
def add_without_simm16 : BinOpWithoutSImm16Operand<add>;
def mul_without_simm16 : BinOpWithoutSImm16Operand<mul>;
//===----------------------------------------------------------------------===//
// PowerPC Flag Definitions.
class isPPC64 { bit PPC64 = 1; }
class isDOT { bit RC = 1; }
class RegConstraint<string C> {
string Constraints = C;
}
class NoEncode<string E> {
string DisableEncoding = E;
}
//===----------------------------------------------------------------------===//
// PowerPC Operand Definitions.
// In the default PowerPC assembler syntax, registers are specified simply
// by number, so they cannot be distinguished from immediate values (without
// looking at the opcode). This means that the default operand matching logic
// for the asm parser does not work, and we need to specify custom matchers.
// Since those can only be specified with RegisterOperand classes and not
// directly on the RegisterClass, all instruction patterns used by the asm
// parser need to use a RegisterOperand (instead of a RegisterClass) for
// all their register operands.
// For this purpose, we define one RegisterOperand for each RegisterClass,
// using the same name as the class, just in lower case.
def PPCRegGPRCAsmOperand : AsmOperandClass {
let Name = "RegGPRC"; let PredicateMethod = "isRegNumber";
}
def gprc : RegisterOperand<GPRC> {
let ParserMatchClass = PPCRegGPRCAsmOperand;
}
def PPCRegG8RCAsmOperand : AsmOperandClass {
let Name = "RegG8RC"; let PredicateMethod = "isRegNumber";
}
def g8rc : RegisterOperand<G8RC> {
let ParserMatchClass = PPCRegG8RCAsmOperand;
}
def PPCRegGPRCNoR0AsmOperand : AsmOperandClass {
let Name = "RegGPRCNoR0"; let PredicateMethod = "isRegNumber";
}
def gprc_nor0 : RegisterOperand<GPRC_NOR0> {
let ParserMatchClass = PPCRegGPRCNoR0AsmOperand;
}
def PPCRegG8RCNoX0AsmOperand : AsmOperandClass {
let Name = "RegG8RCNoX0"; let PredicateMethod = "isRegNumber";
}
def g8rc_nox0 : RegisterOperand<G8RC_NOX0> {
let ParserMatchClass = PPCRegG8RCNoX0AsmOperand;
}
def PPCRegF8RCAsmOperand : AsmOperandClass {
let Name = "RegF8RC"; let PredicateMethod = "isRegNumber";
}
def f8rc : RegisterOperand<F8RC> {
let ParserMatchClass = PPCRegF8RCAsmOperand;
}
def PPCRegF4RCAsmOperand : AsmOperandClass {
let Name = "RegF4RC"; let PredicateMethod = "isRegNumber";
}
def f4rc : RegisterOperand<F4RC> {
let ParserMatchClass = PPCRegF4RCAsmOperand;
}
def PPCRegVRRCAsmOperand : AsmOperandClass {
let Name = "RegVRRC"; let PredicateMethod = "isRegNumber";
}
def vrrc : RegisterOperand<VRRC> {
let ParserMatchClass = PPCRegVRRCAsmOperand;
}
def PPCRegVFRCAsmOperand : AsmOperandClass {
let Name = "RegVFRC"; let PredicateMethod = "isRegNumber";
}
def vfrc : RegisterOperand<VFRC> {
let ParserMatchClass = PPCRegVFRCAsmOperand;
}
def PPCRegCRBITRCAsmOperand : AsmOperandClass {
let Name = "RegCRBITRC"; let PredicateMethod = "isCRBitNumber";
}
def crbitrc : RegisterOperand<CRBITRC> {
let ParserMatchClass = PPCRegCRBITRCAsmOperand;
}
def PPCRegCRRCAsmOperand : AsmOperandClass {
let Name = "RegCRRC"; let PredicateMethod = "isCCRegNumber";
}
def crrc : RegisterOperand<CRRC> {
let ParserMatchClass = PPCRegCRRCAsmOperand;
}
def PPCRegSPERCAsmOperand : AsmOperandClass {
let Name = "RegSPERC"; let PredicateMethod = "isRegNumber";
}
def sperc : RegisterOperand<SPERC> {
let ParserMatchClass = PPCRegSPERCAsmOperand;
}
def PPCRegSPE4RCAsmOperand : AsmOperandClass {
let Name = "RegSPE4RC"; let PredicateMethod = "isRegNumber";
}
def spe4rc : RegisterOperand<SPE4RC> {
let ParserMatchClass = PPCRegSPE4RCAsmOperand;
}
def PPCU1ImmAsmOperand : AsmOperandClass {
let Name = "U1Imm"; let PredicateMethod = "isU1Imm";
let RenderMethod = "addImmOperands";
}
def u1imm : Operand<i32> {
let PrintMethod = "printU1ImmOperand";
let ParserMatchClass = PPCU1ImmAsmOperand;
}
def PPCU2ImmAsmOperand : AsmOperandClass {
let Name = "U2Imm"; let PredicateMethod = "isU2Imm";
let RenderMethod = "addImmOperands";
}
def u2imm : Operand<i32> {
let PrintMethod = "printU2ImmOperand";
let ParserMatchClass = PPCU2ImmAsmOperand;
}
def PPCATBitsAsHintAsmOperand : AsmOperandClass {
let Name = "ATBitsAsHint"; let PredicateMethod = "isATBitsAsHint";
let RenderMethod = "addImmOperands"; // Irrelevant, predicate always fails.
}
def atimm : Operand<i32> {
let PrintMethod = "printATBitsAsHint";
let ParserMatchClass = PPCATBitsAsHintAsmOperand;
}
def PPCU3ImmAsmOperand : AsmOperandClass {
let Name = "U3Imm"; let PredicateMethod = "isU3Imm";
let RenderMethod = "addImmOperands";
}
def u3imm : Operand<i32> {
let PrintMethod = "printU3ImmOperand";
let ParserMatchClass = PPCU3ImmAsmOperand;
}
def PPCU4ImmAsmOperand : AsmOperandClass {
let Name = "U4Imm"; let PredicateMethod = "isU4Imm";
let RenderMethod = "addImmOperands";
}
def u4imm : Operand<i32> {
let PrintMethod = "printU4ImmOperand";
let ParserMatchClass = PPCU4ImmAsmOperand;
}
def PPCS5ImmAsmOperand : AsmOperandClass {
let Name = "S5Imm"; let PredicateMethod = "isS5Imm";
let RenderMethod = "addImmOperands";
}
def s5imm : Operand<i32> {
let PrintMethod = "printS5ImmOperand";
let ParserMatchClass = PPCS5ImmAsmOperand;
let DecoderMethod = "decodeSImmOperand<5>";
}
def PPCU5ImmAsmOperand : AsmOperandClass {
let Name = "U5Imm"; let PredicateMethod = "isU5Imm";
let RenderMethod = "addImmOperands";
}
def u5imm : Operand<i32> {
let PrintMethod = "printU5ImmOperand";
let ParserMatchClass = PPCU5ImmAsmOperand;
let DecoderMethod = "decodeUImmOperand<5>";
}
def PPCU6ImmAsmOperand : AsmOperandClass {
let Name = "U6Imm"; let PredicateMethod = "isU6Imm";
let RenderMethod = "addImmOperands";
}
def u6imm : Operand<i32> {
let PrintMethod = "printU6ImmOperand";
let ParserMatchClass = PPCU6ImmAsmOperand;
let DecoderMethod = "decodeUImmOperand<6>";
}
def PPCU7ImmAsmOperand : AsmOperandClass {
let Name = "U7Imm"; let PredicateMethod = "isU7Imm";
let RenderMethod = "addImmOperands";
}
def u7imm : Operand<i32> {
let PrintMethod = "printU7ImmOperand";
let ParserMatchClass = PPCU7ImmAsmOperand;
let DecoderMethod = "decodeUImmOperand<7>";
}
def PPCU8ImmAsmOperand : AsmOperandClass {
let Name = "U8Imm"; let PredicateMethod = "isU8Imm";
let RenderMethod = "addImmOperands";
}
def u8imm : Operand<i32> {
let PrintMethod = "printU8ImmOperand";
let ParserMatchClass = PPCU8ImmAsmOperand;
let DecoderMethod = "decodeUImmOperand<8>";
}
def PPCU10ImmAsmOperand : AsmOperandClass {
let Name = "U10Imm"; let PredicateMethod = "isU10Imm";
let RenderMethod = "addImmOperands";
}
def u10imm : Operand<i32> {
let PrintMethod = "printU10ImmOperand";
let ParserMatchClass = PPCU10ImmAsmOperand;
let DecoderMethod = "decodeUImmOperand<10>";
}
def PPCU12ImmAsmOperand : AsmOperandClass {
let Name = "U12Imm"; let PredicateMethod = "isU12Imm";
let RenderMethod = "addImmOperands";
}
def u12imm : Operand<i32> {
let PrintMethod = "printU12ImmOperand";
let ParserMatchClass = PPCU12ImmAsmOperand;
let DecoderMethod = "decodeUImmOperand<12>";
}
def PPCS16ImmAsmOperand : AsmOperandClass {
let Name = "S16Imm"; let PredicateMethod = "isS16Imm";
let RenderMethod = "addS16ImmOperands";
}
def s16imm : Operand<i32> {
let PrintMethod = "printS16ImmOperand";
let EncoderMethod = "getImm16Encoding";
let ParserMatchClass = PPCS16ImmAsmOperand;
let DecoderMethod = "decodeSImmOperand<16>";
}
def PPCU16ImmAsmOperand : AsmOperandClass {
let Name = "U16Imm"; let PredicateMethod = "isU16Imm";
let RenderMethod = "addU16ImmOperands";
}
def u16imm : Operand<i32> {
let PrintMethod = "printU16ImmOperand";
let EncoderMethod = "getImm16Encoding";
let ParserMatchClass = PPCU16ImmAsmOperand;
let DecoderMethod = "decodeUImmOperand<16>";
}
def PPCS17ImmAsmOperand : AsmOperandClass {
let Name = "S17Imm"; let PredicateMethod = "isS17Imm";
let RenderMethod = "addS16ImmOperands";
}
def s17imm : Operand<i32> {
// This operand type is used for addis/lis to allow the assembler parser
// to accept immediates in the range -65536..65535 for compatibility with
// the GNU assembler. The operand is treated as 16-bit otherwise.
let PrintMethod = "printS16ImmOperand";
let EncoderMethod = "getImm16Encoding";
let ParserMatchClass = PPCS17ImmAsmOperand;
let DecoderMethod = "decodeSImmOperand<16>";
}
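// The accepted range as a plain predicate (illustrative C++; the parser's
// actual check is the isS17Imm method named above):
#include <cassert>

static bool inS17Range(long Imm) { return Imm >= -65536 && Imm <= 65535; }

int main() {
  assert(inS17Range(-65536) && inS17Range(65535) && !inS17Range(65536));
}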
def fpimm0 : PatLeaf<(fpimm), [{ return N->isExactlyValue(+0.0); }]>;
def PPCDirectBrAsmOperand : AsmOperandClass {
let Name = "DirectBr"; let PredicateMethod = "isDirectBr";
let RenderMethod = "addBranchTargetOperands";
}
def directbrtarget : Operand<OtherVT> {
let PrintMethod = "printBranchOperand";
let EncoderMethod = "getDirectBrEncoding";
let ParserMatchClass = PPCDirectBrAsmOperand;
}
def absdirectbrtarget : Operand<OtherVT> {
let PrintMethod = "printAbsBranchOperand";
let EncoderMethod = "getAbsDirectBrEncoding";
let ParserMatchClass = PPCDirectBrAsmOperand;
}
def PPCCondBrAsmOperand : AsmOperandClass {
let Name = "CondBr"; let PredicateMethod = "isCondBr";
let RenderMethod = "addBranchTargetOperands";
}
def condbrtarget : Operand<OtherVT> {
let PrintMethod = "printBranchOperand";
let EncoderMethod = "getCondBrEncoding";
let ParserMatchClass = PPCCondBrAsmOperand;
}
def abscondbrtarget : Operand<OtherVT> {
let PrintMethod = "printAbsBranchOperand";
let EncoderMethod = "getAbsCondBrEncoding";
let ParserMatchClass = PPCCondBrAsmOperand;
}
def calltarget : Operand<iPTR> {
let PrintMethod = "printBranchOperand";
let EncoderMethod = "getDirectBrEncoding";
let DecoderMethod = "DecodePCRel24BranchTarget";
let ParserMatchClass = PPCDirectBrAsmOperand;
let OperandType = "OPERAND_PCREL";
}
def abscalltarget : Operand<iPTR> {
let PrintMethod = "printAbsBranchOperand";
let EncoderMethod = "getAbsDirectBrEncoding";
let ParserMatchClass = PPCDirectBrAsmOperand;
}
def PPCCRBitMaskOperand : AsmOperandClass {
let Name = "CRBitMask"; let PredicateMethod = "isCRBitMask";
}
def crbitm: Operand<i8> {
let PrintMethod = "printcrbitm";
let EncoderMethod = "get_crbitm_encoding";
let DecoderMethod = "decodeCRBitMOperand";
let ParserMatchClass = PPCCRBitMaskOperand;
}
// Address operands
// A version of ptr_rc which excludes R0 (or X0 in 64-bit mode).
def PPCRegGxRCNoR0Operand : AsmOperandClass {
let Name = "RegGxRCNoR0"; let PredicateMethod = "isRegNumber";
}
def ptr_rc_nor0 : Operand<iPTR>, PointerLikeRegClass<1> {
let ParserMatchClass = PPCRegGxRCNoR0Operand;
}
// A version of ptr_rc usable with the asm parser.
def PPCRegGxRCOperand : AsmOperandClass {
let Name = "RegGxRC"; let PredicateMethod = "isRegNumber";
}
def ptr_rc_idx : Operand<iPTR>, PointerLikeRegClass<0> {
let ParserMatchClass = PPCRegGxRCOperand;
}
def PPCDispRIOperand : AsmOperandClass {
let Name = "DispRI"; let PredicateMethod = "isS16Imm";
let RenderMethod = "addS16ImmOperands";
}
def dispRI : Operand<iPTR> {
let ParserMatchClass = PPCDispRIOperand;
}
def PPCDispRIXOperand : AsmOperandClass {
let Name = "DispRIX"; let PredicateMethod = "isS16ImmX4";
let RenderMethod = "addImmOperands";
}
def dispRIX : Operand<iPTR> {
let ParserMatchClass = PPCDispRIXOperand;
}
def PPCDispRIX16Operand : AsmOperandClass {
let Name = "DispRIX16"; let PredicateMethod = "isS16ImmX16";
let RenderMethod = "addImmOperands";
}
def dispRIX16 : Operand<iPTR> {
let ParserMatchClass = PPCDispRIX16Operand;
}
def PPCDispSPE8Operand : AsmOperandClass {
let Name = "DispSPE8"; let PredicateMethod = "isU8ImmX8";
let RenderMethod = "addImmOperands";
}
def dispSPE8 : Operand<iPTR> {
let ParserMatchClass = PPCDispSPE8Operand;
}
def PPCDispSPE4Operand : AsmOperandClass {
let Name = "DispSPE4"; let PredicateMethod = "isU7ImmX4";
let RenderMethod = "addImmOperands";
}
def dispSPE4 : Operand<iPTR> {
let ParserMatchClass = PPCDispSPE4Operand;
}
def PPCDispSPE2Operand : AsmOperandClass {
let Name = "DispSPE2"; let PredicateMethod = "isU6ImmX2";
let RenderMethod = "addImmOperands";
}
def dispSPE2 : Operand<iPTR> {
let ParserMatchClass = PPCDispSPE2Operand;
}
def memri : Operand<iPTR> {
let PrintMethod = "printMemRegImm";
let MIOperandInfo = (ops dispRI:$imm, ptr_rc_nor0:$reg);
let EncoderMethod = "getMemRIEncoding";
let DecoderMethod = "decodeMemRIOperands";
}
def memrr : Operand<iPTR> {
let PrintMethod = "printMemRegReg";
let MIOperandInfo = (ops ptr_rc_nor0:$ptrreg, ptr_rc_idx:$offreg);
}
def memrix : Operand<iPTR> { // memri where the imm is 4-aligned.
let PrintMethod = "printMemRegImm";
let MIOperandInfo = (ops dispRIX:$imm, ptr_rc_nor0:$reg);
let EncoderMethod = "getMemRIXEncoding";
let DecoderMethod = "decodeMemRIXOperands";
}
def memrix16 : Operand<iPTR> { // memri, imm is 16-aligned, 12-bit, Inst{16:27}
let PrintMethod = "printMemRegImm";
let MIOperandInfo = (ops dispRIX16:$imm, ptr_rc_nor0:$reg);
let EncoderMethod = "getMemRIX16Encoding";
let DecoderMethod = "decodeMemRIX16Operands";
}
def spe8dis : Operand<iPTR> { // SPE displacement where the imm is 8-aligned.
let PrintMethod = "printMemRegImm";
let MIOperandInfo = (ops dispSPE8:$imm, ptr_rc_nor0:$reg);
let EncoderMethod = "getSPE8DisEncoding";
let DecoderMethod = "decodeSPE8Operands";
}
def spe4dis : Operand<iPTR> { // SPE displacement where the imm is 4-aligned.
let PrintMethod = "printMemRegImm";
let MIOperandInfo = (ops dispSPE4:$imm, ptr_rc_nor0:$reg);
let EncoderMethod = "getSPE4DisEncoding";
let DecoderMethod = "decodeSPE4Operands";
}
def spe2dis : Operand<iPTR> { // SPE displacement where the imm is 2-aligned.
let PrintMethod = "printMemRegImm";
let MIOperandInfo = (ops dispSPE2:$imm, ptr_rc_nor0:$reg);
let EncoderMethod = "getSPE2DisEncoding";
let DecoderMethod = "decodeSPE2Operands";
}
// A single-register address. This is used with the SjLj
// pseudo-instructions which translate to LD/LWZ. These instructions require
// G8RC_NOX0 registers.
def memr : Operand<iPTR> {
let MIOperandInfo = (ops ptr_rc_nor0:$ptrreg);
}
def PPCTLSRegOperand : AsmOperandClass {
let Name = "TLSReg"; let PredicateMethod = "isTLSReg";
let RenderMethod = "addTLSRegOperands";
}
def tlsreg32 : Operand<i32> {
let EncoderMethod = "getTLSRegEncoding";
let ParserMatchClass = PPCTLSRegOperand;
}
def tlsgd32 : Operand<i32> {}
def tlscall32 : Operand<i32> {
let PrintMethod = "printTLSCall";
let MIOperandInfo = (ops calltarget:$func, tlsgd32:$sym);
let EncoderMethod = "getTLSCallEncoding";
}
// PowerPC Predicate operand.
def pred : Operand<OtherVT> {
let PrintMethod = "printPredicateOperand";
let MIOperandInfo = (ops i32imm:$bibo, crrc:$reg);
}
// Define PowerPC-specific addressing modes.
// d-form
def iaddr : ComplexPattern<iPTR, 2, "SelectAddrImm", [], []>; // "stb"
// ds-form
def iaddrX4 : ComplexPattern<iPTR, 2, "SelectAddrImmX4", [], []>; // "std"
// dq-form
def iaddrX16 : ComplexPattern<iPTR, 2, "SelectAddrImmX16", [], []>; // "stxv"
// The forms below are all x-form addressing modes; we use three different
// ones so we can make an accurate check for x-form instructions in ISEL.
// x-form addressing mode whose associated displacement form is D.
def xaddr : ComplexPattern<iPTR, 2, "SelectAddrIdx", [], []>; // "stbx"
// x-form addressing mode whose associated displacement form is DS.
def xaddrX4 : ComplexPattern<iPTR, 2, "SelectAddrIdxX4", [], []>; // "stdx"
// x-form addressing mode whose associated displacement form is DQ.
def xaddrX16 : ComplexPattern<iPTR, 2, "SelectAddrIdxX16", [], []>; // "stxvx"
def xoaddr : ComplexPattern<iPTR, 2, "SelectAddrIdxOnly",[], []>;
// The address in a single register. This is used with the SjLj
// pseudo-instructions.
def addr : ComplexPattern<iPTR, 1, "SelectAddr",[], []>;
/// This is just the offset part of iaddr, used for preinc.
def iaddroff : ComplexPattern<iPTR, 1, "SelectAddrImmOffs", [], []>;
//===----------------------------------------------------------------------===//
// PowerPC Instruction Predicate Definitions.
def In32BitMode : Predicate<"!PPCSubTarget->isPPC64()">;
def In64BitMode : Predicate<"PPCSubTarget->isPPC64()">;
def IsBookE : Predicate<"PPCSubTarget->isBookE()">;
def IsNotBookE : Predicate<"!PPCSubTarget->isBookE()">;
def HasOnlyMSYNC : Predicate<"PPCSubTarget->hasOnlyMSYNC()">;
def HasSYNC : Predicate<"!PPCSubTarget->hasOnlyMSYNC()">;
def IsPPC4xx : Predicate<"PPCSubTarget->isPPC4xx()">;
def IsPPC6xx : Predicate<"PPCSubTarget->isPPC6xx()">;
def IsE500 : Predicate<"PPCSubTarget->isE500()">;
def HasSPE : Predicate<"PPCSubTarget->hasSPE()">;
def HasICBT : Predicate<"PPCSubTarget->hasICBT()">;
def HasPartwordAtomics : Predicate<"PPCSubTarget->hasPartwordAtomics()">;
def NoNaNsFPMath : Predicate<"TM.Options.NoNaNsFPMath">;
def NaNsFPMath : Predicate<"!TM.Options.NoNaNsFPMath">;
def HasBPERMD : Predicate<"PPCSubTarget->hasBPERMD()">;
def HasExtDiv : Predicate<"PPCSubTarget->hasExtDiv()">;
def IsISA3_0 : Predicate<"PPCSubTarget->isISA3_0()">;
def HasFPU : Predicate<"PPCSubTarget->hasFPU()">;
//===----------------------------------------------------------------------===//
// PowerPC Multiclass Definitions.
multiclass XForm_6r<bits<6> opcode, bits<10> xo, dag OOL, dag IOL,
string asmbase, string asmstr, InstrItinClass itin,
list<dag> pattern> {
let BaseName = asmbase in {
def NAME : XForm_6<opcode, xo, OOL, IOL,
!strconcat(asmbase, !strconcat(" ", asmstr)), itin,
pattern>, RecFormRel;
let Defs = [CR0] in
def o : XForm_6<opcode, xo, OOL, IOL,
!strconcat(asmbase, !strconcat(". ", asmstr)), itin,
[]>, isDOT, RecFormRel;
}
}
multiclass XForm_6rc<bits<6> opcode, bits<10> xo, dag OOL, dag IOL,
string asmbase, string asmstr, InstrItinClass itin,
list<dag> pattern> {
let BaseName = asmbase in {
let Defs = [CARRY] in
def NAME : XForm_6<opcode, xo, OOL, IOL,
!strconcat(asmbase, !strconcat(" ", asmstr)), itin,
pattern>, RecFormRel;
let Defs = [CARRY, CR0] in
def o : XForm_6<opcode, xo, OOL, IOL,
!strconcat(asmbase, !strconcat(". ", asmstr)), itin,
[]>, isDOT, RecFormRel;
}
}
multiclass XForm_10rc<bits<6> opcode, bits<10> xo, dag OOL, dag IOL,
string asmbase, string asmstr, InstrItinClass itin,
list<dag> pattern> {
let BaseName = asmbase in {
let Defs = [CARRY] in
def NAME : XForm_10<opcode, xo, OOL, IOL,
!strconcat(asmbase, !strconcat(" ", asmstr)), itin,
pattern>, RecFormRel;
let Defs = [CARRY, CR0] in
def o : XForm_10<opcode, xo, OOL, IOL,
!strconcat(asmbase, !strconcat(". ", asmstr)), itin,
[]>, isDOT, RecFormRel;
}
}
multiclass XForm_11r<bits<6> opcode, bits<10> xo, dag OOL, dag IOL,
string asmbase, string asmstr, InstrItinClass itin,
list<dag> pattern> {
let BaseName = asmbase in {
def NAME : XForm_11<opcode, xo, OOL, IOL,
!strconcat(asmbase, !strconcat(" ", asmstr)), itin,
pattern>, RecFormRel;
let Defs = [CR0] in
def o : XForm_11<opcode, xo, OOL, IOL,
!strconcat(asmbase, !strconcat(". ", asmstr)), itin,
[]>, isDOT, RecFormRel;
}
}
multiclass XOForm_1r<bits<6> opcode, bits<9> xo, bit oe, dag OOL, dag IOL,
string asmbase, string asmstr, InstrItinClass itin,
list<dag> pattern> {
let BaseName = asmbase in {
def NAME : XOForm_1<opcode, xo, oe, OOL, IOL,
!strconcat(asmbase, !strconcat(" ", asmstr)), itin,
pattern>, RecFormRel;
let Defs = [CR0] in
def o : XOForm_1<opcode, xo, oe, OOL, IOL,
!strconcat(asmbase, !strconcat(". ", asmstr)), itin,
[]>, isDOT, RecFormRel;
}
}
+// Multiclass for instructions that have a record-overflow form as well
+// as a record form but no carry (e.g. mulld, mulldo, subf, subfo).
+multiclass XOForm_1rx<bits<6> opcode, bits<9> xo, bit oe, dag OOL, dag IOL,
+ string asmbase, string asmstr, InstrItinClass itin,
+ list<dag> pattern> {
+ let BaseName = asmbase in {
+ def NAME : XOForm_1<opcode, xo, 0, OOL, IOL,
+ !strconcat(asmbase, !strconcat(" ", asmstr)), itin,
+ pattern>, RecFormRel;
+ let Defs = [CR0] in
+ def o : XOForm_1<opcode, xo, 0, OOL, IOL,
+ !strconcat(asmbase, !strconcat(". ", asmstr)), itin,
+ []>, isDOT, RecFormRel;
+ }
+ let BaseName = !strconcat(asmbase, "O") in {
+ let Defs = [XER] in
+ def O : XOForm_1<opcode, xo, 1, OOL, IOL,
+ !strconcat(asmbase, !strconcat("o ", asmstr)), itin,
+ []>, RecFormRel;
+ let Defs = [XER, CR0] in
+ def Oo : XOForm_1<opcode, xo, 1, OOL, IOL,
+ !strconcat(asmbase, !strconcat("o. ", asmstr)), itin,
+ []>, isDOT, RecFormRel;
+ }
+}
+
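// What the new O/Oo variants record (an illustrative C++ sketch, not LLVM
// code): with oe=1 the hardware sets XER[OV] (and the sticky XER[SO]) on
// signed overflow, hence the extra Defs = [XER]. For subf, which computes
// rB - rA, the recorded condition is:
#include <cassert>
#include <cstdint>

static bool subfOverflows(int64_t RA, int64_t RB) {
  int64_t Res;
  return __builtin_sub_overflow(RB, RA, &Res); // GCC/Clang builtin
}

int main() {
  assert(subfOverflows(1, INT64_MIN)); // INT64_MIN - 1 wraps around
  assert(!subfOverflows(1, 0));        // 0 - 1 == -1, no overflow
}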
// Multiclass for instructions for which the non-record form is not cracked
// and the record form is cracked (e.g. divw, mullw, etc.)
multiclass XOForm_1rcr<bits<6> opcode, bits<9> xo, bit oe, dag OOL, dag IOL,
string asmbase, string asmstr, InstrItinClass itin,
list<dag> pattern> {
let BaseName = asmbase in {
def NAME : XOForm_1<opcode, xo, oe, OOL, IOL,
!strconcat(asmbase, !strconcat(" ", asmstr)), itin,
pattern>, RecFormRel;
let Defs = [CR0] in
def o : XOForm_1<opcode, xo, oe, OOL, IOL,
!strconcat(asmbase, !strconcat(". ", asmstr)), itin,
[]>, isDOT, RecFormRel, PPC970_DGroup_First,
PPC970_DGroup_Cracked;
}
+ let BaseName = !strconcat(asmbase, "O") in {
+ let Defs = [XER] in
+ def O : XOForm_1<opcode, xo, 1, OOL, IOL,
+ !strconcat(asmbase, !strconcat("o ", asmstr)), itin,
+ []>, RecFormRel;
+ let Defs = [XER, CR0] in
+ def Oo : XOForm_1<opcode, xo, 1, OOL, IOL,
+ !strconcat(asmbase, !strconcat("o. ", asmstr)), itin,
+ []>, isDOT, RecFormRel;
+ }
}
multiclass XOForm_1rc<bits<6> opcode, bits<9> xo, bit oe, dag OOL, dag IOL,
string asmbase, string asmstr, InstrItinClass itin,
list<dag> pattern> {
let BaseName = asmbase in {
let Defs = [CARRY] in
def NAME : XOForm_1<opcode, xo, oe, OOL, IOL,
!strconcat(asmbase, !strconcat(" ", asmstr)), itin,
pattern>, RecFormRel;
let Defs = [CARRY, CR0] in
def o : XOForm_1<opcode, xo, oe, OOL, IOL,
!strconcat(asmbase, !strconcat(". ", asmstr)), itin,
[]>, isDOT, RecFormRel;
}
+ let BaseName = !strconcat(asmbase, "O") in {
+ let Defs = [CARRY, XER] in
+ def O : XOForm_1<opcode, xo, 1, OOL, IOL,
+ !strconcat(asmbase, !strconcat("o ", asmstr)), itin,
+ []>, RecFormRel;
+ let Defs = [CARRY, XER, CR0] in
+ def Oo : XOForm_1<opcode, xo, 1, OOL, IOL,
+ !strconcat(asmbase, !strconcat("o. ", asmstr)), itin,
+ []>, isDOT, RecFormRel;
+ }
}
multiclass XOForm_3r<bits<6> opcode, bits<9> xo, bit oe, dag OOL, dag IOL,
string asmbase, string asmstr, InstrItinClass itin,
list<dag> pattern> {
let BaseName = asmbase in {
def NAME : XOForm_3<opcode, xo, oe, OOL, IOL,
!strconcat(asmbase, !strconcat(" ", asmstr)), itin,
pattern>, RecFormRel;
let Defs = [CR0] in
def o : XOForm_3<opcode, xo, oe, OOL, IOL,
!strconcat(asmbase, !strconcat(". ", asmstr)), itin,
[]>, isDOT, RecFormRel;
}
+ let BaseName = !strconcat(asmbase, "O") in {
+ let Defs = [XER] in
+ def O : XOForm_3<opcode, xo, 1, OOL, IOL,
+ !strconcat(asmbase, !strconcat("o ", asmstr)), itin,
+ []>, RecFormRel;
+ let Defs = [XER, CR0] in
+ def Oo : XOForm_3<opcode, xo, 1, OOL, IOL,
+ !strconcat(asmbase, !strconcat("o. ", asmstr)), itin,
+ []>, isDOT, RecFormRel;
+ }
}
multiclass XOForm_3rc<bits<6> opcode, bits<9> xo, bit oe, dag OOL, dag IOL,
string asmbase, string asmstr, InstrItinClass itin,
list<dag> pattern> {
let BaseName = asmbase in {
let Defs = [CARRY] in
def NAME : XOForm_3<opcode, xo, oe, OOL, IOL,
!strconcat(asmbase, !strconcat(" ", asmstr)), itin,
pattern>, RecFormRel;
let Defs = [CARRY, CR0] in
def o : XOForm_3<opcode, xo, oe, OOL, IOL,
!strconcat(asmbase, !strconcat(". ", asmstr)), itin,
[]>, isDOT, RecFormRel;
}
+ let BaseName = !strconcat(asmbase, "O") in {
+ let Defs = [CARRY, XER] in
+ def O : XOForm_3<opcode, xo, 1, OOL, IOL,
+ !strconcat(asmbase, !strconcat("o ", asmstr)), itin,
+ []>, RecFormRel;
+ let Defs = [CARRY, XER, CR0] in
+ def Oo : XOForm_3<opcode, xo, 1, OOL, IOL,
+ !strconcat(asmbase, !strconcat("o. ", asmstr)), itin,
+ []>, isDOT, RecFormRel;
+ }
}
multiclass MForm_2r<bits<6> opcode, dag OOL, dag IOL,
string asmbase, string asmstr, InstrItinClass itin,
list<dag> pattern> {
let BaseName = asmbase in {
def NAME : MForm_2<opcode, OOL, IOL,
!strconcat(asmbase, !strconcat(" ", asmstr)), itin,
pattern>, RecFormRel;
let Defs = [CR0] in
def o : MForm_2<opcode, OOL, IOL,
!strconcat(asmbase, !strconcat(". ", asmstr)), itin,
[]>, isDOT, RecFormRel;
}
}
multiclass MDForm_1r<bits<6> opcode, bits<3> xo, dag OOL, dag IOL,
string asmbase, string asmstr, InstrItinClass itin,
list<dag> pattern> {
let BaseName = asmbase in {
def NAME : MDForm_1<opcode, xo, OOL, IOL,
!strconcat(asmbase, !strconcat(" ", asmstr)), itin,
pattern>, RecFormRel;
let Defs = [CR0] in
def o : MDForm_1<opcode, xo, OOL, IOL,
!strconcat(asmbase, !strconcat(". ", asmstr)), itin,
[]>, isDOT, RecFormRel;
}
}
multiclass MDSForm_1r<bits<6> opcode, bits<4> xo, dag OOL, dag IOL,
string asmbase, string asmstr, InstrItinClass itin,
list<dag> pattern> {
let BaseName = asmbase in {
def NAME : MDSForm_1<opcode, xo, OOL, IOL,
!strconcat(asmbase, !strconcat(" ", asmstr)), itin,
pattern>, RecFormRel;
let Defs = [CR0] in
def o : MDSForm_1<opcode, xo, OOL, IOL,
!strconcat(asmbase, !strconcat(". ", asmstr)), itin,
[]>, isDOT, RecFormRel;
}
}
multiclass XSForm_1rc<bits<6> opcode, bits<9> xo, dag OOL, dag IOL,
string asmbase, string asmstr, InstrItinClass itin,
list<dag> pattern> {
let BaseName = asmbase in {
let Defs = [CARRY] in
def NAME : XSForm_1<opcode, xo, OOL, IOL,
!strconcat(asmbase, !strconcat(" ", asmstr)), itin,
pattern>, RecFormRel;
let Defs = [CARRY, CR0] in
def o : XSForm_1<opcode, xo, OOL, IOL,
!strconcat(asmbase, !strconcat(". ", asmstr)), itin,
[]>, isDOT, RecFormRel;
}
}
multiclass XSForm_1r<bits<6> opcode, bits<9> xo, dag OOL, dag IOL,
string asmbase, string asmstr, InstrItinClass itin,
list<dag> pattern> {
let BaseName = asmbase in {
def NAME : XSForm_1<opcode, xo, OOL, IOL,
!strconcat(asmbase, !strconcat(" ", asmstr)), itin,
pattern>, RecFormRel;
let Defs = [CR0] in
def o : XSForm_1<opcode, xo, OOL, IOL,
!strconcat(asmbase, !strconcat(". ", asmstr)), itin,
[]>, isDOT, RecFormRel;
}
}
multiclass XForm_26r<bits<6> opcode, bits<10> xo, dag OOL, dag IOL,
string asmbase, string asmstr, InstrItinClass itin,
list<dag> pattern> {
let BaseName = asmbase in {
def NAME : XForm_26<opcode, xo, OOL, IOL,
!strconcat(asmbase, !strconcat(" ", asmstr)), itin,
pattern>, RecFormRel;
let Defs = [CR1] in
def o : XForm_26<opcode, xo, OOL, IOL,
!strconcat(asmbase, !strconcat(". ", asmstr)), itin,
[]>, isDOT, RecFormRel;
}
}
multiclass XForm_28r<bits<6> opcode, bits<10> xo, dag OOL, dag IOL,
string asmbase, string asmstr, InstrItinClass itin,
list<dag> pattern> {
let BaseName = asmbase in {
def NAME : XForm_28<opcode, xo, OOL, IOL,
!strconcat(asmbase, !strconcat(" ", asmstr)), itin,
pattern>, RecFormRel;
let Defs = [CR1] in
def o : XForm_28<opcode, xo, OOL, IOL,
!strconcat(asmbase, !strconcat(". ", asmstr)), itin,
[]>, isDOT, RecFormRel;
}
}
multiclass AForm_1r<bits<6> opcode, bits<5> xo, dag OOL, dag IOL,
string asmbase, string asmstr, InstrItinClass itin,
list<dag> pattern> {
let BaseName = asmbase in {
def NAME : AForm_1<opcode, xo, OOL, IOL,
!strconcat(asmbase, !strconcat(" ", asmstr)), itin,
pattern>, RecFormRel;
let Defs = [CR1] in
def o : AForm_1<opcode, xo, OOL, IOL,
!strconcat(asmbase, !strconcat(". ", asmstr)), itin,
[]>, isDOT, RecFormRel;
}
}
multiclass AForm_2r<bits<6> opcode, bits<5> xo, dag OOL, dag IOL,
string asmbase, string asmstr, InstrItinClass itin,
list<dag> pattern> {
let BaseName = asmbase in {
def NAME : AForm_2<opcode, xo, OOL, IOL,
!strconcat(asmbase, !strconcat(" ", asmstr)), itin,
pattern>, RecFormRel;
let Defs = [CR1] in
def o : AForm_2<opcode, xo, OOL, IOL,
!strconcat(asmbase, !strconcat(". ", asmstr)), itin,
[]>, isDOT, RecFormRel;
}
}
multiclass AForm_3r<bits<6> opcode, bits<5> xo, dag OOL, dag IOL,
string asmbase, string asmstr, InstrItinClass itin,
list<dag> pattern> {
let BaseName = asmbase in {
def NAME : AForm_3<opcode, xo, OOL, IOL,
!strconcat(asmbase, !strconcat(" ", asmstr)), itin,
pattern>, RecFormRel;
let Defs = [CR1] in
def o : AForm_3<opcode, xo, OOL, IOL,
!strconcat(asmbase, !strconcat(". ", asmstr)), itin,
[]>, isDOT, RecFormRel;
}
}
//===----------------------------------------------------------------------===//
// PowerPC Instruction Definitions.
// Pseudo instructions:
let hasCtrlDep = 1 in {
let Defs = [R1], Uses = [R1] in {
def ADJCALLSTACKDOWN : PPCEmitTimePseudo<(outs), (ins u16imm:$amt1, u16imm:$amt2),
"#ADJCALLSTACKDOWN $amt1 $amt2",
[(callseq_start timm:$amt1, timm:$amt2)]>;
def ADJCALLSTACKUP : PPCEmitTimePseudo<(outs), (ins u16imm:$amt1, u16imm:$amt2),
"#ADJCALLSTACKUP $amt1 $amt2",
[(callseq_end timm:$amt1, timm:$amt2)]>;
}
def UPDATE_VRSAVE : PPCEmitTimePseudo<(outs gprc:$rD), (ins gprc:$rS),
"UPDATE_VRSAVE $rD, $rS", []>;
}
let Defs = [R1], Uses = [R1] in
def DYNALLOC : PPCEmitTimePseudo<(outs gprc:$result), (ins gprc:$negsize, memri:$fpsi), "#DYNALLOC",
[(set i32:$result,
(PPCdynalloc i32:$negsize, iaddr:$fpsi))]>;
def DYNAREAOFFSET : PPCEmitTimePseudo<(outs i32imm:$result), (ins memri:$fpsi), "#DYNAREAOFFSET",
[(set i32:$result, (PPCdynareaoffset iaddr:$fpsi))]>;
// SELECT_CC_* - Used to implement the SELECT_CC DAG operation. Expanded after
// instruction selection into a branch sequence.
let PPC970_Single = 1 in {
// Note that SELECT_CC_I4 and SELECT_CC_I8 use the no-r0 register classes
// because either operand might become the first operand in an isel, and
// that operand cannot be r0.
def SELECT_CC_I4 : PPCCustomInserterPseudo<(outs gprc:$dst), (ins crrc:$cond,
gprc_nor0:$T, gprc_nor0:$F,
i32imm:$BROPC), "#SELECT_CC_I4",
[]>;
def SELECT_CC_I8 : PPCCustomInserterPseudo<(outs g8rc:$dst), (ins crrc:$cond,
g8rc_nox0:$T, g8rc_nox0:$F,
i32imm:$BROPC), "#SELECT_CC_I8",
[]>;
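// Why the no-r0 classes above matter (an illustrative C++ model of isel,
// not LLVM code): the first source of "isel rT, rA, rB, bc" reads as the
// constant 0 when rA is r0, and either SELECT_CC operand can land in that
// slot once the compare sense is flipped.
#include <cassert>

static int iselModel(bool CRBit, bool RAIsR0, int RA, int RB) {
  return CRBit ? (RAIsR0 ? 0 : RA) : RB; // r0 in the rA slot yields 0
}

int main() {
  assert(iselModel(true, /*RAIsR0=*/true, 42, 7) == 0); // 42 is lost
  assert(iselModel(true, /*RAIsR0=*/false, 42, 7) == 42);
}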
def SELECT_CC_F4 : PPCCustomInserterPseudo<(outs f4rc:$dst), (ins crrc:$cond, f4rc:$T, f4rc:$F,
i32imm:$BROPC), "#SELECT_CC_F4",
[]>;
def SELECT_CC_F8 : PPCCustomInserterPseudo<(outs f8rc:$dst), (ins crrc:$cond, f8rc:$T, f8rc:$F,
i32imm:$BROPC), "#SELECT_CC_F8",
[]>;
def SELECT_CC_F16 : PPCCustomInserterPseudo<(outs vrrc:$dst), (ins crrc:$cond, vrrc:$T, vrrc:$F,
i32imm:$BROPC), "#SELECT_CC_F16",
[]>;
def SELECT_CC_VRRC: PPCCustomInserterPseudo<(outs vrrc:$dst), (ins crrc:$cond, vrrc:$T, vrrc:$F,
i32imm:$BROPC), "#SELECT_CC_VRRC",
[]>;
// SELECT_* pseudo instructions, like SELECT_CC_* but taking condition
// register bit directly.
def SELECT_I4 : PPCCustomInserterPseudo<(outs gprc:$dst), (ins crbitrc:$cond,
gprc_nor0:$T, gprc_nor0:$F), "#SELECT_I4",
[(set i32:$dst, (select i1:$cond, i32:$T, i32:$F))]>;
def SELECT_I8 : PPCCustomInserterPseudo<(outs g8rc:$dst), (ins crbitrc:$cond,
g8rc_nox0:$T, g8rc_nox0:$F), "#SELECT_I8",
[(set i64:$dst, (select i1:$cond, i64:$T, i64:$F))]>;
let Predicates = [HasFPU] in {
def SELECT_F4 : PPCCustomInserterPseudo<(outs f4rc:$dst), (ins crbitrc:$cond,
f4rc:$T, f4rc:$F), "#SELECT_F4",
[(set f32:$dst, (select i1:$cond, f32:$T, f32:$F))]>;
def SELECT_F8 : PPCCustomInserterPseudo<(outs f8rc:$dst), (ins crbitrc:$cond,
f8rc:$T, f8rc:$F), "#SELECT_F8",
[(set f64:$dst, (select i1:$cond, f64:$T, f64:$F))]>;
def SELECT_F16 : PPCCustomInserterPseudo<(outs vrrc:$dst), (ins crbitrc:$cond,
vrrc:$T, vrrc:$F), "#SELECT_F16",
[(set f128:$dst, (select i1:$cond, f128:$T, f128:$F))]>;
}
def SELECT_VRRC: PPCCustomInserterPseudo<(outs vrrc:$dst), (ins crbitrc:$cond,
vrrc:$T, vrrc:$F), "#SELECT_VRRC",
[(set v4i32:$dst,
(select i1:$cond, v4i32:$T, v4i32:$F))]>;
}
// SPILL_CR - Indicate that we're dumping the CR register, so we'll need to
// scavenge a register for it.
let mayStore = 1 in {
def SPILL_CR : PPCEmitTimePseudo<(outs), (ins crrc:$cond, memri:$F),
"#SPILL_CR", []>;
def SPILL_CRBIT : PPCEmitTimePseudo<(outs), (ins crbitrc:$cond, memri:$F),
"#SPILL_CRBIT", []>;
}
// RESTORE_CR - Indicate that we're restoring the CR register (previously
// spilled), so we'll need to scavenge a register for it.
let mayLoad = 1 in {
def RESTORE_CR : PPCEmitTimePseudo<(outs crrc:$cond), (ins memri:$F),
"#RESTORE_CR", []>;
def RESTORE_CRBIT : PPCEmitTimePseudo<(outs crbitrc:$cond), (ins memri:$F),
"#RESTORE_CRBIT", []>;
}
let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7 in {
let isReturn = 1, Uses = [LR, RM] in
def BLR : XLForm_2_ext<19, 16, 20, 0, 0, (outs), (ins), "blr", IIC_BrB,
[(retflag)]>, Requires<[In32BitMode]>;
let isBranch = 1, isIndirectBranch = 1, Uses = [CTR] in {
def BCTR : XLForm_2_ext<19, 528, 20, 0, 0, (outs), (ins), "bctr", IIC_BrB,
[]>;
let isCodeGenOnly = 1 in {
def BCCCTR : XLForm_2_br<19, 528, 0, (outs), (ins pred:$cond),
"b${cond:cc}ctr${cond:pm} ${cond:reg}", IIC_BrB,
[]>;
def BCCTR : XLForm_2_br2<19, 528, 12, 0, (outs), (ins crbitrc:$bi),
"bcctr 12, $bi, 0", IIC_BrB, []>;
def BCCTRn : XLForm_2_br2<19, 528, 4, 0, (outs), (ins crbitrc:$bi),
"bcctr 4, $bi, 0", IIC_BrB, []>;
}
}
}
// Set the float rounding mode.
let Uses = [RM], Defs = [RM] in {
def SETRNDi : PPCCustomInserterPseudo<(outs f8rc:$FRT), (ins u2imm:$RND),
"#SETRNDi", [(set f64:$FRT, (int_ppc_setrnd (i32 imm:$RND)))]>;
def SETRND : PPCCustomInserterPseudo<(outs f8rc:$FRT), (ins gprc:$in),
"#SETRND", [(set f64:$FRT, (int_ppc_setrnd gprc :$in))]>;
}
let Defs = [LR] in
def MovePCtoLR : PPCEmitTimePseudo<(outs), (ins), "#MovePCtoLR", []>,
PPC970_Unit_BRU;
let Defs = [LR] in
def MoveGOTtoLR : PPCEmitTimePseudo<(outs), (ins), "#MoveGOTtoLR", []>,
PPC970_Unit_BRU;
let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7 in {
let isBarrier = 1 in {
def B : IForm<18, 0, 0, (outs), (ins directbrtarget:$dst),
"b $dst", IIC_BrB,
[(br bb:$dst)]>;
def BA : IForm<18, 1, 0, (outs), (ins absdirectbrtarget:$dst),
"ba $dst", IIC_BrB, []>;
}
// BCC represents an arbitrary conditional branch on a predicate.
// FIXME: should be able to write a pattern for PPCcondbranch, but can't use
// a two-value operand where a dag node expects two operands. :(
let isCodeGenOnly = 1 in {
class BCC_class : BForm<16, 0, 0, (outs), (ins pred:$cond, condbrtarget:$dst),
"b${cond:cc}${cond:pm} ${cond:reg}, $dst"
/*[(PPCcondbranch crrc:$crS, imm:$opc, bb:$dst)]*/>;
def BCC : BCC_class;
// The same as BCC, except that it's not a terminator. Used for introducing
// control flow dependency without creating new blocks.
let isTerminator = 0 in def CTRL_DEP : BCC_class;
def BCCA : BForm<16, 1, 0, (outs), (ins pred:$cond, abscondbrtarget:$dst),
"b${cond:cc}a${cond:pm} ${cond:reg}, $dst">;
let isReturn = 1, Uses = [LR, RM] in
def BCCLR : XLForm_2_br<19, 16, 0, (outs), (ins pred:$cond),
"b${cond:cc}lr${cond:pm} ${cond:reg}", IIC_BrB, []>;
}
let isCodeGenOnly = 1 in {
let Pattern = [(brcond i1:$bi, bb:$dst)] in
def BC : BForm_4<16, 12, 0, 0, (outs), (ins crbitrc:$bi, condbrtarget:$dst),
"bc 12, $bi, $dst">;
let Pattern = [(brcond (not i1:$bi), bb:$dst)] in
def BCn : BForm_4<16, 4, 0, 0, (outs), (ins crbitrc:$bi, condbrtarget:$dst),
"bc 4, $bi, $dst">;
let isReturn = 1, Uses = [LR, RM] in
def BCLR : XLForm_2_br2<19, 16, 12, 0, (outs), (ins crbitrc:$bi),
"bclr 12, $bi, 0", IIC_BrB, []>;
def BCLRn : XLForm_2_br2<19, 16, 4, 0, (outs), (ins crbitrc:$bi),
"bclr 4, $bi, 0", IIC_BrB, []>;
}
let isReturn = 1, Defs = [CTR], Uses = [CTR, LR, RM] in {
def BDZLR : XLForm_2_ext<19, 16, 18, 0, 0, (outs), (ins),
"bdzlr", IIC_BrB, []>;
def BDNZLR : XLForm_2_ext<19, 16, 16, 0, 0, (outs), (ins),
"bdnzlr", IIC_BrB, []>;
def BDZLRp : XLForm_2_ext<19, 16, 27, 0, 0, (outs), (ins),
"bdzlr+", IIC_BrB, []>;
def BDNZLRp: XLForm_2_ext<19, 16, 25, 0, 0, (outs), (ins),
"bdnzlr+", IIC_BrB, []>;
def BDZLRm : XLForm_2_ext<19, 16, 26, 0, 0, (outs), (ins),
"bdzlr-", IIC_BrB, []>;
def BDNZLRm: XLForm_2_ext<19, 16, 24, 0, 0, (outs), (ins),
"bdnzlr-", IIC_BrB, []>;
}
let Defs = [CTR], Uses = [CTR] in {
def BDZ : BForm_1<16, 18, 0, 0, (outs), (ins condbrtarget:$dst),
"bdz $dst">;
def BDNZ : BForm_1<16, 16, 0, 0, (outs), (ins condbrtarget:$dst),
"bdnz $dst">;
def BDZA : BForm_1<16, 18, 1, 0, (outs), (ins abscondbrtarget:$dst),
"bdza $dst">;
def BDNZA : BForm_1<16, 16, 1, 0, (outs), (ins abscondbrtarget:$dst),
"bdnza $dst">;
def BDZp : BForm_1<16, 27, 0, 0, (outs), (ins condbrtarget:$dst),
"bdz+ $dst">;
def BDNZp: BForm_1<16, 25, 0, 0, (outs), (ins condbrtarget:$dst),
"bdnz+ $dst">;
def BDZAp : BForm_1<16, 27, 1, 0, (outs), (ins abscondbrtarget:$dst),
"bdza+ $dst">;
def BDNZAp: BForm_1<16, 25, 1, 0, (outs), (ins abscondbrtarget:$dst),
"bdnza+ $dst">;
def BDZm : BForm_1<16, 26, 0, 0, (outs), (ins condbrtarget:$dst),
"bdz- $dst">;
def BDNZm: BForm_1<16, 24, 0, 0, (outs), (ins condbrtarget:$dst),
"bdnz- $dst">;
def BDZAm : BForm_1<16, 26, 1, 0, (outs), (ins abscondbrtarget:$dst),
"bdza- $dst">;
def BDNZAm: BForm_1<16, 24, 1, 0, (outs), (ins abscondbrtarget:$dst),
"bdnza- $dst">;
}
}
// The unconditional BCL used by the SjLj setjmp code.
let isCall = 1, hasCtrlDep = 1, isCodeGenOnly = 1, PPC970_Unit = 7 in {
let Defs = [LR], Uses = [RM] in {
def BCLalways : BForm_2<16, 20, 31, 0, 1, (outs), (ins condbrtarget:$dst),
"bcl 20, 31, $dst">;
}
}
let isCall = 1, PPC970_Unit = 7, Defs = [LR] in {
// Convenient aliases for call instructions
let Uses = [RM] in {
def BL : IForm<18, 0, 1, (outs), (ins calltarget:$func),
"bl $func", IIC_BrB, []>; // See Pat patterns below.
def BLA : IForm<18, 1, 1, (outs), (ins abscalltarget:$func),
"bla $func", IIC_BrB, [(PPCcall (i32 imm:$func))]>;
let isCodeGenOnly = 1 in {
def BL_TLS : IForm<18, 0, 1, (outs), (ins tlscall32:$func),
"bl $func", IIC_BrB, []>;
def BCCL : BForm<16, 0, 1, (outs), (ins pred:$cond, condbrtarget:$dst),
"b${cond:cc}l${cond:pm} ${cond:reg}, $dst">;
def BCCLA : BForm<16, 1, 1, (outs), (ins pred:$cond, abscondbrtarget:$dst),
"b${cond:cc}la${cond:pm} ${cond:reg}, $dst">;
def BCL : BForm_4<16, 12, 0, 1, (outs),
(ins crbitrc:$bi, condbrtarget:$dst),
"bcl 12, $bi, $dst">;
def BCLn : BForm_4<16, 4, 0, 1, (outs),
(ins crbitrc:$bi, condbrtarget:$dst),
"bcl 4, $bi, $dst">;
def BL_NOP : IForm_and_DForm_4_zero<18, 0, 1, 24,
(outs), (ins calltarget:$func),
"bl $func\n\tnop", IIC_BrB, []>;
}
}
let Uses = [CTR, RM] in {
def BCTRL : XLForm_2_ext<19, 528, 20, 0, 1, (outs), (ins),
"bctrl", IIC_BrB, [(PPCbctrl)]>,
Requires<[In32BitMode]>;
let isCodeGenOnly = 1 in {
def BCCCTRL : XLForm_2_br<19, 528, 1, (outs), (ins pred:$cond),
"b${cond:cc}ctrl${cond:pm} ${cond:reg}", IIC_BrB,
[]>;
def BCCTRL : XLForm_2_br2<19, 528, 12, 1, (outs), (ins crbitrc:$bi),
"bcctrl 12, $bi, 0", IIC_BrB, []>;
def BCCTRLn : XLForm_2_br2<19, 528, 4, 1, (outs), (ins crbitrc:$bi),
"bcctrl 4, $bi, 0", IIC_BrB, []>;
}
}
let Uses = [LR, RM] in {
def BLRL : XLForm_2_ext<19, 16, 20, 0, 1, (outs), (ins),
"blrl", IIC_BrB, []>;
let isCodeGenOnly = 1 in {
def BCCLRL : XLForm_2_br<19, 16, 1, (outs), (ins pred:$cond),
"b${cond:cc}lrl${cond:pm} ${cond:reg}", IIC_BrB,
[]>;
def BCLRL : XLForm_2_br2<19, 16, 12, 1, (outs), (ins crbitrc:$bi),
"bclrl 12, $bi, 0", IIC_BrB, []>;
def BCLRLn : XLForm_2_br2<19, 16, 4, 1, (outs), (ins crbitrc:$bi),
"bclrl 4, $bi, 0", IIC_BrB, []>;
}
}
let Defs = [CTR], Uses = [CTR, RM] in {
def BDZL : BForm_1<16, 18, 0, 1, (outs), (ins condbrtarget:$dst),
"bdzl $dst">;
def BDNZL : BForm_1<16, 16, 0, 1, (outs), (ins condbrtarget:$dst),
"bdnzl $dst">;
def BDZLA : BForm_1<16, 18, 1, 1, (outs), (ins abscondbrtarget:$dst),
"bdzla $dst">;
def BDNZLA : BForm_1<16, 16, 1, 1, (outs), (ins abscondbrtarget:$dst),
"bdnzla $dst">;
def BDZLp : BForm_1<16, 27, 0, 1, (outs), (ins condbrtarget:$dst),
"bdzl+ $dst">;
def BDNZLp: BForm_1<16, 25, 0, 1, (outs), (ins condbrtarget:$dst),
"bdnzl+ $dst">;
def BDZLAp : BForm_1<16, 27, 1, 1, (outs), (ins abscondbrtarget:$dst),
"bdzla+ $dst">;
def BDNZLAp: BForm_1<16, 25, 1, 1, (outs), (ins abscondbrtarget:$dst),
"bdnzla+ $dst">;
def BDZLm : BForm_1<16, 26, 0, 1, (outs), (ins condbrtarget:$dst),
"bdzl- $dst">;
def BDNZLm: BForm_1<16, 24, 0, 1, (outs), (ins condbrtarget:$dst),
"bdnzl- $dst">;
def BDZLAm : BForm_1<16, 26, 1, 1, (outs), (ins abscondbrtarget:$dst),
"bdzla- $dst">;
def BDNZLAm: BForm_1<16, 24, 1, 1, (outs), (ins abscondbrtarget:$dst),
"bdnzla- $dst">;
}
let Defs = [CTR], Uses = [CTR, LR, RM] in {
def BDZLRL : XLForm_2_ext<19, 16, 18, 0, 1, (outs), (ins),
"bdzlrl", IIC_BrB, []>;
def BDNZLRL : XLForm_2_ext<19, 16, 16, 0, 1, (outs), (ins),
"bdnzlrl", IIC_BrB, []>;
def BDZLRLp : XLForm_2_ext<19, 16, 27, 0, 1, (outs), (ins),
"bdzlrl+", IIC_BrB, []>;
def BDNZLRLp: XLForm_2_ext<19, 16, 25, 0, 1, (outs), (ins),
"bdnzlrl+", IIC_BrB, []>;
def BDZLRLm : XLForm_2_ext<19, 16, 26, 0, 1, (outs), (ins),
"bdzlrl-", IIC_BrB, []>;
def BDNZLRLm: XLForm_2_ext<19, 16, 24, 0, 1, (outs), (ins),
"bdnzlrl-", IIC_BrB, []>;
}
}
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [RM] in
def TCRETURNdi :PPCEmitTimePseudo< (outs),
(ins calltarget:$dst, i32imm:$offset),
"#TC_RETURNd $dst $offset",
[]>;
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [RM] in
def TCRETURNai :PPCEmitTimePseudo<(outs), (ins abscalltarget:$func, i32imm:$offset),
"#TC_RETURNa $func $offset",
[(PPCtc_return (i32 imm:$func), imm:$offset)]>;
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [RM] in
def TCRETURNri : PPCEmitTimePseudo<(outs), (ins CTRRC:$dst, i32imm:$offset),
"#TC_RETURNr $dst $offset",
[]>;
let isCodeGenOnly = 1 in {
let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7, isBranch = 1,
isIndirectBranch = 1, isCall = 1, isReturn = 1, Uses = [CTR, RM] in
def TAILBCTR : XLForm_2_ext<19, 528, 20, 0, 0, (outs), (ins), "bctr", IIC_BrB,
[]>, Requires<[In32BitMode]>;
let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7,
isBarrier = 1, isCall = 1, isReturn = 1, Uses = [RM] in
def TAILB : IForm<18, 0, 0, (outs), (ins calltarget:$dst),
"b $dst", IIC_BrB,
[]>;
let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7,
isBarrier = 1, isCall = 1, isReturn = 1, Uses = [RM] in
def TAILBA : IForm<18, 0, 0, (outs), (ins abscalltarget:$dst),
"ba $dst", IIC_BrB,
[]>;
}
// While longjmp is a control-flow barrier (fallthrough isn't allowed), setjmp
// is not.
let hasSideEffects = 1 in {
let Defs = [CTR] in
def EH_SjLj_SetJmp32 : PPCCustomInserterPseudo<(outs gprc:$dst), (ins memr:$buf),
"#EH_SJLJ_SETJMP32",
[(set i32:$dst, (PPCeh_sjlj_setjmp addr:$buf))]>,
Requires<[In32BitMode]>;
}
let hasSideEffects = 1, isBarrier = 1 in {
let isTerminator = 1 in
def EH_SjLj_LongJmp32 : PPCCustomInserterPseudo<(outs), (ins memr:$buf),
"#EH_SJLJ_LONGJMP32",
[(PPCeh_sjlj_longjmp addr:$buf)]>,
Requires<[In32BitMode]>;
}
// This pseudo is never removed from the function, as it serves as
// a terminator. Size is set to 0 to prevent the builtin assembler
// from emitting it.
let isBranch = 1, isTerminator = 1, Size = 0 in {
def EH_SjLj_Setup : PPCEmitTimePseudo<(outs), (ins directbrtarget:$dst),
"#EH_SjLj_Setup\t$dst", []>;
}
// System call.
let PPC970_Unit = 7 in {
def SC : SCForm<17, 1, (outs), (ins i32imm:$lev),
"sc $lev", IIC_BrB, [(PPCsc (i32 imm:$lev))]>;
}
// Branch history rolling buffer.
def CLRBHRB : XForm_0<31, 430, (outs), (ins), "clrbhrb", IIC_BrB,
[(PPCclrbhrb)]>,
PPC970_DGroup_Single;
// The $dmy argument used for MFBHRBE is not needed; however, including
// it avoids automatic generation of PPCFastISel::fastEmit_i(), which
// interferes with necessary special handling (see PPCFastISel.cpp).
def MFBHRBE : XFXForm_3p<31, 302, (outs gprc:$rD),
(ins u10imm:$imm, u10imm:$dmy),
"mfbhrbe $rD, $imm", IIC_BrB,
[(set i32:$rD,
(PPCmfbhrbe imm:$imm, imm:$dmy))]>,
PPC970_DGroup_First;
def RFEBB : XLForm_S<19, 146, (outs), (ins u1imm:$imm), "rfebb $imm",
IIC_BrB, [(PPCrfebb (i32 imm:$imm))]>,
PPC970_DGroup_Single;
// DCB* instructions.
def DCBA : DCB_Form<758, 0, (outs), (ins memrr:$dst), "dcba $dst",
IIC_LdStDCBF, [(int_ppc_dcba xoaddr:$dst)]>,
PPC970_DGroup_Single;
def DCBI : DCB_Form<470, 0, (outs), (ins memrr:$dst), "dcbi $dst",
IIC_LdStDCBF, [(int_ppc_dcbi xoaddr:$dst)]>,
PPC970_DGroup_Single;
def DCBST : DCB_Form<54, 0, (outs), (ins memrr:$dst), "dcbst $dst",
IIC_LdStDCBF, [(int_ppc_dcbst xoaddr:$dst)]>,
PPC970_DGroup_Single;
def DCBZ : DCB_Form<1014, 0, (outs), (ins memrr:$dst), "dcbz $dst",
IIC_LdStDCBF, [(int_ppc_dcbz xoaddr:$dst)]>,
PPC970_DGroup_Single;
def DCBZL : DCB_Form<1014, 1, (outs), (ins memrr:$dst), "dcbzl $dst",
IIC_LdStDCBF, [(int_ppc_dcbzl xoaddr:$dst)]>,
PPC970_DGroup_Single;
def DCBF : DCB_Form_hint<86, (outs), (ins u5imm:$TH, memrr:$dst),
"dcbf $dst, $TH", IIC_LdStDCBF, []>,
PPC970_DGroup_Single;
let hasSideEffects = 0, mayLoad = 1, mayStore = 1 in {
def DCBT : DCB_Form_hint<278, (outs), (ins u5imm:$TH, memrr:$dst),
"dcbt $dst, $TH", IIC_LdStDCBF, []>,
PPC970_DGroup_Single;
def DCBTST : DCB_Form_hint<246, (outs), (ins u5imm:$TH, memrr:$dst),
"dcbtst $dst, $TH", IIC_LdStDCBF, []>,
PPC970_DGroup_Single;
} // hasSideEffects = 0
def ICBLC : XForm_icbt<31, 230, (outs), (ins u4imm:$CT, memrr:$src),
"icblc $CT, $src", IIC_LdStStore>, Requires<[HasICBT]>;
def ICBLQ : XForm_icbt<31, 198, (outs), (ins u4imm:$CT, memrr:$src),
"icblq. $CT, $src", IIC_LdStLoad>, Requires<[HasICBT]>;
def ICBT : XForm_icbt<31, 22, (outs), (ins u4imm:$CT, memrr:$src),
"icbt $CT, $src", IIC_LdStLoad>, Requires<[HasICBT]>;
def ICBTLS : XForm_icbt<31, 486, (outs), (ins u4imm:$CT, memrr:$src),
"icbtls $CT, $src", IIC_LdStLoad>, Requires<[HasICBT]>;
def : Pat<(int_ppc_dcbt xoaddr:$dst),
(DCBT 0, xoaddr:$dst)>;
def : Pat<(int_ppc_dcbtst xoaddr:$dst),
(DCBTST 0, xoaddr:$dst)>;
def : Pat<(int_ppc_dcbf xoaddr:$dst),
(DCBF 0, xoaddr:$dst)>;
def : Pat<(prefetch xoaddr:$dst, (i32 0), imm, (i32 1)),
(DCBT 0, xoaddr:$dst)>; // data prefetch for loads
def : Pat<(prefetch xoaddr:$dst, (i32 1), imm, (i32 1)),
(DCBTST 0, xoaddr:$dst)>; // data prefetch for stores
def : Pat<(prefetch xoaddr:$dst, (i32 0), imm, (i32 0)),
(ICBT 0, xoaddr:$dst)>, Requires<[HasICBT]>; // inst prefetch (for read)
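// For example (assuming the usual lowering of the generic prefetch
// intrinsic): __builtin_prefetch(p, /*rw=*/0, /*locality=*/1) selects
// "dcbt 0, rP", while the rw=1 form selects "dcbtst 0, rP".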
// Atomic operations
// FIXME: some of these might be used with constant operands. This will result
// in constant materialization instructions that may be redundant. We currently
// clean this up in PPCMIPeephole with calls to
// PPCInstrInfo::convertToImmediateForm() but we should probably not emit them
// in the first place.
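// As a sketch (the custom inserter in PPCTargetLowering emits the real
// sequence), ATOMIC_LOAD_ADD_I32 expands to a reservation loop:
//   loop:
//     lwarx  r5, 0, r3     ; load word and take a reservation
//     add    r5, r5, r4    ; apply the operation
//     stwcx. r5, 0, r3     ; store conditionally, sets CR0
//     bne-   loop          ; retry if the reservation was lost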
let Defs = [CR0] in {
def ATOMIC_LOAD_ADD_I8 : PPCCustomInserterPseudo<
(outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_ADD_I8",
[(set i32:$dst, (atomic_load_add_8 xoaddr:$ptr, i32:$incr))]>;
def ATOMIC_LOAD_SUB_I8 : PPCCustomInserterPseudo<
(outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_SUB_I8",
[(set i32:$dst, (atomic_load_sub_8 xoaddr:$ptr, i32:$incr))]>;
def ATOMIC_LOAD_AND_I8 : PPCCustomInserterPseudo<
(outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_AND_I8",
[(set i32:$dst, (atomic_load_and_8 xoaddr:$ptr, i32:$incr))]>;
def ATOMIC_LOAD_OR_I8 : PPCCustomInserterPseudo<
(outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_OR_I8",
[(set i32:$dst, (atomic_load_or_8 xoaddr:$ptr, i32:$incr))]>;
def ATOMIC_LOAD_XOR_I8 : PPCCustomInserterPseudo<
(outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "ATOMIC_LOAD_XOR_I8",
[(set i32:$dst, (atomic_load_xor_8 xoaddr:$ptr, i32:$incr))]>;
def ATOMIC_LOAD_NAND_I8 : PPCCustomInserterPseudo<
(outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_NAND_I8",
[(set i32:$dst, (atomic_load_nand_8 xoaddr:$ptr, i32:$incr))]>;
def ATOMIC_LOAD_MIN_I8 : PPCCustomInserterPseudo<
(outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_MIN_I8",
[(set i32:$dst, (atomic_load_min_8 xoaddr:$ptr, i32:$incr))]>;
def ATOMIC_LOAD_MAX_I8 : PPCCustomInserterPseudo<
(outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_MAX_I8",
[(set i32:$dst, (atomic_load_max_8 xoaddr:$ptr, i32:$incr))]>;
def ATOMIC_LOAD_UMIN_I8 : PPCCustomInserterPseudo<
(outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_UMIN_I8",
[(set i32:$dst, (atomic_load_umin_8 xoaddr:$ptr, i32:$incr))]>;
def ATOMIC_LOAD_UMAX_I8 : PPCCustomInserterPseudo<
(outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_UMAX_I8",
[(set i32:$dst, (atomic_load_umax_8 xoaddr:$ptr, i32:$incr))]>;
def ATOMIC_LOAD_ADD_I16 : PPCCustomInserterPseudo<
(outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_ADD_I16",
[(set i32:$dst, (atomic_load_add_16 xoaddr:$ptr, i32:$incr))]>;
def ATOMIC_LOAD_SUB_I16 : PPCCustomInserterPseudo<
(outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_SUB_I16",
[(set i32:$dst, (atomic_load_sub_16 xoaddr:$ptr, i32:$incr))]>;
def ATOMIC_LOAD_AND_I16 : PPCCustomInserterPseudo<
(outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_AND_I16",
[(set i32:$dst, (atomic_load_and_16 xoaddr:$ptr, i32:$incr))]>;
def ATOMIC_LOAD_OR_I16 : PPCCustomInserterPseudo<
(outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_OR_I16",
[(set i32:$dst, (atomic_load_or_16 xoaddr:$ptr, i32:$incr))]>;
def ATOMIC_LOAD_XOR_I16 : PPCCustomInserterPseudo<
(outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_XOR_I16",
[(set i32:$dst, (atomic_load_xor_16 xoaddr:$ptr, i32:$incr))]>;
def ATOMIC_LOAD_NAND_I16 : PPCCustomInserterPseudo<
(outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_NAND_I16",
[(set i32:$dst, (atomic_load_nand_16 xoaddr:$ptr, i32:$incr))]>;
def ATOMIC_LOAD_MIN_I16 : PPCCustomInserterPseudo<
(outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_MIN_I16",
[(set i32:$dst, (atomic_load_min_16 xoaddr:$ptr, i32:$incr))]>;
def ATOMIC_LOAD_MAX_I16 : PPCCustomInserterPseudo<
(outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_MAX_I16",
[(set i32:$dst, (atomic_load_max_16 xoaddr:$ptr, i32:$incr))]>;
def ATOMIC_LOAD_UMIN_I16 : PPCCustomInserterPseudo<
(outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_UMIN_I16",
[(set i32:$dst, (atomic_load_umin_16 xoaddr:$ptr, i32:$incr))]>;
def ATOMIC_LOAD_UMAX_I16 : PPCCustomInserterPseudo<
(outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_UMAX_I16",
[(set i32:$dst, (atomic_load_umax_16 xoaddr:$ptr, i32:$incr))]>;
def ATOMIC_LOAD_ADD_I32 : PPCCustomInserterPseudo<
(outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_ADD_I32",
[(set i32:$dst, (atomic_load_add_32 xoaddr:$ptr, i32:$incr))]>;
def ATOMIC_LOAD_SUB_I32 : PPCCustomInserterPseudo<
(outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_SUB_I32",
[(set i32:$dst, (atomic_load_sub_32 xoaddr:$ptr, i32:$incr))]>;
def ATOMIC_LOAD_AND_I32 : PPCCustomInserterPseudo<
(outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_AND_I32",
[(set i32:$dst, (atomic_load_and_32 xoaddr:$ptr, i32:$incr))]>;
def ATOMIC_LOAD_OR_I32 : PPCCustomInserterPseudo<
(outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_OR_I32",
[(set i32:$dst, (atomic_load_or_32 xoaddr:$ptr, i32:$incr))]>;
def ATOMIC_LOAD_XOR_I32 : PPCCustomInserterPseudo<
(outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_XOR_I32",
[(set i32:$dst, (atomic_load_xor_32 xoaddr:$ptr, i32:$incr))]>;
def ATOMIC_LOAD_NAND_I32 : PPCCustomInserterPseudo<
(outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_NAND_I32",
[(set i32:$dst, (atomic_load_nand_32 xoaddr:$ptr, i32:$incr))]>;
def ATOMIC_LOAD_MIN_I32 : PPCCustomInserterPseudo<
(outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_MIN_I32",
[(set i32:$dst, (atomic_load_min_32 xoaddr:$ptr, i32:$incr))]>;
def ATOMIC_LOAD_MAX_I32 : PPCCustomInserterPseudo<
(outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_MAX_I32",
[(set i32:$dst, (atomic_load_max_32 xoaddr:$ptr, i32:$incr))]>;
def ATOMIC_LOAD_UMIN_I32 : PPCCustomInserterPseudo<
(outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_UMIN_I32",
[(set i32:$dst, (atomic_load_umin_32 xoaddr:$ptr, i32:$incr))]>;
def ATOMIC_LOAD_UMAX_I32 : PPCCustomInserterPseudo<
(outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_UMAX_I32",
[(set i32:$dst, (atomic_load_umax_32 xoaddr:$ptr, i32:$incr))]>;
def ATOMIC_CMP_SWAP_I8 : PPCCustomInserterPseudo<
(outs gprc:$dst), (ins memrr:$ptr, gprc:$old, gprc:$new), "#ATOMIC_CMP_SWAP_I8",
[(set i32:$dst, (atomic_cmp_swap_8 xoaddr:$ptr, i32:$old, i32:$new))]>;
def ATOMIC_CMP_SWAP_I16 : PPCCustomInserterPseudo<
(outs gprc:$dst), (ins memrr:$ptr, gprc:$old, gprc:$new), "#ATOMIC_CMP_SWAP_I16 $dst $ptr $old $new",
[(set i32:$dst, (atomic_cmp_swap_16 xoaddr:$ptr, i32:$old, i32:$new))]>;
def ATOMIC_CMP_SWAP_I32 : PPCCustomInserterPseudo<
(outs gprc:$dst), (ins memrr:$ptr, gprc:$old, gprc:$new), "#ATOMIC_CMP_SWAP_I32 $dst $ptr $old $new",
[(set i32:$dst, (atomic_cmp_swap_32 xoaddr:$ptr, i32:$old, i32:$new))]>;
def ATOMIC_SWAP_I8 : PPCCustomInserterPseudo<
(outs gprc:$dst), (ins memrr:$ptr, gprc:$new), "#ATOMIC_SWAP_i8",
[(set i32:$dst, (atomic_swap_8 xoaddr:$ptr, i32:$new))]>;
def ATOMIC_SWAP_I16 : PPCCustomInserterPseudo<
(outs gprc:$dst), (ins memrr:$ptr, gprc:$new), "#ATOMIC_SWAP_I16",
[(set i32:$dst, (atomic_swap_16 xoaddr:$ptr, i32:$new))]>;
def ATOMIC_SWAP_I32 : PPCCustomInserterPseudo<
(outs gprc:$dst), (ins memrr:$ptr, gprc:$new), "#ATOMIC_SWAP_I32",
[(set i32:$dst, (atomic_swap_32 xoaddr:$ptr, i32:$new))]>;
}
def : Pat<(PPCatomicCmpSwap_8 xoaddr:$ptr, i32:$old, i32:$new),
(ATOMIC_CMP_SWAP_I8 xoaddr:$ptr, i32:$old, i32:$new)>;
def : Pat<(PPCatomicCmpSwap_16 xoaddr:$ptr, i32:$old, i32:$new),
(ATOMIC_CMP_SWAP_I16 xoaddr:$ptr, i32:$old, i32:$new)>;
// Instructions to support atomic operations
let mayLoad = 1, mayStore = 0, hasSideEffects = 0 in {
def LBARX : XForm_1_memOp<31, 52, (outs gprc:$rD), (ins memrr:$src),
"lbarx $rD, $src", IIC_LdStLWARX, []>,
Requires<[HasPartwordAtomics]>;
def LHARX : XForm_1_memOp<31, 116, (outs gprc:$rD), (ins memrr:$src),
"lharx $rD, $src", IIC_LdStLWARX, []>,
Requires<[HasPartwordAtomics]>;
def LWARX : XForm_1_memOp<31, 20, (outs gprc:$rD), (ins memrr:$src),
"lwarx $rD, $src", IIC_LdStLWARX, []>;
// Instructions to support lock versions of atomics
// (EH=1 - see Power ISA 2.07 Book II 4.4.2)
def LBARXL : XForm_1_memOp<31, 52, (outs gprc:$rD), (ins memrr:$src),
"lbarx $rD, $src, 1", IIC_LdStLWARX, []>, isDOT,
Requires<[HasPartwordAtomics]>;
def LHARXL : XForm_1_memOp<31, 116, (outs gprc:$rD), (ins memrr:$src),
"lharx $rD, $src, 1", IIC_LdStLWARX, []>, isDOT,
Requires<[HasPartwordAtomics]>;
def LWARXL : XForm_1_memOp<31, 20, (outs gprc:$rD), (ins memrr:$src),
"lwarx $rD, $src, 1", IIC_LdStLWARX, []>, isDOT;
// The atomic instructions use the destination register as well as the next one
// or two registers in order (modulo 31).
let hasExtraSrcRegAllocReq = 1 in
def LWAT : X_RD5_RS5_IM5<31, 582, (outs gprc:$rD), (ins gprc:$rA, u5imm:$FC),
"lwat $rD, $rA, $FC", IIC_LdStLoad>,
Requires<[IsISA3_0]>;
}
let Defs = [CR0], mayStore = 1, mayLoad = 0, hasSideEffects = 0 in {
def STBCX : XForm_1_memOp<31, 694, (outs), (ins gprc:$rS, memrr:$dst),
"stbcx. $rS, $dst", IIC_LdStSTWCX, []>,
isDOT, Requires<[HasPartwordAtomics]>;
def STHCX : XForm_1_memOp<31, 726, (outs), (ins gprc:$rS, memrr:$dst),
"sthcx. $rS, $dst", IIC_LdStSTWCX, []>,
isDOT, Requires<[HasPartwordAtomics]>;
def STWCX : XForm_1_memOp<31, 150, (outs), (ins gprc:$rS, memrr:$dst),
"stwcx. $rS, $dst", IIC_LdStSTWCX, []>, isDOT;
}
let mayStore = 1, mayLoad = 0, hasSideEffects = 0 in
def STWAT : X_RD5_RS5_IM5<31, 710, (outs), (ins gprc:$rS, gprc:$rA, u5imm:$FC),
"stwat $rS, $rA, $FC", IIC_LdStStore>,
Requires<[IsISA3_0]>;
let isTerminator = 1, isBarrier = 1, hasCtrlDep = 1 in
def TRAP : XForm_24<31, 4, (outs), (ins), "trap", IIC_LdStLoad, [(trap)]>;
def TWI : DForm_base<3, (outs), (ins u5imm:$to, gprc:$rA, s16imm:$imm),
"twi $to, $rA, $imm", IIC_IntTrapW, []>;
def TW : XForm_1<31, 4, (outs), (ins u5imm:$to, gprc:$rA, gprc:$rB),
"tw $to, $rA, $rB", IIC_IntTrapW, []>;
def TDI : DForm_base<2, (outs), (ins u5imm:$to, g8rc:$rA, s16imm:$imm),
"tdi $to, $rA, $imm", IIC_IntTrapD, []>;
def TD : XForm_1<31, 68, (outs), (ins u5imm:$to, g8rc:$rA, g8rc:$rB),
"td $to, $rA, $rB", IIC_IntTrapD, []>;
//===----------------------------------------------------------------------===//
// PPC32 Load Instructions.
//
// Unindexed (r+i) Loads.
let PPC970_Unit = 2 in {
def LBZ : DForm_1<34, (outs gprc:$rD), (ins memri:$src),
"lbz $rD, $src", IIC_LdStLoad,
[(set i32:$rD, (zextloadi8 iaddr:$src))]>;
def LHA : DForm_1<42, (outs gprc:$rD), (ins memri:$src),
"lha $rD, $src", IIC_LdStLHA,
[(set i32:$rD, (sextloadi16 iaddr:$src))]>,
PPC970_DGroup_Cracked;
def LHZ : DForm_1<40, (outs gprc:$rD), (ins memri:$src),
"lhz $rD, $src", IIC_LdStLoad,
[(set i32:$rD, (zextloadi16 iaddr:$src))]>;
def LWZ : DForm_1<32, (outs gprc:$rD), (ins memri:$src),
"lwz $rD, $src", IIC_LdStLoad,
[(set i32:$rD, (load iaddr:$src))]>;
let Predicates = [HasFPU] in {
def LFS : DForm_1<48, (outs f4rc:$rD), (ins memri:$src),
"lfs $rD, $src", IIC_LdStLFD,
[(set f32:$rD, (load iaddr:$src))]>;
def LFD : DForm_1<50, (outs f8rc:$rD), (ins memri:$src),
"lfd $rD, $src", IIC_LdStLFD,
[(set f64:$rD, (load iaddr:$src))]>;
}
// Unindexed (r+i) Loads with Update (preinc).
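// For example, "lwzu r4, 8(r5)" loads the word at r5+8 into r4 and then
// writes the effective address r5+8 back into r5; the RegConstraint below
// ties $addr.reg to $ea_result to model that update.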
let mayLoad = 1, mayStore = 0, hasSideEffects = 0 in {
def LBZU : DForm_1<35, (outs gprc:$rD, ptr_rc_nor0:$ea_result), (ins memri:$addr),
"lbzu $rD, $addr", IIC_LdStLoadUpd,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
def LHAU : DForm_1<43, (outs gprc:$rD, ptr_rc_nor0:$ea_result), (ins memri:$addr),
"lhau $rD, $addr", IIC_LdStLHAU,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
def LHZU : DForm_1<41, (outs gprc:$rD, ptr_rc_nor0:$ea_result), (ins memri:$addr),
"lhzu $rD, $addr", IIC_LdStLoadUpd,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
def LWZU : DForm_1<33, (outs gprc:$rD, ptr_rc_nor0:$ea_result), (ins memri:$addr),
"lwzu $rD, $addr", IIC_LdStLoadUpd,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
let Predicates = [HasFPU] in {
def LFSU : DForm_1<49, (outs f4rc:$rD, ptr_rc_nor0:$ea_result), (ins memri:$addr),
"lfsu $rD, $addr", IIC_LdStLFDU,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
def LFDU : DForm_1<51, (outs f8rc:$rD, ptr_rc_nor0:$ea_result), (ins memri:$addr),
"lfdu $rD, $addr", IIC_LdStLFDU,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
}
// Indexed (r+r) Loads with Update (preinc).
def LBZUX : XForm_1_memOp<31, 119, (outs gprc:$rD, ptr_rc_nor0:$ea_result),
(ins memrr:$addr),
"lbzux $rD, $addr", IIC_LdStLoadUpdX,
[]>, RegConstraint<"$addr.ptrreg = $ea_result">,
NoEncode<"$ea_result">;
def LHAUX : XForm_1_memOp<31, 375, (outs gprc:$rD, ptr_rc_nor0:$ea_result),
(ins memrr:$addr),
"lhaux $rD, $addr", IIC_LdStLHAUX,
[]>, RegConstraint<"$addr.ptrreg = $ea_result">,
NoEncode<"$ea_result">;
def LHZUX : XForm_1_memOp<31, 311, (outs gprc:$rD, ptr_rc_nor0:$ea_result),
(ins memrr:$addr),
"lhzux $rD, $addr", IIC_LdStLoadUpdX,
[]>, RegConstraint<"$addr.ptrreg = $ea_result">,
NoEncode<"$ea_result">;
def LWZUX : XForm_1_memOp<31, 55, (outs gprc:$rD, ptr_rc_nor0:$ea_result),
(ins memrr:$addr),
"lwzux $rD, $addr", IIC_LdStLoadUpdX,
[]>, RegConstraint<"$addr.ptrreg = $ea_result">,
NoEncode<"$ea_result">;
let Predicates = [HasFPU] in {
def LFSUX : XForm_1_memOp<31, 567, (outs f4rc:$rD, ptr_rc_nor0:$ea_result),
(ins memrr:$addr),
"lfsux $rD, $addr", IIC_LdStLFDUX,
[]>, RegConstraint<"$addr.ptrreg = $ea_result">,
NoEncode<"$ea_result">;
def LFDUX : XForm_1_memOp<31, 631, (outs f8rc:$rD, ptr_rc_nor0:$ea_result),
(ins memrr:$addr),
"lfdux $rD, $addr", IIC_LdStLFDUX,
[]>, RegConstraint<"$addr.ptrreg = $ea_result">,
NoEncode<"$ea_result">;
}
}
}
// Indexed (r+r) Loads.
//
let PPC970_Unit = 2, mayLoad = 1, mayStore = 0 in {
def LBZX : XForm_1_memOp<31, 87, (outs gprc:$rD), (ins memrr:$src),
"lbzx $rD, $src", IIC_LdStLoad,
[(set i32:$rD, (zextloadi8 xaddr:$src))]>;
def LHAX : XForm_1_memOp<31, 343, (outs gprc:$rD), (ins memrr:$src),
"lhax $rD, $src", IIC_LdStLHA,
[(set i32:$rD, (sextloadi16 xaddr:$src))]>,
PPC970_DGroup_Cracked;
def LHZX : XForm_1_memOp<31, 279, (outs gprc:$rD), (ins memrr:$src),
"lhzx $rD, $src", IIC_LdStLoad,
[(set i32:$rD, (zextloadi16 xaddr:$src))]>;
def LWZX : XForm_1_memOp<31, 23, (outs gprc:$rD), (ins memrr:$src),
"lwzx $rD, $src", IIC_LdStLoad,
[(set i32:$rD, (load xaddr:$src))]>;
def LHBRX : XForm_1_memOp<31, 790, (outs gprc:$rD), (ins memrr:$src),
"lhbrx $rD, $src", IIC_LdStLoad,
[(set i32:$rD, (PPClbrx xoaddr:$src, i16))]>;
def LWBRX : XForm_1_memOp<31, 534, (outs gprc:$rD), (ins memrr:$src),
"lwbrx $rD, $src", IIC_LdStLoad,
[(set i32:$rD, (PPClbrx xoaddr:$src, i32))]>;
let Predicates = [HasFPU] in {
def LFSX : XForm_25_memOp<31, 535, (outs f4rc:$frD), (ins memrr:$src),
"lfsx $frD, $src", IIC_LdStLFD,
[(set f32:$frD, (load xaddr:$src))]>;
def LFDX : XForm_25_memOp<31, 599, (outs f8rc:$frD), (ins memrr:$src),
"lfdx $frD, $src", IIC_LdStLFD,
[(set f64:$frD, (load xaddr:$src))]>;
def LFIWAX : XForm_25_memOp<31, 855, (outs f8rc:$frD), (ins memrr:$src),
"lfiwax $frD, $src", IIC_LdStLFD,
[(set f64:$frD, (PPClfiwax xoaddr:$src))]>;
def LFIWZX : XForm_25_memOp<31, 887, (outs f8rc:$frD), (ins memrr:$src),
"lfiwzx $frD, $src", IIC_LdStLFD,
[(set f64:$frD, (PPClfiwzx xoaddr:$src))]>;
}
}
// Load Multiple
def LMW : DForm_1<46, (outs gprc:$rD), (ins memri:$src),
"lmw $rD, $src", IIC_LdStLMW, []>;
//===----------------------------------------------------------------------===//
// PPC32 Store Instructions.
//
// Unindexed (r+i) Stores.
let PPC970_Unit = 2, mayStore = 1, mayLoad = 0 in {
def STB : DForm_1<38, (outs), (ins gprc:$rS, memri:$dst),
"stb $rS, $dst", IIC_LdStStore,
[(truncstorei8 i32:$rS, iaddr:$dst)]>;
def STH : DForm_1<44, (outs), (ins gprc:$rS, memri:$dst),
"sth $rS, $dst", IIC_LdStStore,
[(truncstorei16 i32:$rS, iaddr:$dst)]>;
def STW : DForm_1<36, (outs), (ins gprc:$rS, memri:$dst),
"stw $rS, $dst", IIC_LdStStore,
[(store i32:$rS, iaddr:$dst)]>;
let Predicates = [HasFPU] in {
def STFS : DForm_1<52, (outs), (ins f4rc:$rS, memri:$dst),
"stfs $rS, $dst", IIC_LdStSTFD,
[(store f32:$rS, iaddr:$dst)]>;
def STFD : DForm_1<54, (outs), (ins f8rc:$rS, memri:$dst),
"stfd $rS, $dst", IIC_LdStSTFD,
[(store f64:$rS, iaddr:$dst)]>;
}
}
// Unindexed (r+i) Stores with Update (preinc).
let PPC970_Unit = 2, mayStore = 1, mayLoad = 0 in {
def STBU : DForm_1<39, (outs ptr_rc_nor0:$ea_res), (ins gprc:$rS, memri:$dst),
"stbu $rS, $dst", IIC_LdStSTU, []>,
RegConstraint<"$dst.reg = $ea_res">, NoEncode<"$ea_res">;
def STHU : DForm_1<45, (outs ptr_rc_nor0:$ea_res), (ins gprc:$rS, memri:$dst),
"sthu $rS, $dst", IIC_LdStSTU, []>,
RegConstraint<"$dst.reg = $ea_res">, NoEncode<"$ea_res">;
def STWU : DForm_1<37, (outs ptr_rc_nor0:$ea_res), (ins gprc:$rS, memri:$dst),
"stwu $rS, $dst", IIC_LdStSTU, []>,
RegConstraint<"$dst.reg = $ea_res">, NoEncode<"$ea_res">;
let Predicates = [HasFPU] in {
def STFSU : DForm_1<53, (outs ptr_rc_nor0:$ea_res), (ins f4rc:$rS, memri:$dst),
"stfsu $rS, $dst", IIC_LdStSTFDU, []>,
RegConstraint<"$dst.reg = $ea_res">, NoEncode<"$ea_res">;
def STFDU : DForm_1<55, (outs ptr_rc_nor0:$ea_res), (ins f8rc:$rS, memri:$dst),
"stfdu $rS, $dst", IIC_LdStSTFDU, []>,
RegConstraint<"$dst.reg = $ea_res">, NoEncode<"$ea_res">;
}
}
// Patterns to match the pre-inc stores. We can't put the patterns on
// the instruction definitions directly as ISel wants the address base
// and offset to be separate operands, not a single complex operand.
def : Pat<(pre_truncsti8 i32:$rS, iPTR:$ptrreg, iaddroff:$ptroff),
(STBU $rS, iaddroff:$ptroff, $ptrreg)>;
def : Pat<(pre_truncsti16 i32:$rS, iPTR:$ptrreg, iaddroff:$ptroff),
(STHU $rS, iaddroff:$ptroff, $ptrreg)>;
def : Pat<(pre_store i32:$rS, iPTR:$ptrreg, iaddroff:$ptroff),
(STWU $rS, iaddroff:$ptroff, $ptrreg)>;
def : Pat<(pre_store f32:$rS, iPTR:$ptrreg, iaddroff:$ptroff),
(STFSU $rS, iaddroff:$ptroff, $ptrreg)>;
def : Pat<(pre_store f64:$rS, iPTR:$ptrreg, iaddroff:$ptroff),
(STFDU $rS, iaddroff:$ptroff, $ptrreg)>;
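// For example, the common prologue idiom "stwu r1, -16(r1)" stores the old
// stack pointer at r1-16 and writes the decremented address back into r1;
// the pre_store pattern above selects exactly this STWU form.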
// Indexed (r+r) Stores.
let PPC970_Unit = 2 in {
def STBX : XForm_8_memOp<31, 215, (outs), (ins gprc:$rS, memrr:$dst),
"stbx $rS, $dst", IIC_LdStStore,
[(truncstorei8 i32:$rS, xaddr:$dst)]>,
PPC970_DGroup_Cracked;
def STHX : XForm_8_memOp<31, 407, (outs), (ins gprc:$rS, memrr:$dst),
"sthx $rS, $dst", IIC_LdStStore,
[(truncstorei16 i32:$rS, xaddr:$dst)]>,
PPC970_DGroup_Cracked;
def STWX : XForm_8_memOp<31, 151, (outs), (ins gprc:$rS, memrr:$dst),
"stwx $rS, $dst", IIC_LdStStore,
[(store i32:$rS, xaddr:$dst)]>,
PPC970_DGroup_Cracked;
def STHBRX: XForm_8_memOp<31, 918, (outs), (ins gprc:$rS, memrr:$dst),
"sthbrx $rS, $dst", IIC_LdStStore,
[(PPCstbrx i32:$rS, xoaddr:$dst, i16)]>,
PPC970_DGroup_Cracked;
def STWBRX: XForm_8_memOp<31, 662, (outs), (ins gprc:$rS, memrr:$dst),
"stwbrx $rS, $dst", IIC_LdStStore,
[(PPCstbrx i32:$rS, xoaddr:$dst, i32)]>,
PPC970_DGroup_Cracked;
let Predicates = [HasFPU] in {
def STFIWX: XForm_28_memOp<31, 983, (outs), (ins f8rc:$frS, memrr:$dst),
"stfiwx $frS, $dst", IIC_LdStSTFD,
[(PPCstfiwx f64:$frS, xoaddr:$dst)]>;
def STFSX : XForm_28_memOp<31, 663, (outs), (ins f4rc:$frS, memrr:$dst),
"stfsx $frS, $dst", IIC_LdStSTFD,
[(store f32:$frS, xaddr:$dst)]>;
def STFDX : XForm_28_memOp<31, 727, (outs), (ins f8rc:$frS, memrr:$dst),
"stfdx $frS, $dst", IIC_LdStSTFD,
[(store f64:$frS, xaddr:$dst)]>;
}
}
// Indexed (r+r) Stores with Update (preinc).
let PPC970_Unit = 2, mayStore = 1, mayLoad = 0 in {
def STBUX : XForm_8_memOp<31, 247, (outs ptr_rc_nor0:$ea_res),
(ins gprc:$rS, memrr:$dst),
"stbux $rS, $dst", IIC_LdStSTUX, []>,
RegConstraint<"$dst.ptrreg = $ea_res">,
NoEncode<"$ea_res">,
PPC970_DGroup_Cracked;
def STHUX : XForm_8_memOp<31, 439, (outs ptr_rc_nor0:$ea_res),
(ins gprc:$rS, memrr:$dst),
"sthux $rS, $dst", IIC_LdStSTUX, []>,
RegConstraint<"$dst.ptrreg = $ea_res">,
NoEncode<"$ea_res">,
PPC970_DGroup_Cracked;
def STWUX : XForm_8_memOp<31, 183, (outs ptr_rc_nor0:$ea_res),
(ins gprc:$rS, memrr:$dst),
"stwux $rS, $dst", IIC_LdStSTUX, []>,
RegConstraint<"$dst.ptrreg = $ea_res">,
NoEncode<"$ea_res">,
PPC970_DGroup_Cracked;
let Predicates = [HasFPU] in {
def STFSUX: XForm_8_memOp<31, 695, (outs ptr_rc_nor0:$ea_res),
(ins f4rc:$rS, memrr:$dst),
"stfsux $rS, $dst", IIC_LdStSTFDU, []>,
RegConstraint<"$dst.ptrreg = $ea_res">,
NoEncode<"$ea_res">,
PPC970_DGroup_Cracked;
def STFDUX: XForm_8_memOp<31, 759, (outs ptr_rc_nor0:$ea_res),
(ins f8rc:$rS, memrr:$dst),
"stfdux $rS, $dst", IIC_LdStSTFDU, []>,
RegConstraint<"$dst.ptrreg = $ea_res">,
NoEncode<"$ea_res">,
PPC970_DGroup_Cracked;
}
}
// Patterns to match the pre-inc stores. We can't put the patterns on
// the instruction definitions directly as ISel wants the address base
// and offset to be separate operands, not a single complex operand.
def : Pat<(pre_truncsti8 i32:$rS, iPTR:$ptrreg, iPTR:$ptroff),
(STBUX $rS, $ptrreg, $ptroff)>;
def : Pat<(pre_truncsti16 i32:$rS, iPTR:$ptrreg, iPTR:$ptroff),
(STHUX $rS, $ptrreg, $ptroff)>;
def : Pat<(pre_store i32:$rS, iPTR:$ptrreg, iPTR:$ptroff),
(STWUX $rS, $ptrreg, $ptroff)>;
let Predicates = [HasFPU] in {
def : Pat<(pre_store f32:$rS, iPTR:$ptrreg, iPTR:$ptroff),
(STFSUX $rS, $ptrreg, $ptroff)>;
def : Pat<(pre_store f64:$rS, iPTR:$ptrreg, iPTR:$ptroff),
(STFDUX $rS, $ptrreg, $ptroff)>;
}
// Store Multiple
def STMW : DForm_1<47, (outs), (ins gprc:$rS, memri:$dst),
"stmw $rS, $dst", IIC_LdStLMW, []>;
def SYNC : XForm_24_sync<31, 598, (outs), (ins i32imm:$L),
"sync $L", IIC_LdStSync, []>;
let isCodeGenOnly = 1 in {
def MSYNC : XForm_24_sync<31, 598, (outs), (ins),
"msync", IIC_LdStSync, []> {
let L = 0;
}
}
def : Pat<(int_ppc_sync), (SYNC 0)>, Requires<[HasSYNC]>;
def : Pat<(int_ppc_lwsync), (SYNC 1)>, Requires<[HasSYNC]>;
def : Pat<(int_ppc_sync), (MSYNC)>, Requires<[HasOnlyMSYNC]>;
def : Pat<(int_ppc_lwsync), (MSYNC)>, Requires<[HasOnlyMSYNC]>;
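// In the L field above, "sync 0" is the full barrier (hwsync) and "sync 1"
// is the lightweight barrier (lwsync), matching the two SYNC patterns;
// embedded cores without those forms use msync instead (HasOnlyMSYNC).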
//===----------------------------------------------------------------------===//
// PPC32 Arithmetic Instructions.
//
let PPC970_Unit = 1 in { // FXU Operations.
def ADDI : DForm_2<14, (outs gprc:$rD), (ins gprc_nor0:$rA, s16imm:$imm),
"addi $rD, $rA, $imm", IIC_IntSimple,
[(set i32:$rD, (add i32:$rA, imm32SExt16:$imm))]>;
let BaseName = "addic" in {
let Defs = [CARRY] in
def ADDIC : DForm_2<12, (outs gprc:$rD), (ins gprc:$rA, s16imm:$imm),
"addic $rD, $rA, $imm", IIC_IntGeneral,
[(set i32:$rD, (addc i32:$rA, imm32SExt16:$imm))]>,
RecFormRel, PPC970_DGroup_Cracked;
let Defs = [CARRY, CR0] in
def ADDICo : DForm_2<13, (outs gprc:$rD), (ins gprc:$rA, s16imm:$imm),
"addic. $rD, $rA, $imm", IIC_IntGeneral,
[]>, isDOT, RecFormRel;
}
def ADDIS : DForm_2<15, (outs gprc:$rD), (ins gprc_nor0:$rA, s17imm:$imm),
"addis $rD, $rA, $imm", IIC_IntSimple,
[(set i32:$rD, (add i32:$rA, imm16ShiftedSExt:$imm))]>;
let isCodeGenOnly = 1 in
def LA : DForm_2<14, (outs gprc:$rD), (ins gprc_nor0:$rA, s16imm:$sym),
"la $rD, $sym($rA)", IIC_IntGeneral,
[(set i32:$rD, (add i32:$rA,
(PPClo tglobaladdr:$sym, 0)))]>;
def MULLI : DForm_2< 7, (outs gprc:$rD), (ins gprc:$rA, s16imm:$imm),
"mulli $rD, $rA, $imm", IIC_IntMulLI,
[(set i32:$rD, (mul i32:$rA, imm32SExt16:$imm))]>;
let Defs = [CARRY] in
def SUBFIC : DForm_2< 8, (outs gprc:$rD), (ins gprc:$rA, s16imm:$imm),
"subfic $rD, $rA, $imm", IIC_IntGeneral,
[(set i32:$rD, (subc imm32SExt16:$imm, i32:$rA))]>;
let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in {
def LI : DForm_2_r0<14, (outs gprc:$rD), (ins s16imm:$imm),
"li $rD, $imm", IIC_IntSimple,
[(set i32:$rD, imm32SExt16:$imm)]>;
def LIS : DForm_2_r0<15, (outs gprc:$rD), (ins s17imm:$imm),
"lis $rD, $imm", IIC_IntSimple,
[(set i32:$rD, imm16ShiftedSExt:$imm)]>;
}
}
let PPC970_Unit = 1 in { // FXU Operations.
let Defs = [CR0] in {
def ANDIo : DForm_4<28, (outs gprc:$dst), (ins gprc:$src1, u16imm:$src2),
"andi. $dst, $src1, $src2", IIC_IntGeneral,
[(set i32:$dst, (and i32:$src1, immZExt16:$src2))]>,
isDOT;
def ANDISo : DForm_4<29, (outs gprc:$dst), (ins gprc:$src1, u16imm:$src2),
"andis. $dst, $src1, $src2", IIC_IntGeneral,
[(set i32:$dst, (and i32:$src1, imm16ShiftedZExt:$src2))]>,
isDOT;
}
def ORI : DForm_4<24, (outs gprc:$dst), (ins gprc:$src1, u16imm:$src2),
"ori $dst, $src1, $src2", IIC_IntSimple,
[(set i32:$dst, (or i32:$src1, immZExt16:$src2))]>;
def ORIS : DForm_4<25, (outs gprc:$dst), (ins gprc:$src1, u16imm:$src2),
"oris $dst, $src1, $src2", IIC_IntSimple,
[(set i32:$dst, (or i32:$src1, imm16ShiftedZExt:$src2))]>;
def XORI : DForm_4<26, (outs gprc:$dst), (ins gprc:$src1, u16imm:$src2),
"xori $dst, $src1, $src2", IIC_IntSimple,
[(set i32:$dst, (xor i32:$src1, immZExt16:$src2))]>;
def XORIS : DForm_4<27, (outs gprc:$dst), (ins gprc:$src1, u16imm:$src2),
"xoris $dst, $src1, $src2", IIC_IntSimple,
[(set i32:$dst, (xor i32:$src1, imm16ShiftedZExt:$src2))]>;
def NOP : DForm_4_zero<24, (outs), (ins), "nop", IIC_IntSimple,
[]>;
let isCodeGenOnly = 1 in {
// The POWER6 and POWER7 have special group-terminating nops.
def NOP_GT_PWR6 : DForm_4_fixedreg_zero<24, 1, (outs), (ins),
"ori 1, 1, 0", IIC_IntSimple, []>;
def NOP_GT_PWR7 : DForm_4_fixedreg_zero<24, 2, (outs), (ins),
"ori 2, 2, 0", IIC_IntSimple, []>;
}
let isCompare = 1, hasSideEffects = 0 in {
def CMPWI : DForm_5_ext<11, (outs crrc:$crD), (ins gprc:$rA, s16imm:$imm),
"cmpwi $crD, $rA, $imm", IIC_IntCompare>;
def CMPLWI : DForm_6_ext<10, (outs crrc:$dst), (ins gprc:$src1, u16imm:$src2),
"cmplwi $dst, $src1, $src2", IIC_IntCompare>;
def CMPRB : X_BF3_L1_RS5_RS5<31, 192, (outs crbitrc:$BF),
(ins u1imm:$L, g8rc:$rA, g8rc:$rB),
"cmprb $BF, $L, $rA, $rB", IIC_IntCompare, []>,
Requires<[IsISA3_0]>;
}
}
let PPC970_Unit = 1, hasSideEffects = 0 in { // FXU Operations.
let isCommutable = 1 in {
defm NAND : XForm_6r<31, 476, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB),
"nand", "$rA, $rS, $rB", IIC_IntSimple,
[(set i32:$rA, (not (and i32:$rS, i32:$rB)))]>;
defm AND : XForm_6r<31, 28, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB),
"and", "$rA, $rS, $rB", IIC_IntSimple,
[(set i32:$rA, (and i32:$rS, i32:$rB))]>;
} // isCommutable
defm ANDC : XForm_6r<31, 60, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB),
"andc", "$rA, $rS, $rB", IIC_IntSimple,
[(set i32:$rA, (and i32:$rS, (not i32:$rB)))]>;
let isCommutable = 1 in {
defm OR : XForm_6r<31, 444, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB),
"or", "$rA, $rS, $rB", IIC_IntSimple,
[(set i32:$rA, (or i32:$rS, i32:$rB))]>;
defm NOR : XForm_6r<31, 124, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB),
"nor", "$rA, $rS, $rB", IIC_IntSimple,
[(set i32:$rA, (not (or i32:$rS, i32:$rB)))]>;
} // isCommutable
defm ORC : XForm_6r<31, 412, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB),
"orc", "$rA, $rS, $rB", IIC_IntSimple,
[(set i32:$rA, (or i32:$rS, (not i32:$rB)))]>;
let isCommutable = 1 in {
defm EQV : XForm_6r<31, 284, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB),
"eqv", "$rA, $rS, $rB", IIC_IntSimple,
[(set i32:$rA, (not (xor i32:$rS, i32:$rB)))]>;
defm XOR : XForm_6r<31, 316, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB),
"xor", "$rA, $rS, $rB", IIC_IntSimple,
[(set i32:$rA, (xor i32:$rS, i32:$rB))]>;
} // isCommutable
defm SLW : XForm_6r<31, 24, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB),
"slw", "$rA, $rS, $rB", IIC_IntGeneral,
[(set i32:$rA, (PPCshl i32:$rS, i32:$rB))]>;
defm SRW : XForm_6r<31, 536, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB),
"srw", "$rA, $rS, $rB", IIC_IntGeneral,
[(set i32:$rA, (PPCsrl i32:$rS, i32:$rB))]>;
defm SRAW : XForm_6rc<31, 792, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB),
"sraw", "$rA, $rS, $rB", IIC_IntShift,
[(set i32:$rA, (PPCsra i32:$rS, i32:$rB))]>;
}
let PPC970_Unit = 1 in { // FXU Operations.
let hasSideEffects = 0 in {
defm SRAWI : XForm_10rc<31, 824, (outs gprc:$rA), (ins gprc:$rS, u5imm:$SH),
"srawi", "$rA, $rS, $SH", IIC_IntShift,
[(set i32:$rA, (sra i32:$rS, (i32 imm:$SH)))]>;
defm CNTLZW : XForm_11r<31, 26, (outs gprc:$rA), (ins gprc:$rS),
"cntlzw", "$rA, $rS", IIC_IntGeneral,
[(set i32:$rA, (ctlz i32:$rS))]>;
defm CNTTZW : XForm_11r<31, 538, (outs gprc:$rA), (ins gprc:$rS),
"cnttzw", "$rA, $rS", IIC_IntGeneral,
[(set i32:$rA, (cttz i32:$rS))]>, Requires<[IsISA3_0]>;
defm EXTSB : XForm_11r<31, 954, (outs gprc:$rA), (ins gprc:$rS),
"extsb", "$rA, $rS", IIC_IntSimple,
[(set i32:$rA, (sext_inreg i32:$rS, i8))]>;
defm EXTSH : XForm_11r<31, 922, (outs gprc:$rA), (ins gprc:$rS),
"extsh", "$rA, $rS", IIC_IntSimple,
[(set i32:$rA, (sext_inreg i32:$rS, i16))]>;
let isCommutable = 1 in
def CMPB : XForm_6<31, 508, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB),
"cmpb $rA, $rS, $rB", IIC_IntGeneral,
[(set i32:$rA, (PPCcmpb i32:$rS, i32:$rB))]>;
}
let isCompare = 1, hasSideEffects = 0 in {
def CMPW : XForm_16_ext<31, 0, (outs crrc:$crD), (ins gprc:$rA, gprc:$rB),
"cmpw $crD, $rA, $rB", IIC_IntCompare>;
def CMPLW : XForm_16_ext<31, 32, (outs crrc:$crD), (ins gprc:$rA, gprc:$rB),
"cmplw $crD, $rA, $rB", IIC_IntCompare>;
}
}
let PPC970_Unit = 3, Predicates = [HasFPU] in { // FPU Operations.
//def FCMPO : XForm_17<63, 32, (outs CRRC:$crD), (ins FPRC:$fA, FPRC:$fB),
// "fcmpo $crD, $fA, $fB", IIC_FPCompare>;
let isCompare = 1, hasSideEffects = 0 in {
def FCMPUS : XForm_17<63, 0, (outs crrc:$crD), (ins f4rc:$fA, f4rc:$fB),
"fcmpu $crD, $fA, $fB", IIC_FPCompare>;
let Interpretation64Bit = 1, isCodeGenOnly = 1 in
def FCMPUD : XForm_17<63, 0, (outs crrc:$crD), (ins f8rc:$fA, f8rc:$fB),
"fcmpu $crD, $fA, $fB", IIC_FPCompare>;
}
def FTDIV: XForm_17<63, 128, (outs crrc:$crD), (ins f8rc:$fA, f8rc:$fB),
"ftdiv $crD, $fA, $fB", IIC_FPCompare>;
def FTSQRT: XForm_17a<63, 160, (outs crrc:$crD), (ins f8rc:$fB),
"ftsqrt $crD, $fB", IIC_FPCompare>;
let Uses = [RM] in {
let hasSideEffects = 0 in {
defm FCTIW : XForm_26r<63, 14, (outs f8rc:$frD), (ins f8rc:$frB),
"fctiw", "$frD, $frB", IIC_FPGeneral,
[]>;
defm FCTIWU : XForm_26r<63, 142, (outs f8rc:$frD), (ins f8rc:$frB),
"fctiwu", "$frD, $frB", IIC_FPGeneral,
[]>;
defm FCTIWZ : XForm_26r<63, 15, (outs f8rc:$frD), (ins f8rc:$frB),
"fctiwz", "$frD, $frB", IIC_FPGeneral,
[(set f64:$frD, (PPCfctiwz f64:$frB))]>;
defm FRSP : XForm_26r<63, 12, (outs f4rc:$frD), (ins f8rc:$frB),
"frsp", "$frD, $frB", IIC_FPGeneral,
[(set f32:$frD, (fpround f64:$frB))]>;
let Interpretation64Bit = 1, isCodeGenOnly = 1 in
defm FRIND : XForm_26r<63, 392, (outs f8rc:$frD), (ins f8rc:$frB),
"frin", "$frD, $frB", IIC_FPGeneral,
[(set f64:$frD, (fround f64:$frB))]>;
defm FRINS : XForm_26r<63, 392, (outs f4rc:$frD), (ins f4rc:$frB),
"frin", "$frD, $frB", IIC_FPGeneral,
[(set f32:$frD, (fround f32:$frB))]>;
}
let hasSideEffects = 0 in {
let Interpretation64Bit = 1, isCodeGenOnly = 1 in
defm FRIPD : XForm_26r<63, 456, (outs f8rc:$frD), (ins f8rc:$frB),
"frip", "$frD, $frB", IIC_FPGeneral,
[(set f64:$frD, (fceil f64:$frB))]>;
defm FRIPS : XForm_26r<63, 456, (outs f4rc:$frD), (ins f4rc:$frB),
"frip", "$frD, $frB", IIC_FPGeneral,
[(set f32:$frD, (fceil f32:$frB))]>;
let Interpretation64Bit = 1, isCodeGenOnly = 1 in
defm FRIZD : XForm_26r<63, 424, (outs f8rc:$frD), (ins f8rc:$frB),
"friz", "$frD, $frB", IIC_FPGeneral,
[(set f64:$frD, (ftrunc f64:$frB))]>;
defm FRIZS : XForm_26r<63, 424, (outs f4rc:$frD), (ins f4rc:$frB),
"friz", "$frD, $frB", IIC_FPGeneral,
[(set f32:$frD, (ftrunc f32:$frB))]>;
let Interpretation64Bit = 1, isCodeGenOnly = 1 in
defm FRIMD : XForm_26r<63, 488, (outs f8rc:$frD), (ins f8rc:$frB),
"frim", "$frD, $frB", IIC_FPGeneral,
[(set f64:$frD, (ffloor f64:$frB))]>;
defm FRIMS : XForm_26r<63, 488, (outs f4rc:$frD), (ins f4rc:$frB),
"frim", "$frD, $frB", IIC_FPGeneral,
[(set f32:$frD, (ffloor f32:$frB))]>;
defm FSQRT : XForm_26r<63, 22, (outs f8rc:$frD), (ins f8rc:$frB),
"fsqrt", "$frD, $frB", IIC_FPSqrtD,
[(set f64:$frD, (fsqrt f64:$frB))]>;
defm FSQRTS : XForm_26r<59, 22, (outs f4rc:$frD), (ins f4rc:$frB),
"fsqrts", "$frD, $frB", IIC_FPSqrtS,
[(set f32:$frD, (fsqrt f32:$frB))]>;
}
}
}
/// Note that FMR is defined as a pseudo-op on the PPC970 because such copies
/// are often coalesced away and we don't want the dispatch group builder to
/// think that they will fill slots (which could cause the load of an LSU
/// reject to sneak into a d-group with a store).
let hasSideEffects = 0, Predicates = [HasFPU] in
defm FMR : XForm_26r<63, 72, (outs f4rc:$frD), (ins f4rc:$frB),
"fmr", "$frD, $frB", IIC_FPGeneral,
[]>, // (set f32:$frD, f32:$frB)
PPC970_Unit_Pseudo;
let PPC970_Unit = 3, hasSideEffects = 0, Predicates = [HasFPU] in { // FPU Operations.
// These are artificially split into two different forms, for 4/8 byte FP.
defm FABSS : XForm_26r<63, 264, (outs f4rc:$frD), (ins f4rc:$frB),
"fabs", "$frD, $frB", IIC_FPGeneral,
[(set f32:$frD, (fabs f32:$frB))]>;
let Interpretation64Bit = 1, isCodeGenOnly = 1 in
defm FABSD : XForm_26r<63, 264, (outs f8rc:$frD), (ins f8rc:$frB),
"fabs", "$frD, $frB", IIC_FPGeneral,
[(set f64:$frD, (fabs f64:$frB))]>;
defm FNABSS : XForm_26r<63, 136, (outs f4rc:$frD), (ins f4rc:$frB),
"fnabs", "$frD, $frB", IIC_FPGeneral,
[(set f32:$frD, (fneg (fabs f32:$frB)))]>;
let Interpretation64Bit = 1, isCodeGenOnly = 1 in
defm FNABSD : XForm_26r<63, 136, (outs f8rc:$frD), (ins f8rc:$frB),
"fnabs", "$frD, $frB", IIC_FPGeneral,
[(set f64:$frD, (fneg (fabs f64:$frB)))]>;
defm FNEGS : XForm_26r<63, 40, (outs f4rc:$frD), (ins f4rc:$frB),
"fneg", "$frD, $frB", IIC_FPGeneral,
[(set f32:$frD, (fneg f32:$frB))]>;
let Interpretation64Bit = 1, isCodeGenOnly = 1 in
defm FNEGD : XForm_26r<63, 40, (outs f8rc:$frD), (ins f8rc:$frB),
"fneg", "$frD, $frB", IIC_FPGeneral,
[(set f64:$frD, (fneg f64:$frB))]>;
defm FCPSGNS : XForm_28r<63, 8, (outs f4rc:$frD), (ins f4rc:$frA, f4rc:$frB),
"fcpsgn", "$frD, $frA, $frB", IIC_FPGeneral,
[(set f32:$frD, (fcopysign f32:$frB, f32:$frA))]>;
let Interpretation64Bit = 1, isCodeGenOnly = 1 in
defm FCPSGND : XForm_28r<63, 8, (outs f8rc:$frD), (ins f8rc:$frA, f8rc:$frB),
"fcpsgn", "$frD, $frA, $frB", IIC_FPGeneral,
[(set f64:$frD, (fcopysign f64:$frB, f64:$frA))]>;
// Reciprocal estimates.
defm FRE : XForm_26r<63, 24, (outs f8rc:$frD), (ins f8rc:$frB),
"fre", "$frD, $frB", IIC_FPGeneral,
[(set f64:$frD, (PPCfre f64:$frB))]>;
defm FRES : XForm_26r<59, 24, (outs f4rc:$frD), (ins f4rc:$frB),
"fres", "$frD, $frB", IIC_FPGeneral,
[(set f32:$frD, (PPCfre f32:$frB))]>;
defm FRSQRTE : XForm_26r<63, 26, (outs f8rc:$frD), (ins f8rc:$frB),
"frsqrte", "$frD, $frB", IIC_FPGeneral,
[(set f64:$frD, (PPCfrsqrte f64:$frB))]>;
defm FRSQRTES : XForm_26r<59, 26, (outs f4rc:$frD), (ins f4rc:$frB),
"frsqrtes", "$frD, $frB", IIC_FPGeneral,
[(set f32:$frD, (PPCfrsqrte f32:$frB))]>;
}
// XL-Form instructions. Condition register logical ops.
//
let hasSideEffects = 0 in
def MCRF : XLForm_3<19, 0, (outs crrc:$BF), (ins crrc:$BFA),
"mcrf $BF, $BFA", IIC_BrMCR>,
PPC970_DGroup_First, PPC970_Unit_CRU;
// FIXME: According to the ISA (section 2.5.1 of version 2.06), the
// condition-register logical instructions have preferred forms. Specifically,
// it is preferred that the bit specified by the BT field be in the same
// condition register as that specified by the bit BB. We might want to account
// for this via hinting the register allocator and anti-dep breakers, or we
// could constrain the register class to force this constraint and then loosen
// it during register allocation via convertToThreeAddress or some similar
// mechanism.
let isCommutable = 1 in {
def CRAND : XLForm_1<19, 257, (outs crbitrc:$CRD),
(ins crbitrc:$CRA, crbitrc:$CRB),
"crand $CRD, $CRA, $CRB", IIC_BrCR,
[(set i1:$CRD, (and i1:$CRA, i1:$CRB))]>;
def CRNAND : XLForm_1<19, 225, (outs crbitrc:$CRD),
(ins crbitrc:$CRA, crbitrc:$CRB),
"crnand $CRD, $CRA, $CRB", IIC_BrCR,
[(set i1:$CRD, (not (and i1:$CRA, i1:$CRB)))]>;
def CROR : XLForm_1<19, 449, (outs crbitrc:$CRD),
(ins crbitrc:$CRA, crbitrc:$CRB),
"cror $CRD, $CRA, $CRB", IIC_BrCR,
[(set i1:$CRD, (or i1:$CRA, i1:$CRB))]>;
def CRXOR : XLForm_1<19, 193, (outs crbitrc:$CRD),
(ins crbitrc:$CRA, crbitrc:$CRB),
"crxor $CRD, $CRA, $CRB", IIC_BrCR,
[(set i1:$CRD, (xor i1:$CRA, i1:$CRB))]>;
def CRNOR : XLForm_1<19, 33, (outs crbitrc:$CRD),
(ins crbitrc:$CRA, crbitrc:$CRB),
"crnor $CRD, $CRA, $CRB", IIC_BrCR,
[(set i1:$CRD, (not (or i1:$CRA, i1:$CRB)))]>;
def CREQV : XLForm_1<19, 289, (outs crbitrc:$CRD),
(ins crbitrc:$CRA, crbitrc:$CRB),
"creqv $CRD, $CRA, $CRB", IIC_BrCR,
[(set i1:$CRD, (not (xor i1:$CRA, i1:$CRB)))]>;
} // isCommutable
def CRANDC : XLForm_1<19, 129, (outs crbitrc:$CRD),
(ins crbitrc:$CRA, crbitrc:$CRB),
"crandc $CRD, $CRA, $CRB", IIC_BrCR,
[(set i1:$CRD, (and i1:$CRA, (not i1:$CRB)))]>;
def CRORC : XLForm_1<19, 417, (outs crbitrc:$CRD),
(ins crbitrc:$CRA, crbitrc:$CRB),
"crorc $CRD, $CRA, $CRB", IIC_BrCR,
[(set i1:$CRD, (or i1:$CRA, (not i1:$CRB)))]>;
let isCodeGenOnly = 1 in {
let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
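// These exploit the identities x eqv x = 1 and x xor x = 0: a single
// read-modify-write of the bit sets or clears it regardless of its prior
// value, so no source value is needed.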
def CRSET : XLForm_1_ext<19, 289, (outs crbitrc:$dst), (ins),
"creqv $dst, $dst, $dst", IIC_BrCR,
[(set i1:$dst, 1)]>;
def CRUNSET: XLForm_1_ext<19, 193, (outs crbitrc:$dst), (ins),
"crxor $dst, $dst, $dst", IIC_BrCR,
[(set i1:$dst, 0)]>;
}
let Defs = [CR1EQ], CRD = 6 in {
def CR6SET : XLForm_1_ext<19, 289, (outs), (ins),
"creqv 6, 6, 6", IIC_BrCR,
[(PPCcr6set)]>;
def CR6UNSET: XLForm_1_ext<19, 193, (outs), (ins),
"crxor 6, 6, 6", IIC_BrCR,
[(PPCcr6unset)]>;
}
}
// XFX-Form instructions. Instructions that deal with SPRs.
//
def MFSPR : XFXForm_1<31, 339, (outs gprc:$RT), (ins i32imm:$SPR),
"mfspr $RT, $SPR", IIC_SprMFSPR>;
def MTSPR : XFXForm_1<31, 467, (outs), (ins i32imm:$SPR, gprc:$RT),
"mtspr $SPR, $RT", IIC_SprMTSPR>;
def MFTB : XFXForm_1<31, 371, (outs gprc:$RT), (ins i32imm:$SPR),
"mftb $RT, $SPR", IIC_SprMFTB>;
def MFPMR : XFXForm_1<31, 334, (outs gprc:$RT), (ins i32imm:$SPR),
"mfpmr $RT, $SPR", IIC_SprMFPMR>;
def MTPMR : XFXForm_1<31, 462, (outs), (ins i32imm:$SPR, gprc:$RT),
"mtpmr $SPR, $RT", IIC_SprMTPMR>;
// A pseudo-instruction used to implement the read of the 64-bit cycle counter
// on a 32-bit target.
let hasSideEffects = 1 in
def ReadTB : PPCCustomInserterPseudo<(outs gprc:$lo, gprc:$hi), (ins),
"#ReadTB", []>;
let Uses = [CTR] in {
def MFCTR : XFXForm_1_ext<31, 339, 9, (outs gprc:$rT), (ins),
"mfctr $rT", IIC_SprMFSPR>,
PPC970_DGroup_First, PPC970_Unit_FXU;
}
let Defs = [CTR], Pattern = [(PPCmtctr i32:$rS)] in {
def MTCTR : XFXForm_7_ext<31, 467, 9, (outs), (ins gprc:$rS),
"mtctr $rS", IIC_SprMTSPR>,
PPC970_DGroup_First, PPC970_Unit_FXU;
}
let hasSideEffects = 1, isCodeGenOnly = 1, Defs = [CTR] in {
let Pattern = [(int_set_loop_iterations i32:$rS)] in
def MTCTRloop : XFXForm_7_ext<31, 467, 9, (outs), (ins gprc:$rS),
"mtctr $rS", IIC_SprMTSPR>,
PPC970_DGroup_First, PPC970_Unit_FXU;
}
let Defs = [LR] in {
def MTLR : XFXForm_7_ext<31, 467, 8, (outs), (ins gprc:$rS),
"mtlr $rS", IIC_SprMTSPR>,
PPC970_DGroup_First, PPC970_Unit_FXU;
}
let Uses = [LR] in {
def MFLR : XFXForm_1_ext<31, 339, 8, (outs gprc:$rT), (ins),
"mflr $rT", IIC_SprMFSPR>,
PPC970_DGroup_First, PPC970_Unit_FXU;
}
let isCodeGenOnly = 1 in {
// Move to/from VRSAVE: despite being a SPR, the VRSAVE register is renamed
// like a GPR on the PPC970. As such, copies in and out have the same
// performance characteristics as an OR instruction.
def MTVRSAVE : XFXForm_7_ext<31, 467, 256, (outs), (ins gprc:$rS),
"mtspr 256, $rS", IIC_IntGeneral>,
PPC970_DGroup_Single, PPC970_Unit_FXU;
def MFVRSAVE : XFXForm_1_ext<31, 339, 256, (outs gprc:$rT), (ins),
"mfspr $rT, 256", IIC_IntGeneral>,
PPC970_DGroup_First, PPC970_Unit_FXU;
def MTVRSAVEv : XFXForm_7_ext<31, 467, 256,
(outs VRSAVERC:$reg), (ins gprc:$rS),
"mtspr 256, $rS", IIC_IntGeneral>,
PPC970_DGroup_Single, PPC970_Unit_FXU;
def MFVRSAVEv : XFXForm_1_ext<31, 339, 256, (outs gprc:$rT),
(ins VRSAVERC:$reg),
"mfspr $rT, 256", IIC_IntGeneral>,
PPC970_DGroup_First, PPC970_Unit_FXU;
}
// Aliases mapping mtvrsave/mfvrsave to mtspr/mfspr.
def : InstAlias<"mtvrsave $rS", (MTVRSAVE gprc:$rS)>;
def : InstAlias<"mfvrsave $rS", (MFVRSAVE gprc:$rS)>;
// SPILL_VRSAVE - Indicate that we're dumping the VRSAVE register,
// so we'll need to scavenge a register for it.
let mayStore = 1 in
def SPILL_VRSAVE : PPCEmitTimePseudo<(outs), (ins VRSAVERC:$vrsave, memri:$F),
"#SPILL_VRSAVE", []>;
// RESTORE_VRSAVE - Indicate that we're restoring the VRSAVE register (previously
// spilled), so we'll need to scavenge a register for it.
let mayLoad = 1 in
def RESTORE_VRSAVE : PPCEmitTimePseudo<(outs VRSAVERC:$vrsave), (ins memri:$F),
"#RESTORE_VRSAVE", []>;
let hasSideEffects = 0 in {
// mtocrf's input needs to be prepared by shifting by an amount dependent
// on the cr register selected. Thus, post-ra anti-dep breaking must not
// later change that register assignment.
let hasExtraDefRegAllocReq = 1 in {
def MTOCRF: XFXForm_5a<31, 144, (outs crbitm:$FXM), (ins gprc:$ST),
"mtocrf $FXM, $ST", IIC_BrMCRX>,
PPC970_DGroup_First, PPC970_Unit_CRU;
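// For example (illustrative): "mtocrf 0x80, r3" copies the most-significant
// nibble of r3 into CR0, so a value destined for CR field n must first be
// shifted into bit positions 4*n .. 4*n+3, counting from the MSB.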
// Similarly to mtocrf, the mask for mtcrf must be prepared in a way that
// is dependent on the cr fields being set.
def MTCRF : XFXForm_5<31, 144, (outs), (ins i32imm:$FXM, gprc:$rS),
"mtcrf $FXM, $rS", IIC_BrMCRX>,
PPC970_MicroCode, PPC970_Unit_CRU;
} // hasExtraDefRegAllocReq = 1
// mfocrf's input needs to be prepared by shifting by an amount dependent
// on the cr register selected. Thus, post-ra anti-dep breaking must not
// later change that register assignment.
let hasExtraSrcRegAllocReq = 1 in {
def MFOCRF: XFXForm_5a<31, 19, (outs gprc:$rT), (ins crbitm:$FXM),
"mfocrf $rT, $FXM", IIC_SprMFCRF>,
PPC970_DGroup_First, PPC970_Unit_CRU;
// Similarly to mfocrf, the mask for mfcrf must be prepared in a way that
// is dependent on the cr fields being copied.
def MFCR : XFXForm_3<31, 19, (outs gprc:$rT), (ins),
"mfcr $rT", IIC_SprMFCR>,
PPC970_MicroCode, PPC970_Unit_CRU;
} // hasExtraSrcRegAllocReq = 1
def MCRXRX : X_BF3<31, 576, (outs crrc:$BF), (ins),
"mcrxrx $BF", IIC_BrMCRX>, Requires<[IsISA3_0]>;
} // hasSideEffects = 0
let Predicates = [HasFPU] in {
// Custom inserter instruction to perform FADD in round-to-zero mode.
let Uses = [RM] in {
def FADDrtz: PPCCustomInserterPseudo<(outs f8rc:$FRT), (ins f8rc:$FRA, f8rc:$FRB), "",
[(set f64:$FRT, (PPCfaddrtz f64:$FRA, f64:$FRB))]>;
}
// The above pseudo gets expanded to make use of the following instructions
// to manipulate FPSCR. Note that FPSCR is not modeled at the DAG level.
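// Sketch of that expansion (the custom inserter emits the real sequence):
//   mffs   f0            ; save FPSCR
//   mtfsb1 31            ; RN <- 0b01 (round toward zero)
//   mtfsb0 30
//   fadd   fD, fA, fB    ; the add, now in round-to-zero mode
//   mtfsf  1, f0         ; restore the rounding-mode field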
let Uses = [RM], Defs = [RM] in {
def MTFSB0 : XForm_43<63, 70, (outs), (ins u5imm:$FM),
"mtfsb0 $FM", IIC_IntMTFSB0, []>,
PPC970_DGroup_Single, PPC970_Unit_FPU;
def MTFSB1 : XForm_43<63, 38, (outs), (ins u5imm:$FM),
"mtfsb1 $FM", IIC_IntMTFSB0, []>,
PPC970_DGroup_Single, PPC970_Unit_FPU;
let isCodeGenOnly = 1 in
def MTFSFb : XFLForm<63, 711, (outs), (ins i32imm:$FM, f8rc:$rT),
"mtfsf $FM, $rT", IIC_IntMTFSB0, []>,
PPC970_DGroup_Single, PPC970_Unit_FPU;
}
let Uses = [RM] in {
def MFFS : XForm_42<63, 583, (outs f8rc:$rT), (ins),
"mffs $rT", IIC_IntMFFS,
[(set f64:$rT, (PPCmffs))]>,
PPC970_DGroup_Single, PPC970_Unit_FPU;
let Defs = [CR1] in
def MFFSo : XForm_42<63, 583, (outs f8rc:$rT), (ins),
"mffs. $rT", IIC_IntMFFS, []>, isDOT;
def MFFSCE : X_FRT5_XO2_XO3_XO10<63, 0, 1, 583, (outs f8rc:$rT), (ins),
"mffsce $rT", IIC_IntMFFS, []>,
PPC970_DGroup_Single, PPC970_Unit_FPU;
def MFFSCDRN : X_FRT5_XO2_XO3_FRB5_XO10<63, 2, 4, 583, (outs f8rc:$rT),
(ins f8rc:$FRB), "mffscdrn $rT, $FRB",
IIC_IntMFFS, []>,
PPC970_DGroup_Single, PPC970_Unit_FPU;
def MFFSCDRNI : X_FRT5_XO2_XO3_DRM3_XO10<63, 2, 5, 583, (outs f8rc:$rT),
(ins u3imm:$DRM),
"mffscdrni $rT, $DRM",
IIC_IntMFFS, []>,
PPC970_DGroup_Single, PPC970_Unit_FPU;
def MFFSCRN : X_FRT5_XO2_XO3_FRB5_XO10<63, 2, 6, 583, (outs f8rc:$rT),
(ins f8rc:$FRB), "mffscrn $rT, $FRB",
IIC_IntMFFS, []>,
PPC970_DGroup_Single, PPC970_Unit_FPU;
def MFFSCRNI : X_FRT5_XO2_XO3_RM2_X10<63, 2, 7, 583, (outs f8rc:$rT),
(ins u2imm:$RM), "mffscrni $rT, $RM",
IIC_IntMFFS, []>,
PPC970_DGroup_Single, PPC970_Unit_FPU;
def MFFSL : X_FRT5_XO2_XO3_XO10<63, 3, 0, 583, (outs f8rc:$rT), (ins),
"mffsl $rT", IIC_IntMFFS, []>,
PPC970_DGroup_Single, PPC970_Unit_FPU;
}
}
let Predicates = [IsISA3_0] in {
def MODSW : XForm_8<31, 779, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB),
"modsw $rT, $rA, $rB", IIC_IntDivW,
[(set i32:$rT, (srem i32:$rA, i32:$rB))]>;
def MODUW : XForm_8<31, 267, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB),
"moduw $rT, $rA, $rB", IIC_IntDivW,
[(set i32:$rT, (urem i32:$rA, i32:$rB))]>;
}
let PPC970_Unit = 1, hasSideEffects = 0 in { // FXU Operations.
// XO-Form instructions. Arithmetic instructions that can set the overflow bit.
let isCommutable = 1 in
-defm ADD4 : XOForm_1r<31, 266, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB),
- "add", "$rT, $rA, $rB", IIC_IntSimple,
- [(set i32:$rT, (add i32:$rA, i32:$rB))]>;
+defm ADD4 : XOForm_1rx<31, 266, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB),
+ "add", "$rT, $rA, $rB", IIC_IntSimple,
+ [(set i32:$rT, (add i32:$rA, i32:$rB))]>;
let isCodeGenOnly = 1 in
def ADD4TLS : XOForm_1<31, 266, 0, (outs gprc:$rT), (ins gprc:$rA, tlsreg32:$rB),
"add $rT, $rA, $rB", IIC_IntSimple,
[(set i32:$rT, (add i32:$rA, tglobaltlsaddr:$rB))]>;
let isCommutable = 1 in
defm ADDC : XOForm_1rc<31, 10, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB),
"addc", "$rT, $rA, $rB", IIC_IntGeneral,
[(set i32:$rT, (addc i32:$rA, i32:$rB))]>,
PPC970_DGroup_Cracked;
defm DIVW : XOForm_1rcr<31, 491, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB),
"divw", "$rT, $rA, $rB", IIC_IntDivW,
[(set i32:$rT, (sdiv i32:$rA, i32:$rB))]>;
defm DIVWU : XOForm_1rcr<31, 459, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB),
"divwu", "$rT, $rA, $rB", IIC_IntDivW,
[(set i32:$rT, (udiv i32:$rA, i32:$rB))]>;
-def DIVWE : XOForm_1<31, 427, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB),
- "divwe $rT, $rA, $rB", IIC_IntDivW,
- [(set i32:$rT, (int_ppc_divwe gprc:$rA, gprc:$rB))]>,
- Requires<[HasExtDiv]>;
-let Defs = [CR0] in
-def DIVWEo : XOForm_1<31, 427, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB),
- "divwe. $rT, $rA, $rB", IIC_IntDivW,
- []>, isDOT, PPC970_DGroup_Cracked, PPC970_DGroup_First,
- Requires<[HasExtDiv]>;
-def DIVWEU : XOForm_1<31, 395, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB),
- "divweu $rT, $rA, $rB", IIC_IntDivW,
- [(set i32:$rT, (int_ppc_divweu gprc:$rA, gprc:$rB))]>,
- Requires<[HasExtDiv]>;
-let Defs = [CR0] in
-def DIVWEUo : XOForm_1<31, 395, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB),
- "divweu. $rT, $rA, $rB", IIC_IntDivW,
- []>, isDOT, PPC970_DGroup_Cracked, PPC970_DGroup_First,
- Requires<[HasExtDiv]>;
+defm DIVWE : XOForm_1rcr<31, 427, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB),
+ "divwe", "$rT, $rA, $rB", IIC_IntDivW,
+ [(set i32:$rT, (int_ppc_divwe gprc:$rA, gprc:$rB))]>,
+ Requires<[HasExtDiv]>;
+defm DIVWEU : XOForm_1rcr<31, 395, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB),
+ "divweu", "$rT, $rA, $rB", IIC_IntDivW,
+ [(set i32:$rT, (int_ppc_divweu gprc:$rA, gprc:$rB))]>,
+ Requires<[HasExtDiv]>;
let isCommutable = 1 in {
defm MULHW : XOForm_1r<31, 75, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB),
"mulhw", "$rT, $rA, $rB", IIC_IntMulHW,
[(set i32:$rT, (mulhs i32:$rA, i32:$rB))]>;
defm MULHWU : XOForm_1r<31, 11, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB),
"mulhwu", "$rT, $rA, $rB", IIC_IntMulHWU,
[(set i32:$rT, (mulhu i32:$rA, i32:$rB))]>;
-defm MULLW : XOForm_1r<31, 235, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB),
- "mullw", "$rT, $rA, $rB", IIC_IntMulHW,
- [(set i32:$rT, (mul i32:$rA, i32:$rB))]>;
+defm MULLW : XOForm_1rx<31, 235, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB),
+ "mullw", "$rT, $rA, $rB", IIC_IntMulHW,
+ [(set i32:$rT, (mul i32:$rA, i32:$rB))]>;
} // isCommutable
-defm SUBF : XOForm_1r<31, 40, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB),
- "subf", "$rT, $rA, $rB", IIC_IntGeneral,
- [(set i32:$rT, (sub i32:$rB, i32:$rA))]>;
+defm SUBF : XOForm_1rx<31, 40, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB),
+ "subf", "$rT, $rA, $rB", IIC_IntGeneral,
+ [(set i32:$rT, (sub i32:$rB, i32:$rA))]>;
defm SUBFC : XOForm_1rc<31, 8, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB),
"subfc", "$rT, $rA, $rB", IIC_IntGeneral,
[(set i32:$rT, (subc i32:$rB, i32:$rA))]>,
PPC970_DGroup_Cracked;
defm NEG : XOForm_3r<31, 104, 0, (outs gprc:$rT), (ins gprc:$rA),
"neg", "$rT, $rA", IIC_IntSimple,
[(set i32:$rT, (ineg i32:$rA))]>;
let Uses = [CARRY] in {
let isCommutable = 1 in
defm ADDE : XOForm_1rc<31, 138, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB),
"adde", "$rT, $rA, $rB", IIC_IntGeneral,
[(set i32:$rT, (adde i32:$rA, i32:$rB))]>;
defm ADDME : XOForm_3rc<31, 234, 0, (outs gprc:$rT), (ins gprc:$rA),
"addme", "$rT, $rA", IIC_IntGeneral,
[(set i32:$rT, (adde i32:$rA, -1))]>;
defm ADDZE : XOForm_3rc<31, 202, 0, (outs gprc:$rT), (ins gprc:$rA),
"addze", "$rT, $rA", IIC_IntGeneral,
[(set i32:$rT, (adde i32:$rA, 0))]>;
defm SUBFE : XOForm_1rc<31, 136, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB),
"subfe", "$rT, $rA, $rB", IIC_IntGeneral,
[(set i32:$rT, (sube i32:$rB, i32:$rA))]>;
defm SUBFME : XOForm_3rc<31, 232, 0, (outs gprc:$rT), (ins gprc:$rA),
"subfme", "$rT, $rA", IIC_IntGeneral,
[(set i32:$rT, (sube -1, i32:$rA))]>;
defm SUBFZE : XOForm_3rc<31, 200, 0, (outs gprc:$rT), (ins gprc:$rA),
"subfze", "$rT, $rA", IIC_IntGeneral,
[(set i32:$rT, (sube 0, i32:$rA))]>;
}
}
// A-Form instructions. Most of the instructions executed in the FPU are of
// this type.
//
let PPC970_Unit = 3, hasSideEffects = 0, Predicates = [HasFPU] in { // FPU Operations.
let Uses = [RM] in {
let isCommutable = 1 in {
defm FMADD : AForm_1r<63, 29,
(outs f8rc:$FRT), (ins f8rc:$FRA, f8rc:$FRC, f8rc:$FRB),
"fmadd", "$FRT, $FRA, $FRC, $FRB", IIC_FPFused,
[(set f64:$FRT, (fma f64:$FRA, f64:$FRC, f64:$FRB))]>;
defm FMADDS : AForm_1r<59, 29,
(outs f4rc:$FRT), (ins f4rc:$FRA, f4rc:$FRC, f4rc:$FRB),
"fmadds", "$FRT, $FRA, $FRC, $FRB", IIC_FPGeneral,
[(set f32:$FRT, (fma f32:$FRA, f32:$FRC, f32:$FRB))]>;
defm FMSUB : AForm_1r<63, 28,
(outs f8rc:$FRT), (ins f8rc:$FRA, f8rc:$FRC, f8rc:$FRB),
"fmsub", "$FRT, $FRA, $FRC, $FRB", IIC_FPFused,
[(set f64:$FRT,
(fma f64:$FRA, f64:$FRC, (fneg f64:$FRB)))]>;
defm FMSUBS : AForm_1r<59, 28,
(outs f4rc:$FRT), (ins f4rc:$FRA, f4rc:$FRC, f4rc:$FRB),
"fmsubs", "$FRT, $FRA, $FRC, $FRB", IIC_FPGeneral,
[(set f32:$FRT,
(fma f32:$FRA, f32:$FRC, (fneg f32:$FRB)))]>;
defm FNMADD : AForm_1r<63, 31,
(outs f8rc:$FRT), (ins f8rc:$FRA, f8rc:$FRC, f8rc:$FRB),
"fnmadd", "$FRT, $FRA, $FRC, $FRB", IIC_FPFused,
[(set f64:$FRT,
(fneg (fma f64:$FRA, f64:$FRC, f64:$FRB)))]>;
defm FNMADDS : AForm_1r<59, 31,
(outs f4rc:$FRT), (ins f4rc:$FRA, f4rc:$FRC, f4rc:$FRB),
"fnmadds", "$FRT, $FRA, $FRC, $FRB", IIC_FPGeneral,
[(set f32:$FRT,
(fneg (fma f32:$FRA, f32:$FRC, f32:$FRB)))]>;
defm FNMSUB : AForm_1r<63, 30,
(outs f8rc:$FRT), (ins f8rc:$FRA, f8rc:$FRC, f8rc:$FRB),
"fnmsub", "$FRT, $FRA, $FRC, $FRB", IIC_FPFused,
[(set f64:$FRT, (fneg (fma f64:$FRA, f64:$FRC,
(fneg f64:$FRB))))]>;
defm FNMSUBS : AForm_1r<59, 30,
(outs f4rc:$FRT), (ins f4rc:$FRA, f4rc:$FRC, f4rc:$FRB),
"fnmsubs", "$FRT, $FRA, $FRC, $FRB", IIC_FPGeneral,
[(set f32:$FRT, (fneg (fma f32:$FRA, f32:$FRC,
(fneg f32:$FRB))))]>;
} // isCommutable
}
// FSEL is artificially split into 4- and 8-byte forms for the result. To avoid
// having 4 of these, force the comparison to always be an 8-byte double (code
// should use an FMRSD if the input comparison value really wants to be a float)
// and provide 4/8-byte forms for the result and operand type.
let Interpretation64Bit = 1, isCodeGenOnly = 1 in
defm FSELD : AForm_1r<63, 23,
(outs f8rc:$FRT), (ins f8rc:$FRA, f8rc:$FRC, f8rc:$FRB),
"fsel", "$FRT, $FRA, $FRC, $FRB", IIC_FPGeneral,
[(set f64:$FRT, (PPCfsel f64:$FRA, f64:$FRC, f64:$FRB))]>;
defm FSELS : AForm_1r<63, 23,
(outs f4rc:$FRT), (ins f8rc:$FRA, f4rc:$FRC, f4rc:$FRB),
"fsel", "$FRT, $FRA, $FRC, $FRB", IIC_FPGeneral,
[(set f32:$FRT, (PPCfsel f64:$FRA, f32:$FRC, f32:$FRB))]>;
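// For reference, fsel implements FRT = (FRA >= 0.0) ? FRC : FRB, selecting
// FRB when FRA is a NaN, which is what the PPCfsel node relies on.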
let Uses = [RM] in {
let isCommutable = 1 in {
defm FADD : AForm_2r<63, 21,
(outs f8rc:$FRT), (ins f8rc:$FRA, f8rc:$FRB),
"fadd", "$FRT, $FRA, $FRB", IIC_FPAddSub,
[(set f64:$FRT, (fadd f64:$FRA, f64:$FRB))]>;
defm FADDS : AForm_2r<59, 21,
(outs f4rc:$FRT), (ins f4rc:$FRA, f4rc:$FRB),
"fadds", "$FRT, $FRA, $FRB", IIC_FPGeneral,
[(set f32:$FRT, (fadd f32:$FRA, f32:$FRB))]>;
} // isCommutable
defm FDIV : AForm_2r<63, 18,
(outs f8rc:$FRT), (ins f8rc:$FRA, f8rc:$FRB),
"fdiv", "$FRT, $FRA, $FRB", IIC_FPDivD,
[(set f64:$FRT, (fdiv f64:$FRA, f64:$FRB))]>;
defm FDIVS : AForm_2r<59, 18,
(outs f4rc:$FRT), (ins f4rc:$FRA, f4rc:$FRB),
"fdivs", "$FRT, $FRA, $FRB", IIC_FPDivS,
[(set f32:$FRT, (fdiv f32:$FRA, f32:$FRB))]>;
let isCommutable = 1 in {
defm FMUL : AForm_3r<63, 25,
(outs f8rc:$FRT), (ins f8rc:$FRA, f8rc:$FRC),
"fmul", "$FRT, $FRA, $FRC", IIC_FPFused,
[(set f64:$FRT, (fmul f64:$FRA, f64:$FRC))]>;
defm FMULS : AForm_3r<59, 25,
(outs f4rc:$FRT), (ins f4rc:$FRA, f4rc:$FRC),
"fmuls", "$FRT, $FRA, $FRC", IIC_FPGeneral,
[(set f32:$FRT, (fmul f32:$FRA, f32:$FRC))]>;
} // isCommutable
defm FSUB : AForm_2r<63, 20,
(outs f8rc:$FRT), (ins f8rc:$FRA, f8rc:$FRB),
"fsub", "$FRT, $FRA, $FRB", IIC_FPAddSub,
[(set f64:$FRT, (fsub f64:$FRA, f64:$FRB))]>;
defm FSUBS : AForm_2r<59, 20,
(outs f4rc:$FRT), (ins f4rc:$FRA, f4rc:$FRB),
"fsubs", "$FRT, $FRA, $FRB", IIC_FPGeneral,
[(set f32:$FRT, (fsub f32:$FRA, f32:$FRB))]>;
}
}
let hasSideEffects = 0 in {
let PPC970_Unit = 1 in { // FXU Operations.
let isSelect = 1 in
def ISEL : AForm_4<31, 15,
(outs gprc:$rT), (ins gprc_nor0:$rA, gprc:$rB, crbitrc:$cond),
"isel $rT, $rA, $rB, $cond", IIC_IntISEL,
[]>;
}
let PPC970_Unit = 1 in { // FXU Operations.
// M-Form instructions: rotate and mask instructions.
//
let isCommutable = 1 in {
// RLWIMI can be commuted if the rotate amount is zero.
defm RLWIMI : MForm_2r<20, (outs gprc:$rA),
(ins gprc:$rSi, gprc:$rS, u5imm:$SH, u5imm:$MB,
u5imm:$ME), "rlwimi", "$rA, $rS, $SH, $MB, $ME",
IIC_IntRotate, []>, PPC970_DGroup_Cracked,
RegConstraint<"$rSi = $rA">, NoEncode<"$rSi">;
}
let BaseName = "rlwinm" in {
def RLWINM : MForm_2<21,
(outs gprc:$rA), (ins gprc:$rS, u5imm:$SH, u5imm:$MB, u5imm:$ME),
"rlwinm $rA, $rS, $SH, $MB, $ME", IIC_IntGeneral,
[]>, RecFormRel;
let Defs = [CR0] in
def RLWINMo : MForm_2<21,
(outs gprc:$rA), (ins gprc:$rS, u5imm:$SH, u5imm:$MB, u5imm:$ME),
"rlwinm. $rA, $rS, $SH, $MB, $ME", IIC_IntGeneral,
[]>, isDOT, RecFormRel, PPC970_DGroup_Cracked;
}
defm RLWNM : MForm_2r<23, (outs gprc:$rA),
(ins gprc:$rS, gprc:$rB, u5imm:$MB, u5imm:$ME),
"rlwnm", "$rA, $rS, $rB, $MB, $ME", IIC_IntGeneral,
[]>;
}
} // hasSideEffects = 0
//===----------------------------------------------------------------------===//
// PowerPC Instruction Patterns
//
// Arbitrary immediate support. Implement in terms of LIS/ORI.
def : Pat<(i32 imm:$imm),
(ORI (LIS (HI16 imm:$imm)), (LO16 imm:$imm))>;
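// For illustration (register choice arbitrary), 0x12345678 materializes as:
//   lis r3, 0x1234
//   ori r3, r3, 0x5678
// since HI16/LO16 extract the high and low halfwords and ori zero-extends.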
// Implement the 'not' operation with the NOR instruction.
def i32not : OutPatFrag<(ops node:$in),
(NOR $in, $in)>;
def : Pat<(not i32:$in),
(i32not $in)>;
// ADD an arbitrary immediate.
def : Pat<(add i32:$in, imm:$imm),
(ADDIS (ADDI $in, (LO16 imm:$imm)), (HA16 imm:$imm))>;
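// HA16 is the carry-adjusted high half: because addi sign-extends its
// immediate, HA16(imm) = (imm >> 16) + ((imm & 0x8000) ? 1 : 0). For example
// (hypothetical value), imm = 0x00018000 gives an addi of -0x8000 followed by
// an addis of 0x2, i.e. -0x8000 + 0x20000 = 0x18000. The OR/XOR patterns
// below can use the plain HI16 half since ori/xori zero-extend.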
// OR an arbitrary immediate.
def : Pat<(or i32:$in, imm:$imm),
(ORIS (ORI $in, (LO16 imm:$imm)), (HI16 imm:$imm))>;
// XOR an arbitrary immediate.
def : Pat<(xor i32:$in, imm:$imm),
(XORIS (XORI $in, (LO16 imm:$imm)), (HI16 imm:$imm))>;
// SUBFIC
def : Pat<(sub imm32SExt16:$imm, i32:$in),
(SUBFIC $in, imm:$imm)>;
// SHL/SRL
def : Pat<(shl i32:$in, (i32 imm:$imm)),
(RLWINM $in, imm:$imm, 0, (SHL32 imm:$imm))>;
def : Pat<(srl i32:$in, (i32 imm:$imm)),
(RLWINM $in, (SRL32 imm:$imm), imm:$imm, 31)>;
// ROTL
def : Pat<(rotl i32:$in, i32:$sh),
(RLWNM $in, $sh, 0, 31)>;
def : Pat<(rotl i32:$in, (i32 imm:$imm)),
(RLWINM $in, imm:$imm, 0, 31)>;
// RLWNM
def : Pat<(and (rotl i32:$in, i32:$sh), maskimm32:$imm),
(RLWNM $in, $sh, (MB maskimm32:$imm), (ME maskimm32:$imm))>;
// Calls
def : Pat<(PPCcall (i32 tglobaladdr:$dst)),
(BL tglobaladdr:$dst)>;
def : Pat<(PPCcall (i32 texternalsym:$dst)),
(BL texternalsym:$dst)>;
// Calls for AIX only
def : Pat<(PPCcall (i32 mcsym:$dst)),
(BL mcsym:$dst)>;
def : Pat<(PPCcall_nop (i32 mcsym:$dst)),
(BL_NOP mcsym:$dst)>;
def : Pat<(PPCtc_return (i32 tglobaladdr:$dst), imm:$imm),
(TCRETURNdi tglobaladdr:$dst, imm:$imm)>;
def : Pat<(PPCtc_return (i32 texternalsym:$dst), imm:$imm),
(TCRETURNdi texternalsym:$dst, imm:$imm)>;
def : Pat<(PPCtc_return CTRRC:$dst, imm:$imm),
(TCRETURNri CTRRC:$dst, imm:$imm)>;
// Hi and Lo for Darwin Global Addresses.
def : Pat<(PPChi tglobaladdr:$in, 0), (LIS tglobaladdr:$in)>;
def : Pat<(PPClo tglobaladdr:$in, 0), (LI tglobaladdr:$in)>;
def : Pat<(PPChi tconstpool:$in, 0), (LIS tconstpool:$in)>;
def : Pat<(PPClo tconstpool:$in, 0), (LI tconstpool:$in)>;
def : Pat<(PPChi tjumptable:$in, 0), (LIS tjumptable:$in)>;
def : Pat<(PPClo tjumptable:$in, 0), (LI tjumptable:$in)>;
def : Pat<(PPChi tblockaddress:$in, 0), (LIS tblockaddress:$in)>;
def : Pat<(PPClo tblockaddress:$in, 0), (LI tblockaddress:$in)>;
def : Pat<(PPChi tglobaltlsaddr:$g, i32:$in),
(ADDIS $in, tglobaltlsaddr:$g)>;
def : Pat<(PPClo tglobaltlsaddr:$g, i32:$in),
(ADDI $in, tglobaltlsaddr:$g)>;
def : Pat<(add i32:$in, (PPChi tglobaladdr:$g, 0)),
(ADDIS $in, tglobaladdr:$g)>;
def : Pat<(add i32:$in, (PPChi tconstpool:$g, 0)),
(ADDIS $in, tconstpool:$g)>;
def : Pat<(add i32:$in, (PPChi tjumptable:$g, 0)),
(ADDIS $in, tjumptable:$g)>;
def : Pat<(add i32:$in, (PPChi tblockaddress:$g, 0)),
(ADDIS $in, tblockaddress:$g)>;
// Support for thread-local storage.
def PPC32GOT: PPCEmitTimePseudo<(outs gprc:$rD), (ins), "#PPC32GOT",
[(set i32:$rD, (PPCppc32GOT))]>;
// Get the _GLOBAL_OFFSET_TABLE_ in PIC mode.
// This uses two output registers, the first as the real output, the second as a
// temporary register, used internally in code generation.
def PPC32PICGOT: PPCEmitTimePseudo<(outs gprc:$rD, gprc:$rT), (ins), "#PPC32PICGOT",
[]>, NoEncode<"$rT">;
def LDgotTprelL32: PPCEmitTimePseudo<(outs gprc:$rD), (ins s16imm:$disp, gprc_nor0:$reg),
"#LDgotTprelL32",
[(set i32:$rD,
(PPCldGotTprelL tglobaltlsaddr:$disp, i32:$reg))]>;
def : Pat<(PPCaddTls i32:$in, tglobaltlsaddr:$g),
(ADD4TLS $in, tglobaltlsaddr:$g)>;
def ADDItlsgdL32 : PPCEmitTimePseudo<(outs gprc:$rD), (ins gprc_nor0:$reg, s16imm:$disp),
"#ADDItlsgdL32",
[(set i32:$rD,
(PPCaddiTlsgdL i32:$reg, tglobaltlsaddr:$disp))]>;
// LR is a true define, while the rest of the Defs are clobbers. R3 is
// explicitly defined when this op is created, so not mentioned here.
let hasExtraSrcRegAllocReq = 1, hasExtraDefRegAllocReq = 1,
Defs = [R0,R4,R5,R6,R7,R8,R9,R10,R11,R12,LR,CTR,CR0,CR1,CR5,CR6,CR7] in
def GETtlsADDR32 : PPCEmitTimePseudo<(outs gprc:$rD), (ins gprc:$reg, tlsgd32:$sym),
"GETtlsADDR32",
[(set i32:$rD,
(PPCgetTlsAddr i32:$reg, tglobaltlsaddr:$sym))]>;
// Combined op for ADDItlsgdL32 and GETtlsADDR32, late expanded. R3 and LR
// are true defines while the rest of the Defs are clobbers.
let hasExtraSrcRegAllocReq = 1, hasExtraDefRegAllocReq = 1,
Defs = [R0,R3,R4,R5,R6,R7,R8,R9,R10,R11,R12,LR,CTR,CR0,CR1,CR5,CR6,CR7] in
def ADDItlsgdLADDR32 : PPCEmitTimePseudo<(outs gprc:$rD),
(ins gprc_nor0:$reg, s16imm:$disp, tlsgd32:$sym),
"#ADDItlsgdLADDR32",
[(set i32:$rD,
(PPCaddiTlsgdLAddr i32:$reg,
tglobaltlsaddr:$disp,
tglobaltlsaddr:$sym))]>;
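// As a sketch (assuming the usual PPC32 PIC convention with the GOT pointer
// in r30), the general-dynamic sequence these pseudos expand to is roughly:
//   addi r3, r30, sym@got@tlsgd
//   bl __tls_get_addr(sym@tlsgd)
// The combined ADDItlsgdLADDR32 form keeps the pair adjacent through late
// expansion.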
def ADDItlsldL32 : PPCEmitTimePseudo<(outs gprc:$rD), (ins gprc_nor0:$reg, s16imm:$disp),
"#ADDItlsldL32",
[(set i32:$rD,
(PPCaddiTlsldL i32:$reg, tglobaltlsaddr:$disp))]>;
// LR is a true define, while the rest of the Defs are clobbers. R3 is
// explicitly defined when this op is created, so not mentioned here.
let hasExtraSrcRegAllocReq = 1, hasExtraDefRegAllocReq = 1,
Defs = [R0,R4,R5,R6,R7,R8,R9,R10,R11,R12,LR,CTR,CR0,CR1,CR5,CR6,CR7] in
def GETtlsldADDR32 : PPCEmitTimePseudo<(outs gprc:$rD), (ins gprc:$reg, tlsgd32:$sym),
"GETtlsldADDR32",
[(set i32:$rD,
(PPCgetTlsldAddr i32:$reg,
tglobaltlsaddr:$sym))]>;
// Combined op for ADDItlsldL32 and GETtlsldADDR32, late expanded. R3 and LR
// are true defines while the rest of the Defs are clobbers.
let hasExtraSrcRegAllocReq = 1, hasExtraDefRegAllocReq = 1,
Defs = [R0,R3,R4,R5,R6,R7,R8,R9,R10,R11,R12,LR,CTR,CR0,CR1,CR5,CR6,CR7] in
def ADDItlsldLADDR32 : PPCEmitTimePseudo<(outs gprc:$rD),
(ins gprc_nor0:$reg, s16imm:$disp, tlsgd32:$sym),
"#ADDItlsldLADDR32",
[(set i32:$rD,
(PPCaddiTlsldLAddr i32:$reg,
tglobaltlsaddr:$disp,
tglobaltlsaddr:$sym))]>;
def ADDIdtprelL32 : PPCEmitTimePseudo<(outs gprc:$rD), (ins gprc_nor0:$reg, s16imm:$disp),
"#ADDIdtprelL32",
[(set i32:$rD,
(PPCaddiDtprelL i32:$reg, tglobaltlsaddr:$disp))]>;
def ADDISdtprelHA32 : PPCEmitTimePseudo<(outs gprc:$rD), (ins gprc_nor0:$reg, s16imm:$disp),
"#ADDISdtprelHA32",
[(set i32:$rD,
(PPCaddisDtprelHA i32:$reg,
tglobaltlsaddr:$disp))]>;
// Support for Position-independent code
def LWZtoc : PPCEmitTimePseudo<(outs gprc:$rD), (ins tocentry32:$disp, gprc:$reg),
"#LWZtoc",
[(set i32:$rD,
(PPCtoc_entry tglobaladdr:$disp, i32:$reg))]>;
// Get Global (GOT) Base Register offset, from the word immediately preceding
// the function label.
def UpdateGBR : PPCEmitTimePseudo<(outs gprc:$rD, gprc:$rT), (ins gprc:$rI), "#UpdateGBR", []>;
// Standard shifts. These are represented separately from the real shifts above
// so that we can distinguish between shifts that allow 5-bit and 6-bit shift
// amounts.
def : Pat<(sra i32:$rS, i32:$rB),
(SRAW $rS, $rB)>;
def : Pat<(srl i32:$rS, i32:$rB),
(SRW $rS, $rB)>;
def : Pat<(shl i32:$rS, i32:$rB),
(SLW $rS, $rB)>;
def : Pat<(zextloadi1 iaddr:$src),
(LBZ iaddr:$src)>;
def : Pat<(zextloadi1 xaddr:$src),
(LBZX xaddr:$src)>;
def : Pat<(extloadi1 iaddr:$src),
(LBZ iaddr:$src)>;
def : Pat<(extloadi1 xaddr:$src),
(LBZX xaddr:$src)>;
def : Pat<(extloadi8 iaddr:$src),
(LBZ iaddr:$src)>;
def : Pat<(extloadi8 xaddr:$src),
(LBZX xaddr:$src)>;
def : Pat<(extloadi16 iaddr:$src),
(LHZ iaddr:$src)>;
def : Pat<(extloadi16 xaddr:$src),
(LHZX xaddr:$src)>;
let Predicates = [HasFPU] in {
def : Pat<(f64 (extloadf32 iaddr:$src)),
(COPY_TO_REGCLASS (LFS iaddr:$src), F8RC)>;
def : Pat<(f64 (extloadf32 xaddr:$src)),
(COPY_TO_REGCLASS (LFSX xaddr:$src), F8RC)>;
def : Pat<(f64 (fpextend f32:$src)),
(COPY_TO_REGCLASS $src, F8RC)>;
}
// Only seq_cst fences require the heavyweight sync (SYNC 0).
// All others can use the lightweight sync (SYNC 1).
// source: http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
// The rule for seq_cst is duplicated to work with both the 64-bit and 32-bit
// versions of Power.
def : Pat<(atomic_fence (i64 7), (imm)), (SYNC 0)>, Requires<[HasSYNC]>;
def : Pat<(atomic_fence (i32 7), (imm)), (SYNC 0)>, Requires<[HasSYNC]>;
def : Pat<(atomic_fence (imm), (imm)), (SYNC 1)>, Requires<[HasSYNC]>;
def : Pat<(atomic_fence (imm), (imm)), (MSYNC)>, Requires<[HasOnlyMSYNC]>;
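// Concretely, a C11/C++11 seq_cst fence (the ordering value 7 matched above
// corresponds to sequentially-consistent ordering) becomes "sync" (sync 0),
// while acquire/release/acq_rel fences become the lighter "lwsync" (sync 1);
// targets with only msync emit that instead.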
let Predicates = [HasFPU] in {
// Additional FNMSUB patterns: -a*c + b == -(a*c - b)
def : Pat<(fma (fneg f64:$A), f64:$C, f64:$B),
(FNMSUB $A, $C, $B)>;
def : Pat<(fma f64:$A, (fneg f64:$C), f64:$B),
(FNMSUB $A, $C, $B)>;
def : Pat<(fma (fneg f32:$A), f32:$C, f32:$B),
(FNMSUBS $A, $C, $B)>;
def : Pat<(fma f32:$A, (fneg f32:$C), f32:$B),
(FNMSUBS $A, $C, $B)>;
// FCOPYSIGN's operand types need not agree.
def : Pat<(fcopysign f64:$frB, f32:$frA),
(FCPSGND (COPY_TO_REGCLASS $frA, F8RC), $frB)>;
def : Pat<(fcopysign f32:$frB, f64:$frA),
(FCPSGNS (COPY_TO_REGCLASS $frA, F4RC), $frB)>;
}
include "PPCInstrAltivec.td"
include "PPCInstrSPE.td"
include "PPCInstr64Bit.td"
include "PPCInstrVSX.td"
include "PPCInstrQPX.td"
include "PPCInstrHTM.td"
def crnot : OutPatFrag<(ops node:$in),
(CRNOR $in, $in)>;
def : Pat<(not i1:$in),
(crnot $in)>;
// Patterns for arithmetic i1 operations.
def : Pat<(add i1:$a, i1:$b),
(CRXOR $a, $b)>;
def : Pat<(sub i1:$a, i1:$b),
(CRXOR $a, $b)>;
def : Pat<(mul i1:$a, i1:$b),
(CRAND $a, $b)>;
// We're sometimes asked to materialize i1 -1, which is just 1 in this case
// (-1 is used to mean all bits set).
def : Pat<(i1 -1), (CRSET)>;
// i1 extensions, implemented in terms of isel.
def : Pat<(i32 (zext i1:$in)),
(SELECT_I4 $in, (LI 1), (LI 0))>;
def : Pat<(i32 (sext i1:$in)),
(SELECT_I4 $in, (LI -1), (LI 0))>;
def : Pat<(i64 (zext i1:$in)),
(SELECT_I8 $in, (LI8 1), (LI8 0))>;
def : Pat<(i64 (sext i1:$in)),
(SELECT_I8 $in, (LI8 -1), (LI8 0))>;
// FIXME: We should choose either a zext or a sext based on other constants
// already around.
def : Pat<(i32 (anyext i1:$in)),
(SELECT_I4 $in, (LI 1), (LI 0))>;
def : Pat<(i64 (anyext i1:$in)),
(SELECT_I8 $in, (LI8 1), (LI8 0))>;
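// As a sketch, "zext i1 %b to i32" therefore becomes a select of two
// materialized constants, e.g. SELECT_I4 %b, (LI 1), (LI 0); the SELECT_*
// pseudos are custom-inserted and typically expand to an isel or a small
// branch diamond, depending on the subtarget.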
// match setcc on i1 variables.
// CRANDC is:
// 1 1 : F
// 1 0 : T
// 0 1 : F
// 0 0 : F
//
// LT is:
// -1 -1 : F
// -1 0 : T
// 0 -1 : F
// 0 0 : F
//
// ULT is:
// 1 1 : F
// 1 0 : F
// 0 1 : T
// 0 0 : F
def : Pat<(i1 (setcc i1:$s1, i1:$s2, SETLT)),
(CRANDC $s1, $s2)>;
def : Pat<(i1 (setcc i1:$s1, i1:$s2, SETULT)),
(CRANDC $s2, $s1)>;
// CRORC is:
// 1 1 : T
// 1 0 : T
// 0 1 : F
// 0 0 : T
//
// LE is:
// -1 -1 : T
// -1 0 : T
// 0 -1 : F
// 0 0 : T
//
// ULE is:
// 1 1 : T
// 1 0 : F
// 0 1 : T
// 0 0 : T
def : Pat<(i1 (setcc i1:$s1, i1:$s2, SETLE)),
(CRORC $s1, $s2)>;
def : Pat<(i1 (setcc i1:$s1, i1:$s2, SETULE)),
(CRORC $s2, $s1)>;
def : Pat<(i1 (setcc i1:$s1, i1:$s2, SETEQ)),
(CREQV $s1, $s2)>;
// GE is:
// -1 -1 : T
// -1 0 : F
// 0 -1 : T
// 0 0 : T
//
// UGE is:
// 1 1 : T
// 1 0 : T
// 0 1 : F
// 0 0 : T
def : Pat<(i1 (setcc i1:$s1, i1:$s2, SETGE)),
(CRORC $s2, $s1)>;
def : Pat<(i1 (setcc i1:$s1, i1:$s2, SETUGE)),
(CRORC $s1, $s2)>;
// GT is:
// -1 -1 : F
// -1 0 : F
// 0 -1 : T
// 0 0 : F
//
// UGT is:
// 1 1 : F
// 1 0 : T
// 0 1 : F
// 0 0 : F
def : Pat<(i1 (setcc i1:$s1, i1:$s2, SETGT)),
(CRANDC $s2, $s1)>;
def : Pat<(i1 (setcc i1:$s1, i1:$s2, SETUGT)),
(CRANDC $s1, $s2)>;
def : Pat<(i1 (setcc i1:$s1, i1:$s2, SETNE)),
(CRXOR $s1, $s2)>;
// match setcc on non-i1 (non-vector) variables. Note that SETUEQ, SETOGE,
// SETOLE, SETONE, SETULT and SETUGT should be expanded by legalize for
// floating-point types.
multiclass CRNotPat<dag pattern, dag result> {
def : Pat<pattern, (crnot result)>;
def : Pat<(not pattern), result>;
// We can also fold the crnot into an extension:
def : Pat<(i32 (zext pattern)),
(SELECT_I4 result, (LI 0), (LI 1))>;
def : Pat<(i32 (sext pattern)),
(SELECT_I4 result, (LI 0), (LI -1))>;
// Likewise, the crnot folds into the 64-bit extensions:
def : Pat<(i64 (zext pattern)),
(SELECT_I8 result, (LI8 0), (LI8 1))>;
def : Pat<(i64 (sext pattern)),
(SELECT_I8 result, (LI8 0), (LI8 -1))>;
// FIXME: We should choose either a zext or a sext based on other constants
// already around.
def : Pat<(i32 (anyext pattern)),
(SELECT_I4 result, (LI 0), (LI 1))>;
def : Pat<(i64 (anyext pattern)),
(SELECT_I8 result, (LI8 0), (LI8 1))>;
}
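// For instance, an instantiation such as
//   defm : CRNotPat<(i1 (setcc i32:$s1, i32:$s2, SETGE)),
//                   (EXTRACT_SUBREG (CMPW $s1, $s2), sub_lt)>;
// (used below) yields both "SETGE -> crnot(LT bit)" and "not SETGE -> LT
// bit", plus extension forms that fold the crnot away by swapping the
// select arms.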
// FIXME: Because of what seems like a bug in TableGen's type-inference code,
// we need to write imm:$imm in the output patterns below, not just $imm, or
// else the resulting matcher will not correctly add the immediate operand
// (making it a register operand instead).
// Extended SETCC.
multiclass ExtSetCCPat<CondCode cc, PatFrag pfrag,
OutPatFrag rfrag, OutPatFrag rfrag8> {
def : Pat<(i32 (zext (i1 (pfrag i32:$s1, cc)))),
(rfrag $s1)>;
def : Pat<(i64 (zext (i1 (pfrag i64:$s1, cc)))),
(rfrag8 $s1)>;
def : Pat<(i64 (zext (i1 (pfrag i32:$s1, cc)))),
(INSERT_SUBREG (i64 (IMPLICIT_DEF)), (rfrag $s1), sub_32)>;
def : Pat<(i32 (zext (i1 (pfrag i64:$s1, cc)))),
(EXTRACT_SUBREG (rfrag8 $s1), sub_32)>;
def : Pat<(i32 (anyext (i1 (pfrag i32:$s1, cc)))),
(rfrag $s1)>;
def : Pat<(i64 (anyext (i1 (pfrag i64:$s1, cc)))),
(rfrag8 $s1)>;
def : Pat<(i64 (anyext (i1 (pfrag i32:$s1, cc)))),
(INSERT_SUBREG (i64 (IMPLICIT_DEF)), (rfrag $s1), sub_32)>;
def : Pat<(i32 (anyext (i1 (pfrag i64:$s1, cc)))),
(EXTRACT_SUBREG (rfrag8 $s1), sub_32)>;
}
// Note that we do all inversions below with i(32|64)not, instead of using
// (xori x, 1) because on the A2 nor has single-cycle latency while xori
// has 2-cycle latency.
defm : ExtSetCCPat<SETEQ,
PatFrag<(ops node:$in, node:$cc),
(setcc $in, 0, $cc)>,
OutPatFrag<(ops node:$in),
(RLWINM (CNTLZW $in), 27, 31, 31)>,
OutPatFrag<(ops node:$in),
(RLDICL (CNTLZD $in), 58, 63)> >;
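// This works because cntlzw/cntlzd return a value in [0, 32] (resp. [0, 64])
// whose bit 5 (resp. bit 6) is set only for a zero input; the rotate-and-mask
// extracts exactly that bit, yielding (x == 0) as 0 or 1.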
defm : ExtSetCCPat<SETNE,
PatFrag<(ops node:$in, node:$cc),
(setcc $in, 0, $cc)>,
OutPatFrag<(ops node:$in),
(RLWINM (i32not (CNTLZW $in)), 27, 31, 31)>,
OutPatFrag<(ops node:$in),
(RLDICL (i64not (CNTLZD $in)), 58, 63)> >;
defm : ExtSetCCPat<SETLT,
PatFrag<(ops node:$in, node:$cc),
(setcc $in, 0, $cc)>,
OutPatFrag<(ops node:$in),
(RLWINM $in, 1, 31, 31)>,
OutPatFrag<(ops node:$in),
(RLDICL $in, 1, 63)> >;
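// Rotating left by one moves the sign bit into the low position, so the
// masked result is a logical shift right by 31 (resp. 63): (x < 0) as 0 or 1.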
defm : ExtSetCCPat<SETGE,
PatFrag<(ops node:$in, node:$cc),
(setcc $in, 0, $cc)>,
OutPatFrag<(ops node:$in),
(RLWINM (i32not $in), 1, 31, 31)>,
OutPatFrag<(ops node:$in),
(RLDICL (i64not $in), 1, 63)> >;
defm : ExtSetCCPat<SETGT,
PatFrag<(ops node:$in, node:$cc),
(setcc $in, 0, $cc)>,
OutPatFrag<(ops node:$in),
(RLWINM (ANDC (NEG $in), $in), 1, 31, 31)>,
OutPatFrag<(ops node:$in),
(RLDICL (ANDC8 (NEG8 $in), $in), 1, 63)> >;
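// The sign bit of (-x & ~x) is set exactly when x > 0: negating a positive x
// gives a negative value while x's own sign bit is clear; x == 0 yields zero,
// and for x < 0 the complement clears the sign bit.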
defm : ExtSetCCPat<SETLE,
PatFrag<(ops node:$in, node:$cc),
(setcc $in, 0, $cc)>,
OutPatFrag<(ops node:$in),
(RLWINM (ORC $in, (NEG $in)), 1, 31, 31)>,
OutPatFrag<(ops node:$in),
(RLDICL (ORC8 $in, (NEG8 $in)), 1, 63)> >;
defm : ExtSetCCPat<SETLT,
PatFrag<(ops node:$in, node:$cc),
(setcc $in, -1, $cc)>,
OutPatFrag<(ops node:$in),
(RLWINM (AND $in, (ADDI $in, 1)), 1, 31, 31)>,
OutPatFrag<(ops node:$in),
(RLDICL (AND8 $in, (ADDI8 $in, 1)), 1, 63)> >;
defm : ExtSetCCPat<SETGE,
PatFrag<(ops node:$in, node:$cc),
(setcc $in, -1, $cc)>,
OutPatFrag<(ops node:$in),
(RLWINM (NAND $in, (ADDI $in, 1)), 1, 31, 31)>,
OutPatFrag<(ops node:$in),
(RLDICL (NAND8 $in, (ADDI8 $in, 1)), 1, 63)> >;
defm : ExtSetCCPat<SETGT,
PatFrag<(ops node:$in, node:$cc),
(setcc $in, -1, $cc)>,
OutPatFrag<(ops node:$in),
(RLWINM (i32not $in), 1, 31, 31)>,
OutPatFrag<(ops node:$in),
(RLDICL (i64not $in), 1, 63)> >;
defm : ExtSetCCPat<SETLE,
PatFrag<(ops node:$in, node:$cc),
(setcc $in, -1, $cc)>,
OutPatFrag<(ops node:$in),
(RLWINM $in, 1, 31, 31)>,
OutPatFrag<(ops node:$in),
(RLDICL $in, 1, 63)> >;
// An extended SETCC with shift amount.
multiclass ExtSetCCShiftPat<CondCode cc, PatFrag pfrag,
OutPatFrag rfrag, OutPatFrag rfrag8> {
def : Pat<(i32 (zext (i1 (pfrag i32:$s1, i32:$sa, cc)))),
(rfrag $s1, $sa)>;
def : Pat<(i64 (zext (i1 (pfrag i64:$s1, i32:$sa, cc)))),
(rfrag8 $s1, $sa)>;
def : Pat<(i64 (zext (i1 (pfrag i32:$s1, i32:$sa, cc)))),
(INSERT_SUBREG (i64 (IMPLICIT_DEF)), (rfrag $s1, $sa), sub_32)>;
def : Pat<(i32 (zext (i1 (pfrag i64:$s1, i32:$sa, cc)))),
(EXTRACT_SUBREG (rfrag8 $s1, $sa), sub_32)>;
def : Pat<(i32 (anyext (i1 (pfrag i32:$s1, i32:$sa, cc)))),
(rfrag $s1, $sa)>;
def : Pat<(i64 (anyext (i1 (pfrag i64:$s1, i32:$sa, cc)))),
(rfrag8 $s1, $sa)>;
def : Pat<(i64 (anyext (i1 (pfrag i32:$s1, i32:$sa, cc)))),
(INSERT_SUBREG (i64 (IMPLICIT_DEF)), (rfrag $s1, $sa), sub_32)>;
def : Pat<(i32 (anyext (i1 (pfrag i64:$s1, i32:$sa, cc)))),
(EXTRACT_SUBREG (rfrag8 $s1, $sa), sub_32)>;
}
defm : ExtSetCCShiftPat<SETNE,
PatFrag<(ops node:$in, node:$sa, node:$cc),
(setcc (and $in, (shl 1, $sa)), 0, $cc)>,
OutPatFrag<(ops node:$in, node:$sa),
(RLWNM $in, (SUBFIC $sa, 32), 31, 31)>,
OutPatFrag<(ops node:$in, node:$sa),
(RLDCL $in, (SUBFIC $sa, 64), 63)> >;
defm : ExtSetCCShiftPat<SETEQ,
PatFrag<(ops node:$in, node:$sa, node:$cc),
(setcc (and $in, (shl 1, $sa)), 0, $cc)>,
OutPatFrag<(ops node:$in, node:$sa),
(RLWNM (i32not $in),
(SUBFIC $sa, 32), 31, 31)>,
OutPatFrag<(ops node:$in, node:$sa),
(RLDCL (i64not $in),
(SUBFIC $sa, 64), 63)> >;
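// Both forms rotate the tested bit into the low position: a left rotate by
// (32 - sa) (resp. (64 - sa)) is a right rotate by sa, so the masked result
// is ((x >> sa) & 1), computed on the complemented input in the SETEQ case.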
// SETCC for i32.
def : Pat<(i1 (setcc i32:$s1, immZExt16:$imm, SETULT)),
(EXTRACT_SUBREG (CMPLWI $s1, imm:$imm), sub_lt)>;
def : Pat<(i1 (setcc i32:$s1, imm32SExt16:$imm, SETLT)),
(EXTRACT_SUBREG (CMPWI $s1, imm:$imm), sub_lt)>;
def : Pat<(i1 (setcc i32:$s1, immZExt16:$imm, SETUGT)),
(EXTRACT_SUBREG (CMPLWI $s1, imm:$imm), sub_gt)>;
def : Pat<(i1 (setcc i32:$s1, imm32SExt16:$imm, SETGT)),
(EXTRACT_SUBREG (CMPWI $s1, imm:$imm), sub_gt)>;
def : Pat<(i1 (setcc i32:$s1, imm32SExt16:$imm, SETEQ)),
(EXTRACT_SUBREG (CMPWI $s1, imm:$imm), sub_eq)>;
def : Pat<(i1 (setcc i32:$s1, immZExt16:$imm, SETEQ)),
(EXTRACT_SUBREG (CMPLWI $s1, imm:$imm), sub_eq)>;
// For equality comparisons against immediates that fit neither pattern above,
// the default code would materialize the constant, then compare against it,
// like this:
// lis r2, 4660
// ori r2, r2, 22136
// cmpw cr0, r3, r2
// beq cr0,L6
// Since we are just comparing for equality, we can emit this instead:
// xoris r0,r3,0x1234
// cmplwi cr0,r0,0x5678
// beq cr0,L6
def : Pat<(i1 (setcc i32:$s1, imm:$imm, SETEQ)),
(EXTRACT_SUBREG (CMPLWI (XORIS $s1, (HI16 imm:$imm)),
(LO16 imm:$imm)), sub_eq)>;
defm : CRNotPat<(i1 (setcc i32:$s1, immZExt16:$imm, SETUGE)),
(EXTRACT_SUBREG (CMPLWI $s1, imm:$imm), sub_lt)>;
defm : CRNotPat<(i1 (setcc i32:$s1, imm32SExt16:$imm, SETGE)),
(EXTRACT_SUBREG (CMPWI $s1, imm:$imm), sub_lt)>;
defm : CRNotPat<(i1 (setcc i32:$s1, immZExt16:$imm, SETULE)),
(EXTRACT_SUBREG (CMPLWI $s1, imm:$imm), sub_gt)>;
defm : CRNotPat<(i1 (setcc i32:$s1, imm32SExt16:$imm, SETLE)),
(EXTRACT_SUBREG (CMPWI $s1, imm:$imm), sub_gt)>;
defm : CRNotPat<(i1 (setcc i32:$s1, imm32SExt16:$imm, SETNE)),
(EXTRACT_SUBREG (CMPWI $s1, imm:$imm), sub_eq)>;
defm : CRNotPat<(i1 (setcc i32:$s1, immZExt16:$imm, SETNE)),
(EXTRACT_SUBREG (CMPLWI $s1, imm:$imm), sub_eq)>;
defm : CRNotPat<(i1 (setcc i32:$s1, imm:$imm, SETNE)),
(EXTRACT_SUBREG (CMPLWI (XORIS $s1, (HI16 imm:$imm)),
(LO16 imm:$imm)), sub_eq)>;
def : Pat<(i1 (setcc i32:$s1, i32:$s2, SETULT)),
(EXTRACT_SUBREG (CMPLW $s1, $s2), sub_lt)>;
def : Pat<(i1 (setcc i32:$s1, i32:$s2, SETLT)),
(EXTRACT_SUBREG (CMPW $s1, $s2), sub_lt)>;
def : Pat<(i1 (setcc i32:$s1, i32:$s2, SETUGT)),
(EXTRACT_SUBREG (CMPLW $s1, $s2), sub_gt)>;
def : Pat<(i1 (setcc i32:$s1, i32:$s2, SETGT)),
(EXTRACT_SUBREG (CMPW $s1, $s2), sub_gt)>;
def : Pat<(i1 (setcc i32:$s1, i32:$s2, SETEQ)),
(EXTRACT_SUBREG (CMPW $s1, $s2), sub_eq)>;
defm : CRNotPat<(i1 (setcc i32:$s1, i32:$s2, SETUGE)),
(EXTRACT_SUBREG (CMPLW $s1, $s2), sub_lt)>;
defm : CRNotPat<(i1 (setcc i32:$s1, i32:$s2, SETGE)),
(EXTRACT_SUBREG (CMPW $s1, $s2), sub_lt)>;
defm : CRNotPat<(i1 (setcc i32:$s1, i32:$s2, SETULE)),
(EXTRACT_SUBREG (CMPLW $s1, $s2), sub_gt)>;
defm : CRNotPat<(i1 (setcc i32:$s1, i32:$s2, SETLE)),
(EXTRACT_SUBREG (CMPW $s1, $s2), sub_gt)>;
defm : CRNotPat<(i1 (setcc i32:$s1, i32:$s2, SETNE)),
(EXTRACT_SUBREG (CMPW $s1, $s2), sub_eq)>;
// SETCC for i64.
def : Pat<(i1 (setcc i64:$s1, immZExt16:$imm, SETULT)),
(EXTRACT_SUBREG (CMPLDI $s1, imm:$imm), sub_lt)>;
def : Pat<(i1 (setcc i64:$s1, imm64SExt16:$imm, SETLT)),
(EXTRACT_SUBREG (CMPDI $s1, imm:$imm), sub_lt)>;
def : Pat<(i1 (setcc i64:$s1, immZExt16:$imm, SETUGT)),
(EXTRACT_SUBREG (CMPLDI $s1, imm:$imm), sub_gt)>;
def : Pat<(i1 (setcc i64:$s1, imm64SExt16:$imm, SETGT)),
(EXTRACT_SUBREG (CMPDI $s1, imm:$imm), sub_gt)>;
def : Pat<(i1 (setcc i64:$s1, imm64SExt16:$imm, SETEQ)),
(EXTRACT_SUBREG (CMPDI $s1, imm:$imm), sub_eq)>;
def : Pat<(i1 (setcc i64:$s1, immZExt16:$imm, SETEQ)),
(EXTRACT_SUBREG (CMPLDI $s1, imm:$imm), sub_eq)>;
// For equality comparisons against immediates that fit neither pattern above,
// the default code would materialize the constant, then compare against it,
// like this:
// lis r2, 4660
// ori r2, r2, 22136
// cmpd cr0, r3, r2
// beq cr0,L6
// Since we are just comparing for equality, we can emit this instead:
// xoris r0,r3,0x1234
// cmpldi cr0,r0,0x5678
// beq cr0,L6
def : Pat<(i1 (setcc i64:$s1, imm64ZExt32:$imm, SETEQ)),
(EXTRACT_SUBREG (CMPLDI (XORIS8 $s1, (HI16 imm:$imm)),
(LO16 imm:$imm)), sub_eq)>;
defm : CRNotPat<(i1 (setcc i64:$s1, immZExt16:$imm, SETUGE)),
(EXTRACT_SUBREG (CMPLDI $s1, imm:$imm), sub_lt)>;
defm : CRNotPat<(i1 (setcc i64:$s1, imm64SExt16:$imm, SETGE)),
(EXTRACT_SUBREG (CMPDI $s1, imm:$imm), sub_lt)>;
defm : CRNotPat<(i1 (setcc i64:$s1, immZExt16:$imm, SETULE)),
(EXTRACT_SUBREG (CMPLDI $s1, imm:$imm), sub_gt)>;
defm : CRNotPat<(i1 (setcc i64:$s1, imm64SExt16:$imm, SETLE)),
(EXTRACT_SUBREG (CMPDI $s1, imm:$imm), sub_gt)>;
defm : CRNotPat<(i1 (setcc i64:$s1, imm64SExt16:$imm, SETNE)),
(EXTRACT_SUBREG (CMPDI $s1, imm:$imm), sub_eq)>;
defm : CRNotPat<(i1 (setcc i64:$s1, immZExt16:$imm, SETNE)),
(EXTRACT_SUBREG (CMPLDI $s1, imm:$imm), sub_eq)>;
defm : CRNotPat<(i1 (setcc i64:$s1, imm64ZExt32:$imm, SETNE)),
(EXTRACT_SUBREG (CMPLDI (XORIS8 $s1, (HI16 imm:$imm)),
(LO16 imm:$imm)), sub_eq)>;
def : Pat<(i1 (setcc i64:$s1, i64:$s2, SETULT)),
(EXTRACT_SUBREG (CMPLD $s1, $s2), sub_lt)>;
def : Pat<(i1 (setcc i64:$s1, i64:$s2, SETLT)),
(EXTRACT_SUBREG (CMPD $s1, $s2), sub_lt)>;
def : Pat<(i1 (setcc i64:$s1, i64:$s2, SETUGT)),
(EXTRACT_SUBREG (CMPLD $s1, $s2), sub_gt)>;
def : Pat<(i1 (setcc i64:$s1, i64:$s2, SETGT)),
(EXTRACT_SUBREG (CMPD $s1, $s2), sub_gt)>;
def : Pat<(i1 (setcc i64:$s1, i64:$s2, SETEQ)),
(EXTRACT_SUBREG (CMPD $s1, $s2), sub_eq)>;
defm : CRNotPat<(i1 (setcc i64:$s1, i64:$s2, SETUGE)),
(EXTRACT_SUBREG (CMPLD $s1, $s2), sub_lt)>;
defm : CRNotPat<(i1 (setcc i64:$s1, i64:$s2, SETGE)),
(EXTRACT_SUBREG (CMPD $s1, $s2), sub_lt)>;
defm : CRNotPat<(i1 (setcc i64:$s1, i64:$s2, SETULE)),
(EXTRACT_SUBREG (CMPLD $s1, $s2), sub_gt)>;
defm : CRNotPat<(i1 (setcc i64:$s1, i64:$s2, SETLE)),
(EXTRACT_SUBREG (CMPD $s1, $s2), sub_gt)>;
defm : CRNotPat<(i1 (setcc i64:$s1, i64:$s2, SETNE)),
(EXTRACT_SUBREG (CMPD $s1, $s2), sub_eq)>;
// SETCC for f32.
let Predicates = [HasFPU] in {
def : Pat<(i1 (setcc f32:$s1, f32:$s2, SETOLT)),
(EXTRACT_SUBREG (FCMPUS $s1, $s2), sub_lt)>;
def : Pat<(i1 (setcc f32:$s1, f32:$s2, SETLT)),
(EXTRACT_SUBREG (FCMPUS $s1, $s2), sub_lt)>;
def : Pat<(i1 (setcc f32:$s1, f32:$s2, SETOGT)),
(EXTRACT_SUBREG (FCMPUS $s1, $s2), sub_gt)>;
def : Pat<(i1 (setcc f32:$s1, f32:$s2, SETGT)),
(EXTRACT_SUBREG (FCMPUS $s1, $s2), sub_gt)>;
def : Pat<(i1 (setcc f32:$s1, f32:$s2, SETOEQ)),
(EXTRACT_SUBREG (FCMPUS $s1, $s2), sub_eq)>;
def : Pat<(i1 (setcc f32:$s1, f32:$s2, SETEQ)),
(EXTRACT_SUBREG (FCMPUS $s1, $s2), sub_eq)>;
def : Pat<(i1 (setcc f32:$s1, f32:$s2, SETUO)),
(EXTRACT_SUBREG (FCMPUS $s1, $s2), sub_un)>;
defm : CRNotPat<(i1 (setcc f32:$s1, f32:$s2, SETUGE)),
(EXTRACT_SUBREG (FCMPUS $s1, $s2), sub_lt)>;
defm : CRNotPat<(i1 (setcc f32:$s1, f32:$s2, SETGE)),
(EXTRACT_SUBREG (FCMPUS $s1, $s2), sub_lt)>;
defm : CRNotPat<(i1 (setcc f32:$s1, f32:$s2, SETULE)),
(EXTRACT_SUBREG (FCMPUS $s1, $s2), sub_gt)>;
defm : CRNotPat<(i1 (setcc f32:$s1, f32:$s2, SETLE)),
(EXTRACT_SUBREG (FCMPUS $s1, $s2), sub_gt)>;
defm : CRNotPat<(i1 (setcc f32:$s1, f32:$s2, SETUNE)),
(EXTRACT_SUBREG (FCMPUS $s1, $s2), sub_eq)>;
defm : CRNotPat<(i1 (setcc f32:$s1, f32:$s2, SETNE)),
(EXTRACT_SUBREG (FCMPUS $s1, $s2), sub_eq)>;
defm : CRNotPat<(i1 (setcc f32:$s1, f32:$s2, SETO)),
(EXTRACT_SUBREG (FCMPUS $s1, $s2), sub_un)>;
// SETCC for f64.
def : Pat<(i1 (setcc f64:$s1, f64:$s2, SETOLT)),
(EXTRACT_SUBREG (FCMPUD $s1, $s2), sub_lt)>;
def : Pat<(i1 (setcc f64:$s1, f64:$s2, SETLT)),
(EXTRACT_SUBREG (FCMPUD $s1, $s2), sub_lt)>;
def : Pat<(i1 (setcc f64:$s1, f64:$s2, SETOGT)),
(EXTRACT_SUBREG (FCMPUD $s1, $s2), sub_gt)>;
def : Pat<(i1 (setcc f64:$s1, f64:$s2, SETGT)),
(EXTRACT_SUBREG (FCMPUD $s1, $s2), sub_gt)>;
def : Pat<(i1 (setcc f64:$s1, f64:$s2, SETOEQ)),
(EXTRACT_SUBREG (FCMPUD $s1, $s2), sub_eq)>;
def : Pat<(i1 (setcc f64:$s1, f64:$s2, SETEQ)),
(EXTRACT_SUBREG (FCMPUD $s1, $s2), sub_eq)>;
def : Pat<(i1 (setcc f64:$s1, f64:$s2, SETUO)),
(EXTRACT_SUBREG (FCMPUD $s1, $s2), sub_un)>;
defm : CRNotPat<(i1 (setcc f64:$s1, f64:$s2, SETUGE)),
(EXTRACT_SUBREG (FCMPUD $s1, $s2), sub_lt)>;
defm : CRNotPat<(i1 (setcc f64:$s1, f64:$s2, SETGE)),
(EXTRACT_SUBREG (FCMPUD $s1, $s2), sub_lt)>;
defm : CRNotPat<(i1 (setcc f64:$s1, f64:$s2, SETULE)),
(EXTRACT_SUBREG (FCMPUD $s1, $s2), sub_gt)>;
defm : CRNotPat<(i1 (setcc f64:$s1, f64:$s2, SETLE)),
(EXTRACT_SUBREG (FCMPUD $s1, $s2), sub_gt)>;
defm : CRNotPat<(i1 (setcc f64:$s1, f64:$s2, SETUNE)),
(EXTRACT_SUBREG (FCMPUD $s1, $s2), sub_eq)>;
defm : CRNotPat<(i1 (setcc f64:$s1, f64:$s2, SETNE)),
(EXTRACT_SUBREG (FCMPUD $s1, $s2), sub_eq)>;
defm : CRNotPat<(i1 (setcc f64:$s1, f64:$s2, SETO)),
(EXTRACT_SUBREG (FCMPUD $s1, $s2), sub_un)>;
// SETCC for f128.
def : Pat<(i1 (setcc f128:$s1, f128:$s2, SETOLT)),
(EXTRACT_SUBREG (XSCMPUQP $s1, $s2), sub_lt)>;
def : Pat<(i1 (setcc f128:$s1, f128:$s2, SETLT)),
(EXTRACT_SUBREG (XSCMPUQP $s1, $s2), sub_lt)>;
def : Pat<(i1 (setcc f128:$s1, f128:$s2, SETOGT)),
(EXTRACT_SUBREG (XSCMPUQP $s1, $s2), sub_gt)>;
def : Pat<(i1 (setcc f128:$s1, f128:$s2, SETGT)),
(EXTRACT_SUBREG (XSCMPUQP $s1, $s2), sub_gt)>;
def : Pat<(i1 (setcc f128:$s1, f128:$s2, SETOEQ)),
(EXTRACT_SUBREG (XSCMPUQP $s1, $s2), sub_eq)>;
def : Pat<(i1 (setcc f128:$s1, f128:$s2, SETEQ)),
(EXTRACT_SUBREG (XSCMPUQP $s1, $s2), sub_eq)>;
def : Pat<(i1 (setcc f128:$s1, f128:$s2, SETUO)),
(EXTRACT_SUBREG (XSCMPUQP $s1, $s2), sub_un)>;
defm : CRNotPat<(i1 (setcc f128:$s1, f128:$s2, SETUGE)),
(EXTRACT_SUBREG (XSCMPUQP $s1, $s2), sub_lt)>;
defm : CRNotPat<(i1 (setcc f128:$s1, f128:$s2, SETGE)),
(EXTRACT_SUBREG (XSCMPUQP $s1, $s2), sub_lt)>;
defm : CRNotPat<(i1 (setcc f128:$s1, f128:$s2, SETULE)),
(EXTRACT_SUBREG (XSCMPUQP $s1, $s2), sub_gt)>;
defm : CRNotPat<(i1 (setcc f128:$s1, f128:$s2, SETLE)),
(EXTRACT_SUBREG (XSCMPUQP $s1, $s2), sub_gt)>;
defm : CRNotPat<(i1 (setcc f128:$s1, f128:$s2, SETUNE)),
(EXTRACT_SUBREG (XSCMPUQP $s1, $s2), sub_eq)>;
defm : CRNotPat<(i1 (setcc f128:$s1, f128:$s2, SETNE)),
(EXTRACT_SUBREG (XSCMPUQP $s1, $s2), sub_eq)>;
defm : CRNotPat<(i1 (setcc f128:$s1, f128:$s2, SETO)),
(EXTRACT_SUBREG (XSCMPUQP $s1, $s2), sub_un)>;
}
// This block must come here, after the instruction-set includes above,
// because it relies on patterns defined in those files.
let Predicates = [HasSPE] in {
// SETCC for f32.
def : Pat<(i1 (setcc f32:$s1, f32:$s2, SETOLT)),
(EXTRACT_SUBREG (EFSCMPLT $s1, $s2), sub_gt)>;
def : Pat<(i1 (setcc f32:$s1, f32:$s2, SETLT)),
(EXTRACT_SUBREG (EFSCMPLT $s1, $s2), sub_gt)>;
def : Pat<(i1 (setcc f32:$s1, f32:$s2, SETOGT)),
(EXTRACT_SUBREG (EFSCMPGT $s1, $s2), sub_gt)>;
def : Pat<(i1 (setcc f32:$s1, f32:$s2, SETGT)),
(EXTRACT_SUBREG (EFSCMPGT $s1, $s2), sub_gt)>;
def : Pat<(i1 (setcc f32:$s1, f32:$s2, SETOEQ)),
(EXTRACT_SUBREG (EFSCMPEQ $s1, $s2), sub_gt)>;
def : Pat<(i1 (setcc f32:$s1, f32:$s2, SETEQ)),
(EXTRACT_SUBREG (EFSCMPEQ $s1, $s2), sub_gt)>;
defm : CRNotPat<(i1 (setcc f32:$s1, f32:$s2, SETUGE)),
(EXTRACT_SUBREG (EFSCMPLT $s1, $s2), sub_gt)>;
defm : CRNotPat<(i1 (setcc f32:$s1, f32:$s2, SETGE)),
(EXTRACT_SUBREG (EFSCMPLT $s1, $s2), sub_gt)>;
defm : CRNotPat<(i1 (setcc f32:$s1, f32:$s2, SETULE)),
(EXTRACT_SUBREG (EFSCMPGT $s1, $s2), sub_gt)>;
defm : CRNotPat<(i1 (setcc f32:$s1, f32:$s2, SETLE)),
(EXTRACT_SUBREG (EFSCMPGT $s1, $s2), sub_gt)>;
defm : CRNotPat<(i1 (setcc f32:$s1, f32:$s2, SETUNE)),
(EXTRACT_SUBREG (EFSCMPEQ $s1, $s2), sub_gt)>;
defm : CRNotPat<(i1 (setcc f32:$s1, f32:$s2, SETNE)),
(EXTRACT_SUBREG (EFSCMPEQ $s1, $s2), sub_gt)>;
// SETCC for f64.
def : Pat<(i1 (setcc f64:$s1, f64:$s2, SETOLT)),
(EXTRACT_SUBREG (EFDCMPLT $s1, $s2), sub_gt)>;
def : Pat<(i1 (setcc f64:$s1, f64:$s2, SETLT)),
(EXTRACT_SUBREG (EFDCMPLT $s1, $s2), sub_gt)>;
def : Pat<(i1 (setcc f64:$s1, f64:$s2, SETOGT)),
(EXTRACT_SUBREG (EFDCMPGT $s1, $s2), sub_gt)>;
def : Pat<(i1 (setcc f64:$s1, f64:$s2, SETGT)),
(EXTRACT_SUBREG (EFDCMPGT $s1, $s2), sub_gt)>;
def : Pat<(i1 (setcc f64:$s1, f64:$s2, SETOEQ)),
(EXTRACT_SUBREG (EFDCMPEQ $s1, $s2), sub_gt)>;
def : Pat<(i1 (setcc f64:$s1, f64:$s2, SETEQ)),
(EXTRACT_SUBREG (EFDCMPEQ $s1, $s2), sub_gt)>;
defm : CRNotPat<(i1 (setcc f64:$s1, f64:$s2, SETUGE)),
(EXTRACT_SUBREG (EFDCMPLT $s1, $s2), sub_gt)>;
defm : CRNotPat<(i1 (setcc f64:$s1, f64:$s2, SETGE)),
(EXTRACT_SUBREG (EFDCMPLT $s1, $s2), sub_gt)>;
defm : CRNotPat<(i1 (setcc f64:$s1, f64:$s2, SETULE)),
(EXTRACT_SUBREG (EFDCMPGT $s1, $s2), sub_gt)>;
defm : CRNotPat<(i1 (setcc f64:$s1, f64:$s2, SETLE)),
(EXTRACT_SUBREG (EFDCMPGT $s1, $s2), sub_gt)>;
defm : CRNotPat<(i1 (setcc f64:$s1, f64:$s2, SETUNE)),
(EXTRACT_SUBREG (EFDCMPEQ $s1, $s2), sub_gt)>;
defm : CRNotPat<(i1 (setcc f64:$s1, f64:$s2, SETNE)),
(EXTRACT_SUBREG (EFDCMPEQ $s1, $s2), sub_gt)>;
}
// match select on i1 variables:
def : Pat<(i1 (select i1:$cond, i1:$tval, i1:$fval)),
(CROR (CRAND $cond , $tval),
(CRAND (crnot $cond), $fval))>;
// match selectcc on i1 variables:
// select (lhs == rhs), tval, fval is:
// ((lhs == rhs) & tval) | (!(lhs == rhs) & fval)
def : Pat <(i1 (selectcc i1:$lhs, i1:$rhs, i1:$tval, i1:$fval, SETLT)),
(CROR (CRAND (CRANDC $lhs, $rhs), $tval),
(CRAND (CRORC $rhs, $lhs), $fval))>;
def : Pat <(i1 (selectcc i1:$lhs, i1:$rhs, i1:$tval, i1:$fval, SETULT)),
(CROR (CRAND (CRANDC $rhs, $lhs), $tval),
(CRAND (CRORC $lhs, $rhs), $fval))>;
def : Pat <(i1 (selectcc i1:$lhs, i1:$rhs, i1:$tval, i1:$fval, SETLE)),
(CROR (CRAND (CRORC $lhs, $rhs), $tval),
(CRAND (CRANDC $rhs, $lhs), $fval))>;
def : Pat <(i1 (selectcc i1:$lhs, i1:$rhs, i1:$tval, i1:$fval, SETULE)),
(CROR (CRAND (CRORC $rhs, $lhs), $tval),
(CRAND (CRANDC $lhs, $rhs), $fval))>;
def : Pat <(i1 (selectcc i1:$lhs, i1:$rhs, i1:$tval, i1:$fval, SETEQ)),
(CROR (CRAND (CREQV $lhs, $rhs), $tval),
(CRAND (CRXOR $lhs, $rhs), $fval))>;
def : Pat <(i1 (selectcc i1:$lhs, i1:$rhs, i1:$tval, i1:$fval, SETGE)),
(CROR (CRAND (CRORC $rhs, $lhs), $tval),
(CRAND (CRANDC $lhs, $rhs), $fval))>;
def : Pat <(i1 (selectcc i1:$lhs, i1:$rhs, i1:$tval, i1:$fval, SETUGE)),
(CROR (CRAND (CRORC $lhs, $rhs), $tval),
(CRAND (CRANDC $rhs, $lhs), $fval))>;
def : Pat <(i1 (selectcc i1:$lhs, i1:$rhs, i1:$tval, i1:$fval, SETGT)),
(CROR (CRAND (CRANDC $rhs, $lhs), $tval),
(CRAND (CRORC $lhs, $rhs), $fval))>;
def : Pat <(i1 (selectcc i1:$lhs, i1:$rhs, i1:$tval, i1:$fval, SETUGT)),
(CROR (CRAND (CRANDC $lhs, $rhs), $tval),
(CRAND (CRORC $rhs, $lhs), $fval))>;
def : Pat <(i1 (selectcc i1:$lhs, i1:$rhs, i1:$tval, i1:$fval, SETNE)),
(CROR (CRAND (CREQV $lhs, $rhs), $fval),
(CRAND (CRXOR $lhs, $rhs), $tval))>;
// match selectcc on i1 variables with non-i1 output.
def : Pat<(i32 (selectcc i1:$lhs, i1:$rhs, i32:$tval, i32:$fval, SETLT)),
(SELECT_I4 (CRANDC $lhs, $rhs), $tval, $fval)>;
def : Pat<(i32 (selectcc i1:$lhs, i1:$rhs, i32:$tval, i32:$fval, SETULT)),
(SELECT_I4 (CRANDC $rhs, $lhs), $tval, $fval)>;
def : Pat<(i32 (selectcc i1:$lhs, i1:$rhs, i32:$tval, i32:$fval, SETLE)),
(SELECT_I4 (CRORC $lhs, $rhs), $tval, $fval)>;
def : Pat<(i32 (selectcc i1:$lhs, i1:$rhs, i32:$tval, i32:$fval, SETULE)),
(SELECT_I4 (CRORC $rhs, $lhs), $tval, $fval)>;
def : Pat<(i32 (selectcc i1:$lhs, i1:$rhs, i32:$tval, i32:$fval, SETEQ)),
(SELECT_I4 (CREQV $lhs, $rhs), $tval, $fval)>;
def : Pat<(i32 (selectcc i1:$lhs, i1:$rhs, i32:$tval, i32:$fval, SETGE)),
(SELECT_I4 (CRORC $rhs, $lhs), $tval, $fval)>;
def : Pat<(i32 (selectcc i1:$lhs, i1:$rhs, i32:$tval, i32:$fval, SETUGE)),
(SELECT_I4 (CRORC $lhs, $rhs), $tval, $fval)>;
def : Pat<(i32 (selectcc i1:$lhs, i1:$rhs, i32:$tval, i32:$fval, SETGT)),
(SELECT_I4 (CRANDC $rhs, $lhs), $tval, $fval)>;
def : Pat<(i32 (selectcc i1:$lhs, i1:$rhs, i32:$tval, i32:$fval, SETUGT)),
(SELECT_I4 (CRANDC $lhs, $rhs), $tval, $fval)>;
def : Pat<(i32 (selectcc i1:$lhs, i1:$rhs, i32:$tval, i32:$fval, SETNE)),
(SELECT_I4 (CRXOR $lhs, $rhs), $tval, $fval)>;
def : Pat<(i64 (selectcc i1:$lhs, i1:$rhs, i64:$tval, i64:$fval, SETLT)),
(SELECT_I8 (CRANDC $lhs, $rhs), $tval, $fval)>;
def : Pat<(i64 (selectcc i1:$lhs, i1:$rhs, i64:$tval, i64:$fval, SETULT)),
(SELECT_I8 (CRANDC $rhs, $lhs), $tval, $fval)>;
def : Pat<(i64 (selectcc i1:$lhs, i1:$rhs, i64:$tval, i64:$fval, SETLE)),
(SELECT_I8 (CRORC $lhs, $rhs), $tval, $fval)>;
def : Pat<(i64 (selectcc i1:$lhs, i1:$rhs, i64:$tval, i64:$fval, SETULE)),
(SELECT_I8 (CRORC $rhs, $lhs), $tval, $fval)>;
def : Pat<(i64 (selectcc i1:$lhs, i1:$rhs, i64:$tval, i64:$fval, SETEQ)),
(SELECT_I8 (CREQV $lhs, $rhs), $tval, $fval)>;
def : Pat<(i64 (selectcc i1:$lhs, i1:$rhs, i64:$tval, i64:$fval, SETGE)),
(SELECT_I8 (CRORC $rhs, $lhs), $tval, $fval)>;
def : Pat<(i64 (selectcc i1:$lhs, i1:$rhs, i64:$tval, i64:$fval, SETUGE)),
(SELECT_I8 (CRORC $lhs, $rhs), $tval, $fval)>;
def : Pat<(i64 (selectcc i1:$lhs, i1:$rhs, i64:$tval, i64:$fval, SETGT)),
(SELECT_I8 (CRANDC $rhs, $lhs), $tval, $fval)>;
def : Pat<(i64 (selectcc i1:$lhs, i1:$rhs, i64:$tval, i64:$fval, SETUGT)),
(SELECT_I8 (CRANDC $lhs, $rhs), $tval, $fval)>;
def : Pat<(i64 (selectcc i1:$lhs, i1:$rhs, i64:$tval, i64:$fval, SETNE)),
(SELECT_I8 (CRXOR $lhs, $rhs), $tval, $fval)>;
let Predicates = [HasFPU] in {
def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETLT)),
(SELECT_F4 (CRANDC $lhs, $rhs), $tval, $fval)>;
def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETULT)),
(SELECT_F4 (CRANDC $rhs, $lhs), $tval, $fval)>;
def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETLE)),
(SELECT_F4 (CRORC $lhs, $rhs), $tval, $fval)>;
def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETULE)),
(SELECT_F4 (CRORC $rhs, $lhs), $tval, $fval)>;
def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETEQ)),
(SELECT_F4 (CREQV $lhs, $rhs), $tval, $fval)>;
def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETGE)),
(SELECT_F4 (CRORC $rhs, $lhs), $tval, $fval)>;
def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETUGE)),
(SELECT_F4 (CRORC $lhs, $rhs), $tval, $fval)>;
def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETGT)),
(SELECT_F4 (CRANDC $rhs, $lhs), $tval, $fval)>;
def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETUGT)),
(SELECT_F4 (CRANDC $lhs, $rhs), $tval, $fval)>;
def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETNE)),
(SELECT_F4 (CRXOR $lhs, $rhs), $tval, $fval)>;
def : Pat<(f64 (selectcc i1:$lhs, i1:$rhs, f64:$tval, f64:$fval, SETLT)),
(SELECT_F8 (CRANDC $lhs, $rhs), $tval, $fval)>;
def : Pat<(f64 (selectcc i1:$lhs, i1:$rhs, f64:$tval, f64:$fval, SETULT)),
(SELECT_F8 (CRANDC $rhs, $lhs), $tval, $fval)>;
def : Pat<(f64 (selectcc i1:$lhs, i1:$rhs, f64:$tval, f64:$fval, SETLE)),
(SELECT_F8 (CRORC $lhs, $rhs), $tval, $fval)>;
def : Pat<(f64 (selectcc i1:$lhs, i1:$rhs, f64:$tval, f64:$fval, SETULE)),
(SELECT_F8 (CRORC $rhs, $lhs), $tval, $fval)>;
def : Pat<(f64 (selectcc i1:$lhs, i1:$rhs, f64:$tval, f64:$fval, SETEQ)),
(SELECT_F8 (CREQV $lhs, $rhs), $tval, $fval)>;
def : Pat<(f64 (selectcc i1:$lhs, i1:$rhs, f64:$tval, f64:$fval, SETGE)),
(SELECT_F8 (CRORC $rhs, $lhs), $tval, $fval)>;
def : Pat<(f64 (selectcc i1:$lhs, i1:$rhs, f64:$tval, f64:$fval, SETUGE)),
(SELECT_F8 (CRORC $lhs, $rhs), $tval, $fval)>;
def : Pat<(f64 (selectcc i1:$lhs, i1:$rhs, f64:$tval, f64:$fval, SETGT)),
(SELECT_F8 (CRANDC $rhs, $lhs), $tval, $fval)>;
def : Pat<(f64 (selectcc i1:$lhs, i1:$rhs, f64:$tval, f64:$fval, SETUGT)),
(SELECT_F8 (CRANDC $lhs, $rhs), $tval, $fval)>;
def : Pat<(f64 (selectcc i1:$lhs, i1:$rhs, f64:$tval, f64:$fval, SETNE)),
(SELECT_F8 (CRXOR $lhs, $rhs), $tval, $fval)>;
}
def : Pat<(f128 (selectcc i1:$lhs, i1:$rhs, f128:$tval, f128:$fval, SETLT)),
(SELECT_F16 (CRANDC $lhs, $rhs), $tval, $fval)>;
def : Pat<(f128 (selectcc i1:$lhs, i1:$rhs, f128:$tval, f128:$fval, SETULT)),
(SELECT_F16 (CRANDC $rhs, $lhs), $tval, $fval)>;
def : Pat<(f128 (selectcc i1:$lhs, i1:$rhs, f128:$tval, f128:$fval, SETLE)),
(SELECT_F16 (CRORC $lhs, $rhs), $tval, $fval)>;
def : Pat<(f128 (selectcc i1:$lhs, i1:$rhs, f128:$tval, f128:$fval, SETULE)),
(SELECT_F16 (CRORC $rhs, $lhs), $tval, $fval)>;
def : Pat<(f128 (selectcc i1:$lhs, i1:$rhs, f128:$tval, f128:$fval, SETEQ)),
(SELECT_F16 (CREQV $lhs, $rhs), $tval, $fval)>;
def : Pat<(f128 (selectcc i1:$lhs, i1:$rhs, f128:$tval, f128:$fval, SETGE)),
(SELECT_F16 (CRORC $rhs, $lhs), $tval, $fval)>;
def : Pat<(f128 (selectcc i1:$lhs, i1:$rhs, f128:$tval, f128:$fval, SETUGE)),
(SELECT_F16 (CRORC $lhs, $rhs), $tval, $fval)>;
def : Pat<(f128 (selectcc i1:$lhs, i1:$rhs, f128:$tval, f128:$fval, SETGT)),
(SELECT_F16 (CRANDC $rhs, $lhs), $tval, $fval)>;
def : Pat<(f128 (selectcc i1:$lhs, i1:$rhs, f128:$tval, f128:$fval, SETUGT)),
(SELECT_F16 (CRANDC $lhs, $rhs), $tval, $fval)>;
def : Pat<(f128 (selectcc i1:$lhs, i1:$rhs, f128:$tval, f128:$fval, SETNE)),
(SELECT_F16 (CRXOR $lhs, $rhs), $tval, $fval)>;
def : Pat<(v4i32 (selectcc i1:$lhs, i1:$rhs, v4i32:$tval, v4i32:$fval, SETLT)),
(SELECT_VRRC (CRANDC $lhs, $rhs), $tval, $fval)>;
def : Pat<(v4i32 (selectcc i1:$lhs, i1:$rhs, v4i32:$tval, v4i32:$fval, SETULT)),
(SELECT_VRRC (CRANDC $rhs, $lhs), $tval, $fval)>;
def : Pat<(v4i32 (selectcc i1:$lhs, i1:$rhs, v4i32:$tval, v4i32:$fval, SETLE)),
(SELECT_VRRC (CRORC $lhs, $rhs), $tval, $fval)>;
def : Pat<(v4i32 (selectcc i1:$lhs, i1:$rhs, v4i32:$tval, v4i32:$fval, SETULE)),
(SELECT_VRRC (CRORC $rhs, $lhs), $tval, $fval)>;
def : Pat<(v4i32 (selectcc i1:$lhs, i1:$rhs, v4i32:$tval, v4i32:$fval, SETEQ)),
(SELECT_VRRC (CREQV $lhs, $rhs), $tval, $fval)>;
def : Pat<(v4i32 (selectcc i1:$lhs, i1:$rhs, v4i32:$tval, v4i32:$fval, SETGE)),
(SELECT_VRRC (CRORC $rhs, $lhs), $tval, $fval)>;
def : Pat<(v4i32 (selectcc i1:$lhs, i1:$rhs, v4i32:$tval, v4i32:$fval, SETUGE)),
(SELECT_VRRC (CRORC $lhs, $rhs), $tval, $fval)>;
def : Pat<(v4i32 (selectcc i1:$lhs, i1:$rhs, v4i32:$tval, v4i32:$fval, SETGT)),
(SELECT_VRRC (CRANDC $rhs, $lhs), $tval, $fval)>;
def : Pat<(v4i32 (selectcc i1:$lhs, i1:$rhs, v4i32:$tval, v4i32:$fval, SETUGT)),
(SELECT_VRRC (CRANDC $lhs, $rhs), $tval, $fval)>;
def : Pat<(v4i32 (selectcc i1:$lhs, i1:$rhs, v4i32:$tval, v4i32:$fval, SETNE)),
(SELECT_VRRC (CRXOR $lhs, $rhs), $tval, $fval)>;
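// The pseudos below are custom-inserted as "andi. rTmp, rIn, 1", which sets
// CR0 by comparing the result with zero: the EQ bit is set iff the low input
// bit is clear (giving the negated truncation) and the GT bit iff it is set.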
def ANDIo_1_EQ_BIT : PPCCustomInserterPseudo<(outs crbitrc:$dst), (ins gprc:$in),
"#ANDIo_1_EQ_BIT",
[(set i1:$dst, (trunc (not i32:$in)))]>;
def ANDIo_1_GT_BIT : PPCCustomInserterPseudo<(outs crbitrc:$dst), (ins gprc:$in),
"#ANDIo_1_GT_BIT",
[(set i1:$dst, (trunc i32:$in))]>;
def ANDIo_1_EQ_BIT8 : PPCCustomInserterPseudo<(outs crbitrc:$dst), (ins g8rc:$in),
"#ANDIo_1_EQ_BIT8",
[(set i1:$dst, (trunc (not i64:$in)))]>;
def ANDIo_1_GT_BIT8 : PPCCustomInserterPseudo<(outs crbitrc:$dst), (ins g8rc:$in),
"#ANDIo_1_GT_BIT8",
[(set i1:$dst, (trunc i64:$in))]>;
def : Pat<(i1 (not (trunc i32:$in))),
(ANDIo_1_EQ_BIT $in)>;
def : Pat<(i1 (not (trunc i64:$in))),
(ANDIo_1_EQ_BIT8 $in)>;
//===----------------------------------------------------------------------===//
// PowerPC Instructions used for assembler/disassembler only
//
// FIXME: For B=0 or B > 8, the registers following RT are used.
// WARNING: Do not add patterns for this instruction without fixing this.
def LSWI : XForm_base_r3xo_memOp<31, 597, (outs gprc:$RT),
(ins gprc:$A, u5imm:$B),
"lswi $RT, $A, $B", IIC_LdStLoad, []>;
// FIXME: For B=0 or B > 8, the registers following RT are used.
// WARNING: Do not add patterns for this instruction without fixing this.
def STSWI : XForm_base_r3xo_memOp<31, 725, (outs),
(ins gprc:$RT, gprc:$A, u5imm:$B),
"stswi $RT, $A, $B", IIC_LdStLoad, []>;
def ISYNC : XLForm_2_ext<19, 150, 0, 0, 0, (outs), (ins),
"isync", IIC_SprISYNC, []>;
def ICBI : XForm_1a<31, 982, (outs), (ins memrr:$src),
"icbi $src", IIC_LdStICBI, []>;
// This was originally named EIEIO, but E[0-9A-Z] is a reserved name, so it
// is called EnforceIEIO instead.
def EnforceIEIO : XForm_24_eieio<31, 854, (outs), (ins),
"eieio", IIC_LdStLoad, []>;
def WAIT : XForm_24_sync<31, 30, (outs), (ins i32imm:$L),
"wait $L", IIC_LdStLoad, []>;
def MBAR : XForm_mbar<31, 854, (outs), (ins u5imm:$MO),
"mbar $MO", IIC_LdStLoad>, Requires<[IsBookE]>;
def MTSR: XForm_sr<31, 210, (outs), (ins gprc:$RS, u4imm:$SR),
"mtsr $SR, $RS", IIC_SprMTSR>;
def MFSR: XForm_sr<31, 595, (outs gprc:$RS), (ins u4imm:$SR),
"mfsr $RS, $SR", IIC_SprMFSR>;
def MTSRIN: XForm_srin<31, 242, (outs), (ins gprc:$RS, gprc:$RB),
"mtsrin $RS, $RB", IIC_SprMTSR>;
def MFSRIN: XForm_srin<31, 659, (outs gprc:$RS), (ins gprc:$RB),
"mfsrin $RS, $RB", IIC_SprMFSR>;
def MTMSR: XForm_mtmsr<31, 146, (outs), (ins gprc:$RS, i32imm:$L),
"mtmsr $RS, $L", IIC_SprMTMSR>;
def WRTEE: XForm_mtmsr<31, 131, (outs), (ins gprc:$RS),
"wrtee $RS", IIC_SprMTMSR>, Requires<[IsBookE]> {
let L = 0;
}
def WRTEEI: I<31, (outs), (ins i1imm:$E), "wrteei $E", IIC_SprMTMSR>,
Requires<[IsBookE]> {
bits<1> E;
let Inst{16} = E;
let Inst{21-30} = 163;
}
def DCCCI : XForm_tlb<454, (outs), (ins gprc:$A, gprc:$B),
"dccci $A, $B", IIC_LdStLoad>, Requires<[IsPPC4xx]>;
def ICCCI : XForm_tlb<966, (outs), (ins gprc:$A, gprc:$B),
"iccci $A, $B", IIC_LdStLoad>, Requires<[IsPPC4xx]>;
def : InstAlias<"dci 0", (DCCCI R0, R0)>, Requires<[IsPPC4xx]>;
def : InstAlias<"dccci", (DCCCI R0, R0)>, Requires<[IsPPC4xx]>;
def : InstAlias<"ici 0", (ICCCI R0, R0)>, Requires<[IsPPC4xx]>;
def : InstAlias<"iccci", (ICCCI R0, R0)>, Requires<[IsPPC4xx]>;
def MFMSR : XForm_rs<31, 83, (outs gprc:$RT), (ins),
"mfmsr $RT", IIC_SprMFMSR, []>;
def MTMSRD : XForm_mtmsr<31, 178, (outs), (ins gprc:$RS, i32imm:$L),
"mtmsrd $RS, $L", IIC_SprMTMSRD>;
def MCRFS : XLForm_3<63, 64, (outs crrc:$BF), (ins crrc:$BFA),
"mcrfs $BF, $BFA", IIC_BrMCR>;
def MTFSFI : XLForm_4<63, 134, (outs crrc:$BF), (ins i32imm:$U, i32imm:$W),
"mtfsfi $BF, $U, $W", IIC_IntMFFS>;
def MTFSFIo : XLForm_4<63, 134, (outs crrc:$BF), (ins i32imm:$U, i32imm:$W),
"mtfsfi. $BF, $U, $W", IIC_IntMFFS>, isDOT;
def : InstAlias<"mtfsfi $BF, $U", (MTFSFI crrc:$BF, i32imm:$U, 0)>;
def : InstAlias<"mtfsfi. $BF, $U", (MTFSFIo crrc:$BF, i32imm:$U, 0)>;
let Predicates = [HasFPU] in {
def MTFSF : XFLForm_1<63, 711, (outs),
(ins i32imm:$FLM, f8rc:$FRB, i32imm:$L, i32imm:$W),
"mtfsf $FLM, $FRB, $L, $W", IIC_IntMFFS, []>;
def MTFSFo : XFLForm_1<63, 711, (outs),
(ins i32imm:$FLM, f8rc:$FRB, i32imm:$L, i32imm:$W),
"mtfsf. $FLM, $FRB, $L, $W", IIC_IntMFFS, []>, isDOT;
def : InstAlias<"mtfsf $FLM, $FRB", (MTFSF i32imm:$FLM, f8rc:$FRB, 0, 0)>;
def : InstAlias<"mtfsf. $FLM, $FRB", (MTFSFo i32imm:$FLM, f8rc:$FRB, 0, 0)>;
}
def SLBIE : XForm_16b<31, 434, (outs), (ins gprc:$RB),
"slbie $RB", IIC_SprSLBIE, []>;
def SLBMTE : XForm_26<31, 402, (outs), (ins gprc:$RS, gprc:$RB),
"slbmte $RS, $RB", IIC_SprSLBMTE, []>;
def SLBMFEE : XForm_26<31, 915, (outs gprc:$RT), (ins gprc:$RB),
"slbmfee $RT, $RB", IIC_SprSLBMFEE, []>;
def SLBMFEV : XLForm_1_gen<31, 851, (outs gprc:$RT), (ins gprc:$RB),
"slbmfev $RT, $RB", IIC_SprSLBMFEV, []>;
def SLBIA : XForm_0<31, 498, (outs), (ins), "slbia", IIC_SprSLBIA, []>;
let Defs = [CR0] in
def SLBFEEo : XForm_26<31, 979, (outs gprc:$RT), (ins gprc:$RB),
"slbfee. $RT, $RB", IIC_SprSLBFEE, []>, isDOT;
def TLBIA : XForm_0<31, 370, (outs), (ins),
"tlbia", IIC_SprTLBIA, []>;
def TLBSYNC : XForm_0<31, 566, (outs), (ins),
"tlbsync", IIC_SprTLBSYNC, []>;
def TLBIEL : XForm_16b<31, 274, (outs), (ins gprc:$RB),
"tlbiel $RB", IIC_SprTLBIEL, []>;
def TLBLD : XForm_16b<31, 978, (outs), (ins gprc:$RB),
"tlbld $RB", IIC_LdStLoad, []>, Requires<[IsPPC6xx]>;
def TLBLI : XForm_16b<31, 1010, (outs), (ins gprc:$RB),
"tlbli $RB", IIC_LdStLoad, []>, Requires<[IsPPC6xx]>;
def TLBIE : XForm_26<31, 306, (outs), (ins gprc:$RS, gprc:$RB),
"tlbie $RB,$RS", IIC_SprTLBIE, []>;
def TLBSX : XForm_tlb<914, (outs), (ins gprc:$A, gprc:$B), "tlbsx $A, $B",
IIC_LdStLoad>, Requires<[IsBookE]>;
def TLBIVAX : XForm_tlb<786, (outs), (ins gprc:$A, gprc:$B), "tlbivax $A, $B",
IIC_LdStLoad>, Requires<[IsBookE]>;
def TLBRE : XForm_24_eieio<31, 946, (outs), (ins),
"tlbre", IIC_LdStLoad, []>, Requires<[IsBookE]>;
def TLBWE : XForm_24_eieio<31, 978, (outs), (ins),
"tlbwe", IIC_LdStLoad, []>, Requires<[IsBookE]>;
def TLBRE2 : XForm_tlbws<31, 946, (outs gprc:$RS), (ins gprc:$A, i1imm:$WS),
"tlbre $RS, $A, $WS", IIC_LdStLoad, []>, Requires<[IsPPC4xx]>;
def TLBWE2 : XForm_tlbws<31, 978, (outs), (ins gprc:$RS, gprc:$A, i1imm:$WS),
"tlbwe $RS, $A, $WS", IIC_LdStLoad, []>, Requires<[IsPPC4xx]>;
def TLBSX2 : XForm_base_r3xo<31, 914, (outs), (ins gprc:$RST, gprc:$A, gprc:$B),
"tlbsx $RST, $A, $B", IIC_LdStLoad, []>,
Requires<[IsPPC4xx]>;
def TLBSX2D : XForm_base_r3xo<31, 914, (outs),
(ins gprc:$RST, gprc:$A, gprc:$B),
"tlbsx. $RST, $A, $B", IIC_LdStLoad, []>,
Requires<[IsPPC4xx]>, isDOT;
def RFID : XForm_0<19, 18, (outs), (ins), "rfid", IIC_IntRFID, []>;
def RFI : XForm_0<19, 50, (outs), (ins), "rfi", IIC_SprRFI, []>,
Requires<[IsBookE]>;
def RFCI : XForm_0<19, 51, (outs), (ins), "rfci", IIC_BrB, []>,
Requires<[IsBookE]>;
def RFDI : XForm_0<19, 39, (outs), (ins), "rfdi", IIC_BrB, []>,
Requires<[IsE500]>;
def RFMCI : XForm_0<19, 38, (outs), (ins), "rfmci", IIC_BrB, []>,
Requires<[IsE500]>;
def MFDCR : XFXForm_1<31, 323, (outs gprc:$RT), (ins i32imm:$SPR),
"mfdcr $RT, $SPR", IIC_SprMFSPR>, Requires<[IsPPC4xx]>;
def MTDCR : XFXForm_1<31, 451, (outs), (ins gprc:$RT, i32imm:$SPR),
"mtdcr $SPR, $RT", IIC_SprMTSPR>, Requires<[IsPPC4xx]>;
def HRFID : XLForm_1_np<19, 274, (outs), (ins), "hrfid", IIC_BrB, []>;
def NAP : XLForm_1_np<19, 434, (outs), (ins), "nap", IIC_BrB, []>;
def ATTN : XForm_attn<0, 256, (outs), (ins), "attn", IIC_BrB>;
def LBZCIX : XForm_base_r3xo_memOp<31, 853, (outs gprc:$RST),
(ins gprc:$A, gprc:$B),
"lbzcix $RST, $A, $B", IIC_LdStLoad, []>;
def LHZCIX : XForm_base_r3xo_memOp<31, 821, (outs gprc:$RST),
(ins gprc:$A, gprc:$B),
"lhzcix $RST, $A, $B", IIC_LdStLoad, []>;
def LWZCIX : XForm_base_r3xo_memOp<31, 789, (outs gprc:$RST),
(ins gprc:$A, gprc:$B),
"lwzcix $RST, $A, $B", IIC_LdStLoad, []>;
def LDCIX : XForm_base_r3xo_memOp<31, 885, (outs gprc:$RST),
(ins gprc:$A, gprc:$B),
"ldcix $RST, $A, $B", IIC_LdStLoad, []>;
def STBCIX : XForm_base_r3xo_memOp<31, 981, (outs),
(ins gprc:$RST, gprc:$A, gprc:$B),
"stbcix $RST, $A, $B", IIC_LdStLoad, []>;
def STHCIX : XForm_base_r3xo_memOp<31, 949, (outs),
(ins gprc:$RST, gprc:$A, gprc:$B),
"sthcix $RST, $A, $B", IIC_LdStLoad, []>;
def STWCIX : XForm_base_r3xo_memOp<31, 917, (outs),
(ins gprc:$RST, gprc:$A, gprc:$B),
"stwcix $RST, $A, $B", IIC_LdStLoad, []>;
def STDCIX : XForm_base_r3xo_memOp<31, 1013, (outs),
(ins gprc:$RST, gprc:$A, gprc:$B),
"stdcix $RST, $A, $B", IIC_LdStLoad, []>;
// External PID Load Store Instructions
def LBEPX : XForm_1<31, 95, (outs gprc:$rD), (ins memrr:$src),
"lbepx $rD, $src", IIC_LdStLoad, []>,
Requires<[IsE500]>;
def LFDEPX : XForm_25<31, 607, (outs f8rc:$frD), (ins memrr:$src),
"lfdepx $frD, $src", IIC_LdStLFD, []>,
Requires<[IsE500]>;
def LHEPX : XForm_1<31, 287, (outs gprc:$rD), (ins memrr:$src),
"lhepx $rD, $src", IIC_LdStLoad, []>,
Requires<[IsE500]>;
def LWEPX : XForm_1<31, 31, (outs gprc:$rD), (ins memrr:$src),
"lwepx $rD, $src", IIC_LdStLoad, []>,
Requires<[IsE500]>;
def STBEPX : XForm_8<31, 223, (outs), (ins gprc:$rS, memrr:$dst),
"stbepx $rS, $dst", IIC_LdStStore, []>,
Requires<[IsE500]>;
def STFDEPX : XForm_28_memOp<31, 735, (outs), (ins f8rc:$frS, memrr:$dst),
"stfdepx $frS, $dst", IIC_LdStSTFD, []>,
Requires<[IsE500]>;
def STHEPX : XForm_8<31, 415, (outs), (ins gprc:$rS, memrr:$dst),
"sthepx $rS, $dst", IIC_LdStStore, []>,
Requires<[IsE500]>;
def STWEPX : XForm_8<31, 159, (outs), (ins gprc:$rS, memrr:$dst),
"stwepx $rS, $dst", IIC_LdStStore, []>,
Requires<[IsE500]>;
def DCBFEP : DCB_Form<127, 0, (outs), (ins memrr:$dst), "dcbfep $dst",
IIC_LdStDCBF, []>, Requires<[IsE500]>;
def DCBSTEP : DCB_Form<63, 0, (outs), (ins memrr:$dst), "dcbstep $dst",
IIC_LdStDCBF, []>, Requires<[IsE500]>;
def DCBTEP : DCB_Form_hint<319, (outs), (ins memrr:$dst, u5imm:$TH),
"dcbtep $TH, $dst", IIC_LdStDCBF, []>,
Requires<[IsE500]>;
def DCBTSTEP : DCB_Form_hint<255, (outs), (ins memrr:$dst, u5imm:$TH),
"dcbtstep $TH, $dst", IIC_LdStDCBF, []>,
Requires<[IsE500]>;
def DCBZEP : DCB_Form<1023, 0, (outs), (ins memrr:$dst), "dcbzep $dst",
IIC_LdStDCBF, []>, Requires<[IsE500]>;
def DCBZLEP : DCB_Form<1023, 1, (outs), (ins memrr:$dst), "dcbzlep $dst",
IIC_LdStDCBF, []>, Requires<[IsE500]>;
def ICBIEP : XForm_1a<31, 991, (outs), (ins memrr:$src), "icbiep $src",
IIC_LdStICBI, []>, Requires<[IsE500]>;
//===----------------------------------------------------------------------===//
// PowerPC Assembler Instruction Aliases
//
// Pseudo-instructions for alternate assembly syntax (never used by codegen).
// These are aliases that require C++ handling to convert to the target
// instruction, while InstAliases can be handled directly by tblgen.
class PPCAsmPseudo<string asm, dag iops>
: Instruction {
let Namespace = "PPC";
bit PPC64 = 0; // Default value, override with isPPC64
let OutOperandList = (outs);
let InOperandList = iops;
let Pattern = [];
let AsmString = asm;
let isAsmParserOnly = 1;
let isPseudo = 1;
let hasNoSchedulingInfo = 1;
}
def : InstAlias<"sc", (SC 0)>;
def : InstAlias<"sync", (SYNC 0)>, Requires<[HasSYNC]>;
def : InstAlias<"msync", (SYNC 0), 0>, Requires<[HasSYNC]>;
def : InstAlias<"lwsync", (SYNC 1)>, Requires<[HasSYNC]>;
def : InstAlias<"ptesync", (SYNC 2)>, Requires<[HasSYNC]>;
def : InstAlias<"wait", (WAIT 0)>;
def : InstAlias<"waitrsv", (WAIT 1)>;
def : InstAlias<"waitimpl", (WAIT 2)>;
def : InstAlias<"mbar", (MBAR 0)>, Requires<[IsBookE]>;
def DCBTx : PPCAsmPseudo<"dcbt $dst", (ins memrr:$dst)>;
def DCBTSTx : PPCAsmPseudo<"dcbtst $dst", (ins memrr:$dst)>;
def DCBTCT : PPCAsmPseudo<"dcbtct $dst, $TH", (ins memrr:$dst, u5imm:$TH)>;
def DCBTDS : PPCAsmPseudo<"dcbtds $dst, $TH", (ins memrr:$dst, u5imm:$TH)>;
def DCBTT : PPCAsmPseudo<"dcbtt $dst", (ins memrr:$dst)>;
def DCBTSTCT : PPCAsmPseudo<"dcbtstct $dst, $TH", (ins memrr:$dst, u5imm:$TH)>;
def DCBTSTDS : PPCAsmPseudo<"dcbtstds $dst, $TH", (ins memrr:$dst, u5imm:$TH)>;
def DCBTSTT : PPCAsmPseudo<"dcbtstt $dst", (ins memrr:$dst)>;
def DCBFx : PPCAsmPseudo<"dcbf $dst", (ins memrr:$dst)>;
def DCBFL : PPCAsmPseudo<"dcbfl $dst", (ins memrr:$dst)>;
def DCBFLP : PPCAsmPseudo<"dcbflp $dst", (ins memrr:$dst)>;
def : InstAlias<"crset $bx", (CREQV crbitrc:$bx, crbitrc:$bx, crbitrc:$bx)>;
def : InstAlias<"crclr $bx", (CRXOR crbitrc:$bx, crbitrc:$bx, crbitrc:$bx)>;
def : InstAlias<"crmove $bx, $by", (CROR crbitrc:$bx, crbitrc:$by, crbitrc:$by)>;
def : InstAlias<"crnot $bx, $by", (CRNOR crbitrc:$bx, crbitrc:$by, crbitrc:$by)>;
def : InstAlias<"mtxer $Rx", (MTSPR 1, gprc:$Rx)>;
def : InstAlias<"mfxer $Rx", (MFSPR gprc:$Rx, 1)>;
def : InstAlias<"mfrtcu $Rx", (MFSPR gprc:$Rx, 4)>;
def : InstAlias<"mfrtcl $Rx", (MFSPR gprc:$Rx, 5)>;
def : InstAlias<"mtdscr $Rx", (MTSPR 17, gprc:$Rx)>;
def : InstAlias<"mfdscr $Rx", (MFSPR gprc:$Rx, 17)>;
def : InstAlias<"mtdsisr $Rx", (MTSPR 18, gprc:$Rx)>;
def : InstAlias<"mfdsisr $Rx", (MFSPR gprc:$Rx, 18)>;
def : InstAlias<"mtdar $Rx", (MTSPR 19, gprc:$Rx)>;
def : InstAlias<"mfdar $Rx", (MFSPR gprc:$Rx, 19)>;
def : InstAlias<"mtdec $Rx", (MTSPR 22, gprc:$Rx)>;
def : InstAlias<"mfdec $Rx", (MFSPR gprc:$Rx, 22)>;
def : InstAlias<"mtsdr1 $Rx", (MTSPR 25, gprc:$Rx)>;
def : InstAlias<"mfsdr1 $Rx", (MFSPR gprc:$Rx, 25)>;
def : InstAlias<"mtsrr0 $Rx", (MTSPR 26, gprc:$Rx)>;
def : InstAlias<"mfsrr0 $Rx", (MFSPR gprc:$Rx, 26)>;
def : InstAlias<"mtsrr1 $Rx", (MTSPR 27, gprc:$Rx)>;
def : InstAlias<"mfsrr1 $Rx", (MFSPR gprc:$Rx, 27)>;
def : InstAlias<"mtsrr2 $Rx", (MTSPR 990, gprc:$Rx)>, Requires<[IsPPC4xx]>;
def : InstAlias<"mfsrr2 $Rx", (MFSPR gprc:$Rx, 990)>, Requires<[IsPPC4xx]>;
def : InstAlias<"mtsrr3 $Rx", (MTSPR 991, gprc:$Rx)>, Requires<[IsPPC4xx]>;
def : InstAlias<"mfsrr3 $Rx", (MFSPR gprc:$Rx, 991)>, Requires<[IsPPC4xx]>;
def : InstAlias<"mtcfar $Rx", (MTSPR 28, gprc:$Rx)>;
def : InstAlias<"mfcfar $Rx", (MFSPR gprc:$Rx, 28)>;
def : InstAlias<"mtamr $Rx", (MTSPR 29, gprc:$Rx)>;
def : InstAlias<"mfamr $Rx", (MFSPR gprc:$Rx, 29)>;
def : InstAlias<"mtpid $Rx", (MTSPR 48, gprc:$Rx)>, Requires<[IsBookE]>;
def : InstAlias<"mfpid $Rx", (MFSPR gprc:$Rx, 48)>, Requires<[IsBookE]>;
def : InstAlias<"mftb $Rx", (MFTB gprc:$Rx, 268)>;
def : InstAlias<"mftbl $Rx", (MFTB gprc:$Rx, 268)>;
def : InstAlias<"mftbu $Rx", (MFTB gprc:$Rx, 269)>;
def : InstAlias<"mttbl $Rx", (MTSPR 284, gprc:$Rx)>;
def : InstAlias<"mttbu $Rx", (MTSPR 285, gprc:$Rx)>;
def : InstAlias<"mftblo $Rx", (MFSPR gprc:$Rx, 989)>, Requires<[IsPPC4xx]>;
def : InstAlias<"mttblo $Rx", (MTSPR 989, gprc:$Rx)>, Requires<[IsPPC4xx]>;
def : InstAlias<"mftbhi $Rx", (MFSPR gprc:$Rx, 988)>, Requires<[IsPPC4xx]>;
def : InstAlias<"mttbhi $Rx", (MTSPR 988, gprc:$Rx)>, Requires<[IsPPC4xx]>;
def : InstAlias<"xnop", (XORI R0, R0, 0)>;
def : InstAlias<"mr $rA, $rB", (OR8 g8rc:$rA, g8rc:$rB, g8rc:$rB)>;
def : InstAlias<"mr. $rA, $rB", (OR8o g8rc:$rA, g8rc:$rB, g8rc:$rB)>;
def : InstAlias<"not $rA, $rB", (NOR8 g8rc:$rA, g8rc:$rB, g8rc:$rB)>;
def : InstAlias<"not. $rA, $rB", (NOR8o g8rc:$rA, g8rc:$rB, g8rc:$rB)>;
def : InstAlias<"mtcr $rA", (MTCRF8 255, g8rc:$rA)>;
foreach BATR = 0-3 in {
def : InstAlias<"mtdbatu "#BATR#", $Rx",
(MTSPR !add(BATR, !add(BATR, 536)), gprc:$Rx)>,
Requires<[IsPPC6xx]>;
def : InstAlias<"mfdbatu $Rx, "#BATR,
(MFSPR gprc:$Rx, !add(BATR, !add(BATR, 536)))>,
Requires<[IsPPC6xx]>;
def : InstAlias<"mtdbatl "#BATR#", $Rx",
(MTSPR !add(BATR, !add(BATR, 537)), gprc:$Rx)>,
Requires<[IsPPC6xx]>;
def : InstAlias<"mfdbatl $Rx, "#BATR,
(MFSPR gprc:$Rx, !add(BATR, !add(BATR, 537)))>,
Requires<[IsPPC6xx]>;
def : InstAlias<"mtibatu "#BATR#", $Rx",
(MTSPR !add(BATR, !add(BATR, 528)), gprc:$Rx)>,
Requires<[IsPPC6xx]>;
def : InstAlias<"mfibatu $Rx, "#BATR,
(MFSPR gprc:$Rx, !add(BATR, !add(BATR, 528)))>,
Requires<[IsPPC6xx]>;
def : InstAlias<"mtibatl "#BATR#", $Rx",
(MTSPR !add(BATR, !add(BATR, 529)), gprc:$Rx)>,
Requires<[IsPPC6xx]>;
def : InstAlias<"mfibatl $Rx, "#BATR,
(MFSPR gprc:$Rx, !add(BATR, !add(BATR, 529)))>,
Requires<[IsPPC6xx]>;
}
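// Note: !add(BATR, !add(BATR, 536)) computes 2*BATR + 536, so the aliases
// above map onto the 6xx BAT SPR numbering: IBAT0U..IBAT3L occupy SPRs
// 528..535 and DBAT0U..DBAT3L occupy SPRs 536..543, with upper/lower
// registers interleaved. For example, "mtdbatu 1, r3" encodes as
// "mtspr 538, r3".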
foreach BR = 0-7 in {
def : InstAlias<"mfbr"#BR#" $Rx",
(MFDCR gprc:$Rx, !add(BR, 0x80))>,
Requires<[IsPPC4xx]>;
def : InstAlias<"mtbr"#BR#" $Rx",
(MTDCR gprc:$Rx, !add(BR, 0x80))>,
Requires<[IsPPC4xx]>;
}
def : InstAlias<"mtdccr $Rx", (MTSPR 1018, gprc:$Rx)>, Requires<[IsPPC4xx]>;
def : InstAlias<"mfdccr $Rx", (MFSPR gprc:$Rx, 1018)>, Requires<[IsPPC4xx]>;
def : InstAlias<"mticcr $Rx", (MTSPR 1019, gprc:$Rx)>, Requires<[IsPPC4xx]>;
def : InstAlias<"mficcr $Rx", (MFSPR gprc:$Rx, 1019)>, Requires<[IsPPC4xx]>;
def : InstAlias<"mtdear $Rx", (MTSPR 981, gprc:$Rx)>, Requires<[IsPPC4xx]>;
def : InstAlias<"mfdear $Rx", (MFSPR gprc:$Rx, 981)>, Requires<[IsPPC4xx]>;
def : InstAlias<"mtesr $Rx", (MTSPR 980, gprc:$Rx)>, Requires<[IsPPC4xx]>;
def : InstAlias<"mfesr $Rx", (MFSPR gprc:$Rx, 980)>, Requires<[IsPPC4xx]>;
def : InstAlias<"mfspefscr $Rx", (MFSPR gprc:$Rx, 512)>;
def : InstAlias<"mtspefscr $Rx", (MTSPR 512, gprc:$Rx)>;
def : InstAlias<"mttcr $Rx", (MTSPR 986, gprc:$Rx)>, Requires<[IsPPC4xx]>;
def : InstAlias<"mftcr $Rx", (MFSPR gprc:$Rx, 986)>, Requires<[IsPPC4xx]>;
def LAx : PPCAsmPseudo<"la $rA, $addr", (ins gprc:$rA, memri:$addr)>;
def SUBI : PPCAsmPseudo<"subi $rA, $rB, $imm",
(ins gprc:$rA, gprc:$rB, s16imm:$imm)>;
def SUBIS : PPCAsmPseudo<"subis $rA, $rB, $imm",
(ins gprc:$rA, gprc:$rB, s16imm:$imm)>;
def SUBIC : PPCAsmPseudo<"subic $rA, $rB, $imm",
(ins gprc:$rA, gprc:$rB, s16imm:$imm)>;
def SUBICo : PPCAsmPseudo<"subic. $rA, $rB, $imm",
(ins gprc:$rA, gprc:$rB, s16imm:$imm)>;
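// These subtract pseudos are expanded by the assembler parser: the standard
// extended mnemonic "subi rA, rB, imm" is equivalent to "addi rA, rB, -imm",
// and subis/subic/subic. likewise map to addis/addic/addic. with the
// immediate negated.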
def : InstAlias<"sub $rA, $rB, $rC", (SUBF8 g8rc:$rA, g8rc:$rC, g8rc:$rB)>;
def : InstAlias<"sub. $rA, $rB, $rC", (SUBF8o g8rc:$rA, g8rc:$rC, g8rc:$rB)>;
def : InstAlias<"subc $rA, $rB, $rC", (SUBFC8 g8rc:$rA, g8rc:$rC, g8rc:$rB)>;
def : InstAlias<"subc. $rA, $rB, $rC", (SUBFC8o g8rc:$rA, g8rc:$rC, g8rc:$rB)>;
def : InstAlias<"mtmsrd $RS", (MTMSRD gprc:$RS, 0)>;
def : InstAlias<"mtmsr $RS", (MTMSR gprc:$RS, 0)>;
def : InstAlias<"mfasr $RT", (MFSPR gprc:$RT, 280)>;
def : InstAlias<"mtasr $RT", (MTSPR 280, gprc:$RT)>;
foreach SPRG = 0-3 in {
def : InstAlias<"mfsprg $RT, "#SPRG, (MFSPR gprc:$RT, !add(SPRG, 272))>;
def : InstAlias<"mfsprg"#SPRG#" $RT", (MFSPR gprc:$RT, !add(SPRG, 272))>;
def : InstAlias<"mtsprg "#SPRG#", $RT", (MTSPR !add(SPRG, 272), gprc:$RT)>;
def : InstAlias<"mtsprg"#SPRG#" $RT", (MTSPR !add(SPRG, 272), gprc:$RT)>;
}
foreach SPRG = 4-7 in {
def : InstAlias<"mfsprg $RT, "#SPRG, (MFSPR gprc:$RT, !add(SPRG, 256))>,
Requires<[IsBookE]>;
def : InstAlias<"mfsprg"#SPRG#" $RT", (MFSPR gprc:$RT, !add(SPRG, 256))>,
Requires<[IsBookE]>;
def : InstAlias<"mtsprg "#SPRG#", $RT", (MTSPR !add(SPRG, 256), gprc:$RT)>,
Requires<[IsBookE]>;
def : InstAlias<"mtsprg"#SPRG#" $RT", (MTSPR !add(SPRG, 256), gprc:$RT)>,
Requires<[IsBookE]>;
}
def : InstAlias<"mtasr $RS", (MTSPR 280, gprc:$RS)>;
def : InstAlias<"mfdec $RT", (MFSPR gprc:$RT, 22)>;
def : InstAlias<"mtdec $RT", (MTSPR 22, gprc:$RT)>;
def : InstAlias<"mfpvr $RT", (MFSPR gprc:$RT, 287)>;
def : InstAlias<"mfsdr1 $RT", (MFSPR gprc:$RT, 25)>;
def : InstAlias<"mtsdr1 $RT", (MTSPR 25, gprc:$RT)>;
def : InstAlias<"mfsrr0 $RT", (MFSPR gprc:$RT, 26)>;
def : InstAlias<"mfsrr1 $RT", (MFSPR gprc:$RT, 27)>;
def : InstAlias<"mtsrr0 $RT", (MTSPR 26, gprc:$RT)>;
def : InstAlias<"mtsrr1 $RT", (MTSPR 27, gprc:$RT)>;
def : InstAlias<"tlbie $RB", (TLBIE R0, gprc:$RB)>;
def : InstAlias<"tlbrehi $RS, $A", (TLBRE2 gprc:$RS, gprc:$A, 0)>,
Requires<[IsPPC4xx]>;
def : InstAlias<"tlbrelo $RS, $A", (TLBRE2 gprc:$RS, gprc:$A, 1)>,
Requires<[IsPPC4xx]>;
def : InstAlias<"tlbwehi $RS, $A", (TLBWE2 gprc:$RS, gprc:$A, 0)>,
Requires<[IsPPC4xx]>;
def : InstAlias<"tlbwelo $RS, $A", (TLBWE2 gprc:$RS, gprc:$A, 1)>,
Requires<[IsPPC4xx]>;
def EXTLWI : PPCAsmPseudo<"extlwi $rA, $rS, $n, $b",
(ins gprc:$rA, gprc:$rS, u5imm:$n, u5imm:$b)>;
def EXTLWIo : PPCAsmPseudo<"extlwi. $rA, $rS, $n, $b",
(ins gprc:$rA, gprc:$rS, u5imm:$n, u5imm:$b)>;
def EXTRWI : PPCAsmPseudo<"extrwi $rA, $rS, $n, $b",
(ins gprc:$rA, gprc:$rS, u5imm:$n, u5imm:$b)>;
def EXTRWIo : PPCAsmPseudo<"extrwi. $rA, $rS, $n, $b",
(ins gprc:$rA, gprc:$rS, u5imm:$n, u5imm:$b)>;
def INSLWI : PPCAsmPseudo<"inslwi $rA, $rS, $n, $b",
(ins gprc:$rA, gprc:$rS, u5imm:$n, u5imm:$b)>;
def INSLWIo : PPCAsmPseudo<"inslwi. $rA, $rS, $n, $b",
(ins gprc:$rA, gprc:$rS, u5imm:$n, u5imm:$b)>;
def INSRWI : PPCAsmPseudo<"insrwi $rA, $rS, $n, $b",
(ins gprc:$rA, gprc:$rS, u5imm:$n, u5imm:$b)>;
def INSRWIo : PPCAsmPseudo<"insrwi. $rA, $rS, $n, $b",
(ins gprc:$rA, gprc:$rS, u5imm:$n, u5imm:$b)>;
def ROTRWI : PPCAsmPseudo<"rotrwi $rA, $rS, $n",
(ins gprc:$rA, gprc:$rS, u5imm:$n)>;
def ROTRWIo : PPCAsmPseudo<"rotrwi. $rA, $rS, $n",
(ins gprc:$rA, gprc:$rS, u5imm:$n)>;
def SLWI : PPCAsmPseudo<"slwi $rA, $rS, $n",
(ins gprc:$rA, gprc:$rS, u5imm:$n)>;
def SLWIo : PPCAsmPseudo<"slwi. $rA, $rS, $n",
(ins gprc:$rA, gprc:$rS, u5imm:$n)>;
def SRWI : PPCAsmPseudo<"srwi $rA, $rS, $n",
(ins gprc:$rA, gprc:$rS, u5imm:$n)>;
def SRWIo : PPCAsmPseudo<"srwi. $rA, $rS, $n",
(ins gprc:$rA, gprc:$rS, u5imm:$n)>;
def CLRRWI : PPCAsmPseudo<"clrrwi $rA, $rS, $n",
(ins gprc:$rA, gprc:$rS, u5imm:$n)>;
def CLRRWIo : PPCAsmPseudo<"clrrwi. $rA, $rS, $n",
(ins gprc:$rA, gprc:$rS, u5imm:$n)>;
def CLRLSLWI : PPCAsmPseudo<"clrlslwi $rA, $rS, $b, $n",
(ins gprc:$rA, gprc:$rS, u5imm:$b, u5imm:$n)>;
def CLRLSLWIo : PPCAsmPseudo<"clrlslwi. $rA, $rS, $b, $n",
(ins gprc:$rA, gprc:$rS, u5imm:$b, u5imm:$n)>;
def : InstAlias<"rotlwi $rA, $rS, $n", (RLWINM gprc:$rA, gprc:$rS, u5imm:$n, 0, 31)>;
def : InstAlias<"rotlwi. $rA, $rS, $n", (RLWINMo gprc:$rA, gprc:$rS, u5imm:$n, 0, 31)>;
def : InstAlias<"rotlw $rA, $rS, $rB", (RLWNM gprc:$rA, gprc:$rS, gprc:$rB, 0, 31)>;
def : InstAlias<"rotlw. $rA, $rS, $rB", (RLWNMo gprc:$rA, gprc:$rS, gprc:$rB, 0, 31)>;
def : InstAlias<"clrlwi $rA, $rS, $n", (RLWINM gprc:$rA, gprc:$rS, 0, u5imm:$n, 31)>;
def : InstAlias<"clrlwi. $rA, $rS, $n", (RLWINMo gprc:$rA, gprc:$rS, 0, u5imm:$n, 31)>;
def : InstAlias<"cntlzw $rA, $rS", (CNTLZW gprc:$rA, gprc:$rS)>;
def : InstAlias<"cntlzw. $rA, $rS", (CNTLZWo gprc:$rA, gprc:$rS)>;
// The POWER variant
def : MnemonicAlias<"cntlz", "cntlzw">;
def : MnemonicAlias<"cntlz.", "cntlzw.">;
def EXTLDI : PPCAsmPseudo<"extldi $rA, $rS, $n, $b",
(ins g8rc:$rA, g8rc:$rS, u6imm:$n, u6imm:$b)>;
def EXTLDIo : PPCAsmPseudo<"extldi. $rA, $rS, $n, $b",
(ins g8rc:$rA, g8rc:$rS, u6imm:$n, u6imm:$b)>;
def EXTRDI : PPCAsmPseudo<"extrdi $rA, $rS, $n, $b",
(ins g8rc:$rA, g8rc:$rS, u6imm:$n, u6imm:$b)>;
def EXTRDIo : PPCAsmPseudo<"extrdi. $rA, $rS, $n, $b",
(ins g8rc:$rA, g8rc:$rS, u6imm:$n, u6imm:$b)>;
def INSRDI : PPCAsmPseudo<"insrdi $rA, $rS, $n, $b",
(ins g8rc:$rA, g8rc:$rS, u6imm:$n, u6imm:$b)>;
def INSRDIo : PPCAsmPseudo<"insrdi. $rA, $rS, $n, $b",
(ins g8rc:$rA, g8rc:$rS, u6imm:$n, u6imm:$b)>;
def ROTRDI : PPCAsmPseudo<"rotrdi $rA, $rS, $n",
(ins g8rc:$rA, g8rc:$rS, u6imm:$n)>;
def ROTRDIo : PPCAsmPseudo<"rotrdi. $rA, $rS, $n",
(ins g8rc:$rA, g8rc:$rS, u6imm:$n)>;
def SLDI : PPCAsmPseudo<"sldi $rA, $rS, $n",
(ins g8rc:$rA, g8rc:$rS, u6imm:$n)>;
def SLDIo : PPCAsmPseudo<"sldi. $rA, $rS, $n",
(ins g8rc:$rA, g8rc:$rS, u6imm:$n)>;
def SRDI : PPCAsmPseudo<"srdi $rA, $rS, $n",
(ins g8rc:$rA, g8rc:$rS, u6imm:$n)>;
def SRDIo : PPCAsmPseudo<"srdi. $rA, $rS, $n",
(ins g8rc:$rA, g8rc:$rS, u6imm:$n)>;
def CLRRDI : PPCAsmPseudo<"clrrdi $rA, $rS, $n",
(ins g8rc:$rA, g8rc:$rS, u6imm:$n)>;
def CLRRDIo : PPCAsmPseudo<"clrrdi. $rA, $rS, $n",
(ins g8rc:$rA, g8rc:$rS, u6imm:$n)>;
def CLRLSLDI : PPCAsmPseudo<"clrlsldi $rA, $rS, $b, $n",
(ins g8rc:$rA, g8rc:$rS, u6imm:$b, u6imm:$n)>;
def CLRLSLDIo : PPCAsmPseudo<"clrlsldi. $rA, $rS, $b, $n",
(ins g8rc:$rA, g8rc:$rS, u6imm:$b, u6imm:$n)>;
def SUBPCIS : PPCAsmPseudo<"subpcis $RT, $D", (ins g8rc:$RT, s16imm:$D)>;
def : InstAlias<"rotldi $rA, $rS, $n", (RLDICL g8rc:$rA, g8rc:$rS, u6imm:$n, 0)>;
def : InstAlias<"rotldi. $rA, $rS, $n", (RLDICLo g8rc:$rA, g8rc:$rS, u6imm:$n, 0)>;
def : InstAlias<"rotld $rA, $rS, $rB", (RLDCL g8rc:$rA, g8rc:$rS, gprc:$rB, 0)>;
def : InstAlias<"rotld. $rA, $rS, $rB", (RLDCLo g8rc:$rA, g8rc:$rS, gprc:$rB, 0)>;
def : InstAlias<"clrldi $rA, $rS, $n", (RLDICL g8rc:$rA, g8rc:$rS, 0, u6imm:$n)>;
def : InstAlias<"clrldi $rA, $rS, $n",
(RLDICL_32_64 g8rc:$rA, gprc:$rS, 0, u6imm:$n)>;
def : InstAlias<"clrldi. $rA, $rS, $n", (RLDICLo g8rc:$rA, g8rc:$rS, 0, u6imm:$n)>;
def : InstAlias<"lnia $RT", (ADDPCIS g8rc:$RT, 0)>;
def RLWINMbm : PPCAsmPseudo<"rlwinm $rA, $rS, $n, $b",
(ins g8rc:$rA, g8rc:$rS, u5imm:$n, i32imm:$b)>;
def RLWINMobm : PPCAsmPseudo<"rlwinm. $rA, $rS, $n, $b",
(ins g8rc:$rA, g8rc:$rS, u5imm:$n, i32imm:$b)>;
def RLWIMIbm : PPCAsmPseudo<"rlwimi $rA, $rS, $n, $b",
(ins g8rc:$rA, g8rc:$rS, u5imm:$n, i32imm:$b)>;
def RLWIMIobm : PPCAsmPseudo<"rlwimi. $rA, $rS, $n, $b",
(ins g8rc:$rA, g8rc:$rS, u5imm:$n, i32imm:$b)>;
def RLWNMbm : PPCAsmPseudo<"rlwnm $rA, $rS, $n, $b",
(ins g8rc:$rA, g8rc:$rS, u5imm:$n, i32imm:$b)>;
def RLWNMobm : PPCAsmPseudo<"rlwnm. $rA, $rS, $n, $b",
(ins g8rc:$rA, g8rc:$rS, u5imm:$n, i32imm:$b)>;
// These generic branch instruction forms are used for the assembler parser only.
// Defs and Uses are conservative, since we don't know the BO value.
let PPC970_Unit = 7, isBranch = 1 in {
let Defs = [CTR], Uses = [CTR, RM] in {
def gBC : BForm_3<16, 0, 0, (outs),
(ins u5imm:$bo, crbitrc:$bi, condbrtarget:$dst),
"bc $bo, $bi, $dst">;
def gBCA : BForm_3<16, 1, 0, (outs),
(ins u5imm:$bo, crbitrc:$bi, abscondbrtarget:$dst),
"bca $bo, $bi, $dst">;
let isAsmParserOnly = 1 in {
def gBCat : BForm_3_at<16, 0, 0, (outs),
(ins u5imm:$bo, atimm:$at, crbitrc:$bi,
condbrtarget:$dst),
"bc$at $bo, $bi, $dst">;
def gBCAat : BForm_3_at<16, 1, 0, (outs),
(ins u5imm:$bo, atimm:$at, crbitrc:$bi,
abscondbrtarget:$dst),
"bca$at $bo, $bi, $dst">;
} // isAsmParserOnly = 1
}
let Defs = [LR, CTR], Uses = [CTR, RM] in {
def gBCL : BForm_3<16, 0, 1, (outs),
(ins u5imm:$bo, crbitrc:$bi, condbrtarget:$dst),
"bcl $bo, $bi, $dst">;
def gBCLA : BForm_3<16, 1, 1, (outs),
(ins u5imm:$bo, crbitrc:$bi, abscondbrtarget:$dst),
"bcla $bo, $bi, $dst">;
let isAsmParserOnly = 1 in {
def gBCLat : BForm_3_at<16, 0, 1, (outs),
(ins u5imm:$bo, atimm:$at, crbitrc:$bi,
condbrtarget:$dst),
"bcl$at $bo, $bi, $dst">;
def gBCLAat : BForm_3_at<16, 1, 1, (outs),
(ins u5imm:$bo, atimm:$at, crbitrc:$bi,
abscondbrtarget:$dst),
"bcla$at $bo, $bi, $dst">;
  } // isAsmParserOnly = 1
}
let Defs = [CTR], Uses = [CTR, LR, RM] in
def gBCLR : XLForm_2<19, 16, 0, (outs),
(ins u5imm:$bo, crbitrc:$bi, i32imm:$bh),
"bclr $bo, $bi, $bh", IIC_BrB, []>;
let Defs = [LR, CTR], Uses = [CTR, LR, RM] in
def gBCLRL : XLForm_2<19, 16, 1, (outs),
(ins u5imm:$bo, crbitrc:$bi, i32imm:$bh),
"bclrl $bo, $bi, $bh", IIC_BrB, []>;
let Defs = [CTR], Uses = [CTR, LR, RM] in
def gBCCTR : XLForm_2<19, 528, 0, (outs),
(ins u5imm:$bo, crbitrc:$bi, i32imm:$bh),
"bcctr $bo, $bi, $bh", IIC_BrB, []>;
let Defs = [LR, CTR], Uses = [CTR, LR, RM] in
def gBCCTRL : XLForm_2<19, 528, 1, (outs),
(ins u5imm:$bo, crbitrc:$bi, i32imm:$bh),
"bcctrl $bo, $bi, $bh", IIC_BrB, []>;
}
multiclass BranchSimpleMnemonicAT<string pm, int at> {
def : InstAlias<"bc"#pm#" $bo, $bi, $dst", (gBCat u5imm:$bo, at, crbitrc:$bi,
condbrtarget:$dst)>;
def : InstAlias<"bca"#pm#" $bo, $bi, $dst", (gBCAat u5imm:$bo, at, crbitrc:$bi,
condbrtarget:$dst)>;
def : InstAlias<"bcl"#pm#" $bo, $bi, $dst", (gBCLat u5imm:$bo, at, crbitrc:$bi,
condbrtarget:$dst)>;
def : InstAlias<"bcla"#pm#" $bo, $bi, $dst", (gBCLAat u5imm:$bo, at, crbitrc:$bi,
condbrtarget:$dst)>;
}
defm : BranchSimpleMnemonicAT<"+", 3>;
defm : BranchSimpleMnemonicAT<"-", 2>;
def : InstAlias<"bclr $bo, $bi", (gBCLR u5imm:$bo, crbitrc:$bi, 0)>;
def : InstAlias<"bclrl $bo, $bi", (gBCLRL u5imm:$bo, crbitrc:$bi, 0)>;
def : InstAlias<"bcctr $bo, $bi", (gBCCTR u5imm:$bo, crbitrc:$bi, 0)>;
def : InstAlias<"bcctrl $bo, $bi", (gBCCTRL u5imm:$bo, crbitrc:$bi, 0)>;
multiclass BranchSimpleMnemonic1<string name, string pm, int bo> {
def : InstAlias<"b"#name#pm#" $bi, $dst", (gBC bo, crbitrc:$bi, condbrtarget:$dst)>;
def : InstAlias<"b"#name#"a"#pm#" $bi, $dst", (gBCA bo, crbitrc:$bi, abscondbrtarget:$dst)>;
def : InstAlias<"b"#name#"lr"#pm#" $bi", (gBCLR bo, crbitrc:$bi, 0)>;
def : InstAlias<"b"#name#"l"#pm#" $bi, $dst", (gBCL bo, crbitrc:$bi, condbrtarget:$dst)>;
def : InstAlias<"b"#name#"la"#pm#" $bi, $dst", (gBCLA bo, crbitrc:$bi, abscondbrtarget:$dst)>;
def : InstAlias<"b"#name#"lrl"#pm#" $bi", (gBCLRL bo, crbitrc:$bi, 0)>;
}
multiclass BranchSimpleMnemonic2<string name, string pm, int bo>
: BranchSimpleMnemonic1<name, pm, bo> {
def : InstAlias<"b"#name#"ctr"#pm#" $bi", (gBCCTR bo, crbitrc:$bi, 0)>;
def : InstAlias<"b"#name#"ctrl"#pm#" $bi", (gBCCTRL bo, crbitrc:$bi, 0)>;
}
defm : BranchSimpleMnemonic2<"t", "", 12>;
defm : BranchSimpleMnemonic2<"f", "", 4>;
defm : BranchSimpleMnemonic2<"t", "-", 14>;
defm : BranchSimpleMnemonic2<"f", "-", 6>;
defm : BranchSimpleMnemonic2<"t", "+", 15>;
defm : BranchSimpleMnemonic2<"f", "+", 7>;
defm : BranchSimpleMnemonic1<"dnzt", "", 8>;
defm : BranchSimpleMnemonic1<"dnzf", "", 0>;
defm : BranchSimpleMnemonic1<"dzt", "", 10>;
defm : BranchSimpleMnemonic1<"dzf", "", 2>;
multiclass BranchExtendedMnemonicPM<string name, string pm, int bibo> {
def : InstAlias<"b"#name#pm#" $cc, $dst",
(BCC bibo, crrc:$cc, condbrtarget:$dst)>;
def : InstAlias<"b"#name#pm#" $dst",
(BCC bibo, CR0, condbrtarget:$dst)>;
def : InstAlias<"b"#name#"a"#pm#" $cc, $dst",
(BCCA bibo, crrc:$cc, abscondbrtarget:$dst)>;
def : InstAlias<"b"#name#"a"#pm#" $dst",
(BCCA bibo, CR0, abscondbrtarget:$dst)>;
def : InstAlias<"b"#name#"lr"#pm#" $cc",
(BCCLR bibo, crrc:$cc)>;
def : InstAlias<"b"#name#"lr"#pm,
(BCCLR bibo, CR0)>;
def : InstAlias<"b"#name#"ctr"#pm#" $cc",
(BCCCTR bibo, crrc:$cc)>;
def : InstAlias<"b"#name#"ctr"#pm,
(BCCCTR bibo, CR0)>;
def : InstAlias<"b"#name#"l"#pm#" $cc, $dst",
(BCCL bibo, crrc:$cc, condbrtarget:$dst)>;
def : InstAlias<"b"#name#"l"#pm#" $dst",
(BCCL bibo, CR0, condbrtarget:$dst)>;
def : InstAlias<"b"#name#"la"#pm#" $cc, $dst",
(BCCLA bibo, crrc:$cc, abscondbrtarget:$dst)>;
def : InstAlias<"b"#name#"la"#pm#" $dst",
(BCCLA bibo, CR0, abscondbrtarget:$dst)>;
def : InstAlias<"b"#name#"lrl"#pm#" $cc",
(BCCLRL bibo, crrc:$cc)>;
def : InstAlias<"b"#name#"lrl"#pm,
(BCCLRL bibo, CR0)>;
def : InstAlias<"b"#name#"ctrl"#pm#" $cc",
(BCCCTRL bibo, crrc:$cc)>;
def : InstAlias<"b"#name#"ctrl"#pm,
(BCCCTRL bibo, CR0)>;
}
multiclass BranchExtendedMnemonic<string name, int bibo> {
defm : BranchExtendedMnemonicPM<name, "", bibo>;
defm : BranchExtendedMnemonicPM<name, "-", !add(bibo, 2)>;
defm : BranchExtendedMnemonicPM<name, "+", !add(bibo, 3)>;
}
defm : BranchExtendedMnemonic<"lt", 12>;
defm : BranchExtendedMnemonic<"gt", 44>;
defm : BranchExtendedMnemonic<"eq", 76>;
defm : BranchExtendedMnemonic<"un", 108>;
defm : BranchExtendedMnemonic<"so", 108>;
defm : BranchExtendedMnemonic<"ge", 4>;
defm : BranchExtendedMnemonic<"nl", 4>;
defm : BranchExtendedMnemonic<"le", 36>;
defm : BranchExtendedMnemonic<"ng", 36>;
defm : BranchExtendedMnemonic<"ne", 68>;
defm : BranchExtendedMnemonic<"nu", 100>;
defm : BranchExtendedMnemonic<"ns", 100>;
def : InstAlias<"cmpwi $rA, $imm", (CMPWI CR0, gprc:$rA, s16imm:$imm)>;
def : InstAlias<"cmpw $rA, $rB", (CMPW CR0, gprc:$rA, gprc:$rB)>;
def : InstAlias<"cmplwi $rA, $imm", (CMPLWI CR0, gprc:$rA, u16imm:$imm)>;
def : InstAlias<"cmplw $rA, $rB", (CMPLW CR0, gprc:$rA, gprc:$rB)>;
def : InstAlias<"cmpdi $rA, $imm", (CMPDI CR0, g8rc:$rA, s16imm64:$imm)>;
def : InstAlias<"cmpd $rA, $rB", (CMPD CR0, g8rc:$rA, g8rc:$rB)>;
def : InstAlias<"cmpldi $rA, $imm", (CMPLDI CR0, g8rc:$rA, u16imm64:$imm)>;
def : InstAlias<"cmpld $rA, $rB", (CMPLD CR0, g8rc:$rA, g8rc:$rB)>;
def : InstAlias<"cmpi $bf, 0, $rA, $imm", (CMPWI crrc:$bf, gprc:$rA, s16imm:$imm)>;
def : InstAlias<"cmp $bf, 0, $rA, $rB", (CMPW crrc:$bf, gprc:$rA, gprc:$rB)>;
def : InstAlias<"cmpli $bf, 0, $rA, $imm", (CMPLWI crrc:$bf, gprc:$rA, u16imm:$imm)>;
def : InstAlias<"cmpl $bf, 0, $rA, $rB", (CMPLW crrc:$bf, gprc:$rA, gprc:$rB)>;
def : InstAlias<"cmpi $bf, 1, $rA, $imm", (CMPDI crrc:$bf, g8rc:$rA, s16imm64:$imm)>;
def : InstAlias<"cmp $bf, 1, $rA, $rB", (CMPD crrc:$bf, g8rc:$rA, g8rc:$rB)>;
def : InstAlias<"cmpli $bf, 1, $rA, $imm", (CMPLDI crrc:$bf, g8rc:$rA, u16imm64:$imm)>;
def : InstAlias<"cmpl $bf, 1, $rA, $rB", (CMPLD crrc:$bf, g8rc:$rA, g8rc:$rB)>;
multiclass TrapExtendedMnemonic<string name, int to> {
def : InstAlias<"td"#name#"i $rA, $imm", (TDI to, g8rc:$rA, s16imm:$imm)>;
def : InstAlias<"td"#name#" $rA, $rB", (TD to, g8rc:$rA, g8rc:$rB)>;
def : InstAlias<"tw"#name#"i $rA, $imm", (TWI to, gprc:$rA, s16imm:$imm)>;
def : InstAlias<"tw"#name#" $rA, $rB", (TW to, gprc:$rA, gprc:$rB)>;
}
defm : TrapExtendedMnemonic<"lt", 16>;
defm : TrapExtendedMnemonic<"le", 20>;
defm : TrapExtendedMnemonic<"eq", 4>;
defm : TrapExtendedMnemonic<"ge", 12>;
defm : TrapExtendedMnemonic<"gt", 8>;
defm : TrapExtendedMnemonic<"nl", 12>;
defm : TrapExtendedMnemonic<"ne", 24>;
defm : TrapExtendedMnemonic<"ng", 20>;
defm : TrapExtendedMnemonic<"llt", 2>;
defm : TrapExtendedMnemonic<"lle", 6>;
defm : TrapExtendedMnemonic<"lge", 5>;
defm : TrapExtendedMnemonic<"lgt", 1>;
defm : TrapExtendedMnemonic<"lnl", 5>;
defm : TrapExtendedMnemonic<"lng", 6>;
defm : TrapExtendedMnemonic<"u", 31>;
// Atomic loads
def : Pat<(atomic_load_8 iaddr:$src), (LBZ memri:$src)>;
def : Pat<(atomic_load_16 iaddr:$src), (LHZ memri:$src)>;
def : Pat<(atomic_load_32 iaddr:$src), (LWZ memri:$src)>;
def : Pat<(atomic_load_8 xaddr:$src), (LBZX memrr:$src)>;
def : Pat<(atomic_load_16 xaddr:$src), (LHZX memrr:$src)>;
def : Pat<(atomic_load_32 xaddr:$src), (LWZX memrr:$src)>;
// Atomic stores
def : Pat<(atomic_store_8 iaddr:$ptr, i32:$val), (STB gprc:$val, memri:$ptr)>;
def : Pat<(atomic_store_16 iaddr:$ptr, i32:$val), (STH gprc:$val, memri:$ptr)>;
def : Pat<(atomic_store_32 iaddr:$ptr, i32:$val), (STW gprc:$val, memri:$ptr)>;
def : Pat<(atomic_store_8 xaddr:$ptr, i32:$val), (STBX gprc:$val, memrr:$ptr)>;
def : Pat<(atomic_store_16 xaddr:$ptr, i32:$val), (STHX gprc:$val, memrr:$ptr)>;
def : Pat<(atomic_store_32 xaddr:$ptr, i32:$val), (STWX gprc:$val, memrr:$ptr)>;
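// Plain loads and stores suffice for these patterns because naturally
// aligned byte/halfword/word accesses are single-copy atomic on PPC; any
// ordering fences the atomic operation requires are inserted separately
// during atomic lowering.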
let Predicates = [IsISA3_0] in {
// Copy-Paste Facility
// We prefix 'CP' to COPY due to a name conflict with COPY in Target.td. We
// also prefix PASTE for naming consistency.
let mayLoad = 1 in
def CP_COPY : X_L1_RA5_RB5<31, 774, "copy" , gprc, IIC_LdStCOPY, []>;
let mayStore = 1 in
def CP_PASTE : X_L1_RA5_RB5<31, 902, "paste" , gprc, IIC_LdStPASTE, []>;
let mayStore = 1, Defs = [CR0] in
def CP_PASTEo : X_L1_RA5_RB5<31, 902, "paste.", gprc, IIC_LdStPASTE, []>, isDOT;
def CP_COPYx : PPCAsmPseudo<"copy $rA, $rB" , (ins gprc:$rA, gprc:$rB)>;
def CP_PASTEx : PPCAsmPseudo<"paste $rA, $rB", (ins gprc:$rA, gprc:$rB)>;
def CP_COPY_FIRST : PPCAsmPseudo<"copy_first $rA, $rB",
(ins gprc:$rA, gprc:$rB)>;
def CP_PASTE_LAST : PPCAsmPseudo<"paste_last $rA, $rB",
(ins gprc:$rA, gprc:$rB)>;
def CP_ABORT : XForm_0<31, 838, (outs), (ins), "cp_abort", IIC_SprABORT, []>;
// Message Synchronize
def MSGSYNC : XForm_0<31, 886, (outs), (ins), "msgsync", IIC_SprMSGSYNC, []>;
// Power-Saving Mode Instruction:
def STOP : XForm_0<19, 370, (outs), (ins), "stop", IIC_SprSTOP, []>;
} // IsISA3_0
// Fast 32-bit reverse bits algorithm:
// Step 1: 1-bit swap (swap odd and even bits):
// n = ((n >> 1) & 0x55555555) | ((n << 1) & 0xAAAAAAAA);
// Step 2: 2-bit swap (swap odd and even 2-bit groups):
// n = ((n >> 2) & 0x33333333) | ((n << 2) & 0xCCCCCCCC);
// Step 3: 4-bit swap (swap odd and even 4-bit groups, i.e. nibbles):
// n = ((n >> 4) & 0x0F0F0F0F) | ((n << 4) & 0xF0F0F0F0);
// Step 4: byte reverse (Suppose n = [B1,B2,B3,B4]):
// Step 4.1: Put B4,B2 in the right position (rotate left 3 bytes):
// n' = (n rotl 24); After which n' = [B4, B1, B2, B3]
// Step 4.2: Insert B3 into the right position:
// n' = rlwimi n', n, 8, 8, 15; After which n' = [B4, B3, B2, B3]
// Step 4.3: Insert B1 into the right position:
// n' = rlwimi n', n, 8, 24, 31; After which n' = [B4, B3, B2, B1]
def MaskValues {
dag Lo1 = (ORI (LIS 0x5555), 0x5555);
dag Hi1 = (ORI (LIS 0xAAAA), 0xAAAA);
dag Lo2 = (ORI (LIS 0x3333), 0x3333);
dag Hi2 = (ORI (LIS 0xCCCC), 0xCCCC);
dag Lo4 = (ORI (LIS 0x0F0F), 0x0F0F);
dag Hi4 = (ORI (LIS 0xF0F0), 0xF0F0);
}
def Shift1 {
dag Right = (RLWINM $A, 31, 1, 31);
dag Left = (RLWINM $A, 1, 0, 30);
}
def Swap1 {
dag Bit = (OR (AND Shift1.Right, MaskValues.Lo1),
(AND Shift1.Left, MaskValues.Hi1));
}
def Shift2 {
dag Right = (RLWINM Swap1.Bit, 30, 2, 31);
dag Left = (RLWINM Swap1.Bit, 2, 0, 29);
}
def Swap2 {
dag Bits = (OR (AND Shift2.Right, MaskValues.Lo2),
(AND Shift2.Left, MaskValues.Hi2));
}
def Shift4 {
dag Right = (RLWINM Swap2.Bits, 28, 4, 31);
dag Left = (RLWINM Swap2.Bits, 4, 0, 27);
}
def Swap4 {
dag Bits = (OR (AND Shift4.Right, MaskValues.Lo4),
(AND Shift4.Left, MaskValues.Hi4));
}
def Rotate {
dag Left3Bytes = (RLWINM Swap4.Bits, 24, 0, 31);
}
def RotateInsertByte3 {
dag Left = (RLWIMI Rotate.Left3Bytes, Swap4.Bits, 8, 8, 15);
}
def RotateInsertByte1 {
dag Left = (RLWIMI RotateInsertByte3.Left, Swap4.Bits, 8, 24, 31);
}
def : Pat<(i32 (bitreverse i32:$A)),
(RLDICL_32 RotateInsertByte1.Left, 0, 32)>;
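// For reference, a C sketch of the algorithm implemented by the defs above
// (illustrative only; rotl32 is a helper defined here, not part of this
// file, and <stdint.h> is assumed):
//
//   static uint32_t rotl32(uint32_t x, int s) {
//     return (x << s) | (x >> (32 - s));  // assumes 0 < s < 32
//   }
//   uint32_t bitreverse32(uint32_t n) {
//     n = ((n >> 1) & 0x55555555u) | ((n << 1) & 0xAAAAAAAAu); // 1-bit swap
//     n = ((n >> 2) & 0x33333333u) | ((n << 2) & 0xCCCCCCCCu); // 2-bit swap
//     n = ((n >> 4) & 0x0F0F0F0Fu) | ((n << 4) & 0xF0F0F0F0u); // 4-bit swap
//     uint32_t r = rotl32(n, 24);                      // [B4, B1, B2, B3]
//     r = (r & ~0x00FF0000u) | (rotl32(n, 8) & 0x00FF0000u); // insert B3
//     r = (r & ~0x000000FFu) | (rotl32(n, 8) & 0x000000FFu); // insert B1
//     return r;                                        // [B4, B3, B2, B1]
//   }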
// Fast 64-bit reverse bits algorithm:
// Step 1: 1-bit swap (swap odd and even bits):
// n = ((n >> 1) & 0x5555555555555555) | ((n << 1) & 0xAAAAAAAAAAAAAAAA);
// Step 2: 2-bit swap (swap odd and even 2-bit groups):
// n = ((n >> 2) & 0x3333333333333333) | ((n << 2) & 0xCCCCCCCCCCCCCCCC);
// Step 3: 4-bit swap (swap odd and even 4-bit groups, i.e. nibbles):
// n = ((n >> 4) & 0x0F0F0F0F0F0F0F0F) | ((n << 4) & 0xF0F0F0F0F0F0F0F0);
// Step 4: byte reverse (Suppose n = [B0,B1,B2,B3,B4,B5,B6,B7]):
// Apply the same byte-reverse algorithm used above for the fast 32-bit
// reverse to both the high 32 bits and the low 32 bits of the 64-bit value,
// then OR them together to get the final result.
def MaskValues64 {
dag Lo1 = (i64 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), MaskValues.Lo1, sub_32));
dag Hi1 = (i64 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), MaskValues.Hi1, sub_32));
dag Lo2 = (i64 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), MaskValues.Lo2, sub_32));
dag Hi2 = (i64 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), MaskValues.Hi2, sub_32));
dag Lo4 = (i64 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), MaskValues.Lo4, sub_32));
dag Hi4 = (i64 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), MaskValues.Hi4, sub_32));
}
def DWMaskValues {
dag Lo1 = (ORI8 (ORIS8 (RLDICR MaskValues64.Lo1, 32, 31), 0x5555), 0x5555);
dag Hi1 = (ORI8 (ORIS8 (RLDICR MaskValues64.Hi1, 32, 31), 0xAAAA), 0xAAAA);
dag Lo2 = (ORI8 (ORIS8 (RLDICR MaskValues64.Lo2, 32, 31), 0x3333), 0x3333);
dag Hi2 = (ORI8 (ORIS8 (RLDICR MaskValues64.Hi2, 32, 31), 0xCCCC), 0xCCCC);
dag Lo4 = (ORI8 (ORIS8 (RLDICR MaskValues64.Lo4, 32, 31), 0x0F0F), 0x0F0F);
dag Hi4 = (ORI8 (ORIS8 (RLDICR MaskValues64.Hi4, 32, 31), 0xF0F0), 0xF0F0);
}
def DWSwapInByte {
dag Swap1 = (OR8 (AND8 (RLDICL $A, 63, 1), DWMaskValues.Lo1),
(AND8 (RLDICR $A, 1, 62), DWMaskValues.Hi1));
dag Swap2 = (OR8 (AND8 (RLDICL Swap1, 62, 2), DWMaskValues.Lo2),
(AND8 (RLDICR Swap1, 2, 61), DWMaskValues.Hi2));
dag Swap4 = (OR8 (AND8 (RLDICL Swap2, 60, 4), DWMaskValues.Lo4),
(AND8 (RLDICR Swap2, 4, 59), DWMaskValues.Hi4));
}
// The intra-byte swap is done; now start the inter-byte swap.
def DWBytes4567 {
dag Word = (i32 (EXTRACT_SUBREG DWSwapInByte.Swap4, sub_32));
}
def DWBytes7456 {
dag Word = (RLWINM DWBytes4567.Word, 24, 0, 31);
}
def DWBytes7656 {
dag Word = (RLWIMI DWBytes7456.Word, DWBytes4567.Word, 8, 8, 15);
}
// B7 B6 B5 B4 in the right order
def DWBytes7654 {
dag Word = (RLWIMI DWBytes7656.Word, DWBytes4567.Word, 8, 24, 31);
dag DWord =
(i64 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), Word, sub_32));
}
def DWBytes0123 {
dag Word = (i32 (EXTRACT_SUBREG (RLDICL DWSwapInByte.Swap4, 32, 32), sub_32));
}
def DWBytes3012 {
dag Word = (RLWINM DWBytes0123.Word, 24, 0, 31);
}
def DWBytes3212 {
dag Word = (RLWIMI DWBytes3012.Word, DWBytes0123.Word, 8, 8, 15);
}
// B3 B2 B1 B0 in the right order
def DWBytes3210 {
dag Word = (RLWIMI DWBytes3212.Word, DWBytes0123.Word, 8, 24, 31);
dag DWord =
(i64 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), Word, sub_32));
}
// Now both the high word and the low word are reversed; next,
// swap the two words.
def : Pat<(i64 (bitreverse i64:$A)),
(OR8 (RLDICR DWBytes7654.DWord, 32, 31), DWBytes3210.DWord)>;
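// In other words (for reference): the defs above reverse the bits within
// each byte using the doubleword masks, byte-reverse each 32-bit half with
// the same rlwinm/rlwimi trick as the 32-bit case, and then join the halves
// as (reversed low half << 32) | reversed high half.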
Index: stable/12/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVRegisterInfo.h
===================================================================
--- stable/12/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVRegisterInfo.h (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVRegisterInfo.h (revision 356465)
@@ -1,58 +1,64 @@
//===-- RISCVRegisterInfo.h - RISCV Register Information Impl ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the RISCV implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_RISCV_RISCVREGISTERINFO_H
#define LLVM_LIB_TARGET_RISCV_RISCVREGISTERINFO_H
#include "llvm/CodeGen/TargetRegisterInfo.h"
#define GET_REGINFO_HEADER
#include "RISCVGenRegisterInfo.inc"
namespace llvm {
struct RISCVRegisterInfo : public RISCVGenRegisterInfo {
RISCVRegisterInfo(unsigned HwMode);
const uint32_t *getCallPreservedMask(const MachineFunction &MF,
CallingConv::ID) const override;
const MCPhysReg *getCalleeSavedRegs(const MachineFunction *MF) const override;
BitVector getReservedRegs(const MachineFunction &MF) const override;
bool isConstantPhysReg(unsigned PhysReg) const override;
const uint32_t *getNoPreservedMask() const override;
void eliminateFrameIndex(MachineBasicBlock::iterator MI, int SPAdj,
unsigned FIOperandNum,
RegScavenger *RS = nullptr) const override;
Register getFrameRegister(const MachineFunction &MF) const override;
bool requiresRegisterScavenging(const MachineFunction &MF) const override {
return true;
}
bool requiresFrameIndexScavenging(const MachineFunction &MF) const override {
return true;
}
bool trackLivenessAfterRegAlloc(const MachineFunction &) const override {
return true;
}
+
+ const TargetRegisterClass *
+ getPointerRegClass(const MachineFunction &MF,
+ unsigned Kind = 0) const override {
+ return &RISCV::GPRRegClass;
+ }
};
}
#endif
Index: stable/12/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrFloat.td
===================================================================
--- stable/12/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrFloat.td (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrFloat.td (revision 356465)
@@ -1,130 +1,123 @@
// WebAssemblyInstrFloat.td-WebAssembly Float codegen support ---*- tablegen -*-
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// WebAssembly Floating-point operand code-gen constructs.
///
//===----------------------------------------------------------------------===//
multiclass UnaryFP<SDNode node, string name, bits<32> f32Inst,
bits<32> f64Inst> {
defm _F32 : I<(outs F32:$dst), (ins F32:$src), (outs), (ins),
[(set F32:$dst, (node F32:$src))],
!strconcat("f32.", !strconcat(name, "\t$dst, $src")),
!strconcat("f32.", name), f32Inst>;
defm _F64 : I<(outs F64:$dst), (ins F64:$src), (outs), (ins),
[(set F64:$dst, (node F64:$src))],
!strconcat("f64.", !strconcat(name, "\t$dst, $src")),
!strconcat("f64.", name), f64Inst>;
}
multiclass BinaryFP<SDNode node, string name, bits<32> f32Inst,
bits<32> f64Inst> {
defm _F32 : I<(outs F32:$dst), (ins F32:$lhs, F32:$rhs), (outs), (ins),
[(set F32:$dst, (node F32:$lhs, F32:$rhs))],
!strconcat("f32.", !strconcat(name, "\t$dst, $lhs, $rhs")),
!strconcat("f32.", name), f32Inst>;
defm _F64 : I<(outs F64:$dst), (ins F64:$lhs, F64:$rhs), (outs), (ins),
[(set F64:$dst, (node F64:$lhs, F64:$rhs))],
!strconcat("f64.", !strconcat(name, "\t$dst, $lhs, $rhs")),
!strconcat("f64.", name), f64Inst>;
}
multiclass ComparisonFP<CondCode cond, string name, bits<32> f32Inst, bits<32> f64Inst> {
defm _F32 : I<(outs I32:$dst), (ins F32:$lhs, F32:$rhs), (outs), (ins),
[(set I32:$dst, (setcc F32:$lhs, F32:$rhs, cond))],
!strconcat("f32.", !strconcat(name, "\t$dst, $lhs, $rhs")),
!strconcat("f32.", name), f32Inst>;
defm _F64 : I<(outs I32:$dst), (ins F64:$lhs, F64:$rhs), (outs), (ins),
[(set I32:$dst, (setcc F64:$lhs, F64:$rhs, cond))],
!strconcat("f64.", !strconcat(name, "\t$dst, $lhs, $rhs")),
!strconcat("f64.", name), f64Inst>;
}
let isCommutable = 1 in
defm ADD : BinaryFP<fadd, "add ", 0x92, 0xa0>;
defm SUB : BinaryFP<fsub, "sub ", 0x93, 0xa1>;
let isCommutable = 1 in
defm MUL : BinaryFP<fmul, "mul ", 0x94, 0xa2>;
defm DIV : BinaryFP<fdiv, "div ", 0x95, 0xa3>;
defm SQRT : UnaryFP<fsqrt, "sqrt", 0x91, 0x9f>;
defm ABS : UnaryFP<fabs, "abs ", 0x8b, 0x99>;
defm NEG : UnaryFP<fneg, "neg ", 0x8c, 0x9a>;
defm COPYSIGN : BinaryFP<fcopysign, "copysign", 0x98, 0xa6>;
let isCommutable = 1 in {
defm MIN : BinaryFP<fminimum, "min ", 0x96, 0xa4>;
defm MAX : BinaryFP<fmaximum, "max ", 0x97, 0xa5>;
} // isCommutable = 1
defm CEIL : UnaryFP<fceil, "ceil", 0x8d, 0x9b>;
defm FLOOR : UnaryFP<ffloor, "floor", 0x8e, 0x9c>;
defm TRUNC : UnaryFP<ftrunc, "trunc", 0x8f, 0x9d>;
defm NEAREST : UnaryFP<fnearbyint, "nearest", 0x90, 0x9e>;
// DAGCombine oddly folds casts into the rhs of copysign. Unfold them.
def : Pat<(fcopysign F64:$lhs, F32:$rhs),
(COPYSIGN_F64 F64:$lhs, (F64_PROMOTE_F32 F32:$rhs))>;
def : Pat<(fcopysign F32:$lhs, F64:$rhs),
(COPYSIGN_F32 F32:$lhs, (F32_DEMOTE_F64 F64:$rhs))>;
// WebAssembly doesn't expose inexact exceptions, so map frint to fnearbyint.
def : Pat<(frint f32:$src), (NEAREST_F32 f32:$src)>;
def : Pat<(frint f64:$src), (NEAREST_F64 f64:$src)>;
let isCommutable = 1 in {
defm EQ : ComparisonFP<SETOEQ, "eq ", 0x5b, 0x61>;
defm NE : ComparisonFP<SETUNE, "ne ", 0x5c, 0x62>;
} // isCommutable = 1
defm LT : ComparisonFP<SETOLT, "lt ", 0x5d, 0x63>;
defm LE : ComparisonFP<SETOLE, "le ", 0x5f, 0x65>;
defm GT : ComparisonFP<SETOGT, "gt ", 0x5e, 0x64>;
defm GE : ComparisonFP<SETOGE, "ge ", 0x60, 0x66>;
// Don't-care floating-point comparisons (ordering unspecified), supported
// via the comparisons above.
def : Pat<(seteq f32:$lhs, f32:$rhs), (EQ_F32 f32:$lhs, f32:$rhs)>;
def : Pat<(setne f32:$lhs, f32:$rhs), (NE_F32 f32:$lhs, f32:$rhs)>;
def : Pat<(setlt f32:$lhs, f32:$rhs), (LT_F32 f32:$lhs, f32:$rhs)>;
def : Pat<(setle f32:$lhs, f32:$rhs), (LE_F32 f32:$lhs, f32:$rhs)>;
def : Pat<(setgt f32:$lhs, f32:$rhs), (GT_F32 f32:$lhs, f32:$rhs)>;
def : Pat<(setge f32:$lhs, f32:$rhs), (GE_F32 f32:$lhs, f32:$rhs)>;
def : Pat<(seteq f64:$lhs, f64:$rhs), (EQ_F64 f64:$lhs, f64:$rhs)>;
def : Pat<(setne f64:$lhs, f64:$rhs), (NE_F64 f64:$lhs, f64:$rhs)>;
def : Pat<(setlt f64:$lhs, f64:$rhs), (LT_F64 f64:$lhs, f64:$rhs)>;
def : Pat<(setle f64:$lhs, f64:$rhs), (LE_F64 f64:$lhs, f64:$rhs)>;
def : Pat<(setgt f64:$lhs, f64:$rhs), (GT_F64 f64:$lhs, f64:$rhs)>;
def : Pat<(setge f64:$lhs, f64:$rhs), (GE_F64 f64:$lhs, f64:$rhs)>;
defm SELECT_F32 : I<(outs F32:$dst), (ins F32:$lhs, F32:$rhs, I32:$cond),
(outs), (ins),
[(set F32:$dst, (select I32:$cond, F32:$lhs, F32:$rhs))],
"f32.select\t$dst, $lhs, $rhs, $cond", "f32.select", 0x1b>;
defm SELECT_F64 : I<(outs F64:$dst), (ins F64:$lhs, F64:$rhs, I32:$cond),
(outs), (ins),
[(set F64:$dst, (select I32:$cond, F64:$lhs, F64:$rhs))],
"f64.select\t$dst, $lhs, $rhs, $cond", "f64.select", 0x1b>;
// ISD::SELECT requires its operand to conform to getBooleanContents, but
// WebAssembly's select interprets any non-zero value as true, so we can fold
// a setne with 0 into a select.
def : Pat<(select (i32 (setne I32:$cond, 0)), F32:$lhs, F32:$rhs),
(SELECT_F32 F32:$lhs, F32:$rhs, I32:$cond)>;
def : Pat<(select (i32 (setne I32:$cond, 0)), F64:$lhs, F64:$rhs),
(SELECT_F64 F64:$lhs, F64:$rhs, I32:$cond)>;
// And again, this time with seteq instead of setne and the arms reversed.
def : Pat<(select (i32 (seteq I32:$cond, 0)), F32:$lhs, F32:$rhs),
(SELECT_F32 F32:$rhs, F32:$lhs, I32:$cond)>;
def : Pat<(select (i32 (seteq I32:$cond, 0)), F64:$lhs, F64:$rhs),
(SELECT_F64 F64:$rhs, F64:$lhs, I32:$cond)>;
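// For example (illustrative): IR such as
//   %c = icmp ne i32 %x, 0
//   %r = select i1 %c, float %a, float %b
// selects to "f32.select $a, $b, $x" with no explicit comparison emitted.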
-
-// The legalizer inserts an unnecessary `and 1` to make input conform
-// to getBooleanContents, which we can lower away.
-def : Pat<(select (i32 (and I32:$cond, 1)), F32:$lhs, F32:$rhs),
- (SELECT_F32 F32:$lhs, F32:$rhs, I32:$cond)>;
-def : Pat<(select (i32 (and I32:$cond, 1)), F64:$lhs, F64:$rhs),
- (SELECT_F64 F64:$lhs, F64:$rhs, I32:$cond)>;
Index: stable/12/contrib/llvm-project/llvm/lib/Target/X86/X86FrameLowering.cpp
===================================================================
--- stable/12/contrib/llvm-project/llvm/lib/Target/X86/X86FrameLowering.cpp (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/lib/Target/X86/X86FrameLowering.cpp (revision 356465)
@@ -1,3187 +1,3219 @@
//===-- X86FrameLowering.cpp - X86 Frame Information ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//
#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetOptions.h"
#include <cstdlib>
using namespace llvm;
X86FrameLowering::X86FrameLowering(const X86Subtarget &STI,
unsigned StackAlignOverride)
: TargetFrameLowering(StackGrowsDown, StackAlignOverride,
STI.is64Bit() ? -8 : -4),
STI(STI), TII(*STI.getInstrInfo()), TRI(STI.getRegisterInfo()) {
// Cache a bunch of frame-related predicates for this subtarget.
SlotSize = TRI->getSlotSize();
Is64Bit = STI.is64Bit();
IsLP64 = STI.isTarget64BitLP64();
// Standard x86_64 and NaCl use 64-bit frame/stack pointers; x32 uses 32-bit ones.
Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
StackPtr = TRI->getStackRegister();
}
bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
return !MF.getFrameInfo().hasVarSizedObjects() &&
!MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences();
}
/// canSimplifyCallFramePseudos - If there is a reserved call frame, the
/// call frame pseudos can be simplified. Having a FP, as in the default
/// implementation, is not sufficient here since we can't always use it.
/// Use a more nuanced condition.
bool
X86FrameLowering::canSimplifyCallFramePseudos(const MachineFunction &MF) const {
return hasReservedCallFrame(MF) ||
(hasFP(MF) && !TRI->needsStackRealignment(MF)) ||
TRI->hasBasePointer(MF);
}
// needsFrameIndexResolution - Do we need to perform FI resolution for
// this function? Normally, this is required only when the function
// has any stack objects. However, FI resolution actually has another job,
// not apparent from the title: it resolves call frame setup/destroy pseudos
// that were not simplified earlier.
// So, this is required for x86 functions that have push sequences even
// when there are no stack objects.
bool
X86FrameLowering::needsFrameIndexResolution(const MachineFunction &MF) const {
return MF.getFrameInfo().hasStackObjects() ||
MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences();
}
/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register. This is true if the function has variable sized allocas
/// or if frame pointer elimination is disabled.
bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
const MachineFrameInfo &MFI = MF.getFrameInfo();
return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
TRI->needsStackRealignment(MF) ||
MFI.hasVarSizedObjects() ||
MFI.isFrameAddressTaken() || MFI.hasOpaqueSPAdjustment() ||
MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
MF.callsUnwindInit() || MF.hasEHFunclets() || MF.callsEHReturn() ||
MFI.hasStackMap() || MFI.hasPatchPoint() ||
MFI.hasCopyImplyingStackAdjustment());
}
static unsigned getSUBriOpcode(unsigned IsLP64, int64_t Imm) {
if (IsLP64) {
if (isInt<8>(Imm))
return X86::SUB64ri8;
return X86::SUB64ri32;
} else {
if (isInt<8>(Imm))
return X86::SUB32ri8;
return X86::SUB32ri;
}
}
static unsigned getADDriOpcode(unsigned IsLP64, int64_t Imm) {
if (IsLP64) {
if (isInt<8>(Imm))
return X86::ADD64ri8;
return X86::ADD64ri32;
} else {
if (isInt<8>(Imm))
return X86::ADD32ri8;
return X86::ADD32ri;
}
}
static unsigned getSUBrrOpcode(unsigned isLP64) {
return isLP64 ? X86::SUB64rr : X86::SUB32rr;
}
static unsigned getADDrrOpcode(unsigned isLP64) {
return isLP64 ? X86::ADD64rr : X86::ADD32rr;
}
static unsigned getANDriOpcode(bool IsLP64, int64_t Imm) {
if (IsLP64) {
if (isInt<8>(Imm))
return X86::AND64ri8;
return X86::AND64ri32;
}
if (isInt<8>(Imm))
return X86::AND32ri8;
return X86::AND32ri;
}
static unsigned getLEArOpcode(unsigned IsLP64) {
return IsLP64 ? X86::LEA64r : X86::LEA32r;
}
/// findDeadCallerSavedReg - Return a caller-saved register that isn't live
/// when it reaches the "return" instruction. We can then pop a stack object
/// to this register without worrying about clobbering it.
static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
const X86RegisterInfo *TRI,
bool Is64Bit) {
const MachineFunction *MF = MBB.getParent();
if (MF->callsEHReturn())
return 0;
const TargetRegisterClass &AvailableRegs = *TRI->getGPRsForTailCall(*MF);
if (MBBI == MBB.end())
return 0;
switch (MBBI->getOpcode()) {
default: return 0;
case TargetOpcode::PATCHABLE_RET:
case X86::RET:
case X86::RETL:
case X86::RETQ:
case X86::RETIL:
case X86::RETIQ:
case X86::TCRETURNdi:
case X86::TCRETURNri:
case X86::TCRETURNmi:
case X86::TCRETURNdi64:
case X86::TCRETURNri64:
case X86::TCRETURNmi64:
case X86::EH_RETURN:
case X86::EH_RETURN64: {
SmallSet<uint16_t, 8> Uses;
for (unsigned i = 0, e = MBBI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MBBI->getOperand(i);
if (!MO.isReg() || MO.isDef())
continue;
unsigned Reg = MO.getReg();
if (!Reg)
continue;
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
Uses.insert(*AI);
}
for (auto CS : AvailableRegs)
if (!Uses.count(CS) && CS != X86::RIP && CS != X86::RSP &&
CS != X86::ESP)
return CS;
}
}
return 0;
}
static bool isEAXLiveIn(MachineBasicBlock &MBB) {
for (MachineBasicBlock::RegisterMaskPair RegMask : MBB.liveins()) {
unsigned Reg = RegMask.PhysReg;
if (Reg == X86::RAX || Reg == X86::EAX || Reg == X86::AX ||
Reg == X86::AH || Reg == X86::AL)
return true;
}
return false;
}
/// Check if the flags need to be preserved before the terminators.
/// This would be the case, if the eflags is live-in of the region
/// composed by the terminators or live-out of that region, without
/// being defined by a terminator.
static bool
flagsNeedToBePreservedBeforeTheTerminators(const MachineBasicBlock &MBB) {
for (const MachineInstr &MI : MBB.terminators()) {
bool BreakNext = false;
for (const MachineOperand &MO : MI.operands()) {
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
if (Reg != X86::EFLAGS)
continue;
// This terminator needs an EFLAGS value that is not defined
// by a previous terminator:
// EFLAGS is live-in of the region composed by the terminators.
if (!MO.isDef())
return true;
// This terminator defines the eflags, i.e., we don't need to preserve it.
// However, we still need to check that this specific terminator does not
// read a live-in value.
BreakNext = true;
}
// We found a definition of the eflags, no need to preserve them.
if (BreakNext)
return false;
}
// None of the terminators use or define the eflags.
// Check if they are live-out, that would imply we need to preserve them.
for (const MachineBasicBlock *Succ : MBB.successors())
if (Succ->isLiveIn(X86::EFLAGS))
return true;
return false;
}
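// For example, if a block ends in a conditional jump whose EFLAGS input is
// produced before the terminator sequence, EFLAGS is live-in to that region
// and this returns true; any SP adjustment inserted before the terminators
// must then avoid clobbering the flags (e.g. by using LEA rather than
// ADD/SUB).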
/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value.
void X86FrameLowering::emitSPUpdate(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
const DebugLoc &DL,
int64_t NumBytes, bool InEpilogue) const {
bool isSub = NumBytes < 0;
uint64_t Offset = isSub ? -NumBytes : NumBytes;
MachineInstr::MIFlag Flag =
isSub ? MachineInstr::FrameSetup : MachineInstr::FrameDestroy;
uint64_t Chunk = (1LL << 31) - 1;
if (Offset > Chunk) {
// Rather than emit a long series of instructions for large offsets,
// load the offset into a register and do one sub/add
unsigned Reg = 0;
unsigned Rax = (unsigned)(Is64Bit ? X86::RAX : X86::EAX);
if (isSub && !isEAXLiveIn(MBB))
Reg = Rax;
else
Reg = findDeadCallerSavedReg(MBB, MBBI, TRI, Is64Bit);
unsigned MovRIOpc = Is64Bit ? X86::MOV64ri : X86::MOV32ri;
unsigned AddSubRROpc =
isSub ? getSUBrrOpcode(Is64Bit) : getADDrrOpcode(Is64Bit);
if (Reg) {
BuildMI(MBB, MBBI, DL, TII.get(MovRIOpc), Reg)
.addImm(Offset)
.setMIFlag(Flag);
MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(AddSubRROpc), StackPtr)
.addReg(StackPtr)
.addReg(Reg);
MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
return;
} else if (Offset > 8 * Chunk) {
// If we would need more than 8 add or sub instructions (a >16GB stack
// frame), it's worth spilling RAX to materialize this immediate.
// pushq %rax
// movabsq +-$Offset+-SlotSize, %rax
// addq %rsp, %rax
// xchg %rax, (%rsp)
// movq (%rsp), %rsp
assert(Is64Bit && "can't have 32-bit 16GB stack frame");
BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64r))
.addReg(Rax, RegState::Kill)
.setMIFlag(Flag);
// Subtract is not commutative, so negate the offset and always use add.
// Subtract 8 less and add 8 more to account for the PUSH we just did.
if (isSub)
Offset = -(Offset - SlotSize);
else
Offset = Offset + SlotSize;
BuildMI(MBB, MBBI, DL, TII.get(MovRIOpc), Rax)
.addImm(Offset)
.setMIFlag(Flag);
MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(X86::ADD64rr), Rax)
.addReg(Rax)
.addReg(StackPtr);
MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
// Exchange the new SP in RAX with the top of the stack.
addRegOffset(
BuildMI(MBB, MBBI, DL, TII.get(X86::XCHG64rm), Rax).addReg(Rax),
StackPtr, false, 0);
// Load new SP from the top of the stack into RSP.
addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rm), StackPtr),
StackPtr, false, 0);
return;
}
}
while (Offset) {
uint64_t ThisVal = std::min(Offset, Chunk);
if (ThisVal == SlotSize) {
// Use push / pop for slot-sized adjustments as a size optimization. We
// need to find a dead register when using pop.
unsigned Reg = isSub
? (unsigned)(Is64Bit ? X86::RAX : X86::EAX)
: findDeadCallerSavedReg(MBB, MBBI, TRI, Is64Bit);
if (Reg) {
unsigned Opc = isSub
? (Is64Bit ? X86::PUSH64r : X86::PUSH32r)
: (Is64Bit ? X86::POP64r : X86::POP32r);
BuildMI(MBB, MBBI, DL, TII.get(Opc))
.addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub))
.setMIFlag(Flag);
Offset -= ThisVal;
continue;
}
}
BuildStackAdjustment(MBB, MBBI, DL, isSub ? -ThisVal : ThisVal, InEpilogue)
.setMIFlag(Flag);
Offset -= ThisVal;
}
}
MachineInstrBuilder X86FrameLowering::BuildStackAdjustment(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
const DebugLoc &DL, int64_t Offset, bool InEpilogue) const {
assert(Offset != 0 && "zero offset stack adjustment requested");
// On Atom, using LEA to adjust SP is preferred, but using it in the epilogue
// is tricky.
bool UseLEA;
if (!InEpilogue) {
// Check if inserting the prologue at the beginning
// of MBB would require to use LEA operations.
// We need to use LEA operations if EFLAGS is live in, because
// it means an instruction will read it before it gets defined.
UseLEA = STI.useLeaForSP() || MBB.isLiveIn(X86::EFLAGS);
} else {
// If we can use LEA for SP but we shouldn't, check that none
// of the terminators uses the eflags. Otherwise we will insert
// an ADD that will redefine the eflags and break the condition.
// Alternatively, we could move the ADD, but this may not be possible
// and is an optimization anyway.
UseLEA = canUseLEAForSPInEpilogue(*MBB.getParent());
if (UseLEA && !STI.useLeaForSP())
UseLEA = flagsNeedToBePreservedBeforeTheTerminators(MBB);
// If the assert below fires, it means we do not do the right thing
// in canUseAsEpilogue.
assert((UseLEA || !flagsNeedToBePreservedBeforeTheTerminators(MBB)) &&
"We shouldn't have allowed this insertion point");
}
MachineInstrBuilder MI;
if (UseLEA) {
MI = addRegOffset(BuildMI(MBB, MBBI, DL,
TII.get(getLEArOpcode(Uses64BitFramePtr)),
StackPtr),
StackPtr, false, Offset);
} else {
bool IsSub = Offset < 0;
uint64_t AbsOffset = IsSub ? -Offset : Offset;
unsigned Opc = IsSub ? getSUBriOpcode(Uses64BitFramePtr, AbsOffset)
: getADDriOpcode(Uses64BitFramePtr, AbsOffset);
MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
.addReg(StackPtr)
.addImm(AbsOffset);
MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
}
return MI;
}
int X86FrameLowering::mergeSPUpdates(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
bool doMergeWithPrevious) const {
if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
(!doMergeWithPrevious && MBBI == MBB.end()))
return 0;
MachineBasicBlock::iterator PI = doMergeWithPrevious ? std::prev(MBBI) : MBBI;
PI = skipDebugInstructionsBackward(PI, MBB.begin());
// It is assumed that the ADD/SUB/LEA instruction is succeeded by one CFI
// instruction, and that there are no DBG_VALUE or other instructions between
// ADD/SUB/LEA and its corresponding CFI instruction.
/* TODO: Add support for the case where there are multiple CFI instructions
below the ADD/SUB/LEA, e.g.:
...
add
cfi_def_cfa_offset
cfi_offset
...
*/
if (doMergeWithPrevious && PI != MBB.begin() && PI->isCFIInstruction())
PI = std::prev(PI);
unsigned Opc = PI->getOpcode();
int Offset = 0;
if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
PI->getOperand(0).getReg() == StackPtr){
assert(PI->getOperand(1).getReg() == StackPtr);
Offset = PI->getOperand(2).getImm();
} else if ((Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
PI->getOperand(0).getReg() == StackPtr &&
PI->getOperand(1).getReg() == StackPtr &&
PI->getOperand(2).getImm() == 1 &&
PI->getOperand(3).getReg() == X86::NoRegister &&
PI->getOperand(5).getReg() == X86::NoRegister) {
// For LEAs we have: def = lea SP, FI, noreg, Offset, noreg.
Offset = PI->getOperand(4).getImm();
} else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
PI->getOperand(0).getReg() == StackPtr) {
assert(PI->getOperand(1).getReg() == StackPtr);
Offset = -PI->getOperand(2).getImm();
} else
return 0;
PI = MBB.erase(PI);
if (PI != MBB.end() && PI->isCFIInstruction()) PI = MBB.erase(PI);
if (!doMergeWithPrevious)
MBBI = skipDebugInstructionsForward(PI, MBB.end());
return Offset;
}
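// For example (illustrative): if the caller asks to merge with a previous
// "ADD64ri8 $rsp, $rsp, 32", that instruction (and its trailing CFI
// instruction, if any) is erased and 32 is returned, letting the caller
// fold it into a single combined SP adjustment.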
void X86FrameLowering::BuildCFI(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
const DebugLoc &DL,
const MCCFIInstruction &CFIInst) const {
MachineFunction &MF = *MBB.getParent();
unsigned CFIIndex = MF.addFrameInst(CFIInst);
BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
.addCFIIndex(CFIIndex);
}
void X86FrameLowering::emitCalleeSavedFrameMoves(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
const DebugLoc &DL) const {
MachineFunction &MF = *MBB.getParent();
MachineFrameInfo &MFI = MF.getFrameInfo();
MachineModuleInfo &MMI = MF.getMMI();
const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
// Add callee saved registers to move list.
const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
if (CSI.empty()) return;
// Calculate offsets.
for (std::vector<CalleeSavedInfo>::const_iterator
I = CSI.begin(), E = CSI.end(); I != E; ++I) {
int64_t Offset = MFI.getObjectOffset(I->getFrameIdx());
unsigned Reg = I->getReg();
unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
BuildCFI(MBB, MBBI, DL,
MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
}
}
void X86FrameLowering::emitStackProbe(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
const DebugLoc &DL, bool InProlog) const {
const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
if (STI.isTargetWindowsCoreCLR()) {
if (InProlog) {
emitStackProbeInlineStub(MF, MBB, MBBI, DL, true);
} else {
emitStackProbeInline(MF, MBB, MBBI, DL, false);
}
} else {
emitStackProbeCall(MF, MBB, MBBI, DL, InProlog);
}
}
void X86FrameLowering::inlineStackProbe(MachineFunction &MF,
MachineBasicBlock &PrologMBB) const {
const StringRef ChkStkStubSymbol = "__chkstk_stub";
MachineInstr *ChkStkStub = nullptr;
for (MachineInstr &MI : PrologMBB) {
if (MI.isCall() && MI.getOperand(0).isSymbol() &&
ChkStkStubSymbol == MI.getOperand(0).getSymbolName()) {
ChkStkStub = &MI;
break;
}
}
if (ChkStkStub != nullptr) {
assert(!ChkStkStub->isBundled() &&
"Not expecting bundled instructions here");
MachineBasicBlock::iterator MBBI = std::next(ChkStkStub->getIterator());
assert(std::prev(MBBI) == ChkStkStub &&
"MBBI expected after __chkstk_stub.");
DebugLoc DL = PrologMBB.findDebugLoc(MBBI);
emitStackProbeInline(MF, PrologMBB, MBBI, DL, true);
ChkStkStub->eraseFromParent();
}
}
void X86FrameLowering::emitStackProbeInline(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
const DebugLoc &DL,
bool InProlog) const {
const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
assert(STI.is64Bit() && "different expansion needed for 32 bit");
assert(STI.isTargetWindowsCoreCLR() && "custom expansion expects CoreCLR");
const TargetInstrInfo &TII = *STI.getInstrInfo();
const BasicBlock *LLVM_BB = MBB.getBasicBlock();
// RAX contains the number of bytes of desired stack adjustment.
// The handling here assumes this value has already been updated so as to
// maintain stack alignment.
//
// We need to exit with RSP modified by this amount and execute suitable
// page touches to notify the OS that we're growing the stack responsibly.
// All stack probing must be done without modifying RSP.
//
// MBB:
// SizeReg = RAX;
// ZeroReg = 0
// CopyReg = RSP
// Flags, TestReg = CopyReg - SizeReg
// FinalReg = !Flags.Ovf ? TestReg : ZeroReg
// LimitReg = gs magic thread env access
// if FinalReg >= LimitReg goto ContinueMBB
// RoundBB:
// RoundReg = page address of FinalReg
// LoopMBB:
// LoopReg = PHI(LimitReg,ProbeReg)
// ProbeReg = LoopReg - PageSize
// [ProbeReg] = 0
// if (ProbeReg > RoundReg) goto LoopMBB
// ContinueMBB:
// RSP = RSP - RAX
// [rest of original MBB]
// Set up the new basic blocks
MachineBasicBlock *RoundMBB = MF.CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *ContinueMBB = MF.CreateMachineBasicBlock(LLVM_BB);
MachineFunction::iterator MBBIter = std::next(MBB.getIterator());
MF.insert(MBBIter, RoundMBB);
MF.insert(MBBIter, LoopMBB);
MF.insert(MBBIter, ContinueMBB);
// Split MBB and move the tail portion down to ContinueMBB.
MachineBasicBlock::iterator BeforeMBBI = std::prev(MBBI);
ContinueMBB->splice(ContinueMBB->begin(), &MBB, MBBI, MBB.end());
ContinueMBB->transferSuccessorsAndUpdatePHIs(&MBB);
// Some useful constants
const int64_t ThreadEnvironmentStackLimit = 0x10;
const int64_t PageSize = 0x1000;
const int64_t PageMask = ~(PageSize - 1);
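// E.g. with PageSize = 0x1000, PageMask = 0xFFFFFFFFFFFFF000, so ANDing
// with it rounds an address down to a page boundary.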
// Registers we need. For the normal case we use virtual
// registers. For the prolog expansion we use RAX, RCX and RDX.
MachineRegisterInfo &MRI = MF.getRegInfo();
const TargetRegisterClass *RegClass = &X86::GR64RegClass;
const Register SizeReg = InProlog ? X86::RAX
: MRI.createVirtualRegister(RegClass),
ZeroReg = InProlog ? X86::RCX
: MRI.createVirtualRegister(RegClass),
CopyReg = InProlog ? X86::RDX
: MRI.createVirtualRegister(RegClass),
TestReg = InProlog ? X86::RDX
: MRI.createVirtualRegister(RegClass),
FinalReg = InProlog ? X86::RDX
: MRI.createVirtualRegister(RegClass),
RoundedReg = InProlog ? X86::RDX
: MRI.createVirtualRegister(RegClass),
LimitReg = InProlog ? X86::RCX
: MRI.createVirtualRegister(RegClass),
JoinReg = InProlog ? X86::RCX
: MRI.createVirtualRegister(RegClass),
ProbeReg = InProlog ? X86::RCX
: MRI.createVirtualRegister(RegClass);
// SP-relative offsets where we can save RCX and RDX.
int64_t RCXShadowSlot = 0;
int64_t RDXShadowSlot = 0;
// If inlining in the prolog, save RCX and RDX.
if (InProlog) {
// Compute the offsets. We need to account for things already
// pushed onto the stack at this point: return address, frame
// pointer (if used), and callee saves.
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
const int64_t CalleeSaveSize = X86FI->getCalleeSavedFrameSize();
const bool HasFP = hasFP(MF);
// Check if we need to spill RCX and/or RDX.
// Here we assume that no earlier prologue instruction changes RCX and/or
// RDX, so checking the block live-ins is enough.
const bool IsRCXLiveIn = MBB.isLiveIn(X86::RCX);
const bool IsRDXLiveIn = MBB.isLiveIn(X86::RDX);
int64_t InitSlot = 8 + CalleeSaveSize + (HasFP ? 8 : 0);
// Assign the initial slot to both registers, then change RDX's slot if both
// need to be spilled.
if (IsRCXLiveIn)
RCXShadowSlot = InitSlot;
if (IsRDXLiveIn)
RDXShadowSlot = InitSlot;
if (IsRDXLiveIn && IsRCXLiveIn)
RDXShadowSlot += 8;
// Emit the saves if needed.
if (IsRCXLiveIn)
addRegOffset(BuildMI(&MBB, DL, TII.get(X86::MOV64mr)), X86::RSP, false,
RCXShadowSlot)
.addReg(X86::RCX);
if (IsRDXLiveIn)
addRegOffset(BuildMI(&MBB, DL, TII.get(X86::MOV64mr)), X86::RSP, false,
RDXShadowSlot)
.addReg(X86::RDX);
} else {
// Not in the prolog. Copy RAX to a virtual reg.
BuildMI(&MBB, DL, TII.get(X86::MOV64rr), SizeReg).addReg(X86::RAX);
}
// Add code to MBB to check for overflow and set the new target stack pointer
// to zero if so.
BuildMI(&MBB, DL, TII.get(X86::XOR64rr), ZeroReg)
.addReg(ZeroReg, RegState::Undef)
.addReg(ZeroReg, RegState::Undef);
BuildMI(&MBB, DL, TII.get(X86::MOV64rr), CopyReg).addReg(X86::RSP);
BuildMI(&MBB, DL, TII.get(X86::SUB64rr), TestReg)
.addReg(CopyReg)
.addReg(SizeReg);
BuildMI(&MBB, DL, TII.get(X86::CMOV64rr), FinalReg)
.addReg(TestReg)
.addReg(ZeroReg)
.addImm(X86::COND_B);
// FinalReg now holds final stack pointer value, or zero if
// allocation would overflow. Compare against the current stack
// limit from the thread environment block. Note this limit is the
// lowest touched page on the stack, not the point at which the OS
// will cause an overflow exception, so this is just an optimization
// to avoid unnecessarily touching pages that are below the current
// SP but already committed to the stack by the OS.
BuildMI(&MBB, DL, TII.get(X86::MOV64rm), LimitReg)
.addReg(0)
.addImm(1)
.addReg(0)
.addImm(ThreadEnvironmentStackLimit)
.addReg(X86::GS);
BuildMI(&MBB, DL, TII.get(X86::CMP64rr)).addReg(FinalReg).addReg(LimitReg);
// Jump if the desired stack pointer is at or above the stack limit.
BuildMI(&MBB, DL, TII.get(X86::JCC_1)).addMBB(ContinueMBB).addImm(X86::COND_AE);
// Add code to roundMBB to round the final stack pointer to a page boundary.
RoundMBB->addLiveIn(FinalReg);
BuildMI(RoundMBB, DL, TII.get(X86::AND64ri32), RoundedReg)
.addReg(FinalReg)
.addImm(PageMask);
BuildMI(RoundMBB, DL, TII.get(X86::JMP_1)).addMBB(LoopMBB);
// LimitReg now holds the current stack limit and RoundedReg the page-rounded
// final RSP value. Add code to loopMBB to probe downward page-by-page from
// LimitReg until we reach RoundedReg.
if (!InProlog) {
BuildMI(LoopMBB, DL, TII.get(X86::PHI), JoinReg)
.addReg(LimitReg)
.addMBB(RoundMBB)
.addReg(ProbeReg)
.addMBB(LoopMBB);
}
LoopMBB->addLiveIn(JoinReg);
addRegOffset(BuildMI(LoopMBB, DL, TII.get(X86::LEA64r), ProbeReg), JoinReg,
false, -PageSize);
// Probe by storing a byte onto the stack.
BuildMI(LoopMBB, DL, TII.get(X86::MOV8mi))
.addReg(ProbeReg)
.addImm(1)
.addReg(0)
.addImm(0)
.addReg(0)
.addImm(0);
LoopMBB->addLiveIn(RoundedReg);
BuildMI(LoopMBB, DL, TII.get(X86::CMP64rr))
.addReg(RoundedReg)
.addReg(ProbeReg);
BuildMI(LoopMBB, DL, TII.get(X86::JCC_1)).addMBB(LoopMBB).addImm(X86::COND_NE);
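// Rough sketch of the probe loop emitted above (illustrative, AT&T syntax;
// PageSize is typically 4096):
// loop:
//   leaq -PageSize(%join), %probe   ; step down one page
//   movb $0, (%probe)               ; touch the page
//   cmpq %probe, %rounded
//   jne  loop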
MachineBasicBlock::iterator ContinueMBBI = ContinueMBB->getFirstNonPHI();
// If in prolog, restore RDX and RCX.
if (InProlog) {
if (RCXShadowSlot) // Nonzero means we spilled RCX in the prologue.
addRegOffset(BuildMI(*ContinueMBB, ContinueMBBI, DL,
TII.get(X86::MOV64rm), X86::RCX),
X86::RSP, false, RCXShadowSlot);
if (RDXShadowSlot) // Nonzero means we spilled RDX in the prologue.
addRegOffset(BuildMI(*ContinueMBB, ContinueMBBI, DL,
TII.get(X86::MOV64rm), X86::RDX),
X86::RSP, false, RDXShadowSlot);
}
// Now that the probing is done, add code to continueMBB to update
// the stack pointer for real.
ContinueMBB->addLiveIn(SizeReg);
BuildMI(*ContinueMBB, ContinueMBBI, DL, TII.get(X86::SUB64rr), X86::RSP)
.addReg(X86::RSP)
.addReg(SizeReg);
// Add the control flow edges we need.
MBB.addSuccessor(ContinueMBB);
MBB.addSuccessor(RoundMBB);
RoundMBB->addSuccessor(LoopMBB);
LoopMBB->addSuccessor(ContinueMBB);
LoopMBB->addSuccessor(LoopMBB);
// Mark all the instructions added to the prolog as frame setup.
if (InProlog) {
for (++BeforeMBBI; BeforeMBBI != MBB.end(); ++BeforeMBBI) {
BeforeMBBI->setFlag(MachineInstr::FrameSetup);
}
for (MachineInstr &MI : *RoundMBB) {
MI.setFlag(MachineInstr::FrameSetup);
}
for (MachineInstr &MI : *LoopMBB) {
MI.setFlag(MachineInstr::FrameSetup);
}
for (MachineBasicBlock::iterator CMBBI = ContinueMBB->begin();
CMBBI != ContinueMBBI; ++CMBBI) {
CMBBI->setFlag(MachineInstr::FrameSetup);
}
}
}
void X86FrameLowering::emitStackProbeCall(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
const DebugLoc &DL,
bool InProlog) const {
bool IsLargeCodeModel = MF.getTarget().getCodeModel() == CodeModel::Large;
// FIXME: Add retpoline support and remove this.
if (Is64Bit && IsLargeCodeModel && STI.useRetpolineIndirectCalls())
report_fatal_error("Emitting stack probe calls on 64-bit with the large "
"code model and retpoline not yet implemented.");
unsigned CallOp;
if (Is64Bit)
CallOp = IsLargeCodeModel ? X86::CALL64r : X86::CALL64pcrel32;
else
CallOp = X86::CALLpcrel32;
StringRef Symbol = STI.getTargetLowering()->getStackProbeSymbolName(MF);
MachineInstrBuilder CI;
MachineBasicBlock::iterator ExpansionMBBI = std::prev(MBBI);
// All current stack probes take AX and SP as input, clobber flags, and
// preserve all registers. x86_64 probes leave RSP unmodified.
if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
// For the large code model, we have to call through a register. Use R11,
// as it is scratch in all supported calling conventions.
BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::R11)
.addExternalSymbol(MF.createExternalSymbolName(Symbol));
CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)).addReg(X86::R11);
} else {
CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp))
.addExternalSymbol(MF.createExternalSymbolName(Symbol));
}
unsigned AX = Uses64BitFramePtr ? X86::RAX : X86::EAX;
unsigned SP = Uses64BitFramePtr ? X86::RSP : X86::ESP;
CI.addReg(AX, RegState::Implicit)
.addReg(SP, RegState::Implicit)
.addReg(AX, RegState::Define | RegState::Implicit)
.addReg(SP, RegState::Define | RegState::Implicit)
.addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
if (STI.isTargetWin64() || !STI.isOSWindows()) {
// MSVC x32's _chkstk and cygwin/mingw's _alloca adjust %esp themselves.
// MSVC x64's __chkstk and cygwin/mingw's ___chkstk_ms do not adjust %rsp
// themselves. They also do not clobber %rax, so we can reuse it when
// adjusting %rsp.
// All other platforms do not specify a particular ABI for the stack probe
// function, so we arbitrarily define it to not adjust %esp/%rsp itself.
// All other platforms do not specify a particular ABI for the stack probe
// function, so we arbitrarily define it to not adjust %esp/%rsp itself.
BuildMI(MBB, MBBI, DL, TII.get(getSUBrrOpcode(Uses64BitFramePtr)), SP)
.addReg(SP)
.addReg(AX);
}
if (InProlog) {
// Apply the frame setup flag to all inserted instrs.
for (++ExpansionMBBI; ExpansionMBBI != MBBI; ++ExpansionMBBI)
ExpansionMBBI->setFlag(MachineInstr::FrameSetup);
}
}
void X86FrameLowering::emitStackProbeInlineStub(
MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog) const {
assert(InProlog && "ChkStkStub called outside prolog!");
BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
.addExternalSymbol("__chkstk_stub");
}
static unsigned calculateSetFPREG(uint64_t SPAdjust) {
// Win64 ABI has a less restrictive limitation of 240; 128 works equally well
// and might require smaller successive adjustments.
const uint64_t Win64MaxSEHOffset = 128;
uint64_t SEHFrameOffset = std::min(SPAdjust, Win64MaxSEHOffset);
// Win64 ABI requires 16-byte alignment for the UWOP_SET_FPREG opcode.
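// For example (illustrative): SPAdjust = 40 gives min(40, 128) & -16 = 32,
// while SPAdjust = 1000 gives min(1000, 128) & -16 = 128.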
return SEHFrameOffset & -16;
}
// If we're forcing a stack realignment we can't rely on just the frame
// info, we need to know the ABI stack alignment as well in case we
// have a call out. Otherwise just make sure we have some alignment - we'll
// go with the minimum SlotSize.
uint64_t X86FrameLowering::calculateMaxStackAlign(const MachineFunction &MF) const {
const MachineFrameInfo &MFI = MF.getFrameInfo();
uint64_t MaxAlign = MFI.getMaxAlignment(); // Desired stack alignment.
unsigned StackAlign = getStackAlignment();
if (MF.getFunction().hasFnAttribute("stackrealign")) {
if (MFI.hasCalls())
MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
else if (MaxAlign < SlotSize)
MaxAlign = SlotSize;
}
return MaxAlign;
}
void X86FrameLowering::BuildStackAlignAND(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
const DebugLoc &DL, unsigned Reg,
uint64_t MaxAlign) const {
uint64_t Val = -MaxAlign;
unsigned AndOp = getANDriOpcode(Uses64BitFramePtr, Val);
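// Illustrative example (not in the original source): MaxAlign = 32 gives
// Val = -32, whose low five bits are clear, so the AND below rounds the
// register down to a 32-byte boundary (e.g. `andq $-32, %rsp`).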
MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(AndOp), Reg)
.addReg(Reg)
.addImm(Val)
.setMIFlag(MachineInstr::FrameSetup);
// The EFLAGS implicit def is dead.
MI->getOperand(3).setIsDead();
}
bool X86FrameLowering::has128ByteRedZone(const MachineFunction& MF) const {
// x86-64 (non Win64) has a 128 byte red zone which is guaranteed not to be
// clobbered by any interrupt handler.
assert(&STI == &MF.getSubtarget<X86Subtarget>() &&
"MF used frame lowering for wrong subtarget");
const Function &Fn = MF.getFunction();
const bool IsWin64CC = STI.isCallingConvWin64(Fn.getCallingConv());
return Is64Bit && !IsWin64CC && !Fn.hasFnAttribute(Attribute::NoRedZone);
}
/// emitPrologue - Push callee-saved registers onto the stack, which
/// automatically adjusts the stack pointer. Adjust the stack pointer to
/// allocate space for local variables. Also emit labels used by the exception
/// handler to generate the exception handling frames.
/*
Here's a gist of what gets emitted:
; Establish frame pointer, if needed
[if needs FP]
push %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
.seh_pushreg %rbp
mov %rsp, %rbp
.cfi_def_cfa_register %rbp
; Spill general-purpose registers
[for all callee-saved GPRs]
pushq %<reg>
[if not needs FP]
.cfi_def_cfa_offset (offset from RETADDR)
.seh_pushreg %<reg>
; If the required stack alignment > default stack alignment
; rsp needs to be re-aligned. This creates a "re-alignment gap"
; of unknown size in the stack frame.
[if stack needs re-alignment]
and $MASK, %rsp
; Allocate space for locals
[if target is Windows and allocated space > 4096 bytes]
; Windows needs special care for allocations larger
; than one page.
mov $NNN, %rax
call ___chkstk_ms/___chkstk
sub %rax, %rsp
[else]
sub $NNN, %rsp
[if needs FP]
.seh_stackalloc (size of XMM spill slots)
.seh_setframe %rbp, SEHFrameOffset ; = size of all spill slots
[else]
.seh_stackalloc NNN
; Spill XMMs
; Note that, while only the Windows 64 ABI specifies XMMs as callee-preserved,
; they may get spilled on any platform, if the current function
; calls @llvm.eh.unwind.init
[if needs FP]
[for all callee-saved XMM registers]
movaps %<xmm reg>, -MMM(%rbp)
[for all callee-saved XMM registers]
.seh_savexmm %<xmm reg>, (-MMM + SEHFrameOffset)
; i.e. the offset relative to (%rbp - SEHFrameOffset)
[else]
[for all callee-saved XMM registers]
movaps %<xmm reg>, KKK(%rsp)
[for all callee-saved XMM registers]
.seh_savexmm %<xmm reg>, KKK
.seh_endprologue
[if needs base pointer]
mov %rsp, %rbx
[if needs to restore base pointer]
mov %rsp, -MMM(%rbp)
; Emit CFI info
[if needs FP]
[for all callee-saved registers]
.cfi_offset %<reg>, (offset from %rbp)
[else]
.cfi_def_cfa_offset (offset from RETADDR)
[for all callee-saved registers]
.cfi_offset %<reg>, (offset from %rsp)
Notes:
- .seh directives are emitted only for Windows 64 ABI
- .cv_fpo directives are emitted on win32 when emitting CodeView
- .cfi directives are emitted for all other ABIs
- for 32-bit code, substitute %e?? registers for %r??
*/
void X86FrameLowering::emitPrologue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
assert(&STI == &MF.getSubtarget<X86Subtarget>() &&
"MF used frame lowering for wrong subtarget");
MachineBasicBlock::iterator MBBI = MBB.begin();
MachineFrameInfo &MFI = MF.getFrameInfo();
const Function &Fn = MF.getFunction();
MachineModuleInfo &MMI = MF.getMMI();
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
uint64_t MaxAlign = calculateMaxStackAlign(MF); // Desired stack alignment.
uint64_t StackSize = MFI.getStackSize(); // Number of bytes to allocate.
bool IsFunclet = MBB.isEHFuncletEntry();
EHPersonality Personality = EHPersonality::Unknown;
if (Fn.hasPersonalityFn())
Personality = classifyEHPersonality(Fn.getPersonalityFn());
bool FnHasClrFunclet =
MF.hasEHFunclets() && Personality == EHPersonality::CoreCLR;
bool IsClrFunclet = IsFunclet && FnHasClrFunclet;
bool HasFP = hasFP(MF);
bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
bool NeedsWin64CFI = IsWin64Prologue && Fn.needsUnwindTableEntry();
// FIXME: Emit FPO data for EH funclets.
bool NeedsWinFPO =
!IsFunclet && STI.isTargetWin32() && MMI.getModule()->getCodeViewFlag();
bool NeedsWinCFI = NeedsWin64CFI || NeedsWinFPO;
bool NeedsDwarfCFI =
!IsWin64Prologue && (MMI.hasDebugInfo() || Fn.needsUnwindTableEntry());
unsigned FramePtr = TRI->getFrameRegister(MF);
const unsigned MachineFramePtr =
STI.isTarget64BitILP32()
? getX86SubSuperRegister(FramePtr, 64) : FramePtr;
unsigned BasePtr = TRI->getBaseRegister();
bool HasWinCFI = false;
// Debug location must be unknown since the first debug location is used
// to determine the end of the prologue.
DebugLoc DL;
// Add RETADDR move area to callee saved frame size.
int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
if (TailCallReturnAddrDelta && IsWin64Prologue)
report_fatal_error("Can't handle guaranteed tail call under win64 yet");
if (TailCallReturnAddrDelta < 0)
X86FI->setCalleeSavedFrameSize(
X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);
bool UseStackProbe = !STI.getTargetLowering()->getStackProbeSymbolName(MF).empty();
// The default stack probe size is 4096 if the function has no
// "stack-probe-size" attribute.
unsigned StackProbeSize = 4096;
if (Fn.hasFnAttribute("stack-probe-size"))
Fn.getFnAttribute("stack-probe-size")
.getValueAsString()
.getAsInteger(0, StackProbeSize);
// Re-align the stack on 64-bit if the x86-interrupt calling convention is
// used and an error code was pushed, since the x86-64 ABI requires a 16-byte
// stack alignment.
if (Fn.getCallingConv() == CallingConv::X86_INTR && Is64Bit &&
Fn.arg_size() == 2) {
StackSize += 8;
MFI.setStackSize(StackSize);
emitSPUpdate(MBB, MBBI, DL, -8, /*InEpilogue=*/false);
}
// If this is x86-64, the Red Zone is not disabled, we are a leaf function,
// we use up to 128 bytes of stack space, and we don't have a frame pointer,
// calls, or dynamic allocas, then we do not need to adjust the stack pointer
// (we fit in the Red Zone). We also check that we don't push and pop from
// the stack.
if (has128ByteRedZone(MF) &&
!TRI->needsStackRealignment(MF) &&
!MFI.hasVarSizedObjects() && // No dynamic alloca.
!MFI.adjustsStack() && // No calls.
!UseStackProbe && // No stack probes.
!MFI.hasCopyImplyingStackAdjustment() && // Don't push and pop.
!MF.shouldSplitStack()) { // Regular stack
uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
if (HasFP) MinSize += SlotSize;
X86FI->setUsesRedZone(MinSize > 0 || StackSize > 0);
StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
MFI.setStackSize(StackSize);
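// Illustrative example (not in the original source): a leaf function with
// StackSize = 96 and no FP or CSRs has MinSize = 0, so StackSize becomes
// max(0, 0) = 0 and the locals live entirely in the red zone; with
// StackSize = 160 it becomes max(0, 160 - 128) = 32.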
}
// Insert stack pointer adjustment for later moving of the return addr. Only
// applies to tail call optimized functions where the callee argument stack
// size is bigger than the caller's.
if (TailCallReturnAddrDelta < 0) {
BuildStackAdjustment(MBB, MBBI, DL, TailCallReturnAddrDelta,
/*InEpilogue=*/false)
.setMIFlag(MachineInstr::FrameSetup);
}
// Mapping for machine moves:
//
// DST: VirtualFP AND
// SRC: VirtualFP => DW_CFA_def_cfa_offset
// ELSE => DW_CFA_def_cfa
//
// SRC: VirtualFP AND
// DST: Register => DW_CFA_def_cfa_register
//
// ELSE
// OFFSET < 0 => DW_CFA_offset_extended_sf
// REG < 64 => DW_CFA_offset + Reg
// ELSE => DW_CFA_offset_extended
uint64_t NumBytes = 0;
int stackGrowth = -SlotSize;
// Find the funclet establisher parameter
unsigned Establisher = X86::NoRegister;
if (IsClrFunclet)
Establisher = Uses64BitFramePtr ? X86::RCX : X86::ECX;
else if (IsFunclet)
Establisher = Uses64BitFramePtr ? X86::RDX : X86::EDX;
if (IsWin64Prologue && IsFunclet && !IsClrFunclet) {
// Immediately spill establisher into the home slot.
// The runtime cares about this.
// MOV64mr %rdx, 16(%rsp)
unsigned MOVmr = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MOVmr)), StackPtr, true, 16)
.addReg(Establisher)
.setMIFlag(MachineInstr::FrameSetup);
MBB.addLiveIn(Establisher);
}
if (HasFP) {
assert(MF.getRegInfo().isReserved(MachineFramePtr) && "FP reserved");
// Calculate required stack adjustment.
uint64_t FrameSize = StackSize - SlotSize;
// If required, include space for an extra hidden slot for stashing the base pointer.
if (X86FI->getRestoreBasePointer())
FrameSize += SlotSize;
NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();
// Callee-saved registers are pushed on stack before the stack is realigned.
if (TRI->needsStackRealignment(MF) && !IsWin64Prologue)
NumBytes = alignTo(NumBytes, MaxAlign);
// Save EBP/RBP into the appropriate stack slot.
BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
.addReg(MachineFramePtr, RegState::Kill)
.setMIFlag(MachineInstr::FrameSetup);
if (NeedsDwarfCFI) {
// Mark the place where EBP/RBP was saved.
// Define the current CFA rule to use the provided offset.
assert(StackSize);
BuildCFI(MBB, MBBI, DL,
MCCFIInstruction::createDefCfaOffset(nullptr, 2 * stackGrowth));
// Change the rule for the FramePtr to be an "offset" rule.
unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createOffset(
nullptr, DwarfFramePtr, 2 * stackGrowth));
}
if (NeedsWinCFI) {
HasWinCFI = true;
BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
.addImm(FramePtr)
.setMIFlag(MachineInstr::FrameSetup);
}
if (!IsWin64Prologue && !IsFunclet) {
// Update EBP with the new base value.
BuildMI(MBB, MBBI, DL,
TII.get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr),
FramePtr)
.addReg(StackPtr)
.setMIFlag(MachineInstr::FrameSetup);
if (NeedsDwarfCFI) {
// Mark effective beginning of when frame pointer becomes valid.
// Define the current CFA to use the EBP/RBP register.
unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createDefCfaRegister(
nullptr, DwarfFramePtr));
}
if (NeedsWinFPO) {
// .cv_fpo_setframe $FramePtr
HasWinCFI = true;
BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame))
.addImm(FramePtr)
.addImm(0)
.setMIFlag(MachineInstr::FrameSetup);
}
}
} else {
assert(!IsFunclet && "funclets without FPs not yet implemented");
NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
}
// Update the offset adjustment, which is mainly used by CodeView to translate
// from ESP to VFRAME relative local variable offsets.
if (!IsFunclet) {
if (HasFP && TRI->needsStackRealignment(MF))
MFI.setOffsetAdjustment(-NumBytes);
else
MFI.setOffsetAdjustment(-StackSize);
}
// For EH funclets, only allocate enough space for outgoing calls. Save the
// NumBytes value that we would've used for the parent frame.
unsigned ParentFrameNumBytes = NumBytes;
if (IsFunclet)
NumBytes = getWinEHFuncletFrameSize(MF);
// Skip the callee-saved push instructions.
bool PushedRegs = false;
int StackOffset = 2 * stackGrowth;
while (MBBI != MBB.end() &&
MBBI->getFlag(MachineInstr::FrameSetup) &&
(MBBI->getOpcode() == X86::PUSH32r ||
MBBI->getOpcode() == X86::PUSH64r)) {
PushedRegs = true;
unsigned Reg = MBBI->getOperand(0).getReg();
++MBBI;
if (!HasFP && NeedsDwarfCFI) {
// Mark callee-saved push instruction.
// Define the current CFA rule to use the provided offset.
assert(StackSize);
BuildCFI(MBB, MBBI, DL,
MCCFIInstruction::createDefCfaOffset(nullptr, StackOffset));
StackOffset += stackGrowth;
}
if (NeedsWinCFI) {
HasWinCFI = true;
BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
.addImm(Reg)
.setMIFlag(MachineInstr::FrameSetup);
}
}
// Realign stack after we pushed callee-saved registers (so that we'll be
// able to calculate their offsets from the frame pointer).
// Don't do this for Win64; it needs to realign the stack after the prologue.
if (!IsWin64Prologue && !IsFunclet && TRI->needsStackRealignment(MF)) {
assert(HasFP && "There should be a frame pointer if stack is realigned.");
BuildStackAlignAND(MBB, MBBI, DL, StackPtr, MaxAlign);
if (NeedsWinCFI) {
HasWinCFI = true;
BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlign))
.addImm(MaxAlign)
.setMIFlag(MachineInstr::FrameSetup);
}
}
// If there is a SUB32ri of ESP immediately before this instruction, merge
// the two. This can be the case when tail call elimination is enabled and
// the callee has more arguments than the caller.
NumBytes -= mergeSPUpdates(MBB, MBBI, true);
// Adjust stack pointer: ESP -= numbytes.
// Windows and cygwin/mingw require a prologue helper routine when allocating
// more than 4K bytes on the stack. Windows uses __chkstk and cygwin/mingw
// uses __alloca. __alloca and the 32-bit version of __chkstk will probe the
// stack and adjust the stack pointer in one go. The 64-bit version of
// __chkstk is only responsible for probing the stack. The 64-bit prologue is
// responsible for adjusting the stack pointer. Touching the stack at 4K
// increments is necessary to ensure that the guard pages used by the OS
// virtual memory manager are allocated in correct sequence.
uint64_t AlignedNumBytes = NumBytes;
if (IsWin64Prologue && !IsFunclet && TRI->needsStackRealignment(MF))
AlignedNumBytes = alignTo(AlignedNumBytes, MaxAlign);
if (AlignedNumBytes >= StackProbeSize && UseStackProbe) {
assert(!X86FI->getUsesRedZone() &&
"The Red Zone is not accounted for in stack probes");
// Check whether EAX is livein for this block.
bool isEAXAlive = isEAXLiveIn(MBB);
if (isEAXAlive) {
if (Is64Bit) {
// Save RAX
BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64r))
.addReg(X86::RAX, RegState::Kill)
.setMIFlag(MachineInstr::FrameSetup);
} else {
// Save EAX
BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
.addReg(X86::EAX, RegState::Kill)
.setMIFlag(MachineInstr::FrameSetup);
}
}
if (Is64Bit) {
// Handle the 64-bit Windows ABI case where we need to call __chkstk.
// Function prologue is responsible for adjusting the stack pointer.
int Alloc = isEAXAlive ? NumBytes - 8 : NumBytes;
if (isUInt<32>(Alloc)) {
BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
.addImm(Alloc)
.setMIFlag(MachineInstr::FrameSetup);
} else if (isInt<32>(Alloc)) {
BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri32), X86::RAX)
.addImm(Alloc)
.setMIFlag(MachineInstr::FrameSetup);
} else {
BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::RAX)
.addImm(Alloc)
.setMIFlag(MachineInstr::FrameSetup);
}
} else {
// If EAX is live, allocate NumBytes-4 bytes on the stack; the 4 bytes
// already allocated by the earlier PUSH are used for EAX.
BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
.addImm(isEAXAlive ? NumBytes - 4 : NumBytes)
.setMIFlag(MachineInstr::FrameSetup);
}
// Call __chkstk, __chkstk_ms, or __alloca.
emitStackProbe(MF, MBB, MBBI, DL, true);
if (isEAXAlive) {
// Restore RAX/EAX
MachineInstr *MI;
if (Is64Bit)
MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV64rm), X86::RAX),
StackPtr, false, NumBytes - 8);
else
MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm), X86::EAX),
StackPtr, false, NumBytes - 4);
MI->setFlag(MachineInstr::FrameSetup);
MBB.insert(MBBI, MI);
}
} else if (NumBytes) {
emitSPUpdate(MBB, MBBI, DL, -(int64_t)NumBytes, /*InEpilogue=*/false);
}
if (NeedsWinCFI && NumBytes) {
HasWinCFI = true;
BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlloc))
.addImm(NumBytes)
.setMIFlag(MachineInstr::FrameSetup);
}
int SEHFrameOffset = 0;
unsigned SPOrEstablisher;
if (IsFunclet) {
if (IsClrFunclet) {
// The establisher parameter passed to a CLR funclet is actually a pointer
// to the (mostly empty) frame of its nearest enclosing funclet; we have
// to find the root function establisher frame by loading the PSPSym from
// the intermediate frame.
unsigned PSPSlotOffset = getPSPSlotOffsetFromSP(MF);
MachinePointerInfo NoInfo;
MBB.addLiveIn(Establisher);
addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rm), Establisher),
Establisher, false, PSPSlotOffset)
.addMemOperand(MF.getMachineMemOperand(
NoInfo, MachineMemOperand::MOLoad, SlotSize, SlotSize));
// Save the root establisher back into the current funclet's (mostly
// empty) frame, in case a sub-funclet or the GC needs it.
addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mr)), StackPtr,
false, PSPSlotOffset)
.addReg(Establisher)
.addMemOperand(
MF.getMachineMemOperand(NoInfo, MachineMemOperand::MOStore |
MachineMemOperand::MOVolatile,
SlotSize, SlotSize));
}
SPOrEstablisher = Establisher;
} else {
SPOrEstablisher = StackPtr;
}
if (IsWin64Prologue && HasFP) {
// Set RBP to a small fixed offset from RSP. In the funclet case, we base
// this calculation on the incoming establisher, which holds the value of
// RSP from the parent frame at the end of the prologue.
SEHFrameOffset = calculateSetFPREG(ParentFrameNumBytes);
if (SEHFrameOffset)
addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), FramePtr),
SPOrEstablisher, false, SEHFrameOffset);
else
BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rr), FramePtr)
.addReg(SPOrEstablisher);
// If this is not a funclet, emit the CFI describing our frame pointer.
if (NeedsWinCFI && !IsFunclet) {
assert(!NeedsWinFPO && "this setframe incompatible with FPO data");
HasWinCFI = true;
BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame))
.addImm(FramePtr)
.addImm(SEHFrameOffset)
.setMIFlag(MachineInstr::FrameSetup);
if (isAsynchronousEHPersonality(Personality))
MF.getWinEHFuncInfo()->SEHSetFrameOffset = SEHFrameOffset;
}
} else if (IsFunclet && STI.is32Bit()) {
// Reset EBP / ESI to something good for funclets.
MBBI = restoreWin32EHStackPointers(MBB, MBBI, DL);
// If we're a catch funclet, we can be returned to via catchret. Save ESP
// into the registration node so that the runtime will restore it for us.
if (!MBB.isCleanupFuncletEntry()) {
assert(Personality == EHPersonality::MSVC_CXX);
unsigned FrameReg;
int FI = MF.getWinEHFuncInfo()->EHRegNodeFrameIndex;
int64_t EHRegOffset = getFrameIndexReference(MF, FI, FrameReg);
// ESP is the first field, so no extra displacement is needed.
addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32mr)), FrameReg,
false, EHRegOffset)
.addReg(X86::ESP);
}
}
while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup)) {
const MachineInstr &FrameInstr = *MBBI;
++MBBI;
if (NeedsWinCFI) {
int FI;
if (unsigned Reg = TII.isStoreToStackSlot(FrameInstr, FI)) {
if (X86::FR64RegClass.contains(Reg)) {
+ int Offset;
unsigned IgnoredFrameReg;
- int Offset = getFrameIndexReference(MF, FI, IgnoredFrameReg);
- Offset += SEHFrameOffset;
+ if (IsWin64Prologue && IsFunclet)
+ Offset = getWin64EHFrameIndexRef(MF, FI, IgnoredFrameReg);
+ else
+ Offset = getFrameIndexReference(MF, FI, IgnoredFrameReg) +
+ SEHFrameOffset;
HasWinCFI = true;
assert(!NeedsWinFPO && "SEH_SaveXMM incompatible with FPO data");
BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SaveXMM))
.addImm(Reg)
.addImm(Offset)
.setMIFlag(MachineInstr::FrameSetup);
}
}
}
}
if (NeedsWinCFI && HasWinCFI)
BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_EndPrologue))
.setMIFlag(MachineInstr::FrameSetup);
if (FnHasClrFunclet && !IsFunclet) {
// Save the so-called Initial-SP (i.e. the value of the stack pointer
// immediately after the prolog) into the PSPSlot so that funclets
// and the GC can recover it.
unsigned PSPSlotOffset = getPSPSlotOffsetFromSP(MF);
auto PSPInfo = MachinePointerInfo::getFixedStack(
MF, MF.getWinEHFuncInfo()->PSPSymFrameIdx);
addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mr)), StackPtr, false,
PSPSlotOffset)
.addReg(StackPtr)
.addMemOperand(MF.getMachineMemOperand(
PSPInfo, MachineMemOperand::MOStore | MachineMemOperand::MOVolatile,
SlotSize, SlotSize));
}
// Realign stack after we spilled callee-saved registers (so that we'll be
// able to calculate their offsets from the frame pointer).
// Win64 requires aligning the stack after the prologue.
if (IsWin64Prologue && TRI->needsStackRealignment(MF)) {
assert(HasFP && "There should be a frame pointer if stack is realigned.");
BuildStackAlignAND(MBB, MBBI, DL, SPOrEstablisher, MaxAlign);
}
// We already dealt with stack realignment and funclets above.
if (IsFunclet && STI.is32Bit())
return;
// If we need a base pointer, set it up here. It's whatever the value
// of the stack pointer is at this point. Any variable size objects
// will be allocated after this, so we can still use the base pointer
// to reference locals.
if (TRI->hasBasePointer(MF)) {
// Update the base pointer with the current stack pointer.
unsigned Opc = Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr;
BuildMI(MBB, MBBI, DL, TII.get(Opc), BasePtr)
.addReg(SPOrEstablisher)
.setMIFlag(MachineInstr::FrameSetup);
if (X86FI->getRestoreBasePointer()) {
// Stash value of base pointer. Saving RSP instead of EBP shortens
// dependence chain. Used by SjLj EH.
unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)),
FramePtr, true, X86FI->getRestoreBasePointerOffset())
.addReg(SPOrEstablisher)
.setMIFlag(MachineInstr::FrameSetup);
}
if (X86FI->getHasSEHFramePtrSave() && !IsFunclet) {
// Stash the value of the frame pointer relative to the base pointer for
// Win32 EH. Win32 EH does the inverse of the above: it recovers the frame
// pointer from the base pointer rather than the other way around.
unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
unsigned UsedReg;
int Offset =
getFrameIndexReference(MF, X86FI->getSEHFramePtrSaveIndex(), UsedReg);
assert(UsedReg == BasePtr);
addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)), UsedReg, true, Offset)
.addReg(FramePtr)
.setMIFlag(MachineInstr::FrameSetup);
}
}
if (((!HasFP && NumBytes) || PushedRegs) && NeedsDwarfCFI) {
// Mark end of stack pointer adjustment.
if (!HasFP && NumBytes) {
// Define the current CFA rule to use the provided offset.
assert(StackSize);
BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createDefCfaOffset(
nullptr, -StackSize + stackGrowth));
}
// Emit DWARF info specifying the offsets of the callee-saved registers.
emitCalleeSavedFrameMoves(MBB, MBBI, DL);
}
// An x86 interrupt handling function cannot assume anything about the
// direction flag (DF in the EFLAGS register). Clear this flag by emitting a
// "cld" instruction in the prologue of each interrupt handler function.
//
// FIXME: Emit the "cld" instruction only in these cases:
// 1. The interrupt handling function uses any of the "rep" instructions.
// 2. The interrupt handling function calls another function.
//
if (Fn.getCallingConv() == CallingConv::X86_INTR)
BuildMI(MBB, MBBI, DL, TII.get(X86::CLD))
.setMIFlag(MachineInstr::FrameSetup);
// At this point we know if the function has WinCFI or not.
MF.setHasWinCFI(HasWinCFI);
}
bool X86FrameLowering::canUseLEAForSPInEpilogue(
const MachineFunction &MF) const {
// We can't use LEA instructions for adjusting the stack pointer if we don't
// have a frame pointer in the Win64 ABI. Only ADD instructions may be used
// to deallocate the stack.
// This means that we can use LEA for SP in two situations:
// 1. We *aren't* using the Win64 ABI which means we are free to use LEA.
// 2. We *have* a frame pointer which means we are permitted to use LEA.
return !MF.getTarget().getMCAsmInfo()->usesWindowsCFI() || hasFP(MF);
}
static bool isFuncletReturnInstr(MachineInstr &MI) {
switch (MI.getOpcode()) {
case X86::CATCHRET:
case X86::CLEANUPRET:
return true;
default:
return false;
}
llvm_unreachable("impossible");
}
// CLR funclets use a special "Previous Stack Pointer Symbol" slot on the
// stack. It holds a pointer to the bottom of the root function frame. The
// establisher frame pointer passed to a nested funclet may point to the
// (mostly empty) frame of its parent funclet, but it will need to find
// the frame of the root function to access locals. To facilitate this,
// every funclet copies the pointer to the bottom of the root function
// frame into a PSPSym slot in its own (mostly empty) stack frame. Using the
// same offset for the PSPSym in the root function frame that's used in the
// funclets' frames allows each funclet to dynamically accept any ancestor
// frame as its establisher argument (the runtime doesn't guarantee the
// immediate parent for some reason lost to history), and also allows the GC,
// which uses the PSPSym for some bookkeeping, to find it in any funclet's
// frame with only a single offset reported for the entire method.
unsigned
X86FrameLowering::getPSPSlotOffsetFromSP(const MachineFunction &MF) const {
const WinEHFuncInfo &Info = *MF.getWinEHFuncInfo();
unsigned SPReg;
int Offset = getFrameIndexReferencePreferSP(MF, Info.PSPSymFrameIdx, SPReg,
/*IgnoreSPUpdates*/ true);
assert(Offset >= 0 && SPReg == TRI->getStackRegister());
return static_cast<unsigned>(Offset);
}
unsigned
X86FrameLowering::getWinEHFuncletFrameSize(const MachineFunction &MF) const {
+ const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
// This is the size of the pushed CSRs.
- unsigned CSSize =
- MF.getInfo<X86MachineFunctionInfo>()->getCalleeSavedFrameSize();
+ unsigned CSSize = X86FI->getCalleeSavedFrameSize();
+ // This is the size of callee saved XMMs.
+ const auto& WinEHXMMSlotInfo = X86FI->getWinEHXMMSlotInfo();
+ unsigned XMMSize = WinEHXMMSlotInfo.size() *
+ TRI->getSpillSize(X86::VR128RegClass);
// This is the amount of stack a funclet needs to allocate.
unsigned UsedSize;
EHPersonality Personality =
classifyEHPersonality(MF.getFunction().getPersonalityFn());
if (Personality == EHPersonality::CoreCLR) {
// CLR funclets need to hold enough space to include the PSPSym, at the
// same offset from the stack pointer (immediately after the prolog) as it
// resides at in the main function.
UsedSize = getPSPSlotOffsetFromSP(MF) + SlotSize;
} else {
// Other funclets just need enough stack for outgoing call arguments.
UsedSize = MF.getFrameInfo().getMaxCallFrameSize();
}
// RBP is not included in the callee saved register block. After pushing RBP,
// everything is 16 byte aligned. Everything we allocate before an outgoing
// call must also be 16 byte aligned.
unsigned FrameSizeMinusRBP = alignTo(CSSize + UsedSize, getStackAlignment());
// Subtract out the size of the callee saved registers. This is how much stack
// each funclet will allocate.
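// Illustrative numbers (not from the original): CSSize = 24, XMMSize = 32,
// UsedSize = 32 and 16-byte stack alignment give FrameSizeMinusRBP =
// alignTo(56, 16) = 64, so the funclet allocates 64 + 32 - 24 = 72 bytes.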
- return FrameSizeMinusRBP - CSSize;
+ return FrameSizeMinusRBP + XMMSize - CSSize;
}
static bool isTailCallOpcode(unsigned Opc) {
return Opc == X86::TCRETURNri || Opc == X86::TCRETURNdi ||
Opc == X86::TCRETURNmi ||
Opc == X86::TCRETURNri64 || Opc == X86::TCRETURNdi64 ||
Opc == X86::TCRETURNmi64;
}
void X86FrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
const MachineFrameInfo &MFI = MF.getFrameInfo();
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
MachineBasicBlock::iterator Terminator = MBB.getFirstTerminator();
MachineBasicBlock::iterator MBBI = Terminator;
DebugLoc DL;
if (MBBI != MBB.end())
DL = MBBI->getDebugLoc();
// Standard x86_64 and NaCl use 64-bit frame/stack pointers; x32 uses 32-bit.
const bool Is64BitILP32 = STI.isTarget64BitILP32();
unsigned FramePtr = TRI->getFrameRegister(MF);
unsigned MachineFramePtr =
Is64BitILP32 ? getX86SubSuperRegister(FramePtr, 64) : FramePtr;
bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
bool NeedsWin64CFI =
IsWin64Prologue && MF.getFunction().needsUnwindTableEntry();
bool IsFunclet = MBBI == MBB.end() ? false : isFuncletReturnInstr(*MBBI);
// Get the number of bytes to allocate from the FrameInfo.
uint64_t StackSize = MFI.getStackSize();
uint64_t MaxAlign = calculateMaxStackAlign(MF);
unsigned CSSize = X86FI->getCalleeSavedFrameSize();
bool HasFP = hasFP(MF);
uint64_t NumBytes = 0;
bool NeedsDwarfCFI =
(!MF.getTarget().getTargetTriple().isOSDarwin() &&
!MF.getTarget().getTargetTriple().isOSWindows()) &&
(MF.getMMI().hasDebugInfo() || MF.getFunction().needsUnwindTableEntry());
if (IsFunclet) {
assert(HasFP && "EH funclets without FP not yet implemented");
NumBytes = getWinEHFuncletFrameSize(MF);
} else if (HasFP) {
// Calculate required stack adjustment.
uint64_t FrameSize = StackSize - SlotSize;
NumBytes = FrameSize - CSSize;
// Callee-saved registers were pushed on stack before the stack was
// realigned.
if (TRI->needsStackRealignment(MF) && !IsWin64Prologue)
NumBytes = alignTo(FrameSize, MaxAlign);
} else {
NumBytes = StackSize - CSSize;
}
uint64_t SEHStackAllocAmt = NumBytes;
if (HasFP) {
// Pop EBP.
BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::POP64r : X86::POP32r),
MachineFramePtr)
.setMIFlag(MachineInstr::FrameDestroy);
if (NeedsDwarfCFI) {
unsigned DwarfStackPtr =
TRI->getDwarfRegNum(Is64Bit ? X86::RSP : X86::ESP, true);
BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createDefCfa(
nullptr, DwarfStackPtr, -SlotSize));
--MBBI;
}
}
MachineBasicBlock::iterator FirstCSPop = MBBI;
// Skip the callee-saved pop instructions.
while (MBBI != MBB.begin()) {
MachineBasicBlock::iterator PI = std::prev(MBBI);
unsigned Opc = PI->getOpcode();
if (Opc != X86::DBG_VALUE && !PI->isTerminator()) {
if ((Opc != X86::POP32r || !PI->getFlag(MachineInstr::FrameDestroy)) &&
(Opc != X86::POP64r || !PI->getFlag(MachineInstr::FrameDestroy)))
break;
FirstCSPop = PI;
}
--MBBI;
}
MBBI = FirstCSPop;
if (IsFunclet && Terminator->getOpcode() == X86::CATCHRET)
emitCatchRetReturnValue(MBB, FirstCSPop, &*Terminator);
if (MBBI != MBB.end())
DL = MBBI->getDebugLoc();
// If there is an ADD32ri or SUB32ri of ESP immediately before this
// instruction, merge the two instructions.
if (NumBytes || MFI.hasVarSizedObjects())
NumBytes += mergeSPUpdates(MBB, MBBI, true);
// If dynamic alloca is used, then reset esp to point to the last callee-saved
// slot before popping them off! The same applies when the stack was
// realigned. Don't do this if this was a funclet epilogue, since the funclets
// will not do realignment or dynamic stack allocation.
if ((TRI->needsStackRealignment(MF) || MFI.hasVarSizedObjects()) &&
!IsFunclet) {
if (TRI->needsStackRealignment(MF))
MBBI = FirstCSPop;
unsigned SEHFrameOffset = calculateSetFPREG(SEHStackAllocAmt);
uint64_t LEAAmount =
IsWin64Prologue ? SEHStackAllocAmt - SEHFrameOffset : -CSSize;
// There are only two legal forms of epilogue:
// - add SEHAllocationSize, %rsp
// - lea SEHAllocationSize(%FramePtr), %rsp
//
// 'mov %FramePtr, %rsp' will not be recognized as an epilogue sequence.
// However, we may use this sequence if we have a frame pointer because the
// effects of the prologue can safely be undone.
if (LEAAmount != 0) {
unsigned Opc = getLEArOpcode(Uses64BitFramePtr);
addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
FramePtr, false, LEAAmount);
--MBBI;
} else {
unsigned Opc = (Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr);
BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
.addReg(FramePtr);
--MBBI;
}
} else if (NumBytes) {
// Adjust stack pointer back: ESP += numbytes.
emitSPUpdate(MBB, MBBI, DL, NumBytes, /*InEpilogue=*/true);
if (!hasFP(MF) && NeedsDwarfCFI) {
// Define the current CFA rule to use the provided offset.
BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createDefCfaOffset(
nullptr, -CSSize - SlotSize));
}
--MBBI;
}
// The Windows unwinder will not invoke a function's exception handler if the
// IP is either in the prologue or in the epilogue. This behavior causes a
// problem when a call immediately precedes an epilogue, because the return
// address points into the epilogue. To cope with that, we insert an epilogue
// marker here, then replace it with a 'nop' if it ends up immediately after
// a CALL in the final emitted code.
if (NeedsWin64CFI && MF.hasWinCFI())
BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_Epilogue));
if (!hasFP(MF) && NeedsDwarfCFI) {
MBBI = FirstCSPop;
int64_t Offset = -CSSize - SlotSize;
// Mark callee-saved pop instruction.
// Define the current CFA rule to use the provided offset.
while (MBBI != MBB.end()) {
MachineBasicBlock::iterator PI = MBBI;
unsigned Opc = PI->getOpcode();
++MBBI;
if (Opc == X86::POP32r || Opc == X86::POP64r) {
Offset += SlotSize;
BuildCFI(MBB, MBBI, DL,
MCCFIInstruction::createDefCfaOffset(nullptr, Offset));
}
}
}
if (Terminator == MBB.end() || !isTailCallOpcode(Terminator->getOpcode())) {
// Add the return addr area delta back since we are not tail calling.
int Offset = -1 * X86FI->getTCReturnAddrDelta();
assert(Offset >= 0 && "TCDelta should never be positive");
if (Offset) {
// Check for possible merge with preceding ADD instruction.
Offset += mergeSPUpdates(MBB, Terminator, true);
emitSPUpdate(MBB, Terminator, DL, Offset, /*InEpilogue=*/true);
}
}
}
int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
unsigned &FrameReg) const {
const MachineFrameInfo &MFI = MF.getFrameInfo();
bool IsFixed = MFI.isFixedObjectIndex(FI);
// We can't calculate offset from frame pointer if the stack is realigned,
// so enforce usage of stack/base pointer. The base pointer is used when we
// have dynamic allocas in addition to dynamic realignment.
if (TRI->hasBasePointer(MF))
FrameReg = IsFixed ? TRI->getFramePtr() : TRI->getBaseRegister();
else if (TRI->needsStackRealignment(MF))
FrameReg = IsFixed ? TRI->getFramePtr() : TRI->getStackRegister();
else
FrameReg = TRI->getFrameRegister(MF);
// Offset will hold the offset from the stack pointer at function entry to the
// object.
// We need to factor in additional offsets applied during the prologue to the
// frame, base, and stack pointer depending on which is used.
int Offset = MFI.getObjectOffset(FI) - getOffsetOfLocalArea();
const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
unsigned CSSize = X86FI->getCalleeSavedFrameSize();
uint64_t StackSize = MFI.getStackSize();
bool HasFP = hasFP(MF);
bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
int64_t FPDelta = 0;
// In an x86 interrupt, remove the offset we added to account for the return
// address from any stack object allocated in the caller's frame. Interrupts
// do not have a standard return address. Fixed objects in the current frame,
// such as SSE register spills, should not get this treatment.
if (MF.getFunction().getCallingConv() == CallingConv::X86_INTR &&
Offset >= 0) {
Offset += getOffsetOfLocalArea();
}
if (IsWin64Prologue) {
assert(!MFI.hasCalls() || (StackSize % 16) == 8);
// Calculate required stack adjustment.
uint64_t FrameSize = StackSize - SlotSize;
// If required, include space for an extra hidden slot for stashing the base pointer.
if (X86FI->getRestoreBasePointer())
FrameSize += SlotSize;
uint64_t NumBytes = FrameSize - CSSize;
uint64_t SEHFrameOffset = calculateSetFPREG(NumBytes);
if (FI && FI == X86FI->getFAIndex())
return -SEHFrameOffset;
// FPDelta is the offset from the "traditional" FP location of the old base
// pointer followed by return address and the location required by the
// restricted Win64 prologue.
// Add FPDelta to all offsets below that go through the frame pointer.
FPDelta = FrameSize - SEHFrameOffset;
assert((!MFI.hasCalls() || (FPDelta % 16) == 0) &&
"FPDelta isn't aligned per the Win64 ABI!");
}
if (TRI->hasBasePointer(MF)) {
assert(HasFP && "VLAs and dynamic stack realign, but no FP?!");
if (FI < 0) {
// Skip the saved EBP.
return Offset + SlotSize + FPDelta;
} else {
assert((-(Offset + StackSize)) % MFI.getObjectAlignment(FI) == 0);
return Offset + StackSize;
}
} else if (TRI->needsStackRealignment(MF)) {
if (FI < 0) {
// Skip the saved EBP.
return Offset + SlotSize + FPDelta;
} else {
assert((-(Offset + StackSize)) % MFI.getObjectAlignment(FI) == 0);
return Offset + StackSize;
}
// FIXME: Support tail calls
} else {
if (!HasFP)
return Offset + StackSize;
// Skip the saved EBP.
Offset += SlotSize;
// Skip the RETADDR move area
int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
if (TailCallReturnAddrDelta < 0)
Offset -= TailCallReturnAddrDelta;
}
return Offset + FPDelta;
}
+int X86FrameLowering::getWin64EHFrameIndexRef(const MachineFunction &MF,
+ int FI, unsigned &FrameReg) const {
+ const MachineFrameInfo &MFI = MF.getFrameInfo();
+ const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
+ const auto& WinEHXMMSlotInfo = X86FI->getWinEHXMMSlotInfo();
+ const auto it = WinEHXMMSlotInfo.find(FI);
+
+ if (it == WinEHXMMSlotInfo.end())
+ return getFrameIndexReference(MF, FI, FrameReg);
+
+ FrameReg = TRI->getStackRegister();
+ return alignDown(MFI.getMaxCallFrameSize(), getStackAlignment()) + it->second;
+}
+
int X86FrameLowering::getFrameIndexReferenceSP(const MachineFunction &MF,
int FI, unsigned &FrameReg,
int Adjustment) const {
const MachineFrameInfo &MFI = MF.getFrameInfo();
FrameReg = TRI->getStackRegister();
return MFI.getObjectOffset(FI) - getOffsetOfLocalArea() + Adjustment;
}
int
X86FrameLowering::getFrameIndexReferencePreferSP(const MachineFunction &MF,
int FI, unsigned &FrameReg,
bool IgnoreSPUpdates) const {
const MachineFrameInfo &MFI = MF.getFrameInfo();
// Does not include any dynamic realign.
const uint64_t StackSize = MFI.getStackSize();
// LLVM arranges the stack as follows:
// ...
// ARG2
// ARG1
// RETADDR
// PUSH RBP <-- RBP points here
// PUSH CSRs
// ~~~~~~~ <-- possible stack realignment (non-win64)
// ...
// STACK OBJECTS
// ... <-- RSP after prologue points here
// ~~~~~~~ <-- possible stack realignment (win64)
//
// if (hasVarSizedObjects()):
// ... <-- "base pointer" (ESI/RBX) points here
// DYNAMIC ALLOCAS
// ... <-- RSP points here
//
// Case 1: In the simple case of no stack realignment and no dynamic
// allocas, both "fixed" stack objects (arguments and CSRs) are addressable
// with fixed offsets from RSP.
//
// Case 2: In the case of stack realignment with no dynamic allocas, fixed
// stack objects are addressed with RBP and regular stack objects with RSP.
//
// Case 3: In the case of dynamic allocas and stack realignment, RSP is used
// to address stack arguments for outgoing calls and nothing else. The "base
// pointer" points to local variables, and RBP points to fixed objects.
//
// In cases 2 and 3, we can only answer for non-fixed stack objects, and the
// answer we give is relative to the SP after the prologue, and not the
// SP in the middle of the function.
if (MFI.isFixedObjectIndex(FI) && TRI->needsStackRealignment(MF) &&
!STI.isTargetWin64())
return getFrameIndexReference(MF, FI, FrameReg);
// If !hasReservedCallFrame the function might have SP adjustment in the
// body. So, even though the offset is statically known, it depends on where
// we are in the function.
if (!IgnoreSPUpdates && !hasReservedCallFrame(MF))
return getFrameIndexReference(MF, FI, FrameReg);
// We don't handle tail calls, and shouldn't be seeing them either.
assert(MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta() >= 0 &&
"we don't handle this case!");
// This is how the math works out:
//
// %rsp grows (i.e. gets lower) left to right. Each box below is
// one word (eight bytes). Obj0 is the stack slot we're trying to
// get to.
//
// ----------------------------------
// | BP | Obj0 | Obj1 | ... | ObjN |
// ----------------------------------
// ^ ^ ^ ^
// A B C E
//
// A is the incoming stack pointer.
// (B - A) is the local area offset (-8 for x86-64) [1]
// (C - A) is the Offset returned by MFI.getObjectOffset for Obj0 [2]
//
// |(E - B)| is the StackSize (absolute value, positive). For a
// stack that grows down, this works out to be (B - E). [3]
//
// E is also the value of %rsp after stack has been set up, and we
// want (C - E) -- the value we can add to %rsp to get to Obj0. Now
// (C - E) == (C - A) - (B - A) + (B - E)
// { Using [1], [2] and [3] above }
// == getObjectOffset - LocalAreaOffset + StackSize
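// Illustrative check with made-up numbers: LocalAreaOffset = -8 (x86-64),
// getObjectOffset(Obj0) = -16, StackSize = 40 gives
// (C - E) = -16 - (-8) + 40 = 32, i.e. Obj0 lives at 32(%rsp).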
return getFrameIndexReferenceSP(MF, FI, FrameReg, StackSize);
}
bool X86FrameLowering::assignCalleeSavedSpillSlots(
MachineFunction &MF, const TargetRegisterInfo *TRI,
std::vector<CalleeSavedInfo> &CSI) const {
MachineFrameInfo &MFI = MF.getFrameInfo();
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
unsigned CalleeSavedFrameSize = 0;
+ unsigned XMMCalleeSavedFrameSize = 0;
+ auto &WinEHXMMSlotInfo = X86FI->getWinEHXMMSlotInfo();
int SpillSlotOffset = getOffsetOfLocalArea() + X86FI->getTCReturnAddrDelta();
int64_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
if (TailCallReturnAddrDelta < 0) {
// create RETURNADDR area
// arg
// arg
// RETADDR
// { ...
// RETADDR area
// ...
// }
// [EBP]
MFI.CreateFixedObject(-TailCallReturnAddrDelta,
TailCallReturnAddrDelta - SlotSize, true);
}
// Spill the BasePtr if it's used.
if (this->TRI->hasBasePointer(MF)) {
// Allocate a spill slot for EBP if we have a base pointer and EH funclets.
if (MF.hasEHFunclets()) {
int FI = MFI.CreateSpillStackObject(SlotSize, SlotSize);
X86FI->setHasSEHFramePtrSave(true);
X86FI->setSEHFramePtrSaveIndex(FI);
}
}
if (hasFP(MF)) {
// emitPrologue always spills the frame register first.
SpillSlotOffset -= SlotSize;
MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
// Since emitPrologue and emitEpilogue will handle spilling and restoring of
// the frame register, we can delete it from CSI list and not have to worry
// about avoiding it later.
unsigned FPReg = TRI->getFrameRegister(MF);
for (unsigned i = 0; i < CSI.size(); ++i) {
if (TRI->regsOverlap(CSI[i].getReg(),FPReg)) {
CSI.erase(CSI.begin() + i);
break;
}
}
}
// Assign slots for GPRs. It increases frame size.
for (unsigned i = CSI.size(); i != 0; --i) {
unsigned Reg = CSI[i - 1].getReg();
if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
continue;
SpillSlotOffset -= SlotSize;
CalleeSavedFrameSize += SlotSize;
int SlotIndex = MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
CSI[i - 1].setFrameIdx(SlotIndex);
}
X86FI->setCalleeSavedFrameSize(CalleeSavedFrameSize);
MFI.setCVBytesOfCalleeSavedRegisters(CalleeSavedFrameSize);
// Assign slots for XMMs.
for (unsigned i = CSI.size(); i != 0; --i) {
unsigned Reg = CSI[i - 1].getReg();
if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
continue;
// If this is a k-register, make sure we look it up via the largest legal type.
MVT VT = MVT::Other;
if (X86::VK16RegClass.contains(Reg))
VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
unsigned Size = TRI->getSpillSize(*RC);
unsigned Align = TRI->getSpillAlignment(*RC);
// ensure alignment
- SpillSlotOffset -= std::abs(SpillSlotOffset) % Align;
+ assert(SpillSlotOffset < 0 && "SpillSlotOffset should always be < 0 on X86");
+ SpillSlotOffset = -alignTo(-SpillSlotOffset, Align);
+
// spill into slot
SpillSlotOffset -= Size;
int SlotIndex = MFI.CreateFixedSpillStackObject(Size, SpillSlotOffset);
CSI[i - 1].setFrameIdx(SlotIndex);
MFI.ensureMaxAlignment(Align);
+
+ // Save the start offset and size of XMM in stack frame for funclets.
+ if (X86::VR128RegClass.contains(Reg)) {
+ WinEHXMMSlotInfo[SlotIndex] = XMMCalleeSavedFrameSize;
+ XMMCalleeSavedFrameSize += Size;
+ }
}
return true;
}
bool X86FrameLowering::spillCalleeSavedRegisters(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI,
const TargetRegisterInfo *TRI) const {
DebugLoc DL = MBB.findDebugLoc(MI);
// Don't save CSRs in 32-bit EH funclets. The caller saves EBX, EBP, ESI, EDI
// for us, and there are no XMM CSRs on Win32.
if (MBB.isEHFuncletEntry() && STI.is32Bit() && STI.isOSWindows())
return true;
// Push GPRs. It increases frame size.
const MachineFunction &MF = *MBB.getParent();
unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
for (unsigned i = CSI.size(); i != 0; --i) {
unsigned Reg = CSI[i - 1].getReg();
if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
continue;
const MachineRegisterInfo &MRI = MF.getRegInfo();
bool isLiveIn = MRI.isLiveIn(Reg);
if (!isLiveIn)
MBB.addLiveIn(Reg);
// Decide whether we can add a kill flag to the use.
bool CanKill = !isLiveIn;
// Check if any subregister is live-in
if (CanKill) {
for (MCRegAliasIterator AReg(Reg, TRI, false); AReg.isValid(); ++AReg) {
if (MRI.isLiveIn(*AReg)) {
CanKill = false;
break;
}
}
}
// Do not set a kill flag on values that are also marked as live-in. This
// happens with the @llvm.returnaddress intrinsic and with arguments
// passed in callee saved registers.
// Omitting the kill flags is conservatively correct even if the live-in
// is not used after all.
BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, getKillRegState(CanKill))
.setMIFlag(MachineInstr::FrameSetup);
}
// Spill the XMM regs. X86 has no push/pop instructions for XMM registers,
// so spill them to the stack frame instead.
for (unsigned i = CSI.size(); i != 0; --i) {
unsigned Reg = CSI[i-1].getReg();
if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
continue;
// If this is a k-register, make sure we look it up via the largest legal type.
MVT VT = MVT::Other;
if (X86::VK16RegClass.contains(Reg))
VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;
// Add the callee-saved register as live-in. It's killed at the spill.
MBB.addLiveIn(Reg);
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
TII.storeRegToStackSlot(MBB, MI, Reg, true, CSI[i - 1].getFrameIdx(), RC,
TRI);
--MI;
MI->setFlag(MachineInstr::FrameSetup);
++MI;
}
return true;
}
void X86FrameLowering::emitCatchRetReturnValue(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
MachineInstr *CatchRet) const {
// SEH shouldn't use catchret.
assert(!isAsynchronousEHPersonality(classifyEHPersonality(
MBB.getParent()->getFunction().getPersonalityFn())) &&
"SEH should not use CATCHRET");
DebugLoc DL = CatchRet->getDebugLoc();
MachineBasicBlock *CatchRetTarget = CatchRet->getOperand(0).getMBB();
// Fill EAX/RAX with the address of the target block.
if (STI.is64Bit()) {
// LEA64r CatchRetTarget(%rip), %rax
BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), X86::RAX)
.addReg(X86::RIP)
.addImm(0)
.addReg(0)
.addMBB(CatchRetTarget)
.addReg(0);
} else {
// MOV32ri $CatchRetTarget, %eax
BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
.addMBB(CatchRetTarget);
}
// Record that we've taken the address of CatchRetTarget and no longer just
// reference it in a terminator.
CatchRetTarget->setHasAddressTaken();
}
bool X86FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
std::vector<CalleeSavedInfo> &CSI,
const TargetRegisterInfo *TRI) const {
if (CSI.empty())
return false;
if (MI != MBB.end() && isFuncletReturnInstr(*MI) && STI.isOSWindows()) {
// Don't restore CSRs in 32-bit EH funclets. Matches
// spillCalleeSavedRegisters.
if (STI.is32Bit())
return true;
// Don't restore CSRs before an SEH catchret. SEH except blocks do not form
// funclets. emitEpilogue transforms these to normal jumps.
if (MI->getOpcode() == X86::CATCHRET) {
const Function &F = MBB.getParent()->getFunction();
bool IsSEH = isAsynchronousEHPersonality(
classifyEHPersonality(F.getPersonalityFn()));
if (IsSEH)
return true;
}
}
DebugLoc DL = MBB.findDebugLoc(MI);
// Reload XMMs from stack frame.
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
unsigned Reg = CSI[i].getReg();
if (X86::GR64RegClass.contains(Reg) ||
X86::GR32RegClass.contains(Reg))
continue;
// If this is a k-register, make sure we look it up via the largest legal type.
MVT VT = MVT::Other;
if (X86::VK16RegClass.contains(Reg))
VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
TII.loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(), RC, TRI);
}
// POP GPRs.
unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
unsigned Reg = CSI[i].getReg();
if (!X86::GR64RegClass.contains(Reg) &&
!X86::GR32RegClass.contains(Reg))
continue;
BuildMI(MBB, MI, DL, TII.get(Opc), Reg)
.setMIFlag(MachineInstr::FrameDestroy);
}
return true;
}
void X86FrameLowering::determineCalleeSaves(MachineFunction &MF,
BitVector &SavedRegs,
RegScavenger *RS) const {
TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
// Spill the BasePtr if it's used.
if (TRI->hasBasePointer(MF)){
unsigned BasePtr = TRI->getBaseRegister();
if (STI.isTarget64BitILP32())
BasePtr = getX86SubSuperRegister(BasePtr, 64);
SavedRegs.set(BasePtr);
}
}
static bool
HasNestArgument(const MachineFunction *MF) {
const Function &F = MF->getFunction();
for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end();
I != E; I++) {
if (I->hasNestAttr())
return true;
}
return false;
}
/// GetScratchRegister - Get a temp register for performing work in the
/// segmented stack and the Erlang/HiPE stack prologue. Depending on platform
/// and the properties of the function either one or two registers will be
/// needed. Set primary to true for the first register, false for the second.
static unsigned
GetScratchRegister(bool Is64Bit, bool IsLP64, const MachineFunction &MF, bool Primary) {
CallingConv::ID CallingConvention = MF.getFunction().getCallingConv();
// Erlang stuff.
if (CallingConvention == CallingConv::HiPE) {
if (Is64Bit)
return Primary ? X86::R14 : X86::R13;
else
return Primary ? X86::EBX : X86::EDI;
}
if (Is64Bit) {
if (IsLP64)
return Primary ? X86::R11 : X86::R12;
else
return Primary ? X86::R11D : X86::R12D;
}
bool IsNested = HasNestArgument(&MF);
if (CallingConvention == CallingConv::X86_FastCall ||
CallingConvention == CallingConv::Fast) {
if (IsNested)
report_fatal_error("Segmented stacks does not support fastcall with "
"nested function.");
return Primary ? X86::EAX : X86::ECX;
}
if (IsNested)
return Primary ? X86::EDX : X86::EAX;
return Primary ? X86::ECX : X86::EAX;
}
// The stack limit in the TCB is set to this many bytes above the actual stack
// limit.
static const uint64_t kSplitStackAvailable = 256;
void X86FrameLowering::adjustForSegmentedStacks(
MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
MachineFrameInfo &MFI = MF.getFrameInfo();
uint64_t StackSize;
unsigned TlsReg, TlsOffset;
DebugLoc DL;
// To support shrink-wrapping we would need to insert the new blocks
// at the right place and update the branches to PrologueMBB.
assert(&(*MF.begin()) == &PrologueMBB && "Shrink-wrapping not supported yet");
unsigned ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
"Scratch register is live-in");
if (MF.getFunction().isVarArg())
report_fatal_error("Segmented stacks do not support vararg functions.");
if (!STI.isTargetLinux() && !STI.isTargetDarwin() && !STI.isTargetWin32() &&
!STI.isTargetWin64() && !STI.isTargetFreeBSD() &&
!STI.isTargetDragonFly())
report_fatal_error("Segmented stacks not supported on this platform.");
// Eventually StackSize will be calculated by a link-time pass, which will
// also decide whether checking code needs to be injected into this particular
// prologue.
StackSize = MFI.getStackSize();
// Do not generate a prologue for leaf functions with a stack of size zero.
// For non-leaf functions we have to allow for the possibility that the
// call is to a non-split function, as in PR37807. This function could also
// take the address of a non-split function. When the linker tries to adjust
// its non-existent prologue, it would fail with an error. Mark the object
// file so that such failures are not errors. See this Go language bug report:
// https://go-review.googlesource.com/c/go/+/148819/
if (StackSize == 0 && !MFI.hasTailCall()) {
MF.getMMI().setHasNosplitStack(true);
return;
}
MachineBasicBlock *allocMBB = MF.CreateMachineBasicBlock();
MachineBasicBlock *checkMBB = MF.CreateMachineBasicBlock();
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
bool IsNested = false;
// We need to know if the function has a nest argument only in 64 bit mode.
if (Is64Bit)
IsNested = HasNestArgument(&MF);
// The MOV R10, RAX needs to be in a different block, since the RET we emit in
// allocMBB needs to be the last (terminating) instruction.
for (const auto &LI : PrologueMBB.liveins()) {
allocMBB->addLiveIn(LI);
checkMBB->addLiveIn(LI);
}
if (IsNested)
allocMBB->addLiveIn(IsLP64 ? X86::R10 : X86::R10D);
MF.push_front(allocMBB);
MF.push_front(checkMBB);
// When the frame size is less than 256 bytes, we just compare the stack
// boundary directly to the value of the stack pointer, per gcc.
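// A rough sketch of the two shapes this produces on x86-64 Linux
// (TlsOffset 0x70, scratch register r11; illustrative, not emitted
// verbatim):
//   StackSize < 256:   cmp %fs:0x70, %rsp
//   StackSize >= 256:  lea -StackSize(%rsp), %r11
//                      cmp %fs:0x70, %r11
// followed by a ja to the function body when there is enough room.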
bool CompareStackPointer = StackSize < kSplitStackAvailable;
// Read the limit of the current stacklet from the stack_guard location.
if (Is64Bit) {
if (STI.isTargetLinux()) {
TlsReg = X86::FS;
TlsOffset = IsLP64 ? 0x70 : 0x40;
} else if (STI.isTargetDarwin()) {
TlsReg = X86::GS;
TlsOffset = 0x60 + 90*8; // See pthread_machdep.h. Steal TLS slot 90.
} else if (STI.isTargetWin64()) {
TlsReg = X86::GS;
TlsOffset = 0x28; // pvArbitrary, reserved for application use
} else if (STI.isTargetFreeBSD()) {
TlsReg = X86::FS;
TlsOffset = 0x18;
} else if (STI.isTargetDragonFly()) {
TlsReg = X86::FS;
TlsOffset = 0x20; // use tls_tcb.tcb_segstack
} else {
report_fatal_error("Segmented stacks not supported on this platform.");
}
if (CompareStackPointer)
ScratchReg = IsLP64 ? X86::RSP : X86::ESP;
else
BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::LEA64r : X86::LEA64_32r), ScratchReg).addReg(X86::RSP)
.addImm(1).addReg(0).addImm(-StackSize).addReg(0);
BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::CMP64rm : X86::CMP32rm)).addReg(ScratchReg)
.addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg);
} else {
if (STI.isTargetLinux()) {
TlsReg = X86::GS;
TlsOffset = 0x30;
} else if (STI.isTargetDarwin()) {
TlsReg = X86::GS;
TlsOffset = 0x48 + 90*4;
} else if (STI.isTargetWin32()) {
TlsReg = X86::FS;
TlsOffset = 0x14; // pvArbitrary, reserved for application use
} else if (STI.isTargetDragonFly()) {
TlsReg = X86::FS;
TlsOffset = 0x10; // use tls_tcb.tcb_segstack
} else if (STI.isTargetFreeBSD()) {
report_fatal_error("Segmented stacks not supported on FreeBSD i386.");
} else {
report_fatal_error("Segmented stacks not supported on this platform.");
}
if (CompareStackPointer)
ScratchReg = X86::ESP;
else
BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)
.addImm(1).addReg(0).addImm(-StackSize).addReg(0);
if (STI.isTargetLinux() || STI.isTargetWin32() || STI.isTargetWin64() ||
STI.isTargetDragonFly()) {
BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg)
.addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
} else if (STI.isTargetDarwin()) {
// TlsOffset doesn't fit into a mod r/m byte so we need an extra register.
unsigned ScratchReg2;
bool SaveScratch2;
if (CompareStackPointer) {
// The primary scratch register is available for holding the TLS offset.
ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, true);
SaveScratch2 = false;
} else {
// Need to use a second register to hold the TLS offset
ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, false);
// Unfortunately, with fastcc the second scratch register may hold an
// argument.
SaveScratch2 = MF.getRegInfo().isLiveIn(ScratchReg2);
}
// If Scratch2 is live-in then it needs to be saved.
assert((!MF.getRegInfo().isLiveIn(ScratchReg2) || SaveScratch2) &&
"Scratch register is live-in and not saved");
if (SaveScratch2)
BuildMI(checkMBB, DL, TII.get(X86::PUSH32r))
.addReg(ScratchReg2, RegState::Kill);
BuildMI(checkMBB, DL, TII.get(X86::MOV32ri), ScratchReg2)
.addImm(TlsOffset);
BuildMI(checkMBB, DL, TII.get(X86::CMP32rm))
.addReg(ScratchReg)
.addReg(ScratchReg2).addImm(1).addReg(0)
.addImm(0)
.addReg(TlsReg);
if (SaveScratch2)
BuildMI(checkMBB, DL, TII.get(X86::POP32r), ScratchReg2);
}
}
// This jump is taken if SP >= (Stacklet Limit + Stack Space required).
// It jumps to normal execution of the function body.
BuildMI(checkMBB, DL, TII.get(X86::JCC_1)).addMBB(&PrologueMBB).addImm(X86::COND_A);
// On 32-bit we first push the argument size and then the frame size. On
// 64-bit, we pass the stack frame size in r10 and the argument size in r11.
if (Is64Bit) {
// Functions with nested arguments use R10, so it needs to be saved across
// the call to __morestack.
const unsigned RegAX = IsLP64 ? X86::RAX : X86::EAX;
const unsigned Reg10 = IsLP64 ? X86::R10 : X86::R10D;
const unsigned Reg11 = IsLP64 ? X86::R11 : X86::R11D;
const unsigned MOVrr = IsLP64 ? X86::MOV64rr : X86::MOV32rr;
const unsigned MOVri = IsLP64 ? X86::MOV64ri : X86::MOV32ri;
if (IsNested)
BuildMI(allocMBB, DL, TII.get(MOVrr), RegAX).addReg(Reg10);
BuildMI(allocMBB, DL, TII.get(MOVri), Reg10)
.addImm(StackSize);
BuildMI(allocMBB, DL, TII.get(MOVri), Reg11)
.addImm(X86FI->getArgumentStackSize());
} else {
BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
.addImm(X86FI->getArgumentStackSize());
BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
.addImm(StackSize);
}
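// Putting the pieces together, an illustrative LP64 shape of allocMBB
// (not emitted verbatim; the call and return are added below):
//   mov %r10, %rax        ; only when a nest argument lives in r10
//   mov $StackSize, %r10
//   mov $ArgSize, %r11
//   call __morestack
//   ret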
// __morestack is in libgcc
if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
// Under the large code model, we cannot assume that __morestack lives
// within 2^31 bytes of the call site, so we cannot use pc-relative
// addressing. We cannot perform the call via a temporary register,
// as the rax register may be used to store the static chain, and all
// other suitable registers may be either callee-save or used for
// parameter passing. We cannot use the stack at this point either
// because __morestack manipulates the stack directly.
//
// To avoid these issues, perform an indirect call via a read-only memory
// location containing the address.
//
// This solution is not perfect, as it assumes that the .rodata section
// is laid out within 2^31 bytes of each function body, but this seems
// to be sufficient for JIT.
// FIXME: Add retpoline support and remove the error here.
if (STI.useRetpolineIndirectCalls())
report_fatal_error("Emitting morestack calls on 64-bit with the large "
"code model and retpoline not yet implemented.");
BuildMI(allocMBB, DL, TII.get(X86::CALL64m))
.addReg(X86::RIP)
.addImm(0)
.addReg(0)
.addExternalSymbol("__morestack_addr")
.addReg(0);
MF.getMMI().setUsesMorestackAddr(true);
} else {
if (Is64Bit)
BuildMI(allocMBB, DL, TII.get(X86::CALL64pcrel32))
.addExternalSymbol("__morestack");
else
BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32))
.addExternalSymbol("__morestack");
}
if (IsNested)
BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET_RESTORE_R10));
else
BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET));
allocMBB->addSuccessor(&PrologueMBB);
checkMBB->addSuccessor(allocMBB, BranchProbability::getZero());
checkMBB->addSuccessor(&PrologueMBB, BranchProbability::getOne());
#ifdef EXPENSIVE_CHECKS
MF.verify();
#endif
}
/// Lookup an ERTS parameter in the !hipe.literals named metadata node.
/// HiPE provides Erlang Runtime System-internal parameters, such as PCB offsets
/// to fields it needs, through a named metadata node "hipe.literals" containing
/// name-value pairs.
static unsigned getHiPELiteral(
NamedMDNode *HiPELiteralsMD, const StringRef LiteralName) {
for (int i = 0, e = HiPELiteralsMD->getNumOperands(); i != e; ++i) {
MDNode *Node = HiPELiteralsMD->getOperand(i);
if (Node->getNumOperands() != 2) continue;
MDString *NodeName = dyn_cast<MDString>(Node->getOperand(0));
ValueAsMetadata *NodeVal = dyn_cast<ValueAsMetadata>(Node->getOperand(1));
if (!NodeName || !NodeVal) continue;
ConstantInt *ValConst = dyn_cast_or_null<ConstantInt>(NodeVal->getValue());
if (ValConst && NodeName->getString() == LiteralName) {
return ValConst->getZExtValue();
}
}
report_fatal_error("HiPE literal " + LiteralName
+ " required but not provided");
}
/// Erlang programs may need a special prologue to handle the stack size they
/// might need at runtime. That is because Erlang/OTP does not implement a C
/// stack but uses a custom implementation of a hybrid stack/heap architecture.
/// (for more information see Eric Stenman's Ph.D. thesis:
/// http://publications.uu.se/uu/fulltext/nbn_se_uu_diva-2688.pdf)
///
/// CheckStack:
/// temp0 = sp - MaxStack
/// if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
/// OldStart:
/// ...
/// IncStack:
/// call inc_stack # doubles the stack space
/// temp0 = sp - MaxStack
/// if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
void X86FrameLowering::adjustForHiPEPrologue(
MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
MachineFrameInfo &MFI = MF.getFrameInfo();
DebugLoc DL;
// To support shrink-wrapping we would need to insert the new blocks
// at the right place and update the branches to PrologueMBB.
assert(&(*MF.begin()) == &PrologueMBB && "Shrink-wrapping not supported yet");
// HiPE-specific values
NamedMDNode *HiPELiteralsMD = MF.getMMI().getModule()
->getNamedMetadata("hipe.literals");
if (!HiPELiteralsMD)
report_fatal_error(
"Can't generate HiPE prologue without runtime parameters");
const unsigned HipeLeafWords
= getHiPELiteral(HiPELiteralsMD,
Is64Bit ? "AMD64_LEAF_WORDS" : "X86_LEAF_WORDS");
const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5;
const unsigned Guaranteed = HipeLeafWords * SlotSize;
unsigned CallerStkArity = MF.getFunction().arg_size() > CCRegisteredArgs ?
MF.getFunction().arg_size() - CCRegisteredArgs : 0;
unsigned MaxStack = MFI.getStackSize() + CallerStkArity*SlotSize + SlotSize;
assert(STI.isTargetLinux() &&
"HiPE prologue is only supported on Linux operating systems.");
// Compute the largest caller's frame that is needed to fit the callees'
// frames. This 'MaxStack' is computed from:
//
// a) the fixed frame size, which is the space needed for all spilled temps,
// b) outgoing on-stack parameter areas, and
// c) the minimum stack space this function needs to make available for the
// functions it calls (a tunable ABI property).
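// As a worked example with hypothetical numbers: on AMD64 (SlotSize 8),
// a 40-byte frame and two stack-passed caller arguments give an initial
//   MaxStack = 40 + 2*8 + 8 = 64
// before the per-callee adjustment below.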
if (MFI.hasCalls()) {
unsigned MoreStackForCalls = 0;
for (auto &MBB : MF) {
for (auto &MI : MBB) {
if (!MI.isCall())
continue;
// Get callee operand.
const MachineOperand &MO = MI.getOperand(0);
// Only take into account global function calls (no closures etc.).
if (!MO.isGlobal())
continue;
const Function *F = dyn_cast<Function>(MO.getGlobal());
if (!F)
continue;
// Do not update 'MaxStack' for primitive and built-in functions
// (encoded with names either containing "erlang." or "bif_", or
// containing neither a "." (as in <Module>.<Function>.<Arity>) nor an
// "_" (as in the BIF "suspend_0")), as they are executed on another
// stack.
if (F->getName().find("erlang.") != StringRef::npos ||
F->getName().find("bif_") != StringRef::npos ||
F->getName().find_first_of("._") == StringRef::npos)
continue;
unsigned CalleeStkArity =
F->arg_size() > CCRegisteredArgs ? F->arg_size()-CCRegisteredArgs : 0;
if (HipeLeafWords - 1 > CalleeStkArity)
MoreStackForCalls = std::max(MoreStackForCalls,
(HipeLeafWords - 1 - CalleeStkArity) * SlotSize);
}
}
MaxStack += MoreStackForCalls;
}
// If the stack frame needed is larger than what is guaranteed, then runtime
// checks and calls to the "inc_stack_0" BIF should be inserted in the
// assembly prologue.
if (MaxStack > Guaranteed) {
MachineBasicBlock *stackCheckMBB = MF.CreateMachineBasicBlock();
MachineBasicBlock *incStackMBB = MF.CreateMachineBasicBlock();
for (const auto &LI : PrologueMBB.liveins()) {
stackCheckMBB->addLiveIn(LI);
incStackMBB->addLiveIn(LI);
}
MF.push_front(incStackMBB);
MF.push_front(stackCheckMBB);
unsigned ScratchReg, SPReg, PReg, SPLimitOffset;
unsigned LEAop, CMPop, CALLop;
SPLimitOffset = getHiPELiteral(HiPELiteralsMD, "P_NSP_LIMIT");
if (Is64Bit) {
SPReg = X86::RSP;
PReg = X86::RBP;
LEAop = X86::LEA64r;
CMPop = X86::CMP64rm;
CALLop = X86::CALL64pcrel32;
} else {
SPReg = X86::ESP;
PReg = X86::EBP;
LEAop = X86::LEA32r;
CMPop = X86::CMP32rm;
CALLop = X86::CALLpcrel32;
}
ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
"HiPE prologue scratch register is live-in");
// Create new MBB for StackCheck:
addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(LEAop), ScratchReg),
SPReg, false, -MaxStack);
// SPLimitOffset is in a fixed heap location (pointed to by BP).
addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(CMPop))
.addReg(ScratchReg), PReg, false, SPLimitOffset);
BuildMI(stackCheckMBB, DL, TII.get(X86::JCC_1)).addMBB(&PrologueMBB).addImm(X86::COND_AE);
// Create new MBB for IncStack:
BuildMI(incStackMBB, DL, TII.get(CALLop)).
addExternalSymbol("inc_stack_0");
addRegOffset(BuildMI(incStackMBB, DL, TII.get(LEAop), ScratchReg),
SPReg, false, -MaxStack);
addRegOffset(BuildMI(incStackMBB, DL, TII.get(CMPop))
.addReg(ScratchReg), PReg, false, SPLimitOffset);
BuildMI(incStackMBB, DL, TII.get(X86::JCC_1)).addMBB(incStackMBB).addImm(X86::COND_LE);
stackCheckMBB->addSuccessor(&PrologueMBB, {99, 100});
stackCheckMBB->addSuccessor(incStackMBB, {1, 100});
incStackMBB->addSuccessor(&PrologueMBB, {99, 100});
incStackMBB->addSuccessor(incStackMBB, {1, 100});
}
#ifdef EXPENSIVE_CHECKS
MF.verify();
#endif
}
bool X86FrameLowering::adjustStackWithPops(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
const DebugLoc &DL,
int Offset) const {
if (Offset <= 0)
return false;
if (Offset % SlotSize)
return false;
int NumPops = Offset / SlotSize;
// This is only worth it if we have at most 2 pops.
if (NumPops != 1 && NumPops != 2)
return false;
// Handle only the trivial case where the adjustment directly follows
// a call. This is the most common one, anyway.
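// For instance, on 32-bit targets this can rewrite (register choice is
// hypothetical):
//   call foo              call foo
//   add  $8, %esp   =>    pop  %ecx
//                         pop  %ecx
// The popped values are discarded; only the ESP adjustment matters.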
if (MBBI == MBB.begin())
return false;
MachineBasicBlock::iterator Prev = std::prev(MBBI);
if (!Prev->isCall() || !Prev->getOperand(1).isRegMask())
return false;
unsigned Regs[2];
unsigned FoundRegs = 0;
auto &MRI = MBB.getParent()->getRegInfo();
auto RegMask = Prev->getOperand(1);
auto &RegClass =
Is64Bit ? X86::GR64_NOREX_NOSPRegClass : X86::GR32_NOREX_NOSPRegClass;
// Try to find up to NumPops free registers.
for (auto Candidate : RegClass) {
// Poor man's liveness:
// Since we're immediately after a call, any register that is clobbered
// by the call and not defined by it can be considered dead.
if (!RegMask.clobbersPhysReg(Candidate))
continue;
// Don't clobber reserved registers
if (MRI.isReserved(Candidate))
continue;
bool IsDef = false;
for (const MachineOperand &MO : Prev->implicit_operands()) {
if (MO.isReg() && MO.isDef() &&
TRI->isSuperOrSubRegisterEq(MO.getReg(), Candidate)) {
IsDef = true;
break;
}
}
if (IsDef)
continue;
Regs[FoundRegs++] = Candidate;
if (FoundRegs == (unsigned)NumPops)
break;
}
if (FoundRegs == 0)
return false;
// If we found only one free register, but need two, reuse the same one twice.
while (FoundRegs < (unsigned)NumPops)
Regs[FoundRegs++] = Regs[0];
for (int i = 0; i < NumPops; ++i)
BuildMI(MBB, MBBI, DL,
TII.get(STI.is64Bit() ? X86::POP64r : X86::POP32r), Regs[i]);
return true;
}
MachineBasicBlock::iterator X86FrameLowering::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
bool reserveCallFrame = hasReservedCallFrame(MF);
unsigned Opcode = I->getOpcode();
bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
DebugLoc DL = I->getDebugLoc();
uint64_t Amount = !reserveCallFrame ? TII.getFrameSize(*I) : 0;
uint64_t InternalAmt = (isDestroy || Amount) ? TII.getFrameAdjustment(*I) : 0;
I = MBB.erase(I);
auto InsertPos = skipDebugInstructionsForward(I, MBB.end());
if (!reserveCallFrame) {
// If the stack pointer can be changed after prologue, turn the
// adjcallstackdown instruction into a 'sub ESP, <amt>' and the
// adjcallstackup instruction into an 'add ESP, <amt>'.
// We need to keep the stack aligned properly. To do this, we round the
// amount of space needed for the outgoing arguments up to the next
// alignment boundary.
unsigned StackAlign = getStackAlignment();
Amount = alignTo(Amount, StackAlign);
MachineModuleInfo &MMI = MF.getMMI();
const Function &F = MF.getFunction();
bool WindowsCFI = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
bool DwarfCFI = !WindowsCFI &&
(MMI.hasDebugInfo() || F.needsUnwindTableEntry());
// If we have any exception handlers in this function, and we adjust
// the SP before calls, we may need to indicate this to the unwinder
// using GNU_ARGS_SIZE. Note that this may be necessary even when
// Amount == 0, because the preceding function may have set a non-0
// GNU_ARGS_SIZE.
// TODO: We don't need to reset this between subsequent functions,
// if it didn't change.
bool HasDwarfEHHandlers = !WindowsCFI && !MF.getLandingPads().empty();
if (HasDwarfEHHandlers && !isDestroy &&
MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences())
BuildCFI(MBB, InsertPos, DL,
MCCFIInstruction::createGnuArgsSize(nullptr, Amount));
if (Amount == 0)
return I;
// Factor out the amount that gets handled inside the sequence
// (Pushes of argument for frame setup, callee pops for frame destroy)
Amount -= InternalAmt;
// TODO: This is needed only if we require precise CFA.
// If this is a callee-pop calling convention, emit a CFA adjust for
// the amount the callee popped.
if (isDestroy && InternalAmt && DwarfCFI && !hasFP(MF))
BuildCFI(MBB, InsertPos, DL,
MCCFIInstruction::createAdjustCfaOffset(nullptr, -InternalAmt));
// Add Amount to SP to destroy a frame, or subtract to setup.
int64_t StackAdjustment = isDestroy ? Amount : -Amount;
if (StackAdjustment) {
// Merge with any previous or following adjustment instruction. Note: the
// instructions merged with here do not have CFI, so their stack
// adjustments do not feed into CfaAdjustment.
StackAdjustment += mergeSPUpdates(MBB, InsertPos, true);
StackAdjustment += mergeSPUpdates(MBB, InsertPos, false);
if (StackAdjustment) {
if (!(F.hasMinSize() &&
adjustStackWithPops(MBB, InsertPos, DL, StackAdjustment)))
BuildStackAdjustment(MBB, InsertPos, DL, StackAdjustment,
/*InEpilogue=*/false);
}
}
if (DwarfCFI && !hasFP(MF)) {
// If we don't have FP, but need to generate unwind information,
// we need to set the correct CFA offset after the stack adjustment.
// How much we adjust the CFA offset depends on whether we're emitting
// CFI only for EH purposes or for debugging. EH only requires the CFA
// offset to be correct at each call site, while for debugging we want
// it to be more precise.
int64_t CfaAdjustment = -StackAdjustment;
// TODO: When not using precise CFA, we also need to adjust for the
// InternalAmt here.
if (CfaAdjustment) {
BuildCFI(MBB, InsertPos, DL,
MCCFIInstruction::createAdjustCfaOffset(nullptr,
CfaAdjustment));
}
}
return I;
}
if (isDestroy && InternalAmt) {
// If we are performing frame pointer elimination and if the callee pops
// something off the stack pointer, add it back. We do this until we have
// more advanced stack pointer tracking ability.
// We are not tracking the stack pointer adjustment by the callee, so make
// sure we restore the stack pointer immediately after the call; there may
// be spill code inserted between the CALL and ADJCALLSTACKUP instructions.
MachineBasicBlock::iterator CI = I;
MachineBasicBlock::iterator B = MBB.begin();
while (CI != B && !std::prev(CI)->isCall())
--CI;
BuildStackAdjustment(MBB, CI, DL, -InternalAmt, /*InEpilogue=*/false);
}
return I;
}
bool X86FrameLowering::canUseAsPrologue(const MachineBasicBlock &MBB) const {
assert(MBB.getParent() && "Block is not attached to a function!");
const MachineFunction &MF = *MBB.getParent();
return !TRI->needsStackRealignment(MF) || !MBB.isLiveIn(X86::EFLAGS);
}
bool X86FrameLowering::canUseAsEpilogue(const MachineBasicBlock &MBB) const {
assert(MBB.getParent() && "Block is not attached to a function!");
// Win64 has strict requirements in terms of epilogues, and we are
// not taking a chance at messing with them.
// I.e., unless this block is already an exit block, we can't use
// it as an epilogue.
if (STI.isTargetWin64() && !MBB.succ_empty() && !MBB.isReturnBlock())
return false;
if (canUseLEAForSPInEpilogue(*MBB.getParent()))
return true;
// If we cannot use LEA to adjust SP, we may need to use ADD, which
// clobbers EFLAGS. Check that we do not need to preserve it; otherwise,
// conservatively assume it is not safe to insert the epilogue here.
return !flagsNeedToBePreservedBeforeTheTerminators(MBB);
}
bool X86FrameLowering::enableShrinkWrapping(const MachineFunction &MF) const {
// If we may need to emit frameless compact unwind information, give
// up as this is currently broken: PR25614.
return (MF.getFunction().hasFnAttribute(Attribute::NoUnwind) || hasFP(MF)) &&
// The lowering of segmented stack and HiPE only support entry blocks
// as prologue blocks: PR26107.
// This limitation may be lifted if we fix:
// - adjustForSegmentedStacks
// - adjustForHiPEPrologue
MF.getFunction().getCallingConv() != CallingConv::HiPE &&
!MF.shouldSplitStack();
}
MachineBasicBlock::iterator X86FrameLowering::restoreWin32EHStackPointers(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
const DebugLoc &DL, bool RestoreSP) const {
assert(STI.isTargetWindowsMSVC() && "funclets only supported in MSVC env");
assert(STI.isTargetWin32() && "EBP/ESI restoration only required on win32");
assert(STI.is32Bit() && !Uses64BitFramePtr &&
"restoring EBP/ESI on non-32-bit target");
MachineFunction &MF = *MBB.getParent();
unsigned FramePtr = TRI->getFrameRegister(MF);
unsigned BasePtr = TRI->getBaseRegister();
WinEHFuncInfo &FuncInfo = *MF.getWinEHFuncInfo();
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
MachineFrameInfo &MFI = MF.getFrameInfo();
// FIXME: Don't set FrameSetup flag in catchret case.
int FI = FuncInfo.EHRegNodeFrameIndex;
int EHRegSize = MFI.getObjectSize(FI);
if (RestoreSP) {
// MOV32rm -EHRegSize(%ebp), %esp
addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32rm), X86::ESP),
X86::EBP, true, -EHRegSize)
.setMIFlag(MachineInstr::FrameSetup);
}
unsigned UsedReg;
int EHRegOffset = getFrameIndexReference(MF, FI, UsedReg);
int EndOffset = -EHRegOffset - EHRegSize;
FuncInfo.EHRegNodeEndOffset = EndOffset;
if (UsedReg == FramePtr) {
// ADD $offset, %ebp
unsigned ADDri = getADDriOpcode(false, EndOffset);
BuildMI(MBB, MBBI, DL, TII.get(ADDri), FramePtr)
.addReg(FramePtr)
.addImm(EndOffset)
.setMIFlag(MachineInstr::FrameSetup)
->getOperand(3)
.setIsDead();
assert(EndOffset >= 0 &&
"end of registration object above normal EBP position!");
} else if (UsedReg == BasePtr) {
// LEA offset(%ebp), %esi
addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::LEA32r), BasePtr),
FramePtr, false, EndOffset)
.setMIFlag(MachineInstr::FrameSetup);
// MOV32rm SavedEBPOffset(%esi), %ebp
assert(X86FI->getHasSEHFramePtrSave());
int Offset =
getFrameIndexReference(MF, X86FI->getSEHFramePtrSaveIndex(), UsedReg);
assert(UsedReg == BasePtr);
addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32rm), FramePtr),
UsedReg, true, Offset)
.setMIFlag(MachineInstr::FrameSetup);
} else {
llvm_unreachable("32-bit frames with WinEH must use FramePtr or BasePtr");
}
return MBBI;
}
int X86FrameLowering::getInitialCFAOffset(const MachineFunction &MF) const {
return TRI->getSlotSize();
}
unsigned X86FrameLowering::getInitialCFARegister(const MachineFunction &MF)
const {
return TRI->getDwarfRegNum(StackPtr, true);
}
namespace {
// Struct used by orderFrameObjects to help sort the stack objects.
struct X86FrameSortingObject {
bool IsValid = false; // true if we care about this Object.
unsigned ObjectIndex = 0; // Index of Object into MFI list.
unsigned ObjectSize = 0; // Size of Object in bytes.
unsigned ObjectAlignment = 1; // Alignment of Object in bytes.
unsigned ObjectNumUses = 0; // Object static number of uses.
};
// The comparison function we use for std::sort to order our local
// stack symbols. The current algorithm is to use an estimated
// "density". This takes into consideration the size and number of
// uses each object has in order to roughly minimize code size.
// So, for example, an object of size 16B that is referenced 5 times
// will get higher priority than 4 4B objects referenced 1 time each.
// It's not perfect and we may be able to squeeze a few more bytes out of
// it (for example: 0(esp) requires fewer bytes, symbols allocated at the
// fringe end can have special consideration, given their size is less
// important, etc.), but the algorithmic complexity grows too much to be
// worth the extra gains we get. This gets us pretty close.
// The final order leaves us with objects with highest priority going
// at the end of our list.
struct X86FrameSortingComparator {
inline bool operator()(const X86FrameSortingObject &A,
const X86FrameSortingObject &B) {
uint64_t DensityAScaled, DensityBScaled;
// For consistency in our comparison, all invalid objects are placed
// at the end. This also allows us to stop walking when we hit the
// first invalid item after it's all sorted.
if (!A.IsValid)
return false;
if (!B.IsValid)
return true;
// The density is calculated by doing:
// (double)DensityA = A.ObjectNumUses / A.ObjectSize
// (double)DensityB = B.ObjectNumUses / B.ObjectSize
// Since this approach may cause inconsistencies in
// the floating point <, >, == comparisons, depending on the floating
// point model with which the compiler was built, we're going
// to scale both sides by multiplying with
// A.ObjectSize * B.ObjectSize. This ends up factoring away
// the division and, with it, the need for any floating point
// arithmetic.
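// Hypothetical example: A = {5 uses, 16 bytes}, B = {1 use, 4 bytes}:
//   DensityAScaled = 5 * 4  = 20
//   DensityBScaled = 1 * 16 = 16
// so B orders before A, and the denser A ends up later in the list,
// i.e. with higher priority, as described above.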
DensityAScaled = static_cast<uint64_t>(A.ObjectNumUses) *
static_cast<uint64_t>(B.ObjectSize);
DensityBScaled = static_cast<uint64_t>(B.ObjectNumUses) *
static_cast<uint64_t>(A.ObjectSize);
// If the two densities are equal, prioritize highest alignment
// objects. This allows for similar alignment objects
// to be packed together (given the same density).
// There's room for improvement here, also, since we can pack
// similar alignment (different density) objects next to each
// other to save padding. This will also require further
// complexity/iterations, and the overall gain isn't worth it,
// in general. Something to keep in mind, though.
if (DensityAScaled == DensityBScaled)
return A.ObjectAlignment < B.ObjectAlignment;
return DensityAScaled < DensityBScaled;
}
};
} // namespace
// Order the symbols in the local stack.
// We want to place the local stack objects in some sort of sensible order.
// The heuristic we use is to try and pack them according to static number
// of uses and size of object in order to minimize code size.
void X86FrameLowering::orderFrameObjects(
const MachineFunction &MF, SmallVectorImpl<int> &ObjectsToAllocate) const {
const MachineFrameInfo &MFI = MF.getFrameInfo();
// Don't waste time if there's nothing to do.
if (ObjectsToAllocate.empty())
return;
// Create an array of all MFI objects. We won't need all of these
// objects, but we're going to create a full array of them to make
// it easier to index into when we're counting "uses" down below.
// We want to be able to easily/cheaply access an object by simply
// indexing into it, instead of having to search for it every time.
std::vector<X86FrameSortingObject> SortingObjects(MFI.getObjectIndexEnd());
// Walk the objects we care about and mark them as such in our working
// struct.
for (auto &Obj : ObjectsToAllocate) {
SortingObjects[Obj].IsValid = true;
SortingObjects[Obj].ObjectIndex = Obj;
SortingObjects[Obj].ObjectAlignment = MFI.getObjectAlignment(Obj);
// Set the size.
int ObjectSize = MFI.getObjectSize(Obj);
if (ObjectSize == 0)
// Variable size. Just use 4.
SortingObjects[Obj].ObjectSize = 4;
else
SortingObjects[Obj].ObjectSize = ObjectSize;
}
// Count the number of uses for each object.
for (auto &MBB : MF) {
for (auto &MI : MBB) {
if (MI.isDebugInstr())
continue;
for (const MachineOperand &MO : MI.operands()) {
// Check to see if it's a local stack symbol.
if (!MO.isFI())
continue;
int Index = MO.getIndex();
// Check to see if it falls within our range, and is tagged
// to require ordering.
if (Index >= 0 && Index < MFI.getObjectIndexEnd() &&
SortingObjects[Index].IsValid)
SortingObjects[Index].ObjectNumUses++;
}
}
}
// Sort the objects using the X86FrameSortingComparator above (see its
// comment for info).
llvm::stable_sort(SortingObjects, X86FrameSortingComparator());
// Now modify the original list to represent the final order that
// we want. The order will depend on whether we're going to access them
// from the stack pointer or the frame pointer. For SP, the end of the
// list should contain the objects we want at smaller offsets.
// For FP, it should be flipped.
int i = 0;
for (auto &Obj : SortingObjects) {
// All invalid items are sorted at the end, so it's safe to stop.
if (!Obj.IsValid)
break;
ObjectsToAllocate[i++] = Obj.ObjectIndex;
}
// Flip it if we're accessing off of the FP.
if (!TRI->needsStackRealignment(MF) && hasFP(MF))
std::reverse(ObjectsToAllocate.begin(), ObjectsToAllocate.end());
}
unsigned X86FrameLowering::getWinEHParentFrameOffset(const MachineFunction &MF) const {
// RDX, the parent frame pointer, is homed into 16(%rsp) in the prologue.
unsigned Offset = 16;
// RBP is immediately pushed.
Offset += SlotSize;
// All callee-saved registers are then pushed.
Offset += MF.getInfo<X86MachineFunctionInfo>()->getCalleeSavedFrameSize();
// Every funclet allocates enough stack space for the largest outgoing call.
Offset += getWinEHFuncletFrameSize(MF);
return Offset;
}
void X86FrameLowering::processFunctionBeforeFrameFinalized(
MachineFunction &MF, RegScavenger *RS) const {
// Mark the function as not having WinCFI. We will set it back to true in
// emitPrologue if it gets called and emits CFI.
MF.setHasWinCFI(false);
// If this function isn't doing Win64-style C++ EH, we don't need to do
// anything.
const Function &F = MF.getFunction();
if (!STI.is64Bit() || !MF.hasEHFunclets() ||
classifyEHPersonality(F.getPersonalityFn()) != EHPersonality::MSVC_CXX)
return;
// Win64 C++ EH needs to allocate the UnwindHelp object at some fixed offset
// relative to RSP after the prologue. Find the offset of the last fixed
// object, so that we can allocate a slot immediately following it. If there
// were no fixed objects, use offset -SlotSize, which is immediately after the
// return address. Fixed objects have negative frame indices.
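// Illustrative placement with hypothetical offsets: if the lowest fixed
// object sits at offset -24, the 8-byte alignment fixup below leaves it
// at -24 and UnwindHelp is created at -24 - SlotSize = -32.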
MachineFrameInfo &MFI = MF.getFrameInfo();
WinEHFuncInfo &EHInfo = *MF.getWinEHFuncInfo();
int64_t MinFixedObjOffset = -SlotSize;
for (int I = MFI.getObjectIndexBegin(); I < 0; ++I)
MinFixedObjOffset = std::min(MinFixedObjOffset, MFI.getObjectOffset(I));
for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
for (WinEHHandlerType &H : TBME.HandlerArray) {
int FrameIndex = H.CatchObj.FrameIndex;
if (FrameIndex != INT_MAX) {
// Ensure alignment.
unsigned Align = MFI.getObjectAlignment(FrameIndex);
MinFixedObjOffset -= std::abs(MinFixedObjOffset) % Align;
MinFixedObjOffset -= MFI.getObjectSize(FrameIndex);
MFI.setObjectOffset(FrameIndex, MinFixedObjOffset);
}
}
}
// Ensure alignment.
MinFixedObjOffset -= std::abs(MinFixedObjOffset) % 8;
int64_t UnwindHelpOffset = MinFixedObjOffset - SlotSize;
int UnwindHelpFI =
MFI.CreateFixedObject(SlotSize, UnwindHelpOffset, /*IsImmutable=*/false);
EHInfo.UnwindHelpFrameIdx = UnwindHelpFI;
// Store -2 into UnwindHelp on function entry. We have to scan forwards past
// other frame setup instructions.
MachineBasicBlock &MBB = MF.front();
auto MBBI = MBB.begin();
while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup))
++MBBI;
DebugLoc DL = MBB.findDebugLoc(MBBI);
addFrameReference(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mi32)),
UnwindHelpFI)
.addImm(-2);
}
Index: stable/12/contrib/llvm-project/llvm/lib/Target/X86/X86FrameLowering.h
===================================================================
--- stable/12/contrib/llvm-project/llvm/lib/Target/X86/X86FrameLowering.h (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/lib/Target/X86/X86FrameLowering.h (revision 356465)
@@ -1,224 +1,226 @@
//===-- X86TargetFrameLowering.h - Define frame lowering for X86 -*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This class implements X86-specific bits of TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_X86_X86FRAMELOWERING_H
#define LLVM_LIB_TARGET_X86_X86FRAMELOWERING_H
#include "llvm/CodeGen/TargetFrameLowering.h"
namespace llvm {
class MachineInstrBuilder;
class MCCFIInstruction;
class X86InstrInfo;
class X86Subtarget;
class X86RegisterInfo;
class X86FrameLowering : public TargetFrameLowering {
public:
X86FrameLowering(const X86Subtarget &STI, unsigned StackAlignOverride);
// Cached subtarget predicates.
const X86Subtarget &STI;
const X86InstrInfo &TII;
const X86RegisterInfo *TRI;
unsigned SlotSize;
/// Is64Bit implies that x86_64 instructions are available.
bool Is64Bit;
bool IsLP64;
/// True if the 64-bit frame or stack pointer should be used. True for most
/// 64-bit targets with the exception of x32. If this is false, 32-bit
/// instruction operands should be used to manipulate StackPtr and FramePtr.
bool Uses64BitFramePtr;
unsigned StackPtr;
/// Emit target stack probe code. This is required for all
/// large stack allocations on Windows. The caller is required to materialize
/// the number of bytes to probe in RAX/EAX.
void emitStackProbe(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
bool InProlog) const;
/// Replace a StackProbe inline-stub with the actual probe code inline.
void inlineStackProbe(MachineFunction &MF,
MachineBasicBlock &PrologMBB) const override;
void emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
const DebugLoc &DL) const;
/// emitProlog/emitEpilog - These methods insert prolog and epilog code into
/// the function.
void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
void adjustForSegmentedStacks(MachineFunction &MF,
MachineBasicBlock &PrologueMBB) const override;
void adjustForHiPEPrologue(MachineFunction &MF,
MachineBasicBlock &PrologueMBB) const override;
void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs,
RegScavenger *RS = nullptr) const override;
bool
assignCalleeSavedSpillSlots(MachineFunction &MF,
const TargetRegisterInfo *TRI,
std::vector<CalleeSavedInfo> &CSI) const override;
bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI,
const TargetRegisterInfo *TRI) const override;
bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
std::vector<CalleeSavedInfo> &CSI,
const TargetRegisterInfo *TRI) const override;
bool hasFP(const MachineFunction &MF) const override;
bool hasReservedCallFrame(const MachineFunction &MF) const override;
bool canSimplifyCallFramePseudos(const MachineFunction &MF) const override;
bool needsFrameIndexResolution(const MachineFunction &MF) const override;
int getFrameIndexReference(const MachineFunction &MF, int FI,
unsigned &FrameReg) const override;
+ int getWin64EHFrameIndexRef(const MachineFunction &MF,
+ int FI, unsigned &SPReg) const;
int getFrameIndexReferenceSP(const MachineFunction &MF,
int FI, unsigned &SPReg, int Adjustment) const;
int getFrameIndexReferencePreferSP(const MachineFunction &MF, int FI,
unsigned &FrameReg,
bool IgnoreSPUpdates) const override;
MachineBasicBlock::iterator
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const override;
unsigned getWinEHParentFrameOffset(const MachineFunction &MF) const override;
void processFunctionBeforeFrameFinalized(MachineFunction &MF,
RegScavenger *RS) const override;
/// Check the instruction before/after the passed instruction. If
/// it is an ADD/SUB/LEA instruction it is deleted, and the
/// stack adjustment is returned as a positive value for ADD/LEA and
/// a negative one for SUB.
int mergeSPUpdates(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
bool doMergeWithPrevious) const;
/// Emit a series of instructions to increment / decrement the stack
/// pointer by a constant value.
void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
const DebugLoc &DL, int64_t NumBytes, bool InEpilogue) const;
/// Check that LEA can be used on SP in an epilogue sequence for \p MF.
bool canUseLEAForSPInEpilogue(const MachineFunction &MF) const;
/// Check whether or not the given \p MBB can be used as a prologue
/// for the target.
/// The prologue will be inserted first in this basic block.
/// This method is used by the shrink-wrapping pass to decide if
/// \p MBB will be correctly handled by the target.
/// As soon as the target enables shrink-wrapping without overriding
/// this method, we assume that each basic block is a valid
/// prologue.
bool canUseAsPrologue(const MachineBasicBlock &MBB) const override;
/// Check whether or not the given \p MBB can be used as an epilogue
/// for the target.
/// The epilogue will be inserted before the first terminator of that block.
/// This method is used by the shrink-wrapping pass to decide if
/// \p MBB will be correctly handled by the target.
bool canUseAsEpilogue(const MachineBasicBlock &MBB) const override;
/// Returns true if the target will correctly handle shrink wrapping.
bool enableShrinkWrapping(const MachineFunction &MF) const override;
/// Order the symbols in the local stack.
/// We want to place the local stack objects in some sort of sensible order.
/// The heuristic we use is to try and pack them according to static number
/// of uses and size in order to minimize code size.
void orderFrameObjects(const MachineFunction &MF,
SmallVectorImpl<int> &ObjectsToAllocate) const override;
/// Wraps up getting a CFI index and building a MachineInstr for it.
void BuildCFI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
const DebugLoc &DL, const MCCFIInstruction &CFIInst) const;
/// Sets up EBP and optionally ESI based on the incoming EBP value. Only
/// needed for 32-bit. Used in funclet prologues and at catchret destinations.
MachineBasicBlock::iterator
restoreWin32EHStackPointers(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
const DebugLoc &DL, bool RestoreSP = false) const;
int getInitialCFAOffset(const MachineFunction &MF) const override;
unsigned getInitialCFARegister(const MachineFunction &MF) const override;
/// Return true if the function has a redzone (accessible bytes past the
/// frame of the top-of-stack function) as part of its ABI.
bool has128ByteRedZone(const MachineFunction& MF) const;
private:
uint64_t calculateMaxStackAlign(const MachineFunction &MF) const;
/// Emit target stack probe as a call to a helper function
void emitStackProbeCall(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
bool InProlog) const;
/// Emit target stack probe as an inline sequence.
void emitStackProbeInline(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
const DebugLoc &DL, bool InProlog) const;
/// Emit a stub to later inline the target stack probe.
void emitStackProbeInlineStub(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
const DebugLoc &DL, bool InProlog) const;
/// Aligns the stack pointer by ANDing it with -MaxAlign.
void BuildStackAlignAND(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
unsigned Reg, uint64_t MaxAlign) const;
/// Make small positive stack adjustments using POPs.
bool adjustStackWithPops(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
int Offset) const;
/// Adjusts the stack pointer using LEA, SUB, or ADD.
MachineInstrBuilder BuildStackAdjustment(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
const DebugLoc &DL, int64_t Offset,
bool InEpilogue) const;
unsigned getPSPSlotOffsetFromSP(const MachineFunction &MF) const;
unsigned getWinEHFuncletFrameSize(const MachineFunction &MF) const;
/// Materialize the catchret target MBB in RAX.
void emitCatchRetReturnValue(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
MachineInstr *CatchRet) const;
};
} // End llvm namespace
#endif
Index: stable/12/contrib/llvm-project/llvm/lib/Target/X86/X86InstrInfo.cpp
===================================================================
--- stable/12/contrib/llvm-project/llvm/lib/Target/X86/X86InstrInfo.cpp (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/lib/Target/X86/X86InstrInfo.cpp (revision 356465)
@@ -1,7855 +1,7859 @@
//===-- X86InstrInfo.cpp - X86 Instruction Information --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//
#include "X86InstrInfo.h"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86InstrFoldTables.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;
#define DEBUG_TYPE "x86-instr-info"
#define GET_INSTRINFO_CTOR_DTOR
#include "X86GenInstrInfo.inc"
static cl::opt<bool>
NoFusing("disable-spill-fusing",
cl::desc("Disable fusing of spill code into instructions"),
cl::Hidden);
static cl::opt<bool>
PrintFailedFusing("print-failed-fuse-candidates",
cl::desc("Print instructions that the allocator wants to"
" fuse, but the X86 backend currently can't"),
cl::Hidden);
static cl::opt<bool>
ReMatPICStubLoad("remat-pic-stub-load",
cl::desc("Re-materialize load from stub in PIC mode"),
cl::init(false), cl::Hidden);
static cl::opt<unsigned>
PartialRegUpdateClearance("partial-reg-update-clearance",
cl::desc("Clearance between two register writes "
"for inserting XOR to avoid partial "
"register update"),
cl::init(64), cl::Hidden);
static cl::opt<unsigned>
UndefRegClearance("undef-reg-clearance",
cl::desc("How many idle instructions we would like before "
"certain undef register reads"),
cl::init(128), cl::Hidden);
// Pin the vtable to this file.
void X86InstrInfo::anchor() {}
X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
: X86GenInstrInfo((STI.isTarget64BitLP64() ? X86::ADJCALLSTACKDOWN64
: X86::ADJCALLSTACKDOWN32),
(STI.isTarget64BitLP64() ? X86::ADJCALLSTACKUP64
: X86::ADJCALLSTACKUP32),
X86::CATCHRET,
(STI.is64Bit() ? X86::RETQ : X86::RETL)),
Subtarget(STI), RI(STI.getTargetTriple()) {
}
bool
X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
unsigned &SrcReg, unsigned &DstReg,
unsigned &SubIdx) const {
switch (MI.getOpcode()) {
default: break;
case X86::MOVSX16rr8:
case X86::MOVZX16rr8:
case X86::MOVSX32rr8:
case X86::MOVZX32rr8:
case X86::MOVSX64rr8:
if (!Subtarget.is64Bit())
// It's not always legal to reference the low 8 bits of the larger
// register in 32-bit mode.
return false;
LLVM_FALLTHROUGH;
case X86::MOVSX32rr16:
case X86::MOVZX32rr16:
case X86::MOVSX64rr16:
case X86::MOVSX64rr32: {
if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg())
// Be conservative.
return false;
SrcReg = MI.getOperand(1).getReg();
DstReg = MI.getOperand(0).getReg();
switch (MI.getOpcode()) {
default: llvm_unreachable("Unreachable!");
case X86::MOVSX16rr8:
case X86::MOVZX16rr8:
case X86::MOVSX32rr8:
case X86::MOVZX32rr8:
case X86::MOVSX64rr8:
SubIdx = X86::sub_8bit;
break;
case X86::MOVSX32rr16:
case X86::MOVZX32rr16:
case X86::MOVSX64rr16:
SubIdx = X86::sub_16bit;
break;
case X86::MOVSX64rr32:
SubIdx = X86::sub_32bit;
break;
}
return true;
}
}
return false;
}
int X86InstrInfo::getSPAdjust(const MachineInstr &MI) const {
const MachineFunction *MF = MI.getParent()->getParent();
const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
if (isFrameInstr(MI)) {
unsigned StackAlign = TFI->getStackAlignment();
int SPAdj = alignTo(getFrameSize(MI), StackAlign);
SPAdj -= getFrameAdjustment(MI);
if (!isFrameSetup(MI))
SPAdj = -SPAdj;
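// For example (hypothetical numbers): frame size 20 with 16-byte stack
// alignment and no frame adjustment yields alignTo(20, 16) = 32 for the
// setup pseudo and -32 for the matching destroy.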
return SPAdj;
}
// To know whether a call adjusts the stack, we need information
// that is bound to the following ADJCALLSTACKUP pseudo.
// Look for the next ADJCALLSTACKUP that follows the call.
if (MI.isCall()) {
const MachineBasicBlock *MBB = MI.getParent();
auto I = ++MachineBasicBlock::const_iterator(MI);
for (auto E = MBB->end(); I != E; ++I) {
if (I->getOpcode() == getCallFrameDestroyOpcode() ||
I->isCall())
break;
}
// If we could not find a frame destroy opcode, then it has already
// been simplified, so we don't care.
if (I->getOpcode() != getCallFrameDestroyOpcode())
return 0;
return -(I->getOperand(1).getImm());
}
// Currently handle only PUSHes we can reasonably expect to see
// in call sequences
switch (MI.getOpcode()) {
default:
return 0;
case X86::PUSH32i8:
case X86::PUSH32r:
case X86::PUSH32rmm:
case X86::PUSH32rmr:
case X86::PUSHi32:
return 4;
case X86::PUSH64i8:
case X86::PUSH64r:
case X86::PUSH64rmm:
case X86::PUSH64rmr:
case X86::PUSH64i32:
return 8;
}
}
/// Return true and the FrameIndex if the specified
/// operand and the following operands form a reference to the stack frame.
bool X86InstrInfo::isFrameOperand(const MachineInstr &MI, unsigned int Op,
int &FrameIndex) const {
if (MI.getOperand(Op + X86::AddrBaseReg).isFI() &&
MI.getOperand(Op + X86::AddrScaleAmt).isImm() &&
MI.getOperand(Op + X86::AddrIndexReg).isReg() &&
MI.getOperand(Op + X86::AddrDisp).isImm() &&
MI.getOperand(Op + X86::AddrScaleAmt).getImm() == 1 &&
MI.getOperand(Op + X86::AddrIndexReg).getReg() == 0 &&
MI.getOperand(Op + X86::AddrDisp).getImm() == 0) {
FrameIndex = MI.getOperand(Op + X86::AddrBaseReg).getIndex();
return true;
}
return false;
}
static bool isFrameLoadOpcode(int Opcode, unsigned &MemBytes) {
switch (Opcode) {
default:
return false;
case X86::MOV8rm:
case X86::KMOVBkm:
MemBytes = 1;
return true;
case X86::MOV16rm:
case X86::KMOVWkm:
MemBytes = 2;
return true;
case X86::MOV32rm:
case X86::MOVSSrm:
case X86::MOVSSrm_alt:
case X86::VMOVSSrm:
case X86::VMOVSSrm_alt:
case X86::VMOVSSZrm:
case X86::VMOVSSZrm_alt:
case X86::KMOVDkm:
MemBytes = 4;
return true;
case X86::MOV64rm:
case X86::LD_Fp64m:
case X86::MOVSDrm:
case X86::MOVSDrm_alt:
case X86::VMOVSDrm:
case X86::VMOVSDrm_alt:
case X86::VMOVSDZrm:
case X86::VMOVSDZrm_alt:
case X86::MMX_MOVD64rm:
case X86::MMX_MOVQ64rm:
case X86::KMOVQkm:
MemBytes = 8;
return true;
case X86::MOVAPSrm:
case X86::MOVUPSrm:
case X86::MOVAPDrm:
case X86::MOVUPDrm:
case X86::MOVDQArm:
case X86::MOVDQUrm:
case X86::VMOVAPSrm:
case X86::VMOVUPSrm:
case X86::VMOVAPDrm:
case X86::VMOVUPDrm:
case X86::VMOVDQArm:
case X86::VMOVDQUrm:
case X86::VMOVAPSZ128rm:
case X86::VMOVUPSZ128rm:
case X86::VMOVAPSZ128rm_NOVLX:
case X86::VMOVUPSZ128rm_NOVLX:
case X86::VMOVAPDZ128rm:
case X86::VMOVUPDZ128rm:
case X86::VMOVDQU8Z128rm:
case X86::VMOVDQU16Z128rm:
case X86::VMOVDQA32Z128rm:
case X86::VMOVDQU32Z128rm:
case X86::VMOVDQA64Z128rm:
case X86::VMOVDQU64Z128rm:
MemBytes = 16;
return true;
case X86::VMOVAPSYrm:
case X86::VMOVUPSYrm:
case X86::VMOVAPDYrm:
case X86::VMOVUPDYrm:
case X86::VMOVDQAYrm:
case X86::VMOVDQUYrm:
case X86::VMOVAPSZ256rm:
case X86::VMOVUPSZ256rm:
case X86::VMOVAPSZ256rm_NOVLX:
case X86::VMOVUPSZ256rm_NOVLX:
case X86::VMOVAPDZ256rm:
case X86::VMOVUPDZ256rm:
case X86::VMOVDQU8Z256rm:
case X86::VMOVDQU16Z256rm:
case X86::VMOVDQA32Z256rm:
case X86::VMOVDQU32Z256rm:
case X86::VMOVDQA64Z256rm:
case X86::VMOVDQU64Z256rm:
MemBytes = 32;
return true;
case X86::VMOVAPSZrm:
case X86::VMOVUPSZrm:
case X86::VMOVAPDZrm:
case X86::VMOVUPDZrm:
case X86::VMOVDQU8Zrm:
case X86::VMOVDQU16Zrm:
case X86::VMOVDQA32Zrm:
case X86::VMOVDQU32Zrm:
case X86::VMOVDQA64Zrm:
case X86::VMOVDQU64Zrm:
MemBytes = 64;
return true;
}
}
static bool isFrameStoreOpcode(int Opcode, unsigned &MemBytes) {
switch (Opcode) {
default:
return false;
case X86::MOV8mr:
case X86::KMOVBmk:
MemBytes = 1;
return true;
case X86::MOV16mr:
case X86::KMOVWmk:
MemBytes = 2;
return true;
case X86::MOV32mr:
case X86::MOVSSmr:
case X86::VMOVSSmr:
case X86::VMOVSSZmr:
case X86::KMOVDmk:
MemBytes = 4;
return true;
case X86::MOV64mr:
case X86::ST_FpP64m:
case X86::MOVSDmr:
case X86::VMOVSDmr:
case X86::VMOVSDZmr:
case X86::MMX_MOVD64mr:
case X86::MMX_MOVQ64mr:
case X86::MMX_MOVNTQmr:
case X86::KMOVQmk:
MemBytes = 8;
return true;
case X86::MOVAPSmr:
case X86::MOVUPSmr:
case X86::MOVAPDmr:
case X86::MOVUPDmr:
case X86::MOVDQAmr:
case X86::MOVDQUmr:
case X86::VMOVAPSmr:
case X86::VMOVUPSmr:
case X86::VMOVAPDmr:
case X86::VMOVUPDmr:
case X86::VMOVDQAmr:
case X86::VMOVDQUmr:
case X86::VMOVUPSZ128mr:
case X86::VMOVAPSZ128mr:
case X86::VMOVUPSZ128mr_NOVLX:
case X86::VMOVAPSZ128mr_NOVLX:
case X86::VMOVUPDZ128mr:
case X86::VMOVAPDZ128mr:
case X86::VMOVDQA32Z128mr:
case X86::VMOVDQU32Z128mr:
case X86::VMOVDQA64Z128mr:
case X86::VMOVDQU64Z128mr:
case X86::VMOVDQU8Z128mr:
case X86::VMOVDQU16Z128mr:
MemBytes = 16;
return true;
case X86::VMOVUPSYmr:
case X86::VMOVAPSYmr:
case X86::VMOVUPDYmr:
case X86::VMOVAPDYmr:
case X86::VMOVDQUYmr:
case X86::VMOVDQAYmr:
case X86::VMOVUPSZ256mr:
case X86::VMOVAPSZ256mr:
case X86::VMOVUPSZ256mr_NOVLX:
case X86::VMOVAPSZ256mr_NOVLX:
case X86::VMOVUPDZ256mr:
case X86::VMOVAPDZ256mr:
case X86::VMOVDQU8Z256mr:
case X86::VMOVDQU16Z256mr:
case X86::VMOVDQA32Z256mr:
case X86::VMOVDQU32Z256mr:
case X86::VMOVDQA64Z256mr:
case X86::VMOVDQU64Z256mr:
MemBytes = 32;
return true;
case X86::VMOVUPSZmr:
case X86::VMOVAPSZmr:
case X86::VMOVUPDZmr:
case X86::VMOVAPDZmr:
case X86::VMOVDQU8Zmr:
case X86::VMOVDQU16Zmr:
case X86::VMOVDQA32Zmr:
case X86::VMOVDQU32Zmr:
case X86::VMOVDQA64Zmr:
case X86::VMOVDQU64Zmr:
MemBytes = 64;
return true;
}
}
unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
unsigned Dummy;
return X86InstrInfo::isLoadFromStackSlot(MI, FrameIndex, Dummy);
}
unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex,
unsigned &MemBytes) const {
if (isFrameLoadOpcode(MI.getOpcode(), MemBytes))
if (MI.getOperand(0).getSubReg() == 0 && isFrameOperand(MI, 1, FrameIndex))
return MI.getOperand(0).getReg();
return 0;
}
unsigned X86InstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI,
int &FrameIndex) const {
unsigned Dummy;
if (isFrameLoadOpcode(MI.getOpcode(), Dummy)) {
unsigned Reg;
if ((Reg = isLoadFromStackSlot(MI, FrameIndex)))
return Reg;
// Check for post-frame index elimination operations
SmallVector<const MachineMemOperand *, 1> Accesses;
if (hasLoadFromStackSlot(MI, Accesses)) {
FrameIndex =
cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
->getFrameIndex();
return 1;
}
}
return 0;
}
unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
unsigned Dummy;
return X86InstrInfo::isStoreToStackSlot(MI, FrameIndex, Dummy);
}
unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex,
unsigned &MemBytes) const {
if (isFrameStoreOpcode(MI.getOpcode(), MemBytes))
if (MI.getOperand(X86::AddrNumOperands).getSubReg() == 0 &&
isFrameOperand(MI, 0, FrameIndex))
return MI.getOperand(X86::AddrNumOperands).getReg();
return 0;
}
unsigned X86InstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI,
int &FrameIndex) const {
unsigned Dummy;
if (isFrameStoreOpcode(MI.getOpcode(), Dummy)) {
unsigned Reg;
if ((Reg = isStoreToStackSlot(MI, FrameIndex)))
return Reg;
// Check for post-frame index elimination operations
SmallVector<const MachineMemOperand *, 1> Accesses;
if (hasStoreToStackSlot(MI, Accesses)) {
FrameIndex =
cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
->getFrameIndex();
return 1;
}
}
return 0;
}
/// Return true if the register is a PIC base, e.g. defined by X86::MOVPC32r.
static bool regIsPICBase(unsigned BaseReg, const MachineRegisterInfo &MRI) {
// Don't waste compile time scanning use-def chains of physregs.
if (!TargetRegisterInfo::isVirtualRegister(BaseReg))
return false;
bool isPICBase = false;
for (MachineRegisterInfo::def_instr_iterator I = MRI.def_instr_begin(BaseReg),
E = MRI.def_instr_end(); I != E; ++I) {
MachineInstr *DefMI = &*I;
if (DefMI->getOpcode() != X86::MOVPC32r)
return false;
assert(!isPICBase && "More than one PIC base?");
isPICBase = true;
}
return isPICBase;
}
bool X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
AliasAnalysis *AA) const {
switch (MI.getOpcode()) {
default: break;
case X86::MOV8rm:
case X86::MOV8rm_NOREX:
case X86::MOV16rm:
case X86::MOV32rm:
case X86::MOV64rm:
case X86::MOVSSrm:
case X86::MOVSSrm_alt:
case X86::MOVSDrm:
case X86::MOVSDrm_alt:
case X86::MOVAPSrm:
case X86::MOVUPSrm:
case X86::MOVAPDrm:
case X86::MOVUPDrm:
case X86::MOVDQArm:
case X86::MOVDQUrm:
case X86::VMOVSSrm:
case X86::VMOVSSrm_alt:
case X86::VMOVSDrm:
case X86::VMOVSDrm_alt:
case X86::VMOVAPSrm:
case X86::VMOVUPSrm:
case X86::VMOVAPDrm:
case X86::VMOVUPDrm:
case X86::VMOVDQArm:
case X86::VMOVDQUrm:
case X86::VMOVAPSYrm:
case X86::VMOVUPSYrm:
case X86::VMOVAPDYrm:
case X86::VMOVUPDYrm:
case X86::VMOVDQAYrm:
case X86::VMOVDQUYrm:
case X86::MMX_MOVD64rm:
case X86::MMX_MOVQ64rm:
// AVX-512
case X86::VMOVSSZrm:
case X86::VMOVSSZrm_alt:
case X86::VMOVSDZrm:
case X86::VMOVSDZrm_alt:
case X86::VMOVAPDZ128rm:
case X86::VMOVAPDZ256rm:
case X86::VMOVAPDZrm:
case X86::VMOVAPSZ128rm:
case X86::VMOVAPSZ256rm:
case X86::VMOVAPSZ128rm_NOVLX:
case X86::VMOVAPSZ256rm_NOVLX:
case X86::VMOVAPSZrm:
case X86::VMOVDQA32Z128rm:
case X86::VMOVDQA32Z256rm:
case X86::VMOVDQA32Zrm:
case X86::VMOVDQA64Z128rm:
case X86::VMOVDQA64Z256rm:
case X86::VMOVDQA64Zrm:
case X86::VMOVDQU16Z128rm:
case X86::VMOVDQU16Z256rm:
case X86::VMOVDQU16Zrm:
case X86::VMOVDQU32Z128rm:
case X86::VMOVDQU32Z256rm:
case X86::VMOVDQU32Zrm:
case X86::VMOVDQU64Z128rm:
case X86::VMOVDQU64Z256rm:
case X86::VMOVDQU64Zrm:
case X86::VMOVDQU8Z128rm:
case X86::VMOVDQU8Z256rm:
case X86::VMOVDQU8Zrm:
case X86::VMOVUPDZ128rm:
case X86::VMOVUPDZ256rm:
case X86::VMOVUPDZrm:
case X86::VMOVUPSZ128rm:
case X86::VMOVUPSZ256rm:
case X86::VMOVUPSZ128rm_NOVLX:
case X86::VMOVUPSZ256rm_NOVLX:
case X86::VMOVUPSZrm: {
// Loads from constant pools are trivially rematerializable.
if (MI.getOperand(1 + X86::AddrBaseReg).isReg() &&
MI.getOperand(1 + X86::AddrScaleAmt).isImm() &&
MI.getOperand(1 + X86::AddrIndexReg).isReg() &&
MI.getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
MI.isDereferenceableInvariantLoad(AA)) {
unsigned BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg();
if (BaseReg == 0 || BaseReg == X86::RIP)
return true;
// Allow re-materialization of PIC load.
if (!ReMatPICStubLoad && MI.getOperand(1 + X86::AddrDisp).isGlobal())
return false;
const MachineFunction &MF = *MI.getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
return regIsPICBase(BaseReg, MRI);
}
return false;
}
case X86::LEA32r:
case X86::LEA64r: {
if (MI.getOperand(1 + X86::AddrScaleAmt).isImm() &&
MI.getOperand(1 + X86::AddrIndexReg).isReg() &&
MI.getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
!MI.getOperand(1 + X86::AddrDisp).isReg()) {
// lea fi#, lea GV, etc. are all rematerializable.
if (!MI.getOperand(1 + X86::AddrBaseReg).isReg())
return true;
unsigned BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg();
if (BaseReg == 0)
return true;
// Allow re-materialization of lea PICBase + x.
const MachineFunction &MF = *MI.getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
return regIsPICBase(BaseReg, MRI);
}
return false;
}
}
// All other instructions marked M_REMATERIALIZABLE are always trivially
// rematerializable.
return true;
}
void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
unsigned DestReg, unsigned SubIdx,
const MachineInstr &Orig,
const TargetRegisterInfo &TRI) const {
bool ClobbersEFLAGS = Orig.modifiesRegister(X86::EFLAGS, &TRI);
if (ClobbersEFLAGS && !isSafeToClobberEFLAGS(MBB, I)) {
// The instruction clobbers EFLAGS. Re-materialize as MOV32ri to avoid side
// effects.
int Value;
switch (Orig.getOpcode()) {
case X86::MOV32r0: Value = 0; break;
case X86::MOV32r1: Value = 1; break;
case X86::MOV32r_1: Value = -1; break;
default:
llvm_unreachable("Unexpected instruction!");
}
const DebugLoc &DL = Orig.getDebugLoc();
BuildMI(MBB, I, DL, get(X86::MOV32ri))
.add(Orig.getOperand(0))
.addImm(Value);
} else {
MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
MBB.insert(I, MI);
}
MachineInstr &NewMI = *std::prev(I);
NewMI.substituteRegister(Orig.getOperand(0).getReg(), DestReg, SubIdx, TRI);
}
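// Illustrative example: MOV32r0 is normally lowered to "xor %eax, %eax",
// which clobbers EFLAGS. If EFLAGS is live at the rematerialization point,
// the code above instead emits
//   $eax = MOV32ri 0
// which is longer to encode but has no flag side effects.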
/// True if MI has a condition code def, e.g. EFLAGS, that is not marked dead.
bool X86InstrInfo::hasLiveCondCodeDef(MachineInstr &MI) const {
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
if (MO.isReg() && MO.isDef() &&
MO.getReg() == X86::EFLAGS && !MO.isDead()) {
return true;
}
}
return false;
}
/// Return the shift count for a machine operand, truncated to the number of
/// bits the hardware actually uses.
inline static unsigned getTruncatedShiftCount(const MachineInstr &MI,
unsigned ShiftAmtOperandIdx) {
// The shift count is six bits with the REX.W prefix and five bits without.
unsigned ShiftCountMask = (MI.getDesc().TSFlags & X86II::REX_W) ? 63 : 31;
unsigned Imm = MI.getOperand(ShiftAmtOperandIdx).getImm();
return Imm & ShiftCountMask;
}
/// Check whether the given shift count can be represented as the scale
/// factor of a LEA instruction.
inline static bool isTruncatedShiftCountForLEA(unsigned ShAmt) {
// Left shift instructions can be transformed into load-effective-address
// instructions if we can encode them appropriately.
// A LEA instruction utilizes a SIB byte to encode its scale factor.
// The SIB.scale field is two bits wide which means that we can encode any
// shift amount less than 4.
return ShAmt < 4 && ShAmt > 0;
}
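// Worked example: SIB.scale holds the log2 of the scale factor, so the
// encodable factors are 1, 2, 4 and 8, i.e. shift amounts 1-3:
//   shl $3, %reg  ==>  lea (,%reg,8), %reg
// A shift by 4 would need a scale of 16, which does not fit in two bits,
// and a shift by 0 is already handled elsewhere.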
bool X86InstrInfo::classifyLEAReg(MachineInstr &MI, const MachineOperand &Src,
unsigned Opc, bool AllowSP, unsigned &NewSrc,
bool &isKill, MachineOperand &ImplicitOp,
LiveVariables *LV) const {
MachineFunction &MF = *MI.getParent()->getParent();
const TargetRegisterClass *RC;
if (AllowSP) {
RC = Opc != X86::LEA32r ? &X86::GR64RegClass : &X86::GR32RegClass;
} else {
RC = Opc != X86::LEA32r ?
&X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass;
}
unsigned SrcReg = Src.getReg();
// For both LEA64 and LEA32 the register already has essentially the right
// type (32-bit or 64-bit); we may just need to forbid SP.
if (Opc != X86::LEA64_32r) {
NewSrc = SrcReg;
isKill = Src.isKill();
assert(!Src.isUndef() && "Undef op doesn't need optimization");
if (TargetRegisterInfo::isVirtualRegister(NewSrc) &&
!MF.getRegInfo().constrainRegClass(NewSrc, RC))
return false;
return true;
}
// This is an LEA64_32r, and the incoming registers are 32-bit. One way or
// another we need to feed 64-bit registers into the final MI.
if (TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
ImplicitOp = Src;
ImplicitOp.setImplicit();
NewSrc = getX86SubSuperRegister(Src.getReg(), 64);
isKill = Src.isKill();
assert(!Src.isUndef() && "Undef op doesn't need optimization");
} else {
// The virtual register is of the wrong class; we have to create a temporary
// 64-bit vreg to feed into the LEA.
NewSrc = MF.getRegInfo().createVirtualRegister(RC);
MachineInstr *Copy =
BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(TargetOpcode::COPY))
.addReg(NewSrc, RegState::Define | RegState::Undef, X86::sub_32bit)
.add(Src);
// The temporary is obviously going to be dead after we're done with it.
isKill = true;
if (LV)
LV->replaceKillInstruction(SrcReg, MI, *Copy);
}
// We've set all the parameters without issue.
return true;
}
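// Illustrative example (hypothetical vregs): for LEA64_32r the address
// operands must be 64-bit, so a 32-bit virtual source %1 is widened with
//   undef %2.sub_32bit:gr64_nosp = COPY %1
// and %2 feeds the LEA; the copy's result dies at the LEA, hence
// isKill = true in the code above.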
MachineInstr *X86InstrInfo::convertToThreeAddressWithLEA(
unsigned MIOpc, MachineFunction::iterator &MFI, MachineInstr &MI,
LiveVariables *LV, bool Is8BitOp) const {
// We handle 8-bit adds and various 16-bit opcodes in the switch below.
MachineRegisterInfo &RegInfo = MFI->getParent()->getRegInfo();
assert((Is8BitOp || RegInfo.getTargetRegisterInfo()->getRegSizeInBits(
*RegInfo.getRegClass(MI.getOperand(0).getReg())) == 16) &&
"Unexpected type for LEA transform");
// TODO: For a 32-bit target, we need to adjust the LEA variables with
// something like this:
// Opcode = X86::LEA32r;
// InRegLEA = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
// OutRegLEA =
// Is8BitOp ? RegInfo.createVirtualRegister(&X86::GR32ABCD_RegClass)
// : RegInfo.createVirtualRegister(&X86::GR32RegClass);
if (!Subtarget.is64Bit())
return nullptr;
unsigned Opcode = X86::LEA64_32r;
unsigned InRegLEA = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
unsigned OutRegLEA = RegInfo.createVirtualRegister(&X86::GR32RegClass);
// Build and insert into an implicit UNDEF value. This is OK because
// we will be shifting and then extracting the lower 8/16-bits.
// This has the potential to cause a partial register stall, e.g.:
// movw (%rbp,%rcx,2), %dx
// leal -65(%rdx), %esi
// But testing has shown this *does* help performance in 64-bit mode (at
// least on modern x86 machines).
MachineBasicBlock::iterator MBBI = MI.getIterator();
unsigned Dest = MI.getOperand(0).getReg();
unsigned Src = MI.getOperand(1).getReg();
bool IsDead = MI.getOperand(0).isDead();
bool IsKill = MI.getOperand(1).isKill();
unsigned SubReg = Is8BitOp ? X86::sub_8bit : X86::sub_16bit;
assert(!MI.getOperand(1).isUndef() && "Undef op doesn't need optimization");
BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(X86::IMPLICIT_DEF), InRegLEA);
MachineInstr *InsMI =
BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY))
.addReg(InRegLEA, RegState::Define, SubReg)
.addReg(Src, getKillRegState(IsKill));
MachineInstrBuilder MIB =
BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(Opcode), OutRegLEA);
switch (MIOpc) {
default: llvm_unreachable("Unreachable!");
case X86::SHL8ri:
case X86::SHL16ri: {
unsigned ShAmt = MI.getOperand(2).getImm();
MIB.addReg(0).addImm(1ULL << ShAmt)
.addReg(InRegLEA, RegState::Kill).addImm(0).addReg(0);
break;
}
case X86::INC8r:
case X86::INC16r:
addRegOffset(MIB, InRegLEA, true, 1);
break;
case X86::DEC8r:
case X86::DEC16r:
addRegOffset(MIB, InRegLEA, true, -1);
break;
case X86::ADD8ri:
case X86::ADD8ri_DB:
case X86::ADD16ri:
case X86::ADD16ri8:
case X86::ADD16ri_DB:
case X86::ADD16ri8_DB:
addRegOffset(MIB, InRegLEA, true, MI.getOperand(2).getImm());
break;
case X86::ADD8rr:
case X86::ADD8rr_DB:
case X86::ADD16rr:
case X86::ADD16rr_DB: {
unsigned Src2 = MI.getOperand(2).getReg();
bool IsKill2 = MI.getOperand(2).isKill();
assert(!MI.getOperand(2).isUndef() && "Undef op doesn't need optimization");
unsigned InRegLEA2 = 0;
MachineInstr *InsMI2 = nullptr;
if (Src == Src2) {
// ADD8rr/ADD16rr killed %reg1028, %reg1028: identical operands, so we
// need just a single insert_subreg.
addRegReg(MIB, InRegLEA, true, InRegLEA, false);
} else {
if (Subtarget.is64Bit())
InRegLEA2 = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
else
InRegLEA2 = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
// Build and insert into an implicit UNDEF value. This is OK because
// we will be shifting and then extracting the lower 8/16-bits.
BuildMI(*MFI, &*MIB, MI.getDebugLoc(), get(X86::IMPLICIT_DEF), InRegLEA2);
InsMI2 = BuildMI(*MFI, &*MIB, MI.getDebugLoc(), get(TargetOpcode::COPY))
.addReg(InRegLEA2, RegState::Define, SubReg)
.addReg(Src2, getKillRegState(IsKill2));
addRegReg(MIB, InRegLEA, true, InRegLEA2, true);
}
if (LV && IsKill2 && InsMI2)
LV->replaceKillInstruction(Src2, MI, *InsMI2);
break;
}
}
MachineInstr *NewMI = MIB;
MachineInstr *ExtMI =
BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY))
.addReg(Dest, RegState::Define | getDeadRegState(IsDead))
.addReg(OutRegLEA, RegState::Kill, SubReg);
if (LV) {
// Update live variables.
LV->getVarInfo(InRegLEA).Kills.push_back(NewMI);
LV->getVarInfo(OutRegLEA).Kills.push_back(ExtMI);
if (IsKill)
LV->replaceKillInstruction(Src, MI, *InsMI);
if (IsDead)
LV->replaceKillInstruction(Dest, MI, *ExtMI);
}
return ExtMI;
}
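// Illustrative end-to-end sketch (hypothetical registers) for a 16-bit add
//   $dx = ADD16rr $dx, $cx
// as produced by the path above, roughly:
//   %1:gr64_nosp = IMPLICIT_DEF
//   %1.sub_16bit = COPY $dx
//   %2:gr64_nosp = IMPLICIT_DEF
//   %2.sub_16bit = COPY $cx
//   %3:gr32 = LEA64_32r %1, 1, %2, 0, $noreg
//   $dx = COPY %3.sub_16bit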
/// This method must be implemented by targets that
/// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
/// may be able to convert a two-address instruction into a true
/// three-address instruction on demand. This allows the X86 target (for
/// example) to convert ADD and SHL instructions into LEA instructions if they
/// would require register copies due to two-addressness.
///
/// This method returns a null pointer if the transformation cannot be
/// performed, otherwise it returns the new instruction.
///
MachineInstr *
X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
MachineInstr &MI, LiveVariables *LV) const {
// The following opcodes also set the condition code register(s). Only
// convert them to an equivalent LEA if the condition code register defs
// are dead!
if (hasLiveCondCodeDef(MI))
return nullptr;
MachineFunction &MF = *MI.getParent()->getParent();
// All input instructions are two-address instructions. Get the known operands.
const MachineOperand &Dest = MI.getOperand(0);
const MachineOperand &Src = MI.getOperand(1);
// Ideally, operations with undef should be folded before we get here, but we
// can't guarantee it. Bail out because optimizing undefs is a waste of time.
// Without this, we have to forward undef state to new register operands to
// avoid machine verifier errors.
if (Src.isUndef())
return nullptr;
if (MI.getNumOperands() > 2)
if (MI.getOperand(2).isReg() && MI.getOperand(2).isUndef())
return nullptr;
MachineInstr *NewMI = nullptr;
bool Is64Bit = Subtarget.is64Bit();
bool Is8BitOp = false;
unsigned MIOpc = MI.getOpcode();
switch (MIOpc) {
default: llvm_unreachable("Unreachable!");
case X86::SHL64ri: {
assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
unsigned ShAmt = getTruncatedShiftCount(MI, 2);
if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr;
// LEA can't handle RSP.
if (TargetRegisterInfo::isVirtualRegister(Src.getReg()) &&
!MF.getRegInfo().constrainRegClass(Src.getReg(),
&X86::GR64_NOSPRegClass))
return nullptr;
NewMI = BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r))
.add(Dest)
.addReg(0)
.addImm(1ULL << ShAmt)
.add(Src)
.addImm(0)
.addReg(0);
break;
}
case X86::SHL32ri: {
assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
unsigned ShAmt = getTruncatedShiftCount(MI, 2);
if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr;
unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
// LEA can't handle ESP.
bool isKill;
unsigned SrcReg;
MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false,
SrcReg, isKill, ImplicitOp, LV))
return nullptr;
MachineInstrBuilder MIB =
BuildMI(MF, MI.getDebugLoc(), get(Opc))
.add(Dest)
.addReg(0)
.addImm(1ULL << ShAmt)
.addReg(SrcReg, getKillRegState(isKill))
.addImm(0)
.addReg(0);
if (ImplicitOp.getReg() != 0)
MIB.add(ImplicitOp);
NewMI = MIB;
break;
}
case X86::SHL8ri:
Is8BitOp = true;
LLVM_FALLTHROUGH;
case X86::SHL16ri: {
assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
unsigned ShAmt = getTruncatedShiftCount(MI, 2);
if (!isTruncatedShiftCountForLEA(ShAmt))
return nullptr;
return convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV, Is8BitOp);
}
case X86::INC64r:
case X86::INC32r: {
assert(MI.getNumOperands() >= 2 && "Unknown inc instruction!");
unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r :
(Is64Bit ? X86::LEA64_32r : X86::LEA32r);
bool isKill;
unsigned SrcReg;
MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false, SrcReg, isKill,
ImplicitOp, LV))
return nullptr;
MachineInstrBuilder MIB =
BuildMI(MF, MI.getDebugLoc(), get(Opc))
.add(Dest)
.addReg(SrcReg, getKillRegState(isKill));
if (ImplicitOp.getReg() != 0)
MIB.add(ImplicitOp);
NewMI = addOffset(MIB, 1);
break;
}
case X86::DEC64r:
case X86::DEC32r: {
assert(MI.getNumOperands() >= 2 && "Unknown dec instruction!");
unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r
: (Is64Bit ? X86::LEA64_32r : X86::LEA32r);
bool isKill;
unsigned SrcReg;
MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false, SrcReg, isKill,
ImplicitOp, LV))
return nullptr;
MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
.add(Dest)
.addReg(SrcReg, getKillRegState(isKill));
if (ImplicitOp.getReg() != 0)
MIB.add(ImplicitOp);
NewMI = addOffset(MIB, -1);
break;
}
case X86::DEC8r:
case X86::INC8r:
Is8BitOp = true;
LLVM_FALLTHROUGH;
case X86::DEC16r:
case X86::INC16r:
return convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV, Is8BitOp);
case X86::ADD64rr:
case X86::ADD64rr_DB:
case X86::ADD32rr:
case X86::ADD32rr_DB: {
assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
unsigned Opc;
if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_DB)
Opc = X86::LEA64r;
else
Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
bool isKill;
unsigned SrcReg;
MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true,
SrcReg, isKill, ImplicitOp, LV))
return nullptr;
const MachineOperand &Src2 = MI.getOperand(2);
bool isKill2;
unsigned SrcReg2;
MachineOperand ImplicitOp2 = MachineOperand::CreateReg(0, false);
if (!classifyLEAReg(MI, Src2, Opc, /*AllowSP=*/ false,
SrcReg2, isKill2, ImplicitOp2, LV))
return nullptr;
MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc)).add(Dest);
if (ImplicitOp.getReg() != 0)
MIB.add(ImplicitOp);
if (ImplicitOp2.getReg() != 0)
MIB.add(ImplicitOp2);
NewMI = addRegReg(MIB, SrcReg, isKill, SrcReg2, isKill2);
if (LV && Src2.isKill())
LV->replaceKillInstruction(SrcReg2, MI, *NewMI);
break;
}
case X86::ADD8rr:
case X86::ADD8rr_DB:
Is8BitOp = true;
LLVM_FALLTHROUGH;
case X86::ADD16rr:
case X86::ADD16rr_DB:
return convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV, Is8BitOp);
case X86::ADD64ri32:
case X86::ADD64ri8:
case X86::ADD64ri32_DB:
case X86::ADD64ri8_DB:
assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
NewMI = addOffset(
BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r)).add(Dest).add(Src),
MI.getOperand(2));
break;
case X86::ADD32ri:
case X86::ADD32ri8:
case X86::ADD32ri_DB:
case X86::ADD32ri8_DB: {
assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
bool isKill;
unsigned SrcReg;
MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true,
SrcReg, isKill, ImplicitOp, LV))
return nullptr;
MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
.add(Dest)
.addReg(SrcReg, getKillRegState(isKill));
if (ImplicitOp.getReg() != 0)
MIB.add(ImplicitOp);
NewMI = addOffset(MIB, MI.getOperand(2));
break;
}
case X86::ADD8ri:
case X86::ADD8ri_DB:
Is8BitOp = true;
LLVM_FALLTHROUGH;
case X86::ADD16ri:
case X86::ADD16ri8:
case X86::ADD16ri_DB:
case X86::ADD16ri8_DB:
return convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV, Is8BitOp);
case X86::SUB8ri:
case X86::SUB16ri8:
case X86::SUB16ri:
/// FIXME: Support these similar to ADD8ri/ADD16ri*.
return nullptr;
case X86::SUB32ri8:
case X86::SUB32ri: {
if (!MI.getOperand(2).isImm())
return nullptr;
int64_t Imm = MI.getOperand(2).getImm();
if (!isInt<32>(-Imm))
return nullptr;
assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
bool isKill;
unsigned SrcReg;
MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true,
SrcReg, isKill, ImplicitOp, LV))
return nullptr;
MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
.add(Dest)
.addReg(SrcReg, getKillRegState(isKill));
if (ImplicitOp.getReg() != 0)
MIB.add(ImplicitOp);
NewMI = addOffset(MIB, -Imm);
break;
}
case X86::SUB64ri8:
case X86::SUB64ri32: {
if (!MI.getOperand(2).isImm())
return nullptr;
int64_t Imm = MI.getOperand(2).getImm();
if (!isInt<32>(-Imm))
return nullptr;
assert(MI.getNumOperands() >= 3 && "Unknown sub instruction!");
MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(),
get(X86::LEA64r)).add(Dest).add(Src);
NewMI = addOffset(MIB, -Imm);
break;
}
case X86::VMOVDQU8Z128rmk:
case X86::VMOVDQU8Z256rmk:
case X86::VMOVDQU8Zrmk:
case X86::VMOVDQU16Z128rmk:
case X86::VMOVDQU16Z256rmk:
case X86::VMOVDQU16Zrmk:
case X86::VMOVDQU32Z128rmk: case X86::VMOVDQA32Z128rmk:
case X86::VMOVDQU32Z256rmk: case X86::VMOVDQA32Z256rmk:
case X86::VMOVDQU32Zrmk: case X86::VMOVDQA32Zrmk:
case X86::VMOVDQU64Z128rmk: case X86::VMOVDQA64Z128rmk:
case X86::VMOVDQU64Z256rmk: case X86::VMOVDQA64Z256rmk:
case X86::VMOVDQU64Zrmk: case X86::VMOVDQA64Zrmk:
case X86::VMOVUPDZ128rmk: case X86::VMOVAPDZ128rmk:
case X86::VMOVUPDZ256rmk: case X86::VMOVAPDZ256rmk:
case X86::VMOVUPDZrmk: case X86::VMOVAPDZrmk:
case X86::VMOVUPSZ128rmk: case X86::VMOVAPSZ128rmk:
case X86::VMOVUPSZ256rmk: case X86::VMOVAPSZ256rmk:
case X86::VMOVUPSZrmk: case X86::VMOVAPSZrmk: {
unsigned Opc;
switch (MIOpc) {
default: llvm_unreachable("Unreachable!");
case X86::VMOVDQU8Z128rmk: Opc = X86::VPBLENDMBZ128rmk; break;
case X86::VMOVDQU8Z256rmk: Opc = X86::VPBLENDMBZ256rmk; break;
case X86::VMOVDQU8Zrmk: Opc = X86::VPBLENDMBZrmk; break;
case X86::VMOVDQU16Z128rmk: Opc = X86::VPBLENDMWZ128rmk; break;
case X86::VMOVDQU16Z256rmk: Opc = X86::VPBLENDMWZ256rmk; break;
case X86::VMOVDQU16Zrmk: Opc = X86::VPBLENDMWZrmk; break;
case X86::VMOVDQU32Z128rmk: Opc = X86::VPBLENDMDZ128rmk; break;
case X86::VMOVDQU32Z256rmk: Opc = X86::VPBLENDMDZ256rmk; break;
case X86::VMOVDQU32Zrmk: Opc = X86::VPBLENDMDZrmk; break;
case X86::VMOVDQU64Z128rmk: Opc = X86::VPBLENDMQZ128rmk; break;
case X86::VMOVDQU64Z256rmk: Opc = X86::VPBLENDMQZ256rmk; break;
case X86::VMOVDQU64Zrmk: Opc = X86::VPBLENDMQZrmk; break;
case X86::VMOVUPDZ128rmk: Opc = X86::VBLENDMPDZ128rmk; break;
case X86::VMOVUPDZ256rmk: Opc = X86::VBLENDMPDZ256rmk; break;
case X86::VMOVUPDZrmk: Opc = X86::VBLENDMPDZrmk; break;
case X86::VMOVUPSZ128rmk: Opc = X86::VBLENDMPSZ128rmk; break;
case X86::VMOVUPSZ256rmk: Opc = X86::VBLENDMPSZ256rmk; break;
case X86::VMOVUPSZrmk: Opc = X86::VBLENDMPSZrmk; break;
case X86::VMOVDQA32Z128rmk: Opc = X86::VPBLENDMDZ128rmk; break;
case X86::VMOVDQA32Z256rmk: Opc = X86::VPBLENDMDZ256rmk; break;
case X86::VMOVDQA32Zrmk: Opc = X86::VPBLENDMDZrmk; break;
case X86::VMOVDQA64Z128rmk: Opc = X86::VPBLENDMQZ128rmk; break;
case X86::VMOVDQA64Z256rmk: Opc = X86::VPBLENDMQZ256rmk; break;
case X86::VMOVDQA64Zrmk: Opc = X86::VPBLENDMQZrmk; break;
case X86::VMOVAPDZ128rmk: Opc = X86::VBLENDMPDZ128rmk; break;
case X86::VMOVAPDZ256rmk: Opc = X86::VBLENDMPDZ256rmk; break;
case X86::VMOVAPDZrmk: Opc = X86::VBLENDMPDZrmk; break;
case X86::VMOVAPSZ128rmk: Opc = X86::VBLENDMPSZ128rmk; break;
case X86::VMOVAPSZ256rmk: Opc = X86::VBLENDMPSZ256rmk; break;
case X86::VMOVAPSZrmk: Opc = X86::VBLENDMPSZrmk; break;
}
NewMI = BuildMI(MF, MI.getDebugLoc(), get(Opc))
.add(Dest)
.add(MI.getOperand(2))
.add(Src)
.add(MI.getOperand(3))
.add(MI.getOperand(4))
.add(MI.getOperand(5))
.add(MI.getOperand(6))
.add(MI.getOperand(7));
break;
}
case X86::VMOVDQU8Z128rrk:
case X86::VMOVDQU8Z256rrk:
case X86::VMOVDQU8Zrrk:
case X86::VMOVDQU16Z128rrk:
case X86::VMOVDQU16Z256rrk:
case X86::VMOVDQU16Zrrk:
case X86::VMOVDQU32Z128rrk: case X86::VMOVDQA32Z128rrk:
case X86::VMOVDQU32Z256rrk: case X86::VMOVDQA32Z256rrk:
case X86::VMOVDQU32Zrrk: case X86::VMOVDQA32Zrrk:
case X86::VMOVDQU64Z128rrk: case X86::VMOVDQA64Z128rrk:
case X86::VMOVDQU64Z256rrk: case X86::VMOVDQA64Z256rrk:
case X86::VMOVDQU64Zrrk: case X86::VMOVDQA64Zrrk:
case X86::VMOVUPDZ128rrk: case X86::VMOVAPDZ128rrk:
case X86::VMOVUPDZ256rrk: case X86::VMOVAPDZ256rrk:
case X86::VMOVUPDZrrk: case X86::VMOVAPDZrrk:
case X86::VMOVUPSZ128rrk: case X86::VMOVAPSZ128rrk:
case X86::VMOVUPSZ256rrk: case X86::VMOVAPSZ256rrk:
case X86::VMOVUPSZrrk: case X86::VMOVAPSZrrk: {
unsigned Opc;
switch (MIOpc) {
default: llvm_unreachable("Unreachable!");
case X86::VMOVDQU8Z128rrk: Opc = X86::VPBLENDMBZ128rrk; break;
case X86::VMOVDQU8Z256rrk: Opc = X86::VPBLENDMBZ256rrk; break;
case X86::VMOVDQU8Zrrk: Opc = X86::VPBLENDMBZrrk; break;
case X86::VMOVDQU16Z128rrk: Opc = X86::VPBLENDMWZ128rrk; break;
case X86::VMOVDQU16Z256rrk: Opc = X86::VPBLENDMWZ256rrk; break;
case X86::VMOVDQU16Zrrk: Opc = X86::VPBLENDMWZrrk; break;
case X86::VMOVDQU32Z128rrk: Opc = X86::VPBLENDMDZ128rrk; break;
case X86::VMOVDQU32Z256rrk: Opc = X86::VPBLENDMDZ256rrk; break;
case X86::VMOVDQU32Zrrk: Opc = X86::VPBLENDMDZrrk; break;
case X86::VMOVDQU64Z128rrk: Opc = X86::VPBLENDMQZ128rrk; break;
case X86::VMOVDQU64Z256rrk: Opc = X86::VPBLENDMQZ256rrk; break;
case X86::VMOVDQU64Zrrk: Opc = X86::VPBLENDMQZrrk; break;
case X86::VMOVUPDZ128rrk: Opc = X86::VBLENDMPDZ128rrk; break;
case X86::VMOVUPDZ256rrk: Opc = X86::VBLENDMPDZ256rrk; break;
case X86::VMOVUPDZrrk: Opc = X86::VBLENDMPDZrrk; break;
case X86::VMOVUPSZ128rrk: Opc = X86::VBLENDMPSZ128rrk; break;
case X86::VMOVUPSZ256rrk: Opc = X86::VBLENDMPSZ256rrk; break;
case X86::VMOVUPSZrrk: Opc = X86::VBLENDMPSZrrk; break;
case X86::VMOVDQA32Z128rrk: Opc = X86::VPBLENDMDZ128rrk; break;
case X86::VMOVDQA32Z256rrk: Opc = X86::VPBLENDMDZ256rrk; break;
case X86::VMOVDQA32Zrrk: Opc = X86::VPBLENDMDZrrk; break;
case X86::VMOVDQA64Z128rrk: Opc = X86::VPBLENDMQZ128rrk; break;
case X86::VMOVDQA64Z256rrk: Opc = X86::VPBLENDMQZ256rrk; break;
case X86::VMOVDQA64Zrrk: Opc = X86::VPBLENDMQZrrk; break;
case X86::VMOVAPDZ128rrk: Opc = X86::VBLENDMPDZ128rrk; break;
case X86::VMOVAPDZ256rrk: Opc = X86::VBLENDMPDZ256rrk; break;
case X86::VMOVAPDZrrk: Opc = X86::VBLENDMPDZrrk; break;
case X86::VMOVAPSZ128rrk: Opc = X86::VBLENDMPSZ128rrk; break;
case X86::VMOVAPSZ256rrk: Opc = X86::VBLENDMPSZ256rrk; break;
case X86::VMOVAPSZrrk: Opc = X86::VBLENDMPSZrrk; break;
}
NewMI = BuildMI(MF, MI.getDebugLoc(), get(Opc))
.add(Dest)
.add(MI.getOperand(2))
.add(Src)
.add(MI.getOperand(3));
break;
}
}
if (!NewMI) return nullptr;
if (LV) { // Update live variables
if (Src.isKill())
LV->replaceKillInstruction(Src.getReg(), MI, *NewMI);
if (Dest.isDead())
LV->replaceKillInstruction(Dest.getReg(), MI, *NewMI);
}
MFI->insert(MI.getIterator(), NewMI); // Insert the new inst
return NewMI;
}
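// Illustrative example (hypothetical vregs): the two-address
//   %2:gr32 = ADD32rr %0(tied), %1
// would force a copy of %0 if %0 is live afterwards; converting it to the
// three-address
//   %2:gr32 = LEA64_32r %0', 1, %1', 0, $noreg
// (where %0'/%1' are the 64-bit registers supplied by classifyLEAReg)
// avoids the copy and, unlike ADD, does not clobber EFLAGS.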
/// This determines which of the three possible cases of a three-source
/// commute the source indexes correspond to, taking into account any mask
/// operands. It always prevents commuting a passthru operand. Returns -1 if
/// the commute isn't possible.
/// Case 0 - Possible to commute the first and second operands.
/// Case 1 - Possible to commute the first and third operands.
/// Case 2 - Possible to commute the second and third operands.
static unsigned getThreeSrcCommuteCase(uint64_t TSFlags, unsigned SrcOpIdx1,
unsigned SrcOpIdx2) {
// Put the lowest index to SrcOpIdx1 to simplify the checks below.
if (SrcOpIdx1 > SrcOpIdx2)
std::swap(SrcOpIdx1, SrcOpIdx2);
unsigned Op1 = 1, Op2 = 2, Op3 = 3;
if (X86II::isKMasked(TSFlags)) {
Op2++;
Op3++;
}
if (SrcOpIdx1 == Op1 && SrcOpIdx2 == Op2)
return 0;
if (SrcOpIdx1 == Op1 && SrcOpIdx2 == Op3)
return 1;
if (SrcOpIdx1 == Op2 && SrcOpIdx2 == Op3)
return 2;
llvm_unreachable("Unknown three src commute case.");
}
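// Worked example: for an unmasked FMA the vector sources sit at operand
// indexes 1, 2 and 3, so commuting indexes 2 and 3 yields case 2. For a
// k-masked form such as VFMADD213PSZrk the k-mask sits at index 2, so the
// vector sources are at 1, 3 and 4, and commuting indexes 3 and 4 likewise
// yields case 2.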
unsigned X86InstrInfo::getFMA3OpcodeToCommuteOperands(
const MachineInstr &MI, unsigned SrcOpIdx1, unsigned SrcOpIdx2,
const X86InstrFMA3Group &FMA3Group) const {
unsigned Opc = MI.getOpcode();
// TODO: Commuting the 1st operand of FMA*_Int requires some additional
// analysis. The commute optimization is legal only if all users of FMA*_Int
// use only the lowest element of the FMA*_Int instruction. Such analysis is
// not implemented yet. So, just return 0 in that case.
// When such analysis becomes available, this will be the right place to
// call it.
assert(!(FMA3Group.isIntrinsic() && (SrcOpIdx1 == 1 || SrcOpIdx2 == 1)) &&
"Intrinsic instructions can't commute operand 1");
// Determine which case this commute is or if it can't be done.
unsigned Case = getThreeSrcCommuteCase(MI.getDesc().TSFlags, SrcOpIdx1,
SrcOpIdx2);
assert(Case < 3 && "Unexpected case number!");
// Define the FMA forms mapping array that helps to map input FMA form
// to output FMA form to preserve the operation semantics after
// commuting the operands.
const unsigned Form132Index = 0;
const unsigned Form213Index = 1;
const unsigned Form231Index = 2;
static const unsigned FormMapping[][3] = {
// 0: SrcOpIdx1 == 1 && SrcOpIdx2 == 2;
// FMA132 A, C, b; ==> FMA231 C, A, b;
// FMA213 B, A, c; ==> FMA213 A, B, c;
// FMA231 C, A, b; ==> FMA132 A, C, b;
{ Form231Index, Form213Index, Form132Index },
// 1: SrcOpIdx1 == 1 && SrcOpIdx2 == 3;
// FMA132 A, c, B; ==> FMA132 B, c, A;
// FMA213 B, a, C; ==> FMA231 C, a, B;
// FMA231 C, a, B; ==> FMA213 B, a, C;
{ Form132Index, Form231Index, Form213Index },
// 2: SrcOpIdx1 == 2 && SrcOpIdx2 == 3;
// FMA132 a, C, B; ==> FMA213 a, B, C;
// FMA213 b, A, C; ==> FMA132 b, C, A;
// FMA231 c, A, B; ==> FMA231 c, B, A;
{ Form213Index, Form132Index, Form231Index }
};
unsigned FMAForms[3];
FMAForms[0] = FMA3Group.get132Opcode();
FMAForms[1] = FMA3Group.get213Opcode();
FMAForms[2] = FMA3Group.get231Opcode();
unsigned FormIndex;
for (FormIndex = 0; FormIndex < 3; FormIndex++)
if (Opc == FMAForms[FormIndex])
break;
// Everything is ready, just adjust the FMA opcode and return it.
FormIndex = FormMapping[Case][FormIndex];
return FMAForms[FormIndex];
}
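// Worked example: commuting the last two sources of a 213 form (case 2 in
// the table above) yields the 132 form, because
//   FMA213 b, A, C:  b = A*b + C   ==>   FMA132 b, C, A:  b = b*A + C
// i.e. the same arithmetic with the roles of the A and C inputs swapped.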
static void commuteVPTERNLOG(MachineInstr &MI, unsigned SrcOpIdx1,
unsigned SrcOpIdx2) {
// Determine which case this commute is or if it can't be done.
unsigned Case = getThreeSrcCommuteCase(MI.getDesc().TSFlags, SrcOpIdx1,
SrcOpIdx2);
assert(Case < 3 && "Unexpected case value!");
// For each case we need to swap two pairs of bits in the final immediate.
static const uint8_t SwapMasks[3][4] = {
{ 0x04, 0x10, 0x08, 0x20 }, // Swap bits 2/4 and 3/5.
{ 0x02, 0x10, 0x08, 0x40 }, // Swap bits 1/4 and 3/6.
{ 0x02, 0x04, 0x20, 0x40 }, // Swap bits 1/2 and 5/6.
};
uint8_t Imm = MI.getOperand(MI.getNumOperands()-1).getImm();
// Clear out the bits we are swapping.
uint8_t NewImm = Imm & ~(SwapMasks[Case][0] | SwapMasks[Case][1] |
SwapMasks[Case][2] | SwapMasks[Case][3]);
// If the immediate had a bit of the pair set, then set the opposite bit.
if (Imm & SwapMasks[Case][0]) NewImm |= SwapMasks[Case][1];
if (Imm & SwapMasks[Case][1]) NewImm |= SwapMasks[Case][0];
if (Imm & SwapMasks[Case][2]) NewImm |= SwapMasks[Case][3];
if (Imm & SwapMasks[Case][3]) NewImm |= SwapMasks[Case][2];
MI.getOperand(MI.getNumOperands()-1).setImm(NewImm);
}
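// Worked example: the ternary-logic immediate is a truth table indexed by
// (Op1,Op2,Op3) bit triples. Take Imm = 0xCA, i.e. "Op1 ? Op2 : Op3".
// Commuting Op2 and Op3 (case 2) swaps bits 1/2 and 5/6, giving 0xAC,
// which is exactly the table for "Op1 ? Op3 : Op2".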
// Returns true if this is a VPERMI2 or VPERMT2 instruction that can be
// commuted.
static bool isCommutableVPERMV3Instruction(unsigned Opcode) {
#define VPERM_CASES(Suffix) \
case X86::VPERMI2##Suffix##128rr: case X86::VPERMT2##Suffix##128rr: \
case X86::VPERMI2##Suffix##256rr: case X86::VPERMT2##Suffix##256rr: \
case X86::VPERMI2##Suffix##rr: case X86::VPERMT2##Suffix##rr: \
case X86::VPERMI2##Suffix##128rm: case X86::VPERMT2##Suffix##128rm: \
case X86::VPERMI2##Suffix##256rm: case X86::VPERMT2##Suffix##256rm: \
case X86::VPERMI2##Suffix##rm: case X86::VPERMT2##Suffix##rm: \
case X86::VPERMI2##Suffix##128rrkz: case X86::VPERMT2##Suffix##128rrkz: \
case X86::VPERMI2##Suffix##256rrkz: case X86::VPERMT2##Suffix##256rrkz: \
case X86::VPERMI2##Suffix##rrkz: case X86::VPERMT2##Suffix##rrkz: \
case X86::VPERMI2##Suffix##128rmkz: case X86::VPERMT2##Suffix##128rmkz: \
case X86::VPERMI2##Suffix##256rmkz: case X86::VPERMT2##Suffix##256rmkz: \
case X86::VPERMI2##Suffix##rmkz: case X86::VPERMT2##Suffix##rmkz:
#define VPERM_CASES_BROADCAST(Suffix) \
VPERM_CASES(Suffix) \
case X86::VPERMI2##Suffix##128rmb: case X86::VPERMT2##Suffix##128rmb: \
case X86::VPERMI2##Suffix##256rmb: case X86::VPERMT2##Suffix##256rmb: \
case X86::VPERMI2##Suffix##rmb: case X86::VPERMT2##Suffix##rmb: \
case X86::VPERMI2##Suffix##128rmbkz: case X86::VPERMT2##Suffix##128rmbkz: \
case X86::VPERMI2##Suffix##256rmbkz: case X86::VPERMT2##Suffix##256rmbkz: \
case X86::VPERMI2##Suffix##rmbkz: case X86::VPERMT2##Suffix##rmbkz:
switch (Opcode) {
default: return false;
VPERM_CASES(B)
VPERM_CASES_BROADCAST(D)
VPERM_CASES_BROADCAST(PD)
VPERM_CASES_BROADCAST(PS)
VPERM_CASES_BROADCAST(Q)
VPERM_CASES(W)
return true;
}
#undef VPERM_CASES_BROADCAST
#undef VPERM_CASES
}
// Returns commuted opcode for VPERMI2 and VPERMT2 instructions by switching
// from the I opcode to the T opcode and vice versa.
static unsigned getCommutedVPERMV3Opcode(unsigned Opcode) {
#define VPERM_CASES(Orig, New) \
case X86::Orig##128rr: return X86::New##128rr; \
case X86::Orig##128rrkz: return X86::New##128rrkz; \
case X86::Orig##128rm: return X86::New##128rm; \
case X86::Orig##128rmkz: return X86::New##128rmkz; \
case X86::Orig##256rr: return X86::New##256rr; \
case X86::Orig##256rrkz: return X86::New##256rrkz; \
case X86::Orig##256rm: return X86::New##256rm; \
case X86::Orig##256rmkz: return X86::New##256rmkz; \
case X86::Orig##rr: return X86::New##rr; \
case X86::Orig##rrkz: return X86::New##rrkz; \
case X86::Orig##rm: return X86::New##rm; \
case X86::Orig##rmkz: return X86::New##rmkz;
#define VPERM_CASES_BROADCAST(Orig, New) \
VPERM_CASES(Orig, New) \
case X86::Orig##128rmb: return X86::New##128rmb; \
case X86::Orig##128rmbkz: return X86::New##128rmbkz; \
case X86::Orig##256rmb: return X86::New##256rmb; \
case X86::Orig##256rmbkz: return X86::New##256rmbkz; \
case X86::Orig##rmb: return X86::New##rmb; \
case X86::Orig##rmbkz: return X86::New##rmbkz;
switch (Opcode) {
VPERM_CASES(VPERMI2B, VPERMT2B)
VPERM_CASES_BROADCAST(VPERMI2D, VPERMT2D)
VPERM_CASES_BROADCAST(VPERMI2PD, VPERMT2PD)
VPERM_CASES_BROADCAST(VPERMI2PS, VPERMT2PS)
VPERM_CASES_BROADCAST(VPERMI2Q, VPERMT2Q)
VPERM_CASES(VPERMI2W, VPERMT2W)
VPERM_CASES(VPERMT2B, VPERMI2B)
VPERM_CASES_BROADCAST(VPERMT2D, VPERMI2D)
VPERM_CASES_BROADCAST(VPERMT2PD, VPERMI2PD)
VPERM_CASES_BROADCAST(VPERMT2PS, VPERMI2PS)
VPERM_CASES_BROADCAST(VPERMT2Q, VPERMI2Q)
VPERM_CASES(VPERMT2W, VPERMI2W)
}
llvm_unreachable("Unreachable!");
#undef VPERM_CASES_BROADCAST
#undef VPERM_CASES
}
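// Illustrative note: VPERMI2* consumes its tied operand as the index
// vector, while VPERMT2* consumes its tied operand as a table input.
// Swapping the index register with a table register therefore cannot be
// expressed by reordering operands alone; it is modeled by flipping the
// opcode, e.g. VPERMI2Drr <-> VPERMT2Drr, as in the mapping above.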
MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
unsigned OpIdx1,
unsigned OpIdx2) const {
auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
if (NewMI)
return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
return MI;
};
switch (MI.getOpcode()) {
case X86::SHRD16rri8: // A = SHRD16rri8 B, C, I -> A = SHLD16rri8 C, B, (16-I)
case X86::SHLD16rri8: // A = SHLD16rri8 B, C, I -> A = SHRD16rri8 C, B, (16-I)
case X86::SHRD32rri8: // A = SHRD32rri8 B, C, I -> A = SHLD32rri8 C, B, (32-I)
case X86::SHLD32rri8: // A = SHLD32rri8 B, C, I -> A = SHRD32rri8 C, B, (32-I)
case X86::SHRD64rri8: // A = SHRD64rri8 B, C, I -> A = SHLD64rri8 C, B, (64-I)
case X86::SHLD64rri8:{// A = SHLD64rri8 B, C, I -> A = SHRD64rri8 C, B, (64-I)
unsigned Opc;
unsigned Size;
switch (MI.getOpcode()) {
default: llvm_unreachable("Unreachable!");
case X86::SHRD16rri8: Size = 16; Opc = X86::SHLD16rri8; break;
case X86::SHLD16rri8: Size = 16; Opc = X86::SHRD16rri8; break;
case X86::SHRD32rri8: Size = 32; Opc = X86::SHLD32rri8; break;
case X86::SHLD32rri8: Size = 32; Opc = X86::SHRD32rri8; break;
case X86::SHRD64rri8: Size = 64; Opc = X86::SHLD64rri8; break;
case X86::SHLD64rri8: Size = 64; Opc = X86::SHRD64rri8; break;
}
unsigned Amt = MI.getOperand(3).getImm();
auto &WorkingMI = cloneIfNew(MI);
WorkingMI.setDesc(get(Opc));
WorkingMI.getOperand(3).setImm(Size - Amt);
return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
OpIdx1, OpIdx2);
}
case X86::PFSUBrr:
case X86::PFSUBRrr: {
// PFSUB x, y: x = x - y
// PFSUBR x, y: x = y - x
unsigned Opc =
(X86::PFSUBRrr == MI.getOpcode() ? X86::PFSUBrr : X86::PFSUBRrr);
auto &WorkingMI = cloneIfNew(MI);
WorkingMI.setDesc(get(Opc));
return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
OpIdx1, OpIdx2);
}
case X86::BLENDPDrri:
case X86::BLENDPSrri:
case X86::VBLENDPDrri:
case X86::VBLENDPSrri:
// If we're optimizing for size, try to use MOVSD/MOVSS.
if (MI.getParent()->getParent()->getFunction().hasOptSize()) {
unsigned Mask, Opc;
switch (MI.getOpcode()) {
default: llvm_unreachable("Unreachable!");
case X86::BLENDPDrri: Opc = X86::MOVSDrr; Mask = 0x03; break;
case X86::BLENDPSrri: Opc = X86::MOVSSrr; Mask = 0x0F; break;
case X86::VBLENDPDrri: Opc = X86::VMOVSDrr; Mask = 0x03; break;
case X86::VBLENDPSrri: Opc = X86::VMOVSSrr; Mask = 0x0F; break;
}
if ((MI.getOperand(3).getImm() ^ Mask) == 1) {
auto &WorkingMI = cloneIfNew(MI);
WorkingMI.setDesc(get(Opc));
WorkingMI.RemoveOperand(3);
return TargetInstrInfo::commuteInstructionImpl(WorkingMI,
/*NewMI=*/false,
OpIdx1, OpIdx2);
}
}
LLVM_FALLTHROUGH;
case X86::PBLENDWrri:
case X86::VBLENDPDYrri:
case X86::VBLENDPSYrri:
case X86::VPBLENDDrri:
case X86::VPBLENDWrri:
case X86::VPBLENDDYrri:
case X86::VPBLENDWYrri:{
int8_t Mask;
switch (MI.getOpcode()) {
default: llvm_unreachable("Unreachable!");
case X86::BLENDPDrri: Mask = (int8_t)0x03; break;
case X86::BLENDPSrri: Mask = (int8_t)0x0F; break;
case X86::PBLENDWrri: Mask = (int8_t)0xFF; break;
case X86::VBLENDPDrri: Mask = (int8_t)0x03; break;
case X86::VBLENDPSrri: Mask = (int8_t)0x0F; break;
case X86::VBLENDPDYrri: Mask = (int8_t)0x0F; break;
case X86::VBLENDPSYrri: Mask = (int8_t)0xFF; break;
case X86::VPBLENDDrri: Mask = (int8_t)0x0F; break;
case X86::VPBLENDWrri: Mask = (int8_t)0xFF; break;
case X86::VPBLENDDYrri: Mask = (int8_t)0xFF; break;
case X86::VPBLENDWYrri: Mask = (int8_t)0xFF; break;
}
// Only the least significant bits of Imm are used.
// Using int8_t to ensure it will be sign extended to the int64_t that
// setImm takes in order to match isel behavior.
int8_t Imm = MI.getOperand(3).getImm() & Mask;
auto &WorkingMI = cloneIfNew(MI);
WorkingMI.getOperand(3).setImm(Mask ^ Imm);
return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
OpIdx1, OpIdx2);
}
case X86::INSERTPSrr:
case X86::VINSERTPSrr:
case X86::VINSERTPSZrr: {
unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm();
unsigned ZMask = Imm & 15;
unsigned DstIdx = (Imm >> 4) & 3;
unsigned SrcIdx = (Imm >> 6) & 3;
// We can commute insertps if we zero 2 of the elements, the insertion is
// "inline" and we don't override the insertion with a zero.
if (DstIdx == SrcIdx && (ZMask & (1 << DstIdx)) == 0 &&
countPopulation(ZMask) == 2) {
unsigned AltIdx = findFirstSet((ZMask | (1 << DstIdx)) ^ 15);
assert(AltIdx < 4 && "Illegal insertion index");
unsigned AltImm = (AltIdx << 6) | (AltIdx << 4) | ZMask;
auto &WorkingMI = cloneIfNew(MI);
WorkingMI.getOperand(MI.getNumOperands() - 1).setImm(AltImm);
return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
OpIdx1, OpIdx2);
}
return nullptr;
}
case X86::MOVSDrr:
case X86::MOVSSrr:
case X86::VMOVSDrr:
case X86::VMOVSSrr:{
// On SSE41 or later we can commute a MOVSS/MOVSD to a BLENDPS/BLENDPD.
if (Subtarget.hasSSE41()) {
unsigned Mask, Opc;
switch (MI.getOpcode()) {
default: llvm_unreachable("Unreachable!");
case X86::MOVSDrr: Opc = X86::BLENDPDrri; Mask = 0x02; break;
case X86::MOVSSrr: Opc = X86::BLENDPSrri; Mask = 0x0E; break;
case X86::VMOVSDrr: Opc = X86::VBLENDPDrri; Mask = 0x02; break;
case X86::VMOVSSrr: Opc = X86::VBLENDPSrri; Mask = 0x0E; break;
}
auto &WorkingMI = cloneIfNew(MI);
WorkingMI.setDesc(get(Opc));
WorkingMI.addOperand(MachineOperand::CreateImm(Mask));
return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
OpIdx1, OpIdx2);
}
// Convert to SHUFPD.
assert(MI.getOpcode() == X86::MOVSDrr &&
"Can only commute MOVSDrr without SSE4.1");
auto &WorkingMI = cloneIfNew(MI);
WorkingMI.setDesc(get(X86::SHUFPDrri));
WorkingMI.addOperand(MachineOperand::CreateImm(0x02));
return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
OpIdx1, OpIdx2);
}
case X86::SHUFPDrri: {
// Commute to MOVSD.
assert(MI.getOperand(3).getImm() == 0x02 && "Unexpected immediate!");
auto &WorkingMI = cloneIfNew(MI);
WorkingMI.setDesc(get(X86::MOVSDrr));
WorkingMI.RemoveOperand(3);
return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
OpIdx1, OpIdx2);
}
case X86::PCLMULQDQrr:
case X86::VPCLMULQDQrr:
case X86::VPCLMULQDQYrr:
case X86::VPCLMULQDQZrr:
case X86::VPCLMULQDQZ128rr:
case X86::VPCLMULQDQZ256rr: {
// SRC1 64bits = Imm[0] ? SRC1[127:64] : SRC1[63:0]
// SRC2 64bits = Imm[4] ? SRC2[127:64] : SRC2[63:0]
unsigned Imm = MI.getOperand(3).getImm();
unsigned Src1Hi = Imm & 0x01;
unsigned Src2Hi = Imm & 0x10;
auto &WorkingMI = cloneIfNew(MI);
WorkingMI.getOperand(3).setImm((Src1Hi << 4) | (Src2Hi >> 4));
return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
OpIdx1, OpIdx2);
}
case X86::VPCMPBZ128rri: case X86::VPCMPUBZ128rri:
case X86::VPCMPBZ256rri: case X86::VPCMPUBZ256rri:
case X86::VPCMPBZrri: case X86::VPCMPUBZrri:
case X86::VPCMPDZ128rri: case X86::VPCMPUDZ128rri:
case X86::VPCMPDZ256rri: case X86::VPCMPUDZ256rri:
case X86::VPCMPDZrri: case X86::VPCMPUDZrri:
case X86::VPCMPQZ128rri: case X86::VPCMPUQZ128rri:
case X86::VPCMPQZ256rri: case X86::VPCMPUQZ256rri:
case X86::VPCMPQZrri: case X86::VPCMPUQZrri:
case X86::VPCMPWZ128rri: case X86::VPCMPUWZ128rri:
case X86::VPCMPWZ256rri: case X86::VPCMPUWZ256rri:
case X86::VPCMPWZrri: case X86::VPCMPUWZrri:
case X86::VPCMPBZ128rrik: case X86::VPCMPUBZ128rrik:
case X86::VPCMPBZ256rrik: case X86::VPCMPUBZ256rrik:
case X86::VPCMPBZrrik: case X86::VPCMPUBZrrik:
case X86::VPCMPDZ128rrik: case X86::VPCMPUDZ128rrik:
case X86::VPCMPDZ256rrik: case X86::VPCMPUDZ256rrik:
case X86::VPCMPDZrrik: case X86::VPCMPUDZrrik:
case X86::VPCMPQZ128rrik: case X86::VPCMPUQZ128rrik:
case X86::VPCMPQZ256rrik: case X86::VPCMPUQZ256rrik:
case X86::VPCMPQZrrik: case X86::VPCMPUQZrrik:
case X86::VPCMPWZ128rrik: case X86::VPCMPUWZ128rrik:
case X86::VPCMPWZ256rrik: case X86::VPCMPUWZ256rrik:
case X86::VPCMPWZrrik: case X86::VPCMPUWZrrik: {
// Flip comparison mode immediate (if necessary).
unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm() & 0x7;
Imm = X86::getSwappedVPCMPImm(Imm);
auto &WorkingMI = cloneIfNew(MI);
WorkingMI.getOperand(MI.getNumOperands() - 1).setImm(Imm);
return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
OpIdx1, OpIdx2);
}
case X86::VPCOMBri: case X86::VPCOMUBri:
case X86::VPCOMDri: case X86::VPCOMUDri:
case X86::VPCOMQri: case X86::VPCOMUQri:
case X86::VPCOMWri: case X86::VPCOMUWri: {
// Flip comparison mode immediate (if necessary).
unsigned Imm = MI.getOperand(3).getImm() & 0x7;
Imm = X86::getSwappedVPCOMImm(Imm);
auto &WorkingMI = cloneIfNew(MI);
WorkingMI.getOperand(3).setImm(Imm);
return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
OpIdx1, OpIdx2);
}
case X86::VPERM2F128rr:
case X86::VPERM2I128rr: {
// Flip permute source immediate.
// Imm & 0x02: lo = if set, select Op1.lo/hi else Op0.lo/hi.
// Imm & 0x20: hi = if set, select Op1.lo/hi else Op0.lo/hi.
int8_t Imm = MI.getOperand(3).getImm() & 0xFF;
auto &WorkingMI = cloneIfNew(MI);
WorkingMI.getOperand(3).setImm(Imm ^ 0x22);
return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
OpIdx1, OpIdx2);
}
case X86::MOVHLPSrr:
case X86::UNPCKHPDrr:
case X86::VMOVHLPSrr:
case X86::VUNPCKHPDrr:
case X86::VMOVHLPSZrr:
case X86::VUNPCKHPDZ128rr: {
assert(Subtarget.hasSSE2() && "Commuting MOVHLP/UNPCKHPD requires SSE2!");
unsigned Opc = MI.getOpcode();
switch (Opc) {
default: llvm_unreachable("Unreachable!");
case X86::MOVHLPSrr: Opc = X86::UNPCKHPDrr; break;
case X86::UNPCKHPDrr: Opc = X86::MOVHLPSrr; break;
case X86::VMOVHLPSrr: Opc = X86::VUNPCKHPDrr; break;
case X86::VUNPCKHPDrr: Opc = X86::VMOVHLPSrr; break;
case X86::VMOVHLPSZrr: Opc = X86::VUNPCKHPDZ128rr; break;
case X86::VUNPCKHPDZ128rr: Opc = X86::VMOVHLPSZrr; break;
}
auto &WorkingMI = cloneIfNew(MI);
WorkingMI.setDesc(get(Opc));
return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
OpIdx1, OpIdx2);
}
case X86::CMOV16rr: case X86::CMOV32rr: case X86::CMOV64rr: {
auto &WorkingMI = cloneIfNew(MI);
unsigned OpNo = MI.getDesc().getNumOperands() - 1;
X86::CondCode CC = static_cast<X86::CondCode>(MI.getOperand(OpNo).getImm());
WorkingMI.getOperand(OpNo).setImm(X86::GetOppositeBranchCondition(CC));
return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
OpIdx1, OpIdx2);
}
case X86::VPTERNLOGDZrri: case X86::VPTERNLOGDZrmi:
case X86::VPTERNLOGDZ128rri: case X86::VPTERNLOGDZ128rmi:
case X86::VPTERNLOGDZ256rri: case X86::VPTERNLOGDZ256rmi:
case X86::VPTERNLOGQZrri: case X86::VPTERNLOGQZrmi:
case X86::VPTERNLOGQZ128rri: case X86::VPTERNLOGQZ128rmi:
case X86::VPTERNLOGQZ256rri: case X86::VPTERNLOGQZ256rmi:
case X86::VPTERNLOGDZrrik:
case X86::VPTERNLOGDZ128rrik:
case X86::VPTERNLOGDZ256rrik:
case X86::VPTERNLOGQZrrik:
case X86::VPTERNLOGQZ128rrik:
case X86::VPTERNLOGQZ256rrik:
case X86::VPTERNLOGDZrrikz: case X86::VPTERNLOGDZrmikz:
case X86::VPTERNLOGDZ128rrikz: case X86::VPTERNLOGDZ128rmikz:
case X86::VPTERNLOGDZ256rrikz: case X86::VPTERNLOGDZ256rmikz:
case X86::VPTERNLOGQZrrikz: case X86::VPTERNLOGQZrmikz:
case X86::VPTERNLOGQZ128rrikz: case X86::VPTERNLOGQZ128rmikz:
case X86::VPTERNLOGQZ256rrikz: case X86::VPTERNLOGQZ256rmikz:
case X86::VPTERNLOGDZ128rmbi:
case X86::VPTERNLOGDZ256rmbi:
case X86::VPTERNLOGDZrmbi:
case X86::VPTERNLOGQZ128rmbi:
case X86::VPTERNLOGQZ256rmbi:
case X86::VPTERNLOGQZrmbi:
case X86::VPTERNLOGDZ128rmbikz:
case X86::VPTERNLOGDZ256rmbikz:
case X86::VPTERNLOGDZrmbikz:
case X86::VPTERNLOGQZ128rmbikz:
case X86::VPTERNLOGQZ256rmbikz:
case X86::VPTERNLOGQZrmbikz: {
auto &WorkingMI = cloneIfNew(MI);
commuteVPTERNLOG(WorkingMI, OpIdx1, OpIdx2);
return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
OpIdx1, OpIdx2);
}
default: {
if (isCommutableVPERMV3Instruction(MI.getOpcode())) {
unsigned Opc = getCommutedVPERMV3Opcode(MI.getOpcode());
auto &WorkingMI = cloneIfNew(MI);
WorkingMI.setDesc(get(Opc));
return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
OpIdx1, OpIdx2);
}
const X86InstrFMA3Group *FMA3Group = getFMA3Group(MI.getOpcode(),
MI.getDesc().TSFlags);
if (FMA3Group) {
unsigned Opc =
getFMA3OpcodeToCommuteOperands(MI, OpIdx1, OpIdx2, *FMA3Group);
auto &WorkingMI = cloneIfNew(MI);
WorkingMI.setDesc(get(Opc));
return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
OpIdx1, OpIdx2);
}
return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}
}
}
bool
X86InstrInfo::findThreeSrcCommutedOpIndices(const MachineInstr &MI,
unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2,
bool IsIntrinsic) const {
uint64_t TSFlags = MI.getDesc().TSFlags;
unsigned FirstCommutableVecOp = 1;
unsigned LastCommutableVecOp = 3;
unsigned KMaskOp = -1U;
if (X86II::isKMasked(TSFlags)) {
// For k-zero-masked operations it is OK to commute the first vector
// operand.
// For regular k-masked operations we choose conservatively, because the
// elements of the first vector operand for which the corresponding bit
// in the k-mask operand is set to 0 are copied to the result of the
// instruction.
// TODO/FIXME: The commute still may be legal if it is known that the
// k-mask operand is set to either all ones or all zeroes.
// It is also OK to commute the 1st operand if all users of MI use only
// the elements enabled by the k-mask operand. For example,
// v4 = VFMADD213PSZrk v1, k, v2, v3; // v1[i] = k[i] ? v2[i]*v1[i]+v3[i]
// : v1[i];
// VMOVAPSZmrk <mem_addr>, k, v4; // this is the ONLY user of v4 ->
// // Ok, to commute v1 in FMADD213PSZrk.
// The k-mask operand has index = 2 for masked and zero-masked operations.
KMaskOp = 2;
// The operand with index = 1 is used as a source for those elements for
// which the corresponding bit in the k-mask is set to 0.
if (X86II::isKMergeMasked(TSFlags))
FirstCommutableVecOp = 3;
LastCommutableVecOp++;
} else if (IsIntrinsic) {
// Commuting the first operand of an intrinsic instruction isn't possible
// unless we can prove that only the lowest element of the result is used.
FirstCommutableVecOp = 2;
}
if (isMem(MI, LastCommutableVecOp))
LastCommutableVecOp--;
// Only the first RegOpsNum operands are commutable.
// Also, the value 'CommuteAnyOperandIndex' is valid here as it means
// that the operand is not specified/fixed.
if (SrcOpIdx1 != CommuteAnyOperandIndex &&
(SrcOpIdx1 < FirstCommutableVecOp || SrcOpIdx1 > LastCommutableVecOp ||
SrcOpIdx1 == KMaskOp))
return false;
if (SrcOpIdx2 != CommuteAnyOperandIndex &&
(SrcOpIdx2 < FirstCommutableVecOp || SrcOpIdx2 > LastCommutableVecOp ||
SrcOpIdx2 == KMaskOp))
return false;
// Look for two different register operands assumed to be commutable
// regardless of the FMA opcode. The FMA opcode is adjusted later.
if (SrcOpIdx1 == CommuteAnyOperandIndex ||
SrcOpIdx2 == CommuteAnyOperandIndex) {
unsigned CommutableOpIdx2 = SrcOpIdx2;
// At least one of the operands to be commuted is not specified, and
// this method is free to choose appropriate commutable operands.
if (SrcOpIdx1 == SrcOpIdx2)
// Neither operand is fixed. By default, set one of the commutable
// operands to the last register operand of the instruction.
CommutableOpIdx2 = LastCommutableVecOp;
else if (SrcOpIdx2 == CommuteAnyOperandIndex)
// Only one of the operands is not fixed.
CommutableOpIdx2 = SrcOpIdx1;
// CommutableOpIdx2 is well defined now. Let's choose another commutable
// operand and assign its index to CommutableOpIdx1.
unsigned Op2Reg = MI.getOperand(CommutableOpIdx2).getReg();
unsigned CommutableOpIdx1;
for (CommutableOpIdx1 = LastCommutableVecOp;
CommutableOpIdx1 >= FirstCommutableVecOp; CommutableOpIdx1--) {
// Just ignore and skip the k-mask operand.
if (CommutableOpIdx1 == KMaskOp)
continue;
// The commuted operands must have different registers.
// Otherwise, the commute transformation does not change anything and is
// therefore useless.
if (Op2Reg != MI.getOperand(CommutableOpIdx1).getReg())
break;
}
// No appropriate commutable operands were found.
if (CommutableOpIdx1 < FirstCommutableVecOp)
return false;
// Assign the found pair of commutable indices to SrcOpIdx1 and SrcOpIdx2
// to return those values.
if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
CommutableOpIdx1, CommutableOpIdx2))
return false;
}
return true;
}
bool X86InstrInfo::findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const {
const MCInstrDesc &Desc = MI.getDesc();
if (!Desc.isCommutable())
return false;
switch (MI.getOpcode()) {
case X86::CMPSDrr:
case X86::CMPSSrr:
case X86::CMPPDrri:
case X86::CMPPSrri:
case X86::VCMPSDrr:
case X86::VCMPSSrr:
case X86::VCMPPDrri:
case X86::VCMPPSrri:
case X86::VCMPPDYrri:
case X86::VCMPPSYrri:
case X86::VCMPSDZrr:
case X86::VCMPSSZrr:
case X86::VCMPPDZrri:
case X86::VCMPPSZrri:
case X86::VCMPPDZ128rri:
case X86::VCMPPSZ128rri:
case X86::VCMPPDZ256rri:
case X86::VCMPPSZ256rri:
case X86::VCMPPDZrrik:
case X86::VCMPPSZrrik:
case X86::VCMPPDZ128rrik:
case X86::VCMPPSZ128rrik:
case X86::VCMPPDZ256rrik:
case X86::VCMPPSZ256rrik: {
unsigned OpOffset = X86II::isKMasked(Desc.TSFlags) ? 1 : 0;
// Float comparisons can be safely commuted for
// Ordered/Unordered/Equal/NotEqual tests.
unsigned Imm = MI.getOperand(3 + OpOffset).getImm() & 0x7;
switch (Imm) {
case 0x00: // EQUAL
case 0x03: // UNORDERED
case 0x04: // NOT EQUAL
case 0x07: // ORDERED
// The indices of the commutable operands are 1 and 2 (or 2 and 3
// when masked).
// Assign them to the returned operand indices here.
return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1 + OpOffset,
2 + OpOffset);
}
return false;
}
case X86::MOVSSrr:
// X86::MOVSDrr is always commutable. MOVSS is only commutable if we can
// form an SSE4.1 blend. We assume VMOVSSrr/VMOVSDrr is always commutable
// since AVX implies SSE4.1.
if (Subtarget.hasSSE41())
return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
return false;
case X86::SHUFPDrri:
// We can commute this to MOVSD.
if (MI.getOperand(3).getImm() == 0x02)
return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
return false;
case X86::MOVHLPSrr:
case X86::UNPCKHPDrr:
case X86::VMOVHLPSrr:
case X86::VUNPCKHPDrr:
case X86::VMOVHLPSZrr:
case X86::VUNPCKHPDZ128rr:
if (Subtarget.hasSSE2())
return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
return false;
case X86::VPTERNLOGDZrri: case X86::VPTERNLOGDZrmi:
case X86::VPTERNLOGDZ128rri: case X86::VPTERNLOGDZ128rmi:
case X86::VPTERNLOGDZ256rri: case X86::VPTERNLOGDZ256rmi:
case X86::VPTERNLOGQZrri: case X86::VPTERNLOGQZrmi:
case X86::VPTERNLOGQZ128rri: case X86::VPTERNLOGQZ128rmi:
case X86::VPTERNLOGQZ256rri: case X86::VPTERNLOGQZ256rmi:
case X86::VPTERNLOGDZrrik:
case X86::VPTERNLOGDZ128rrik:
case X86::VPTERNLOGDZ256rrik:
case X86::VPTERNLOGQZrrik:
case X86::VPTERNLOGQZ128rrik:
case X86::VPTERNLOGQZ256rrik:
case X86::VPTERNLOGDZrrikz: case X86::VPTERNLOGDZrmikz:
case X86::VPTERNLOGDZ128rrikz: case X86::VPTERNLOGDZ128rmikz:
case X86::VPTERNLOGDZ256rrikz: case X86::VPTERNLOGDZ256rmikz:
case X86::VPTERNLOGQZrrikz: case X86::VPTERNLOGQZrmikz:
case X86::VPTERNLOGQZ128rrikz: case X86::VPTERNLOGQZ128rmikz:
case X86::VPTERNLOGQZ256rrikz: case X86::VPTERNLOGQZ256rmikz:
case X86::VPTERNLOGDZ128rmbi:
case X86::VPTERNLOGDZ256rmbi:
case X86::VPTERNLOGDZrmbi:
case X86::VPTERNLOGQZ128rmbi:
case X86::VPTERNLOGQZ256rmbi:
case X86::VPTERNLOGQZrmbi:
case X86::VPTERNLOGDZ128rmbikz:
case X86::VPTERNLOGDZ256rmbikz:
case X86::VPTERNLOGDZrmbikz:
case X86::VPTERNLOGQZ128rmbikz:
case X86::VPTERNLOGQZ256rmbikz:
case X86::VPTERNLOGQZrmbikz:
return findThreeSrcCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
case X86::VPMADD52HUQZ128r:
case X86::VPMADD52HUQZ128rk:
case X86::VPMADD52HUQZ128rkz:
case X86::VPMADD52HUQZ256r:
case X86::VPMADD52HUQZ256rk:
case X86::VPMADD52HUQZ256rkz:
case X86::VPMADD52HUQZr:
case X86::VPMADD52HUQZrk:
case X86::VPMADD52HUQZrkz:
case X86::VPMADD52LUQZ128r:
case X86::VPMADD52LUQZ128rk:
case X86::VPMADD52LUQZ128rkz:
case X86::VPMADD52LUQZ256r:
case X86::VPMADD52LUQZ256rk:
case X86::VPMADD52LUQZ256rkz:
case X86::VPMADD52LUQZr:
case X86::VPMADD52LUQZrk:
case X86::VPMADD52LUQZrkz: {
unsigned CommutableOpIdx1 = 2;
unsigned CommutableOpIdx2 = 3;
if (X86II::isKMasked(Desc.TSFlags)) {
// Skip the mask register.
++CommutableOpIdx1;
++CommutableOpIdx2;
}
if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
CommutableOpIdx1, CommutableOpIdx2))
return false;
if (!MI.getOperand(SrcOpIdx1).isReg() ||
!MI.getOperand(SrcOpIdx2).isReg())
// No idea.
return false;
return true;
}
default:
const X86InstrFMA3Group *FMA3Group = getFMA3Group(MI.getOpcode(),
MI.getDesc().TSFlags);
if (FMA3Group)
return findThreeSrcCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2,
FMA3Group->isIntrinsic());
// Handle masked instructions, since we need to skip over the mask input
// and the preserved input.
if (X86II::isKMasked(Desc.TSFlags)) {
// First assume that the first input is the mask operand and skip past it.
unsigned CommutableOpIdx1 = Desc.getNumDefs() + 1;
unsigned CommutableOpIdx2 = Desc.getNumDefs() + 2;
// Check if the first input is tied. If it isn't, then we only need to
// skip the mask operand, which we did above.
if ((MI.getDesc().getOperandConstraint(Desc.getNumDefs(),
MCOI::TIED_TO) != -1)) {
// If this is a zero-masking instruction with a tied operand, we need to
// move the first index back to the first input, since this must be a
// 3-input instruction and we want the first two non-mask inputs.
// Otherwise this is a 2-input instruction with a preserved input and
// mask, so we need to move the indices to skip one more input.
if (X86II::isKMergeMasked(Desc.TSFlags)) {
++CommutableOpIdx1;
++CommutableOpIdx2;
} else {
--CommutableOpIdx1;
}
}
if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
CommutableOpIdx1, CommutableOpIdx2))
return false;
if (!MI.getOperand(SrcOpIdx1).isReg() ||
!MI.getOperand(SrcOpIdx2).isReg())
// No idea.
return false;
return true;
}
return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
}
return false;
}
X86::CondCode X86::getCondFromBranch(const MachineInstr &MI) {
switch (MI.getOpcode()) {
default: return X86::COND_INVALID;
case X86::JCC_1:
return static_cast<X86::CondCode>(
MI.getOperand(MI.getDesc().getNumOperands() - 1).getImm());
}
}
/// Return condition code of a SETCC opcode.
X86::CondCode X86::getCondFromSETCC(const MachineInstr &MI) {
switch (MI.getOpcode()) {
default: return X86::COND_INVALID;
case X86::SETCCr: case X86::SETCCm:
return static_cast<X86::CondCode>(
MI.getOperand(MI.getDesc().getNumOperands() - 1).getImm());
}
}
/// Return condition code of a CMov opcode.
X86::CondCode X86::getCondFromCMov(const MachineInstr &MI) {
switch (MI.getOpcode()) {
default: return X86::COND_INVALID;
case X86::CMOV16rr: case X86::CMOV32rr: case X86::CMOV64rr:
case X86::CMOV16rm: case X86::CMOV32rm: case X86::CMOV64rm:
return static_cast<X86::CondCode>(
MI.getOperand(MI.getDesc().getNumOperands() - 1).getImm());
}
}
/// Return the inverse of the specified condition,
/// e.g. turning COND_E to COND_NE.
X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
switch (CC) {
default: llvm_unreachable("Illegal condition code!");
case X86::COND_E: return X86::COND_NE;
case X86::COND_NE: return X86::COND_E;
case X86::COND_L: return X86::COND_GE;
case X86::COND_LE: return X86::COND_G;
case X86::COND_G: return X86::COND_LE;
case X86::COND_GE: return X86::COND_L;
case X86::COND_B: return X86::COND_AE;
case X86::COND_BE: return X86::COND_A;
case X86::COND_A: return X86::COND_BE;
case X86::COND_AE: return X86::COND_B;
case X86::COND_S: return X86::COND_NS;
case X86::COND_NS: return X86::COND_S;
case X86::COND_P: return X86::COND_NP;
case X86::COND_NP: return X86::COND_P;
case X86::COND_O: return X86::COND_NO;
case X86::COND_NO: return X86::COND_O;
case X86::COND_NE_OR_P: return X86::COND_E_AND_NP;
case X86::COND_E_AND_NP: return X86::COND_NE_OR_P;
}
}
/// Assuming the flags are set by MI(a,b), return the condition code if we
/// modify the instructions such that flags are set by MI(b,a).
static X86::CondCode getSwappedCondition(X86::CondCode CC) {
switch (CC) {
default: return X86::COND_INVALID;
case X86::COND_E: return X86::COND_E;
case X86::COND_NE: return X86::COND_NE;
case X86::COND_L: return X86::COND_G;
case X86::COND_LE: return X86::COND_GE;
case X86::COND_G: return X86::COND_L;
case X86::COND_GE: return X86::COND_LE;
case X86::COND_B: return X86::COND_A;
case X86::COND_BE: return X86::COND_AE;
case X86::COND_A: return X86::COND_B;
case X86::COND_AE: return X86::COND_BE;
}
}
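// Illustrative example: if the flags were set by "CMP %a, %b" then COND_L
// tests a < b (signed). After rewriting to "CMP %b, %a" the same relation
// is tested by COND_G, which is what this table returns.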
std::pair<X86::CondCode, bool>
X86::getX86ConditionCode(CmpInst::Predicate Predicate) {
X86::CondCode CC = X86::COND_INVALID;
bool NeedSwap = false;
switch (Predicate) {
default: break;
// Floating-point Predicates
case CmpInst::FCMP_UEQ: CC = X86::COND_E; break;
case CmpInst::FCMP_OLT: NeedSwap = true; LLVM_FALLTHROUGH;
case CmpInst::FCMP_OGT: CC = X86::COND_A; break;
case CmpInst::FCMP_OLE: NeedSwap = true; LLVM_FALLTHROUGH;
case CmpInst::FCMP_OGE: CC = X86::COND_AE; break;
case CmpInst::FCMP_UGT: NeedSwap = true; LLVM_FALLTHROUGH;
case CmpInst::FCMP_ULT: CC = X86::COND_B; break;
case CmpInst::FCMP_UGE: NeedSwap = true; LLVM_FALLTHROUGH;
case CmpInst::FCMP_ULE: CC = X86::COND_BE; break;
case CmpInst::FCMP_ONE: CC = X86::COND_NE; break;
case CmpInst::FCMP_UNO: CC = X86::COND_P; break;
case CmpInst::FCMP_ORD: CC = X86::COND_NP; break;
case CmpInst::FCMP_OEQ: LLVM_FALLTHROUGH;
case CmpInst::FCMP_UNE: CC = X86::COND_INVALID; break;
// Integer Predicates
case CmpInst::ICMP_EQ: CC = X86::COND_E; break;
case CmpInst::ICMP_NE: CC = X86::COND_NE; break;
case CmpInst::ICMP_UGT: CC = X86::COND_A; break;
case CmpInst::ICMP_UGE: CC = X86::COND_AE; break;
case CmpInst::ICMP_ULT: CC = X86::COND_B; break;
case CmpInst::ICMP_ULE: CC = X86::COND_BE; break;
case CmpInst::ICMP_SGT: CC = X86::COND_G; break;
case CmpInst::ICMP_SGE: CC = X86::COND_GE; break;
case CmpInst::ICMP_SLT: CC = X86::COND_L; break;
case CmpInst::ICMP_SLE: CC = X86::COND_LE; break;
}
return std::make_pair(CC, NeedSwap);
}
/// Return a setcc opcode based on whether it has memory operand.
unsigned X86::getSETOpc(bool HasMemoryOperand) {
return HasMemoryOperand ? X86::SETCCm : X86::SETCCr;
}
/// Return a cmov opcode for the given register size in bytes, and operand type.
unsigned X86::getCMovOpcode(unsigned RegBytes, bool HasMemoryOperand) {
switch(RegBytes) {
default: llvm_unreachable("Illegal register size!");
case 2: return HasMemoryOperand ? X86::CMOV16rm : X86::CMOV16rr;
case 4: return HasMemoryOperand ? X86::CMOV32rm : X86::CMOV32rr;
case 8: return HasMemoryOperand ? X86::CMOV64rm : X86::CMOV64rr;
}
}
/// Get the VPCMP immediate for the given condition.
unsigned X86::getVPCMPImmForCond(ISD::CondCode CC) {
switch (CC) {
default: llvm_unreachable("Unexpected SETCC condition");
case ISD::SETNE: return 4;
case ISD::SETEQ: return 0;
case ISD::SETULT:
case ISD::SETLT: return 1;
case ISD::SETUGT:
case ISD::SETGT: return 6;
case ISD::SETUGE:
case ISD::SETGE: return 5;
case ISD::SETULE:
case ISD::SETLE: return 2;
}
}
/// Get the VPCMP immediate if the opcodes are swapped.
unsigned X86::getSwappedVPCMPImm(unsigned Imm) {
switch (Imm) {
default: llvm_unreachable("Unreachable!");
case 0x01: Imm = 0x06; break; // LT -> NLE
case 0x02: Imm = 0x05; break; // LE -> NLT
case 0x05: Imm = 0x02; break; // NLT -> LE
case 0x06: Imm = 0x01; break; // NLE -> LT
case 0x00: // EQ
case 0x03: // FALSE
case 0x04: // NE
case 0x07: // TRUE
break;
}
return Imm;
}
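// Worked example: immediate 1 (LT) computes a < b. With the operands
// swapped, the same result is !(b <= a), i.e. NLE, immediate 6. EQ, NE,
// FALSE and TRUE are insensitive to operand order and keep their immediate.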
/// Get the VPCOM immediate if the opcodes are swapped.
unsigned X86::getSwappedVPCOMImm(unsigned Imm) {
switch (Imm) {
default: llvm_unreachable("Unreachable!");
case 0x00: Imm = 0x02; break; // LT -> GT
case 0x01: Imm = 0x03; break; // LE -> GE
case 0x02: Imm = 0x00; break; // GT -> LT
case 0x03: Imm = 0x01; break; // GE -> LE
case 0x04: // EQ
case 0x05: // NE
case 0x06: // FALSE
case 0x07: // TRUE
break;
}
return Imm;
}
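// Note that XOP's VPCOM uses a different immediate encoding than AVX-512's
// VPCMP above: 0=LT, 1=LE, 2=GT, 3=GE, 4=EQ, 5=NE, 6=FALSE, 7=TRUE, so the
// ordered predicates are direct swaps of one another.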
bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
if (!MI.isTerminator()) return false;
// Conditional branch is a special case.
if (MI.isBranch() && !MI.isBarrier())
return true;
if (!MI.isPredicable())
return true;
return !isPredicated(MI);
}
bool X86InstrInfo::isUnconditionalTailCall(const MachineInstr &MI) const {
switch (MI.getOpcode()) {
case X86::TCRETURNdi:
case X86::TCRETURNri:
case X86::TCRETURNmi:
case X86::TCRETURNdi64:
case X86::TCRETURNri64:
case X86::TCRETURNmi64:
return true;
default:
return false;
}
}
bool X86InstrInfo::canMakeTailCallConditional(
SmallVectorImpl<MachineOperand> &BranchCond,
const MachineInstr &TailCall) const {
if (TailCall.getOpcode() != X86::TCRETURNdi &&
TailCall.getOpcode() != X86::TCRETURNdi64) {
// Only direct calls can be done with a conditional branch.
return false;
}
const MachineFunction *MF = TailCall.getParent()->getParent();
if (Subtarget.isTargetWin64() && MF->hasWinCFI()) {
// Conditional tail calls confuse the Win64 unwinder.
return false;
}
assert(BranchCond.size() == 1);
if (BranchCond[0].getImm() > X86::LAST_VALID_COND) {
// Can't make a conditional tail call with this condition.
return false;
}
const X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
if (X86FI->getTCReturnAddrDelta() != 0 ||
TailCall.getOperand(1).getImm() != 0) {
// A conditional tail call cannot do any stack adjustment.
return false;
}
return true;
}
void X86InstrInfo::replaceBranchWithTailCall(
MachineBasicBlock &MBB, SmallVectorImpl<MachineOperand> &BranchCond,
const MachineInstr &TailCall) const {
assert(canMakeTailCallConditional(BranchCond, TailCall));
MachineBasicBlock::iterator I = MBB.end();
while (I != MBB.begin()) {
--I;
if (I->isDebugInstr())
continue;
if (!I->isBranch())
assert(0 && "Can't find the branch to replace!");
X86::CondCode CC = X86::getCondFromBranch(*I);
assert(BranchCond.size() == 1);
if (CC != BranchCond[0].getImm())
continue;
break;
}
unsigned Opc = TailCall.getOpcode() == X86::TCRETURNdi ? X86::TCRETURNdicc
: X86::TCRETURNdi64cc;
auto MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opc));
MIB->addOperand(TailCall.getOperand(0)); // Destination.
MIB.addImm(0); // Stack offset (not used).
MIB->addOperand(BranchCond[0]); // Condition.
MIB.copyImplicitOps(TailCall); // Regmask and (imp-used) parameters.
// Add implicit uses and defs of all live regs potentially clobbered by the
// call. This way they still appear live across the call.
LivePhysRegs LiveRegs(getRegisterInfo());
LiveRegs.addLiveOuts(MBB);
SmallVector<std::pair<MCPhysReg, const MachineOperand *>, 8> Clobbers;
LiveRegs.stepForward(*MIB, Clobbers);
for (const auto &C : Clobbers) {
MIB.addReg(C.first, RegState::Implicit);
MIB.addReg(C.first, RegState::Implicit | RegState::Define);
}
I->eraseFromParent();
}
// Given an MBB and its TBB, find the FBB which was a fallthrough MBB (it may
// not be a fallthrough MBB now due to layout changes). Return nullptr if the
// fallthrough MBB cannot be identified.
static MachineBasicBlock *getFallThroughMBB(MachineBasicBlock *MBB,
MachineBasicBlock *TBB) {
// Look for non-EHPad successors other than TBB. If we find exactly one, it
// is the fallthrough MBB. If we find zero, then TBB is both the target MBB
// and fallthrough MBB. If we find more than one, we cannot identify the
// fallthrough MBB and should return nullptr.
MachineBasicBlock *FallthroughBB = nullptr;
for (auto SI = MBB->succ_begin(), SE = MBB->succ_end(); SI != SE; ++SI) {
if ((*SI)->isEHPad() || (*SI == TBB && FallthroughBB))
continue;
// Return a nullptr if we found more than one fallthrough successor.
if (FallthroughBB && FallthroughBB != TBB)
return nullptr;
FallthroughBB = *SI;
}
return FallthroughBB;
}
bool X86InstrInfo::AnalyzeBranchImpl(
MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
SmallVectorImpl<MachineInstr *> &CondBranches, bool AllowModify) const {
// Start from the bottom of the block and work up, examining the
// terminator instructions.
MachineBasicBlock::iterator I = MBB.end();
MachineBasicBlock::iterator UnCondBrIter = MBB.end();
while (I != MBB.begin()) {
--I;
if (I->isDebugInstr())
continue;
// Working from the bottom, when we see a non-terminator instruction, we're
// done.
if (!isUnpredicatedTerminator(*I))
break;
// A terminator that isn't a branch can't easily be handled by this
// analysis.
if (!I->isBranch())
return true;
// Handle unconditional branches.
if (I->getOpcode() == X86::JMP_1) {
UnCondBrIter = I;
if (!AllowModify) {
TBB = I->getOperand(0).getMBB();
continue;
}
// If the block has any instructions after a JMP, delete them.
while (std::next(I) != MBB.end())
std::next(I)->eraseFromParent();
Cond.clear();
FBB = nullptr;
// Delete the JMP if it's equivalent to a fall-through.
if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
TBB = nullptr;
I->eraseFromParent();
I = MBB.end();
UnCondBrIter = MBB.end();
continue;
}
// TBB is used to indicate the unconditional destination.
TBB = I->getOperand(0).getMBB();
continue;
}
// Handle conditional branches.
X86::CondCode BranchCode = X86::getCondFromBranch(*I);
if (BranchCode == X86::COND_INVALID)
return true; // Can't handle indirect branch.
// In practice we should never have an undef EFLAGS operand; if we do,
// abort here, as we are not prepared to preserve the flag.
if (I->findRegisterUseOperand(X86::EFLAGS)->isUndef())
return true;
// Working from the bottom, handle the first conditional branch.
if (Cond.empty()) {
MachineBasicBlock *TargetBB = I->getOperand(0).getMBB();
if (AllowModify && UnCondBrIter != MBB.end() &&
MBB.isLayoutSuccessor(TargetBB)) {
// If we can modify the code and it ends in something like:
//
// jCC L1
// jmp L2
// L1:
// ...
// L2:
//
// Then we can change this to:
//
// jnCC L2
// L1:
// ...
// L2:
//
// Which is a bit more efficient.
// We conditionally jump to the fall-through block.
BranchCode = GetOppositeBranchCondition(BranchCode);
MachineBasicBlock::iterator OldInst = I;
BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(X86::JCC_1))
.addMBB(UnCondBrIter->getOperand(0).getMBB())
.addImm(BranchCode);
BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(X86::JMP_1))
.addMBB(TargetBB);
OldInst->eraseFromParent();
UnCondBrIter->eraseFromParent();
// Restart the analysis.
UnCondBrIter = MBB.end();
I = MBB.end();
continue;
}
FBB = TBB;
TBB = I->getOperand(0).getMBB();
Cond.push_back(MachineOperand::CreateImm(BranchCode));
CondBranches.push_back(&*I);
continue;
}
// Handle subsequent conditional branches. Only handle the case where all
// conditional branches branch to the same destination and their condition
// opcodes fit one of the special multi-branch idioms.
assert(Cond.size() == 1);
assert(TBB);
// If the conditions are the same, we can leave them alone.
X86::CondCode OldBranchCode = (X86::CondCode)Cond[0].getImm();
auto NewTBB = I->getOperand(0).getMBB();
if (OldBranchCode == BranchCode && TBB == NewTBB)
continue;
// If they differ, see if they fit one of the known patterns. Theoretically,
// we could handle more patterns here, but we shouldn't expect to see them
// if instruction selection has done a reasonable job.
if (TBB == NewTBB &&
((OldBranchCode == X86::COND_P && BranchCode == X86::COND_NE) ||
(OldBranchCode == X86::COND_NE && BranchCode == X86::COND_P))) {
BranchCode = X86::COND_NE_OR_P;
} else if ((OldBranchCode == X86::COND_NP && BranchCode == X86::COND_NE) ||
(OldBranchCode == X86::COND_E && BranchCode == X86::COND_P)) {
if (NewTBB != (FBB ? FBB : getFallThroughMBB(&MBB, TBB)))
return true;
// X86::COND_E_AND_NP usually has two different branch destinations.
//
// JP B1
// JE B2
// JMP B1
// B1:
// B2:
//
// Here this condition branches to B2 only if NP && E. It has another
// equivalent form:
//
// JNE B1
// JNP B2
// JMP B1
// B1:
// B2:
//
// Similarly it branches to B2 only if E && NP. That is why this condition
// is named with COND_E_AND_NP.
BranchCode = X86::COND_E_AND_NP;
} else
return true;
// Update the MachineOperand.
Cond[0].setImm(BranchCode);
CondBranches.push_back(&*I);
}
return false;
}
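// On a successful analysis (return value false), TBB and FBB hold the taken
// and false destinations, Cond contains at most one condition-code operand,
// and CondBranches lists the conditional branch instructions encountered,
// in bottom-up order.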
bool X86InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const {
SmallVector<MachineInstr *, 4> CondBranches;
return AnalyzeBranchImpl(MBB, TBB, FBB, Cond, CondBranches, AllowModify);
}
bool X86InstrInfo::analyzeBranchPredicate(MachineBasicBlock &MBB,
MachineBranchPredicate &MBP,
bool AllowModify) const {
using namespace std::placeholders;
SmallVector<MachineOperand, 4> Cond;
SmallVector<MachineInstr *, 4> CondBranches;
if (AnalyzeBranchImpl(MBB, MBP.TrueDest, MBP.FalseDest, Cond, CondBranches,
AllowModify))
return true;
if (Cond.size() != 1)
return true;
assert(MBP.TrueDest && "expected!");
if (!MBP.FalseDest)
MBP.FalseDest = MBB.getNextNode();
const TargetRegisterInfo *TRI = &getRegisterInfo();
MachineInstr *ConditionDef = nullptr;
bool SingleUseCondition = true;
for (auto I = std::next(MBB.rbegin()), E = MBB.rend(); I != E; ++I) {
if (I->modifiesRegister(X86::EFLAGS, TRI)) {
ConditionDef = &*I;
break;
}
if (I->readsRegister(X86::EFLAGS, TRI))
SingleUseCondition = false;
}
if (!ConditionDef)
return true;
if (SingleUseCondition) {
for (auto *Succ : MBB.successors())
if (Succ->isLiveIn(X86::EFLAGS))
SingleUseCondition = false;
}
MBP.ConditionDef = ConditionDef;
MBP.SingleUseCondition = SingleUseCondition;
// Currently we only recognize the simple pattern:
//
// test %reg, %reg
// je %label
//
const unsigned TestOpcode =
Subtarget.is64Bit() ? X86::TEST64rr : X86::TEST32rr;
if (ConditionDef->getOpcode() == TestOpcode &&
ConditionDef->getNumOperands() == 3 &&
ConditionDef->getOperand(0).isIdenticalTo(ConditionDef->getOperand(1)) &&
(Cond[0].getImm() == X86::COND_NE || Cond[0].getImm() == X86::COND_E)) {
MBP.LHS = ConditionDef->getOperand(0);
MBP.RHS = MachineOperand::CreateImm(0);
MBP.Predicate = Cond[0].getImm() == X86::COND_NE
? MachineBranchPredicate::PRED_NE
: MachineBranchPredicate::PRED_EQ;
return false;
}
return true;
}
unsigned X86InstrInfo::removeBranch(MachineBasicBlock &MBB,
int *BytesRemoved) const {
assert(!BytesRemoved && "code size not handled");
MachineBasicBlock::iterator I = MBB.end();
unsigned Count = 0;
while (I != MBB.begin()) {
--I;
if (I->isDebugInstr())
continue;
if (I->getOpcode() != X86::JMP_1 &&
X86::getCondFromBranch(*I) == X86::COND_INVALID)
break;
// Remove the branch.
I->eraseFromParent();
I = MBB.end();
++Count;
}
return Count;
}
unsigned X86InstrInfo::insertBranch(MachineBasicBlock &MBB,
MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
ArrayRef<MachineOperand> Cond,
const DebugLoc &DL,
int *BytesAdded) const {
// Shouldn't be a fall through.
assert(TBB && "insertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 1 || Cond.size() == 0) &&
"X86 branch conditions have one component!");
assert(!BytesAdded && "code size not handled");
if (Cond.empty()) {
// Unconditional branch?
assert(!FBB && "Unconditional branch with multiple successors!");
BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(TBB);
return 1;
}
// If FBB is null, it is implied to be a fall-through block.
bool FallThru = FBB == nullptr;
// Conditional branch.
unsigned Count = 0;
X86::CondCode CC = (X86::CondCode)Cond[0].getImm();
switch (CC) {
case X86::COND_NE_OR_P:
// Synthesize NE_OR_P with two branches.
BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(X86::COND_NE);
++Count;
BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(X86::COND_P);
++Count;
break;
case X86::COND_E_AND_NP:
// Use the next block of MBB as FBB if it is null.
if (FBB == nullptr) {
FBB = getFallThroughMBB(&MBB, TBB);
assert(FBB && "MBB cannot be the last block in function when the false "
"body is a fall-through.");
}
// Synthesize COND_E_AND_NP with two branches.
BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(FBB).addImm(X86::COND_NE);
++Count;
BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(X86::COND_NP);
++Count;
break;
default: {
BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(CC);
++Count;
}
}
if (!FallThru) {
// Two-way Conditional branch. Insert the second branch.
BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(FBB);
++Count;
}
return Count;
}
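// For example, inserting a COND_NE_OR_P branch to TBB with an explicit FBB
// yields the three-instruction sequence:
//   jne TBB
//   jp  TBB
//   jmp FBB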
bool X86InstrInfo::
canInsertSelect(const MachineBasicBlock &MBB,
ArrayRef<MachineOperand> Cond,
unsigned TrueReg, unsigned FalseReg,
int &CondCycles, int &TrueCycles, int &FalseCycles) const {
// Not all subtargets have cmov instructions.
if (!Subtarget.hasCMov())
return false;
if (Cond.size() != 1)
return false;
// We cannot do the composite conditions, at least not in SSA form.
if ((X86::CondCode)Cond[0].getImm() > X86::LAST_VALID_COND)
return false;
// Check register classes.
const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
const TargetRegisterClass *RC =
RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
if (!RC)
return false;
// We have cmov instructions for 16, 32, and 64 bit general purpose registers.
if (X86::GR16RegClass.hasSubClassEq(RC) ||
X86::GR32RegClass.hasSubClassEq(RC) ||
X86::GR64RegClass.hasSubClassEq(RC)) {
// This latency applies to Pentium M, Merom, Wolfdale, Nehalem, and Sandy
// Bridge. Probably Ivy Bridge as well.
CondCycles = 2;
TrueCycles = 2;
FalseCycles = 2;
return true;
}
// Can't do vectors.
return false;
}
void X86InstrInfo::insertSelect(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
const DebugLoc &DL, unsigned DstReg,
ArrayRef<MachineOperand> Cond, unsigned TrueReg,
unsigned FalseReg) const {
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
const TargetRegisterClass &RC = *MRI.getRegClass(DstReg);
assert(Cond.size() == 1 && "Invalid Cond array");
unsigned Opc = X86::getCMovOpcode(TRI.getRegSizeInBits(RC) / 8,
false /*HasMemoryOperand*/);
BuildMI(MBB, I, DL, get(Opc), DstReg)
.addReg(FalseReg)
.addReg(TrueReg)
.addImm(Cond[0].getImm());
}
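// Note the operand order: the CMOVcc two-address form keeps its tied first
// source when the condition is false and moves in the second source when it
// is true, so FalseReg is added first and TrueReg second.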
/// Test if the given register is a physical h register.
static bool isHReg(unsigned Reg) {
return X86::GR8_ABCD_HRegClass.contains(Reg);
}
// Try and copy between VR128/VR64 and GR64 registers.
static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg,
const X86Subtarget &Subtarget) {
bool HasAVX = Subtarget.hasAVX();
bool HasAVX512 = Subtarget.hasAVX512();
// SrcReg(MaskReg) -> DestReg(GR64)
// SrcReg(MaskReg) -> DestReg(GR32)
// All KMASK RegClasses hold the same k registers, so we can test against any of them.
if (X86::VK16RegClass.contains(SrcReg)) {
if (X86::GR64RegClass.contains(DestReg)) {
assert(Subtarget.hasBWI());
return X86::KMOVQrk;
}
if (X86::GR32RegClass.contains(DestReg))
return Subtarget.hasBWI() ? X86::KMOVDrk : X86::KMOVWrk;
}
// SrcReg(GR64) -> DestReg(MaskReg)
// SrcReg(GR32) -> DestReg(MaskReg)
// All KMASK RegClasses hold the same k registers, so we can test against any of them.
if (X86::VK16RegClass.contains(DestReg)) {
if (X86::GR64RegClass.contains(SrcReg)) {
assert(Subtarget.hasBWI());
return X86::KMOVQkr;
}
if (X86::GR32RegClass.contains(SrcReg))
return Subtarget.hasBWI() ? X86::KMOVDkr : X86::KMOVWkr;
}
// SrcReg(VR128) -> DestReg(GR64)
// SrcReg(VR64) -> DestReg(GR64)
// SrcReg(GR64) -> DestReg(VR128)
// SrcReg(GR64) -> DestReg(VR64)
if (X86::GR64RegClass.contains(DestReg)) {
if (X86::VR128XRegClass.contains(SrcReg))
// Copy from a VR128 register to a GR64 register.
return HasAVX512 ? X86::VMOVPQIto64Zrr :
HasAVX ? X86::VMOVPQIto64rr :
X86::MOVPQIto64rr;
if (X86::VR64RegClass.contains(SrcReg))
// Copy from a VR64 register to a GR64 register.
return X86::MMX_MOVD64from64rr;
} else if (X86::GR64RegClass.contains(SrcReg)) {
// Copy from a GR64 register to a VR128 register.
if (X86::VR128XRegClass.contains(DestReg))
return HasAVX512 ? X86::VMOV64toPQIZrr :
HasAVX ? X86::VMOV64toPQIrr :
X86::MOV64toPQIrr;
// Copy from a GR64 register to a VR64 register.
if (X86::VR64RegClass.contains(DestReg))
return X86::MMX_MOVD64to64rr;
}
// SrcReg(VR128) -> DestReg(GR32)
// SrcReg(GR32) -> DestReg(VR128)
if (X86::GR32RegClass.contains(DestReg) &&
X86::VR128XRegClass.contains(SrcReg))
// Copy from a VR128 register to a GR32 register.
return HasAVX512 ? X86::VMOVPDI2DIZrr :
HasAVX ? X86::VMOVPDI2DIrr :
X86::MOVPDI2DIrr;
if (X86::VR128XRegClass.contains(DestReg) &&
X86::GR32RegClass.contains(SrcReg))
// Copy from a GR32 register to a VR128 register.
return HasAVX512 ? X86::VMOVDI2PDIZrr :
HasAVX ? X86::VMOVDI2PDIrr :
X86::MOVDI2PDIrr;
return 0;
}
void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const DebugLoc &DL, unsigned DestReg,
unsigned SrcReg, bool KillSrc) const {
// First deal with the normal symmetric copies.
bool HasAVX = Subtarget.hasAVX();
bool HasVLX = Subtarget.hasVLX();
unsigned Opc = 0;
if (X86::GR64RegClass.contains(DestReg, SrcReg))
Opc = X86::MOV64rr;
else if (X86::GR32RegClass.contains(DestReg, SrcReg))
Opc = X86::MOV32rr;
else if (X86::GR16RegClass.contains(DestReg, SrcReg))
Opc = X86::MOV16rr;
else if (X86::GR8RegClass.contains(DestReg, SrcReg)) {
// Copying to or from a physical H register on x86-64 requires a NOREX
// move. Otherwise use a normal move.
if ((isHReg(DestReg) || isHReg(SrcReg)) &&
Subtarget.is64Bit()) {
Opc = X86::MOV8rr_NOREX;
// Both operands must be encodable without a REX prefix.
assert(X86::GR8_NOREXRegClass.contains(SrcReg, DestReg) &&
"8-bit H register can not be copied outside GR8_NOREX");
} else
Opc = X86::MOV8rr;
}
else if (X86::VR64RegClass.contains(DestReg, SrcReg))
Opc = X86::MMX_MOVQ64rr;
else if (X86::VR128XRegClass.contains(DestReg, SrcReg)) {
if (HasVLX)
Opc = X86::VMOVAPSZ128rr;
else if (X86::VR128RegClass.contains(DestReg, SrcReg))
Opc = HasAVX ? X86::VMOVAPSrr : X86::MOVAPSrr;
else {
// If this is an extended register and we don't have VLX, we need to use a
// 512-bit move.
Opc = X86::VMOVAPSZrr;
const TargetRegisterInfo *TRI = &getRegisterInfo();
DestReg = TRI->getMatchingSuperReg(DestReg, X86::sub_xmm,
&X86::VR512RegClass);
SrcReg = TRI->getMatchingSuperReg(SrcReg, X86::sub_xmm,
&X86::VR512RegClass);
}
} else if (X86::VR256XRegClass.contains(DestReg, SrcReg)) {
if (HasVLX)
Opc = X86::VMOVAPSZ256rr;
else if (X86::VR256RegClass.contains(DestReg, SrcReg))
Opc = X86::VMOVAPSYrr;
else {
// If this is an extended register and we don't have VLX, we need to use a
// 512-bit move.
Opc = X86::VMOVAPSZrr;
const TargetRegisterInfo *TRI = &getRegisterInfo();
DestReg = TRI->getMatchingSuperReg(DestReg, X86::sub_ymm,
&X86::VR512RegClass);
SrcReg = TRI->getMatchingSuperReg(SrcReg, X86::sub_ymm,
&X86::VR512RegClass);
}
} else if (X86::VR512RegClass.contains(DestReg, SrcReg))
Opc = X86::VMOVAPSZrr;
// All KMASK RegClasses hold the same k registers, so we can test against any of them.
else if (X86::VK16RegClass.contains(DestReg, SrcReg))
Opc = Subtarget.hasBWI() ? X86::KMOVQkk : X86::KMOVWkk;
if (!Opc)
Opc = CopyToFromAsymmetricReg(DestReg, SrcReg, Subtarget);
if (Opc) {
BuildMI(MBB, MI, DL, get(Opc), DestReg)
.addReg(SrcReg, getKillRegState(KillSrc));
return;
}
if (SrcReg == X86::EFLAGS || DestReg == X86::EFLAGS) {
// FIXME: We use a fatal error here because historically LLVM has tried to
// lower some of these physreg copies and we want to ensure we get
// reasonable bug reports if someone encounters a case no other testing
// found. This path should be removed after the LLVM 7 release.
report_fatal_error("Unable to copy EFLAGS physical register!");
}
LLVM_DEBUG(dbgs() << "Cannot copy " << RI.getName(SrcReg) << " to "
<< RI.getName(DestReg) << '\n');
report_fatal_error("Cannot emit physreg copy instruction");
}
bool X86InstrInfo::isCopyInstrImpl(const MachineInstr &MI,
const MachineOperand *&Src,
const MachineOperand *&Dest) const {
if (MI.isMoveReg()) {
Dest = &MI.getOperand(0);
Src = &MI.getOperand(1);
return true;
}
return false;
}
static unsigned getLoadStoreRegOpcode(unsigned Reg,
const TargetRegisterClass *RC,
bool isStackAligned,
const X86Subtarget &STI,
bool load) {
bool HasAVX = STI.hasAVX();
bool HasAVX512 = STI.hasAVX512();
bool HasVLX = STI.hasVLX();
switch (STI.getRegisterInfo()->getSpillSize(*RC)) {
default:
llvm_unreachable("Unknown spill size");
case 1:
assert(X86::GR8RegClass.hasSubClassEq(RC) && "Unknown 1-byte regclass");
if (STI.is64Bit())
// Loads and stores involving a physical H register on x86-64 require a
// NOREX move. Otherwise use a normal move.
if (isHReg(Reg) || X86::GR8_ABCD_HRegClass.hasSubClassEq(RC))
return load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX;
return load ? X86::MOV8rm : X86::MOV8mr;
case 2:
if (X86::VK16RegClass.hasSubClassEq(RC))
return load ? X86::KMOVWkm : X86::KMOVWmk;
assert(X86::GR16RegClass.hasSubClassEq(RC) && "Unknown 2-byte regclass");
return load ? X86::MOV16rm : X86::MOV16mr;
case 4:
if (X86::GR32RegClass.hasSubClassEq(RC))
return load ? X86::MOV32rm : X86::MOV32mr;
if (X86::FR32XRegClass.hasSubClassEq(RC))
return load ?
(HasAVX512 ? X86::VMOVSSZrm_alt :
HasAVX ? X86::VMOVSSrm_alt :
X86::MOVSSrm_alt) :
(HasAVX512 ? X86::VMOVSSZmr :
HasAVX ? X86::VMOVSSmr :
X86::MOVSSmr);
if (X86::RFP32RegClass.hasSubClassEq(RC))
return load ? X86::LD_Fp32m : X86::ST_Fp32m;
if (X86::VK32RegClass.hasSubClassEq(RC)) {
assert(STI.hasBWI() && "KMOVD requires BWI");
return load ? X86::KMOVDkm : X86::KMOVDmk;
}
// All of these mask pair classes have the same spill size, so the same kind
// of kmov instructions can be used with all of them.
if (X86::VK1PAIRRegClass.hasSubClassEq(RC) ||
X86::VK2PAIRRegClass.hasSubClassEq(RC) ||
X86::VK4PAIRRegClass.hasSubClassEq(RC) ||
X86::VK8PAIRRegClass.hasSubClassEq(RC) ||
X86::VK16PAIRRegClass.hasSubClassEq(RC))
return load ? X86::MASKPAIR16LOAD : X86::MASKPAIR16STORE;
llvm_unreachable("Unknown 4-byte regclass");
case 8:
if (X86::GR64RegClass.hasSubClassEq(RC))
return load ? X86::MOV64rm : X86::MOV64mr;
if (X86::FR64XRegClass.hasSubClassEq(RC))
return load ?
(HasAVX512 ? X86::VMOVSDZrm_alt :
HasAVX ? X86::VMOVSDrm_alt :
X86::MOVSDrm_alt) :
(HasAVX512 ? X86::VMOVSDZmr :
HasAVX ? X86::VMOVSDmr :
X86::MOVSDmr);
if (X86::VR64RegClass.hasSubClassEq(RC))
return load ? X86::MMX_MOVQ64rm : X86::MMX_MOVQ64mr;
if (X86::RFP64RegClass.hasSubClassEq(RC))
return load ? X86::LD_Fp64m : X86::ST_Fp64m;
if (X86::VK64RegClass.hasSubClassEq(RC)) {
assert(STI.hasBWI() && "KMOVQ requires BWI");
return load ? X86::KMOVQkm : X86::KMOVQmk;
}
llvm_unreachable("Unknown 8-byte regclass");
case 10:
assert(X86::RFP80RegClass.hasSubClassEq(RC) && "Unknown 10-byte regclass");
return load ? X86::LD_Fp80m : X86::ST_FpP80m;
case 16: {
if (X86::VR128XRegClass.hasSubClassEq(RC)) {
// If stack is realigned we can use aligned stores.
if (isStackAligned)
return load ?
(HasVLX ? X86::VMOVAPSZ128rm :
HasAVX512 ? X86::VMOVAPSZ128rm_NOVLX :
HasAVX ? X86::VMOVAPSrm :
X86::MOVAPSrm):
(HasVLX ? X86::VMOVAPSZ128mr :
HasAVX512 ? X86::VMOVAPSZ128mr_NOVLX :
HasAVX ? X86::VMOVAPSmr :
X86::MOVAPSmr);
else
return load ?
(HasVLX ? X86::VMOVUPSZ128rm :
HasAVX512 ? X86::VMOVUPSZ128rm_NOVLX :
HasAVX ? X86::VMOVUPSrm :
X86::MOVUPSrm):
(HasVLX ? X86::VMOVUPSZ128mr :
HasAVX512 ? X86::VMOVUPSZ128mr_NOVLX :
HasAVX ? X86::VMOVUPSmr :
X86::MOVUPSmr);
}
if (X86::BNDRRegClass.hasSubClassEq(RC)) {
if (STI.is64Bit())
return load ? X86::BNDMOV64rm : X86::BNDMOV64mr;
else
return load ? X86::BNDMOV32rm : X86::BNDMOV32mr;
}
llvm_unreachable("Unknown 16-byte regclass");
}
case 32:
assert(X86::VR256XRegClass.hasSubClassEq(RC) && "Unknown 32-byte regclass");
// If stack is realigned we can use aligned stores.
if (isStackAligned)
return load ?
(HasVLX ? X86::VMOVAPSZ256rm :
HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX :
X86::VMOVAPSYrm) :
(HasVLX ? X86::VMOVAPSZ256mr :
HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX :
X86::VMOVAPSYmr);
else
return load ?
(HasVLX ? X86::VMOVUPSZ256rm :
HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX :
X86::VMOVUPSYrm) :
(HasVLX ? X86::VMOVUPSZ256mr :
HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX :
X86::VMOVUPSYmr);
case 64:
assert(X86::VR512RegClass.hasSubClassEq(RC) && "Unknown 64-byte regclass");
assert(STI.hasAVX512() && "Using 512-bit register requires AVX512");
if (isStackAligned)
return load ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
else
return load ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
}
}
bool X86InstrInfo::getMemOperandWithOffset(
const MachineInstr &MemOp, const MachineOperand *&BaseOp, int64_t &Offset,
const TargetRegisterInfo *TRI) const {
const MCInstrDesc &Desc = MemOp.getDesc();
int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags);
if (MemRefBegin < 0)
return false;
MemRefBegin += X86II::getOperandBias(Desc);
BaseOp = &MemOp.getOperand(MemRefBegin + X86::AddrBaseReg);
if (!BaseOp->isReg()) // Can be an MO_FrameIndex
return false;
if (MemOp.getOperand(MemRefBegin + X86::AddrScaleAmt).getImm() != 1)
return false;
if (MemOp.getOperand(MemRefBegin + X86::AddrIndexReg).getReg() !=
X86::NoRegister)
return false;
const MachineOperand &DispMO = MemOp.getOperand(MemRefBegin + X86::AddrDisp);
// Displacement can be symbolic
if (!DispMO.isImm())
return false;
Offset = DispMO.getImm();
assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
"operands of type register.");
return true;
}
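// In other words, only the simple addressing mode [BaseReg + Disp] is
// matched: a scale other than 1, an index register, or a symbolic
// displacement all cause the analysis to report failure.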
static unsigned getStoreRegOpcode(unsigned SrcReg,
const TargetRegisterClass *RC,
bool isStackAligned,
const X86Subtarget &STI) {
return getLoadStoreRegOpcode(SrcReg, RC, isStackAligned, STI, false);
}
static unsigned getLoadRegOpcode(unsigned DestReg,
const TargetRegisterClass *RC,
bool isStackAligned,
const X86Subtarget &STI) {
return getLoadStoreRegOpcode(DestReg, RC, isStackAligned, STI, true);
}
void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned SrcReg, bool isKill, int FrameIdx,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
const MachineFunction &MF = *MBB.getParent();
assert(MF.getFrameInfo().getObjectSize(FrameIdx) >= TRI->getSpillSize(*RC) &&
"Stack slot too small for store");
unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16);
bool isAligned =
(Subtarget.getFrameLowering()->getStackAlignment() >= Alignment) ||
RI.canRealignStack(MF);
unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget);
addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc)), FrameIdx)
.addReg(SrcReg, getKillRegState(isKill));
}
void X86InstrInfo::storeRegToAddr(
MachineFunction &MF, unsigned SrcReg, bool isKill,
SmallVectorImpl<MachineOperand> &Addr, const TargetRegisterClass *RC,
ArrayRef<MachineMemOperand *> MMOs,
SmallVectorImpl<MachineInstr *> &NewMIs) const {
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
bool isAligned = !MMOs.empty() && MMOs.front()->getAlignment() >= Alignment;
unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget);
DebugLoc DL;
MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc));
for (unsigned i = 0, e = Addr.size(); i != e; ++i)
MIB.add(Addr[i]);
MIB.addReg(SrcReg, getKillRegState(isKill));
MIB.setMemRefs(MMOs);
NewMIs.push_back(MIB);
}
void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, int FrameIdx,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
const MachineFunction &MF = *MBB.getParent();
unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16);
bool isAligned =
(Subtarget.getFrameLowering()->getStackAlignment() >= Alignment) ||
RI.canRealignStack(MF);
unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget);
addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc), DestReg), FrameIdx);
}
void X86InstrInfo::loadRegFromAddr(
MachineFunction &MF, unsigned DestReg,
SmallVectorImpl<MachineOperand> &Addr, const TargetRegisterClass *RC,
ArrayRef<MachineMemOperand *> MMOs,
SmallVectorImpl<MachineInstr *> &NewMIs) const {
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
bool isAligned = !MMOs.empty() && MMOs.front()->getAlignment() >= Alignment;
unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget);
DebugLoc DL;
MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg);
for (unsigned i = 0, e = Addr.size(); i != e; ++i)
MIB.add(Addr[i]);
MIB.setMemRefs(MMOs);
NewMIs.push_back(MIB);
}
bool X86InstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
unsigned &SrcReg2, int &CmpMask,
int &CmpValue) const {
switch (MI.getOpcode()) {
default: break;
case X86::CMP64ri32:
case X86::CMP64ri8:
case X86::CMP32ri:
case X86::CMP32ri8:
case X86::CMP16ri:
case X86::CMP16ri8:
case X86::CMP8ri:
SrcReg = MI.getOperand(0).getReg();
SrcReg2 = 0;
if (MI.getOperand(1).isImm()) {
CmpMask = ~0;
CmpValue = MI.getOperand(1).getImm();
} else {
CmpMask = CmpValue = 0;
}
return true;
// A SUB can be used to perform a comparison.
case X86::SUB64rm:
case X86::SUB32rm:
case X86::SUB16rm:
case X86::SUB8rm:
SrcReg = MI.getOperand(1).getReg();
SrcReg2 = 0;
CmpMask = 0;
CmpValue = 0;
return true;
case X86::SUB64rr:
case X86::SUB32rr:
case X86::SUB16rr:
case X86::SUB8rr:
SrcReg = MI.getOperand(1).getReg();
SrcReg2 = MI.getOperand(2).getReg();
CmpMask = 0;
CmpValue = 0;
return true;
case X86::SUB64ri32:
case X86::SUB64ri8:
case X86::SUB32ri:
case X86::SUB32ri8:
case X86::SUB16ri:
case X86::SUB16ri8:
case X86::SUB8ri:
SrcReg = MI.getOperand(1).getReg();
SrcReg2 = 0;
if (MI.getOperand(2).isImm()) {
CmpMask = ~0;
CmpValue = MI.getOperand(2).getImm();
} else {
CmpMask = CmpValue = 0;
}
return true;
case X86::CMP64rr:
case X86::CMP32rr:
case X86::CMP16rr:
case X86::CMP8rr:
SrcReg = MI.getOperand(0).getReg();
SrcReg2 = MI.getOperand(1).getReg();
CmpMask = 0;
CmpValue = 0;
return true;
case X86::TEST8rr:
case X86::TEST16rr:
case X86::TEST32rr:
case X86::TEST64rr:
SrcReg = MI.getOperand(0).getReg();
if (MI.getOperand(1).getReg() != SrcReg)
return false;
// Compare against zero.
SrcReg2 = 0;
CmpMask = ~0;
CmpValue = 0;
return true;
}
return false;
}
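// For example, a CMP32ri of some register R against 42 is reported as
// SrcReg = R, SrcReg2 = 0, CmpMask = ~0, CmpValue = 42, while TEST32rr of R
// against itself is reported as a compare of R against zero.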
/// Check whether the first instruction, whose only
/// purpose is to update flags, can be made redundant.
/// CMPrr can be made redundant by SUBrr if the operands are the same.
/// This function can be extended later on.
/// SrcReg, SrcReg2: register operands for FlagI.
/// ImmValue: immediate for FlagI if it takes an immediate.
inline static bool isRedundantFlagInstr(const MachineInstr &FlagI,
unsigned SrcReg, unsigned SrcReg2,
int ImmMask, int ImmValue,
const MachineInstr &OI) {
if (((FlagI.getOpcode() == X86::CMP64rr && OI.getOpcode() == X86::SUB64rr) ||
(FlagI.getOpcode() == X86::CMP32rr && OI.getOpcode() == X86::SUB32rr) ||
(FlagI.getOpcode() == X86::CMP16rr && OI.getOpcode() == X86::SUB16rr) ||
(FlagI.getOpcode() == X86::CMP8rr && OI.getOpcode() == X86::SUB8rr)) &&
((OI.getOperand(1).getReg() == SrcReg &&
OI.getOperand(2).getReg() == SrcReg2) ||
(OI.getOperand(1).getReg() == SrcReg2 &&
OI.getOperand(2).getReg() == SrcReg)))
return true;
if (ImmMask != 0 &&
((FlagI.getOpcode() == X86::CMP64ri32 &&
OI.getOpcode() == X86::SUB64ri32) ||
(FlagI.getOpcode() == X86::CMP64ri8 &&
OI.getOpcode() == X86::SUB64ri8) ||
(FlagI.getOpcode() == X86::CMP32ri && OI.getOpcode() == X86::SUB32ri) ||
(FlagI.getOpcode() == X86::CMP32ri8 &&
OI.getOpcode() == X86::SUB32ri8) ||
(FlagI.getOpcode() == X86::CMP16ri && OI.getOpcode() == X86::SUB16ri) ||
(FlagI.getOpcode() == X86::CMP16ri8 &&
OI.getOpcode() == X86::SUB16ri8) ||
(FlagI.getOpcode() == X86::CMP8ri && OI.getOpcode() == X86::SUB8ri)) &&
OI.getOperand(1).getReg() == SrcReg &&
OI.getOperand(2).getImm() == ImmValue)
return true;
return false;
}
/// Check whether the definition can be converted
/// to remove a comparison against zero.
inline static bool isDefConvertible(const MachineInstr &MI, bool &NoSignFlag) {
NoSignFlag = false;
switch (MI.getOpcode()) {
default: return false;
// The shift instructions only modify ZF if their shift count is non-zero.
// N.B.: The processor truncates the shift count depending on the encoding.
case X86::SAR8ri: case X86::SAR16ri: case X86::SAR32ri:case X86::SAR64ri:
case X86::SHR8ri: case X86::SHR16ri: case X86::SHR32ri:case X86::SHR64ri:
return getTruncatedShiftCount(MI, 2) != 0;
// Some left shift instructions can be turned into LEA instructions but only
// if their flags aren't used. Avoid transforming such instructions.
case X86::SHL8ri: case X86::SHL16ri: case X86::SHL32ri:case X86::SHL64ri:{
unsigned ShAmt = getTruncatedShiftCount(MI, 2);
if (isTruncatedShiftCountForLEA(ShAmt)) return false;
return ShAmt != 0;
}
case X86::SHRD16rri8:case X86::SHRD32rri8:case X86::SHRD64rri8:
case X86::SHLD16rri8:case X86::SHLD32rri8:case X86::SHLD64rri8:
return getTruncatedShiftCount(MI, 3) != 0;
case X86::SUB64ri32: case X86::SUB64ri8: case X86::SUB32ri:
case X86::SUB32ri8: case X86::SUB16ri: case X86::SUB16ri8:
case X86::SUB8ri: case X86::SUB64rr: case X86::SUB32rr:
case X86::SUB16rr: case X86::SUB8rr: case X86::SUB64rm:
case X86::SUB32rm: case X86::SUB16rm: case X86::SUB8rm:
case X86::DEC64r: case X86::DEC32r: case X86::DEC16r: case X86::DEC8r:
case X86::ADD64ri32: case X86::ADD64ri8: case X86::ADD32ri:
case X86::ADD32ri8: case X86::ADD16ri: case X86::ADD16ri8:
case X86::ADD8ri: case X86::ADD64rr: case X86::ADD32rr:
case X86::ADD16rr: case X86::ADD8rr: case X86::ADD64rm:
case X86::ADD32rm: case X86::ADD16rm: case X86::ADD8rm:
case X86::INC64r: case X86::INC32r: case X86::INC16r: case X86::INC8r:
case X86::AND64ri32: case X86::AND64ri8: case X86::AND32ri:
case X86::AND32ri8: case X86::AND16ri: case X86::AND16ri8:
case X86::AND8ri: case X86::AND64rr: case X86::AND32rr:
case X86::AND16rr: case X86::AND8rr: case X86::AND64rm:
case X86::AND32rm: case X86::AND16rm: case X86::AND8rm:
case X86::XOR64ri32: case X86::XOR64ri8: case X86::XOR32ri:
case X86::XOR32ri8: case X86::XOR16ri: case X86::XOR16ri8:
case X86::XOR8ri: case X86::XOR64rr: case X86::XOR32rr:
case X86::XOR16rr: case X86::XOR8rr: case X86::XOR64rm:
case X86::XOR32rm: case X86::XOR16rm: case X86::XOR8rm:
case X86::OR64ri32: case X86::OR64ri8: case X86::OR32ri:
case X86::OR32ri8: case X86::OR16ri: case X86::OR16ri8:
case X86::OR8ri: case X86::OR64rr: case X86::OR32rr:
case X86::OR16rr: case X86::OR8rr: case X86::OR64rm:
case X86::OR32rm: case X86::OR16rm: case X86::OR8rm:
case X86::ADC64ri32: case X86::ADC64ri8: case X86::ADC32ri:
case X86::ADC32ri8: case X86::ADC16ri: case X86::ADC16ri8:
case X86::ADC8ri: case X86::ADC64rr: case X86::ADC32rr:
case X86::ADC16rr: case X86::ADC8rr: case X86::ADC64rm:
case X86::ADC32rm: case X86::ADC16rm: case X86::ADC8rm:
case X86::SBB64ri32: case X86::SBB64ri8: case X86::SBB32ri:
case X86::SBB32ri8: case X86::SBB16ri: case X86::SBB16ri8:
case X86::SBB8ri: case X86::SBB64rr: case X86::SBB32rr:
case X86::SBB16rr: case X86::SBB8rr: case X86::SBB64rm:
case X86::SBB32rm: case X86::SBB16rm: case X86::SBB8rm:
case X86::NEG8r: case X86::NEG16r: case X86::NEG32r: case X86::NEG64r:
case X86::SAR8r1: case X86::SAR16r1: case X86::SAR32r1:case X86::SAR64r1:
case X86::SHR8r1: case X86::SHR16r1: case X86::SHR32r1:case X86::SHR64r1:
case X86::SHL8r1: case X86::SHL16r1: case X86::SHL32r1:case X86::SHL64r1:
case X86::ANDN32rr: case X86::ANDN32rm:
case X86::ANDN64rr: case X86::ANDN64rm:
case X86::BLSI32rr: case X86::BLSI32rm:
case X86::BLSI64rr: case X86::BLSI64rm:
case X86::BLSMSK32rr:case X86::BLSMSK32rm:
case X86::BLSMSK64rr:case X86::BLSMSK64rm:
case X86::BLSR32rr: case X86::BLSR32rm:
case X86::BLSR64rr: case X86::BLSR64rm:
case X86::BZHI32rr: case X86::BZHI32rm:
case X86::BZHI64rr: case X86::BZHI64rm:
case X86::LZCNT16rr: case X86::LZCNT16rm:
case X86::LZCNT32rr: case X86::LZCNT32rm:
case X86::LZCNT64rr: case X86::LZCNT64rm:
case X86::POPCNT16rr:case X86::POPCNT16rm:
case X86::POPCNT32rr:case X86::POPCNT32rm:
case X86::POPCNT64rr:case X86::POPCNT64rm:
case X86::TZCNT16rr: case X86::TZCNT16rm:
case X86::TZCNT32rr: case X86::TZCNT32rm:
case X86::TZCNT64rr: case X86::TZCNT64rm:
case X86::BLCFILL32rr: case X86::BLCFILL32rm:
case X86::BLCFILL64rr: case X86::BLCFILL64rm:
case X86::BLCI32rr: case X86::BLCI32rm:
case X86::BLCI64rr: case X86::BLCI64rm:
case X86::BLCIC32rr: case X86::BLCIC32rm:
case X86::BLCIC64rr: case X86::BLCIC64rm:
case X86::BLCMSK32rr: case X86::BLCMSK32rm:
case X86::BLCMSK64rr: case X86::BLCMSK64rm:
case X86::BLCS32rr: case X86::BLCS32rm:
case X86::BLCS64rr: case X86::BLCS64rm:
case X86::BLSFILL32rr: case X86::BLSFILL32rm:
case X86::BLSFILL64rr: case X86::BLSFILL64rm:
case X86::BLSIC32rr: case X86::BLSIC32rm:
case X86::BLSIC64rr: case X86::BLSIC64rm:
case X86::T1MSKC32rr: case X86::T1MSKC32rm:
case X86::T1MSKC64rr: case X86::T1MSKC64rm:
case X86::TZMSK32rr: case X86::TZMSK32rm:
case X86::TZMSK64rr: case X86::TZMSK64rm:
return true;
case X86::BEXTR32rr: case X86::BEXTR64rr:
case X86::BEXTR32rm: case X86::BEXTR64rm:
case X86::BEXTRI32ri: case X86::BEXTRI32mi:
case X86::BEXTRI64ri: case X86::BEXTRI64mi:
// BEXTR doesn't update the sign flag so we can't use it.
NoSignFlag = true;
return true;
}
}
/// Check whether the use can be converted to remove a comparison against zero.
static X86::CondCode isUseDefConvertible(const MachineInstr &MI) {
switch (MI.getOpcode()) {
default: return X86::COND_INVALID;
case X86::NEG8r:
case X86::NEG16r:
case X86::NEG32r:
case X86::NEG64r:
return X86::COND_AE;
case X86::LZCNT16rr:
case X86::LZCNT32rr:
case X86::LZCNT64rr:
return X86::COND_B;
case X86::POPCNT16rr:
case X86::POPCNT32rr:
case X86::POPCNT64rr:
return X86::COND_E;
case X86::TZCNT16rr:
case X86::TZCNT32rr:
case X86::TZCNT64rr:
return X86::COND_B;
case X86::BSF16rr:
case X86::BSF32rr:
case X86::BSF64rr:
case X86::BSR16rr:
case X86::BSR32rr:
case X86::BSR64rr:
return X86::COND_E;
case X86::BLSI32rr:
case X86::BLSI64rr:
return X86::COND_AE;
case X86::BLSR32rr:
case X86::BLSR64rr:
case X86::BLSMSK32rr:
case X86::BLSMSK64rr:
return X86::COND_B;
// TODO: TBM instructions.
}
}
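// Each condition returned above holds exactly when the instruction's source
// operand was zero: NEG and BLSI leave CF clear (COND_AE) only for a zero
// input, LZCNT/TZCNT and BLSR/BLSMSK set CF (COND_B) for a zero input, and
// POPCNT/BSF/BSR set ZF (COND_E) for a zero input.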
/// Check if there exists an earlier instruction that
/// operates on the same source operands and sets flags in the same way as
/// Compare; remove Compare if possible.
bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
unsigned SrcReg2, int CmpMask,
int CmpValue,
const MachineRegisterInfo *MRI) const {
// Check whether we can replace SUB with CMP.
switch (CmpInstr.getOpcode()) {
default: break;
case X86::SUB64ri32:
case X86::SUB64ri8:
case X86::SUB32ri:
case X86::SUB32ri8:
case X86::SUB16ri:
case X86::SUB16ri8:
case X86::SUB8ri:
case X86::SUB64rm:
case X86::SUB32rm:
case X86::SUB16rm:
case X86::SUB8rm:
case X86::SUB64rr:
case X86::SUB32rr:
case X86::SUB16rr:
case X86::SUB8rr: {
if (!MRI->use_nodbg_empty(CmpInstr.getOperand(0).getReg()))
return false;
// There is no use of the destination register, we can replace SUB with CMP.
unsigned NewOpcode = 0;
switch (CmpInstr.getOpcode()) {
default: llvm_unreachable("Unreachable!");
case X86::SUB64rm: NewOpcode = X86::CMP64rm; break;
case X86::SUB32rm: NewOpcode = X86::CMP32rm; break;
case X86::SUB16rm: NewOpcode = X86::CMP16rm; break;
case X86::SUB8rm: NewOpcode = X86::CMP8rm; break;
case X86::SUB64rr: NewOpcode = X86::CMP64rr; break;
case X86::SUB32rr: NewOpcode = X86::CMP32rr; break;
case X86::SUB16rr: NewOpcode = X86::CMP16rr; break;
case X86::SUB8rr: NewOpcode = X86::CMP8rr; break;
case X86::SUB64ri32: NewOpcode = X86::CMP64ri32; break;
case X86::SUB64ri8: NewOpcode = X86::CMP64ri8; break;
case X86::SUB32ri: NewOpcode = X86::CMP32ri; break;
case X86::SUB32ri8: NewOpcode = X86::CMP32ri8; break;
case X86::SUB16ri: NewOpcode = X86::CMP16ri; break;
case X86::SUB16ri8: NewOpcode = X86::CMP16ri8; break;
case X86::SUB8ri: NewOpcode = X86::CMP8ri; break;
}
CmpInstr.setDesc(get(NewOpcode));
CmpInstr.RemoveOperand(0);
// Fall through to optimize Cmp if Cmp is CMPrr or CMPri.
if (NewOpcode == X86::CMP64rm || NewOpcode == X86::CMP32rm ||
NewOpcode == X86::CMP16rm || NewOpcode == X86::CMP8rm)
return false;
}
}
// Get the unique definition of SrcReg.
MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
if (!MI) return false;
// CmpInstr is the first instruction of the BB.
MachineBasicBlock::iterator I = CmpInstr, Def = MI;
// If we are comparing against zero, check whether we can use MI to update
// EFLAGS. If MI is not in the same BB as CmpInstr, do not optimize.
bool IsCmpZero = (CmpMask != 0 && CmpValue == 0);
if (IsCmpZero && MI->getParent() != CmpInstr.getParent())
return false;
// If we have a use of the source register between the def and our compare
// instruction we can eliminate the compare iff the use sets EFLAGS in the
// right way.
bool ShouldUpdateCC = false;
bool NoSignFlag = false;
X86::CondCode NewCC = X86::COND_INVALID;
if (IsCmpZero && !isDefConvertible(*MI, NoSignFlag)) {
// Scan forward from the def of SrcReg until we hit a convertible use of it
// or the compare instruction.
for (MachineBasicBlock::iterator J = MI;; ++J) {
// Do we have a convertible instruction?
NewCC = isUseDefConvertible(*J);
if (NewCC != X86::COND_INVALID && J->getOperand(1).isReg() &&
J->getOperand(1).getReg() == SrcReg) {
assert(J->definesRegister(X86::EFLAGS) && "Must be an EFLAGS def!");
ShouldUpdateCC = true; // Update CC later on.
// This is not a def of SrcReg, but still a def of EFLAGS. Keep going
// with the new def.
Def = J;
MI = &*Def;
break;
}
if (J == I)
return false;
}
}
// We are searching for an earlier instruction that can make CmpInstr
// redundant and that instruction will be saved in Sub.
MachineInstr *Sub = nullptr;
const TargetRegisterInfo *TRI = &getRegisterInfo();
// We iterate backward, starting from the instruction before CmpInstr and
// stop when reaching the definition of a source register or done with the BB.
// RI points to the instruction before CmpInstr.
// If the definition is in this basic block, RE points to the definition;
// otherwise, RE is the rend of the basic block.
MachineBasicBlock::reverse_iterator
RI = ++I.getReverse(),
RE = CmpInstr.getParent() == MI->getParent()
? Def.getReverse() /* points to MI */
: CmpInstr.getParent()->rend();
MachineInstr *Movr0Inst = nullptr;
for (; RI != RE; ++RI) {
MachineInstr &Instr = *RI;
// Check whether CmpInstr can be made redundant by the current instruction.
if (!IsCmpZero && isRedundantFlagInstr(CmpInstr, SrcReg, SrcReg2, CmpMask,
CmpValue, Instr)) {
Sub = &Instr;
break;
}
if (Instr.modifiesRegister(X86::EFLAGS, TRI) ||
Instr.readsRegister(X86::EFLAGS, TRI)) {
// This instruction modifies or uses EFLAGS.
// MOV32r0 etc. are implemented with xor which clobbers condition code.
// They are safe to move up, if the definition to EFLAGS is dead and
// earlier instructions do not read or write EFLAGS.
if (!Movr0Inst && Instr.getOpcode() == X86::MOV32r0 &&
Instr.registerDefIsDead(X86::EFLAGS, TRI)) {
Movr0Inst = &Instr;
continue;
}
// We can't remove CmpInstr.
return false;
}
}
// Return false if no candidates exist.
if (!IsCmpZero && !Sub)
return false;
bool IsSwapped = (SrcReg2 != 0 && Sub->getOperand(1).getReg() == SrcReg2 &&
Sub->getOperand(2).getReg() == SrcReg);
// Scan forward from the instruction after CmpInstr for uses of EFLAGS.
// It is safe to remove CmpInstr if EFLAGS is redefined or killed.
// If we are done with the basic block, we need to check whether EFLAGS is
// live-out.
bool IsSafe = false;
SmallVector<std::pair<MachineInstr*, X86::CondCode>, 4> OpsToUpdate;
MachineBasicBlock::iterator E = CmpInstr.getParent()->end();
for (++I; I != E; ++I) {
const MachineInstr &Instr = *I;
bool ModifyEFLAGS = Instr.modifiesRegister(X86::EFLAGS, TRI);
bool UseEFLAGS = Instr.readsRegister(X86::EFLAGS, TRI);
// If this instruction both uses and updates EFLAGS, we must check how it uses them.
if (!UseEFLAGS && ModifyEFLAGS) {
// It is safe to remove CmpInstr if EFLAGS is updated again.
IsSafe = true;
break;
}
if (!UseEFLAGS && !ModifyEFLAGS)
continue;
// EFLAGS is used by this instruction.
X86::CondCode OldCC = X86::COND_INVALID;
if (IsCmpZero || IsSwapped) {
// We decode the condition code from opcode.
if (Instr.isBranch())
OldCC = X86::getCondFromBranch(Instr);
else {
OldCC = X86::getCondFromSETCC(Instr);
if (OldCC == X86::COND_INVALID)
OldCC = X86::getCondFromCMov(Instr);
}
if (OldCC == X86::COND_INVALID) return false;
}
X86::CondCode ReplacementCC = X86::COND_INVALID;
if (IsCmpZero) {
switch (OldCC) {
default: break;
case X86::COND_A: case X86::COND_AE:
case X86::COND_B: case X86::COND_BE:
case X86::COND_G: case X86::COND_GE:
case X86::COND_L: case X86::COND_LE:
case X86::COND_O: case X86::COND_NO:
// CF and OF are used, we can't perform this optimization.
return false;
case X86::COND_S: case X86::COND_NS:
// If SF is used, but the instruction doesn't update the SF, then we
// can't do the optimization.
if (NoSignFlag)
return false;
break;
}
// If we're updating the condition code check if we have to reverse the
// condition.
if (ShouldUpdateCC)
switch (OldCC) {
default:
return false;
case X86::COND_E:
ReplacementCC = NewCC;
break;
case X86::COND_NE:
ReplacementCC = GetOppositeBranchCondition(NewCC);
break;
}
} else if (IsSwapped) {
// If we have SUB(r1, r2) and CMP(r2, r1), the condition code needs
// to be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc.
// We swap the condition code and synthesize the new opcode.
ReplacementCC = getSwappedCondition(OldCC);
if (ReplacementCC == X86::COND_INVALID) return false;
}
if ((ShouldUpdateCC || IsSwapped) && ReplacementCC != OldCC) {
// Push the MachineInstr to OpsToUpdate.
// If it is safe to remove CmpInstr, the condition code of these
// instructions will be modified.
OpsToUpdate.push_back(std::make_pair(&*I, ReplacementCC));
}
if (ModifyEFLAGS || Instr.killsRegister(X86::EFLAGS, TRI)) {
// It is safe to remove CmpInstr if EFLAGS is updated again or killed.
IsSafe = true;
break;
}
}
// If EFLAGS is not killed nor re-defined, we should check whether it is
// live-out. If it is live-out, do not optimize.
if ((IsCmpZero || IsSwapped) && !IsSafe) {
MachineBasicBlock *MBB = CmpInstr.getParent();
for (MachineBasicBlock *Successor : MBB->successors())
if (Successor->isLiveIn(X86::EFLAGS))
return false;
}
// The instruction to be updated is either Sub or MI.
Sub = IsCmpZero ? MI : Sub;
// Move Movr0Inst to the appropriate place before Sub.
if (Movr0Inst) {
// Look backwards until we find a def that doesn't use the current EFLAGS.
Def = Sub;
MachineBasicBlock::reverse_iterator InsertI = Def.getReverse(),
InsertE = Sub->getParent()->rend();
for (; InsertI != InsertE; ++InsertI) {
MachineInstr *Instr = &*InsertI;
if (!Instr->readsRegister(X86::EFLAGS, TRI) &&
Instr->modifiesRegister(X86::EFLAGS, TRI)) {
Sub->getParent()->remove(Movr0Inst);
Instr->getParent()->insert(MachineBasicBlock::iterator(Instr),
Movr0Inst);
break;
}
}
if (InsertI == InsertE)
return false;
}
// Make sure Sub instruction defines EFLAGS and mark the def live.
MachineOperand *FlagDef = Sub->findRegisterDefOperand(X86::EFLAGS);
assert(FlagDef && "Unable to locate a def EFLAGS operand");
FlagDef->setIsDead(false);
CmpInstr.eraseFromParent();
// Modify the condition code of instructions in OpsToUpdate.
for (auto &Op : OpsToUpdate) {
Op.first->getOperand(Op.first->getDesc().getNumOperands() - 1)
.setImm(Op.second);
}
return true;
}
/// Try to remove the load by folding it to a register
/// operand at the use. We fold the load instructions if load defines a virtual
/// register, the virtual register is used once in the same BB, and the
/// instructions in-between do not load or store, and have no side effects.
MachineInstr *X86InstrInfo::optimizeLoadInstr(MachineInstr &MI,
const MachineRegisterInfo *MRI,
unsigned &FoldAsLoadDefReg,
MachineInstr *&DefMI) const {
// Check whether we can move DefMI here.
DefMI = MRI->getVRegDef(FoldAsLoadDefReg);
assert(DefMI);
bool SawStore = false;
if (!DefMI->isSafeToMove(nullptr, SawStore))
return nullptr;
// Collect information about virtual register operands of MI.
SmallVector<unsigned, 1> SrcOperandIds;
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
if (Reg != FoldAsLoadDefReg)
continue;
// Do not fold if we have a subreg use or a def.
if (MO.getSubReg() || MO.isDef())
return nullptr;
SrcOperandIds.push_back(i);
}
if (SrcOperandIds.empty())
return nullptr;
// Check whether we can fold the def into SrcOperandId.
if (MachineInstr *FoldMI = foldMemoryOperand(MI, SrcOperandIds, *DefMI)) {
FoldAsLoadDefReg = 0;
return FoldMI;
}
return nullptr;
}
/// Expand a single-def pseudo instruction to a two-addr
/// instruction with two undef reads of the register being defined.
/// This is used for mapping:
/// %xmm4 = V_SET0
/// to:
/// %xmm4 = PXORrr undef %xmm4, undef %xmm4
///
static bool Expand2AddrUndef(MachineInstrBuilder &MIB,
const MCInstrDesc &Desc) {
assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction.");
unsigned Reg = MIB->getOperand(0).getReg();
MIB->setDesc(Desc);
// MachineInstr::addOperand() will insert explicit operands before any
// implicit operands.
MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef);
// But we don't trust that.
assert(MIB->getOperand(1).getReg() == Reg &&
MIB->getOperand(2).getReg() == Reg && "Misplaced operand");
return true;
}
/// Expand a single-def pseudo instruction to a two-addr
/// instruction with two %k0 reads.
/// This is used for mapping:
/// %k4 = K_SET1
/// to:
/// %k4 = KXNORrr %k0, %k0
static bool Expand2AddrKreg(MachineInstrBuilder &MIB,
const MCInstrDesc &Desc, unsigned Reg) {
assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction.");
MIB->setDesc(Desc);
MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef);
return true;
}
static bool expandMOV32r1(MachineInstrBuilder &MIB, const TargetInstrInfo &TII,
bool MinusOne) {
MachineBasicBlock &MBB = *MIB->getParent();
DebugLoc DL = MIB->getDebugLoc();
unsigned Reg = MIB->getOperand(0).getReg();
// Insert the XOR.
BuildMI(MBB, MIB.getInstr(), DL, TII.get(X86::XOR32rr), Reg)
.addReg(Reg, RegState::Undef)
.addReg(Reg, RegState::Undef);
// Turn the pseudo into an INC or DEC.
MIB->setDesc(TII.get(MinusOne ? X86::DEC32r : X86::INC32r));
MIB.addReg(Reg);
return true;
}
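// The resulting sequence is "xor r, r" followed by "inc r" (or "dec r" for
// the -1 pseudo), which materializes the constant without an immediate
// operand at the cost of clobbering EFLAGS.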
static bool ExpandMOVImmSExti8(MachineInstrBuilder &MIB,
const TargetInstrInfo &TII,
const X86Subtarget &Subtarget) {
MachineBasicBlock &MBB = *MIB->getParent();
DebugLoc DL = MIB->getDebugLoc();
int64_t Imm = MIB->getOperand(1).getImm();
assert(Imm != 0 && "Using push/pop for 0 is not efficient.");
MachineBasicBlock::iterator I = MIB.getInstr();
int StackAdjustment;
if (Subtarget.is64Bit()) {
assert(MIB->getOpcode() == X86::MOV64ImmSExti8 ||
MIB->getOpcode() == X86::MOV32ImmSExti8);
// Can't use push/pop lowering if the function might write to the red zone.
X86MachineFunctionInfo *X86FI =
MBB.getParent()->getInfo<X86MachineFunctionInfo>();
if (X86FI->getUsesRedZone()) {
MIB->setDesc(TII.get(MIB->getOpcode() ==
X86::MOV32ImmSExti8 ? X86::MOV32ri : X86::MOV64ri));
return true;
}
// 64-bit mode doesn't have 32-bit push/pop, so use 64-bit operations and
// widen the register if necessary.
StackAdjustment = 8;
BuildMI(MBB, I, DL, TII.get(X86::PUSH64i8)).addImm(Imm);
MIB->setDesc(TII.get(X86::POP64r));
MIB->getOperand(0)
.setReg(getX86SubSuperRegister(MIB->getOperand(0).getReg(), 64));
} else {
assert(MIB->getOpcode() == X86::MOV32ImmSExti8);
StackAdjustment = 4;
BuildMI(MBB, I, DL, TII.get(X86::PUSH32i8)).addImm(Imm);
MIB->setDesc(TII.get(X86::POP32r));
}
// Build CFI if necessary.
MachineFunction &MF = *MBB.getParent();
const X86FrameLowering *TFL = Subtarget.getFrameLowering();
bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
bool NeedsDwarfCFI =
!IsWin64Prologue &&
(MF.getMMI().hasDebugInfo() || MF.getFunction().needsUnwindTableEntry());
bool EmitCFI = !TFL->hasFP(MF) && NeedsDwarfCFI;
if (EmitCFI) {
TFL->BuildCFI(MBB, I, DL,
MCCFIInstruction::createAdjustCfaOffset(nullptr, StackAdjustment));
TFL->BuildCFI(MBB, std::next(I), DL,
MCCFIInstruction::createAdjustCfaOffset(nullptr, -StackAdjustment));
}
return true;
}
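// Size note: "push imm8; pop reg" is typically three bytes (two for the
// push, one for the pop), versus five bytes for a MOV32ri with a full
// 32-bit immediate, which is what makes this lowering attractive when
// optimizing for size.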
// LoadStackGuard has so far only been implemented for 64-bit MachO. Different
// code sequence is needed for other targets.
static void expandLoadStackGuard(MachineInstrBuilder &MIB,
const TargetInstrInfo &TII) {
MachineBasicBlock &MBB = *MIB->getParent();
DebugLoc DL = MIB->getDebugLoc();
unsigned Reg = MIB->getOperand(0).getReg();
const GlobalValue *GV =
cast<GlobalValue>((*MIB->memoperands_begin())->getValue());
auto Flags = MachineMemOperand::MOLoad |
MachineMemOperand::MODereferenceable |
MachineMemOperand::MOInvariant;
MachineMemOperand *MMO = MBB.getParent()->getMachineMemOperand(
MachinePointerInfo::getGOT(*MBB.getParent()), Flags, 8, 8);
MachineBasicBlock::iterator I = MIB.getInstr();
BuildMI(MBB, I, DL, TII.get(X86::MOV64rm), Reg).addReg(X86::RIP).addImm(1)
.addReg(0).addGlobalAddress(GV, 0, X86II::MO_GOTPCREL).addReg(0)
.addMemOperand(MMO);
MIB->setDebugLoc(DL);
MIB->setDesc(TII.get(X86::MOV64rm));
MIB.addReg(Reg, RegState::Kill).addImm(1).addReg(0).addImm(0).addReg(0);
}
static bool expandXorFP(MachineInstrBuilder &MIB, const TargetInstrInfo &TII) {
MachineBasicBlock &MBB = *MIB->getParent();
MachineFunction &MF = *MBB.getParent();
const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
unsigned XorOp =
MIB->getOpcode() == X86::XOR64_FP ? X86::XOR64rr : X86::XOR32rr;
MIB->setDesc(TII.get(XorOp));
MIB.addReg(TRI->getFrameRegister(MF), RegState::Undef);
return true;
}
// This is used to handle spills for 128/256-bit registers when we have AVX512,
// but not VLX. If it uses an extended register we need to use an instruction
// that loads the lower 128/256-bit, but is available with only AVX512F.
static bool expandNOVLXLoad(MachineInstrBuilder &MIB,
const TargetRegisterInfo *TRI,
const MCInstrDesc &LoadDesc,
const MCInstrDesc &BroadcastDesc,
unsigned SubIdx) {
unsigned DestReg = MIB->getOperand(0).getReg();
// Check if DestReg is XMM16-31 or YMM16-31.
if (TRI->getEncodingValue(DestReg) < 16) {
// We can use a normal VEX encoded load.
MIB->setDesc(LoadDesc);
} else {
// Use a 128/256-bit VBROADCAST instruction.
MIB->setDesc(BroadcastDesc);
// Change the destination to a 512-bit register.
DestReg = TRI->getMatchingSuperReg(DestReg, SubIdx, &X86::VR512RegClass);
MIB->getOperand(0).setReg(DestReg);
}
return true;
}
// This is used to handle spills for 128/256-bit registers when we have AVX512,
// but not VLX. If it uses an extended register we need to use an instruction
// that stores the lower 128/256-bit, but is available with only AVX512F.
static bool expandNOVLXStore(MachineInstrBuilder &MIB,
const TargetRegisterInfo *TRI,
const MCInstrDesc &StoreDesc,
const MCInstrDesc &ExtractDesc,
unsigned SubIdx) {
unsigned SrcReg = MIB->getOperand(X86::AddrNumOperands).getReg();
// Check if SrcReg is XMM16-31 or YMM16-31.
if (TRI->getEncodingValue(SrcReg) < 16) {
// We can use a normal VEX encoded store.
MIB->setDesc(StoreDesc);
} else {
// Use a VEXTRACTF instruction.
MIB->setDesc(ExtractDesc);
// Change the source to a 512-bit register.
SrcReg = TRI->getMatchingSuperReg(SrcReg, SubIdx, &X86::VR512RegClass);
MIB->getOperand(X86::AddrNumOperands).setReg(SrcReg);
MIB.addImm(0x0); // Append immediate to extract from the lower bits.
}
return true;
}
static bool expandSHXDROT(MachineInstrBuilder &MIB, const MCInstrDesc &Desc) {
MIB->setDesc(Desc);
int64_t ShiftAmt = MIB->getOperand(2).getImm();
// Temporarily remove the immediate so we can add another source register.
MIB->RemoveOperand(2);
// Add the register. Don't copy the kill flag if there is one.
MIB.addReg(MIB->getOperand(1).getReg(),
getUndefRegState(MIB->getOperand(1).isUndef()));
// Add back the immediate.
MIB.addImm(ShiftAmt);
return true;
}
bool X86InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
bool HasAVX = Subtarget.hasAVX();
MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
switch (MI.getOpcode()) {
case X86::MOV32r0:
return Expand2AddrUndef(MIB, get(X86::XOR32rr));
case X86::MOV32r1:
return expandMOV32r1(MIB, *this, /*MinusOne=*/ false);
case X86::MOV32r_1:
return expandMOV32r1(MIB, *this, /*MinusOne=*/ true);
case X86::MOV32ImmSExti8:
case X86::MOV64ImmSExti8:
return ExpandMOVImmSExti8(MIB, *this, Subtarget);
case X86::SETB_C8r:
return Expand2AddrUndef(MIB, get(X86::SBB8rr));
case X86::SETB_C16r:
return Expand2AddrUndef(MIB, get(X86::SBB16rr));
case X86::SETB_C32r:
return Expand2AddrUndef(MIB, get(X86::SBB32rr));
case X86::SETB_C64r:
return Expand2AddrUndef(MIB, get(X86::SBB64rr));
case X86::MMX_SET0:
return Expand2AddrUndef(MIB, get(X86::MMX_PXORirr));
case X86::V_SET0:
case X86::FsFLD0SS:
case X86::FsFLD0SD:
return Expand2AddrUndef(MIB, get(HasAVX ? X86::VXORPSrr : X86::XORPSrr));
case X86::AVX_SET0: {
assert(HasAVX && "AVX not supported");
const TargetRegisterInfo *TRI = &getRegisterInfo();
unsigned SrcReg = MIB->getOperand(0).getReg();
unsigned XReg = TRI->getSubReg(SrcReg, X86::sub_xmm);
MIB->getOperand(0).setReg(XReg);
Expand2AddrUndef(MIB, get(X86::VXORPSrr));
MIB.addReg(SrcReg, RegState::ImplicitDefine);
return true;
}
case X86::AVX512_128_SET0:
case X86::AVX512_FsFLD0SS:
case X86::AVX512_FsFLD0SD: {
bool HasVLX = Subtarget.hasVLX();
unsigned SrcReg = MIB->getOperand(0).getReg();
const TargetRegisterInfo *TRI = &getRegisterInfo();
if (HasVLX || TRI->getEncodingValue(SrcReg) < 16)
return Expand2AddrUndef(MIB,
get(HasVLX ? X86::VPXORDZ128rr : X86::VXORPSrr));
// Extended register without VLX. Use a larger XOR.
SrcReg =
TRI->getMatchingSuperReg(SrcReg, X86::sub_xmm, &X86::VR512RegClass);
MIB->getOperand(0).setReg(SrcReg);
return Expand2AddrUndef(MIB, get(X86::VPXORDZrr));
}
case X86::AVX512_256_SET0:
case X86::AVX512_512_SET0: {
bool HasVLX = Subtarget.hasVLX();
unsigned SrcReg = MIB->getOperand(0).getReg();
const TargetRegisterInfo *TRI = &getRegisterInfo();
if (HasVLX || TRI->getEncodingValue(SrcReg) < 16) {
unsigned XReg = TRI->getSubReg(SrcReg, X86::sub_xmm);
MIB->getOperand(0).setReg(XReg);
Expand2AddrUndef(MIB,
get(HasVLX ? X86::VPXORDZ128rr : X86::VXORPSrr));
MIB.addReg(SrcReg, RegState::ImplicitDefine);
return true;
}
if (MI.getOpcode() == X86::AVX512_256_SET0) {
// No VLX so we must reference a zmm.
unsigned ZReg =
TRI->getMatchingSuperReg(SrcReg, X86::sub_ymm, &X86::VR512RegClass);
MIB->getOperand(0).setReg(ZReg);
}
return Expand2AddrUndef(MIB, get(X86::VPXORDZrr));
}
case X86::V_SETALLONES:
return Expand2AddrUndef(MIB, get(HasAVX ? X86::VPCMPEQDrr : X86::PCMPEQDrr));
case X86::AVX2_SETALLONES:
return Expand2AddrUndef(MIB, get(X86::VPCMPEQDYrr));
case X86::AVX1_SETALLONES: {
unsigned Reg = MIB->getOperand(0).getReg();
// VCMPPSYrri with an immediate 0xf should produce VCMPTRUEPS.
MIB->setDesc(get(X86::VCMPPSYrri));
MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef).addImm(0xf);
return true;
}
case X86::AVX512_512_SETALLONES: {
unsigned Reg = MIB->getOperand(0).getReg();
MIB->setDesc(get(X86::VPTERNLOGDZrri));
// VPTERNLOGD needs 3 register inputs and an immediate.
// 0xff will return 1s for any input.
MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef)
.addReg(Reg, RegState::Undef).addImm(0xff);
return true;
}
case X86::AVX512_512_SEXT_MASK_32:
case X86::AVX512_512_SEXT_MASK_64: {
unsigned Reg = MIB->getOperand(0).getReg();
unsigned MaskReg = MIB->getOperand(1).getReg();
unsigned MaskState = getRegState(MIB->getOperand(1));
unsigned Opc = (MI.getOpcode() == X86::AVX512_512_SEXT_MASK_64) ?
X86::VPTERNLOGQZrrikz : X86::VPTERNLOGDZrrikz;
MI.RemoveOperand(1);
MIB->setDesc(get(Opc));
// VPTERNLOG needs 3 register inputs and an immediate.
// 0xff will return 1s for any input.
MIB.addReg(Reg, RegState::Undef).addReg(MaskReg, MaskState)
.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef).addImm(0xff);
return true;
}
case X86::VMOVAPSZ128rm_NOVLX:
return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVAPSrm),
get(X86::VBROADCASTF32X4rm), X86::sub_xmm);
case X86::VMOVUPSZ128rm_NOVLX:
return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVUPSrm),
get(X86::VBROADCASTF32X4rm), X86::sub_xmm);
case X86::VMOVAPSZ256rm_NOVLX:
return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVAPSYrm),
get(X86::VBROADCASTF64X4rm), X86::sub_ymm);
case X86::VMOVUPSZ256rm_NOVLX:
return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVUPSYrm),
get(X86::VBROADCASTF64X4rm), X86::sub_ymm);
case X86::VMOVAPSZ128mr_NOVLX:
return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVAPSmr),
get(X86::VEXTRACTF32x4Zmr), X86::sub_xmm);
case X86::VMOVUPSZ128mr_NOVLX:
return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSmr),
get(X86::VEXTRACTF32x4Zmr), X86::sub_xmm);
case X86::VMOVAPSZ256mr_NOVLX:
return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVAPSYmr),
get(X86::VEXTRACTF64x4Zmr), X86::sub_ymm);
case X86::VMOVUPSZ256mr_NOVLX:
return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSYmr),
get(X86::VEXTRACTF64x4Zmr), X86::sub_ymm);
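// MOV32ri64 materializes a 32-bit immediate into a 64-bit register; writing
// the 32-bit sub-register implicitly zeros the upper half. A sketch with
// hypothetical operands:
//   MOV32ri64 %rax, 42  ->  movl $42, %eax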
case X86::MOV32ri64: {
unsigned Reg = MIB->getOperand(0).getReg();
unsigned Reg32 = RI.getSubReg(Reg, X86::sub_32bit);
MI.setDesc(get(X86::MOV32ri));
MIB->getOperand(0).setReg(Reg32);
MIB.addReg(Reg, RegState::ImplicitDefine);
return true;
}
// KNL does not recognize dependency-breaking idioms for mask registers,
// so kxnor %k1, %k1, %k2 has a RAW dependence on %k1.
// Using %k0 as the undef input register is a performance heuristic based
// on the assumption that %k0 is used less frequently than the other mask
// registers, since it is not usable as a write mask.
// FIXME: A more advanced approach would be to choose the best input mask
// register based on context.
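// e.g. KSET1W becomes, with a hypothetical destination %k2:
//   kxnorw %k0, %k0, %k2   (both %k0 inputs marked undef)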
case X86::KSET0W: return Expand2AddrKreg(MIB, get(X86::KXORWrr), X86::K0);
case X86::KSET0D: return Expand2AddrKreg(MIB, get(X86::KXORDrr), X86::K0);
case X86::KSET0Q: return Expand2AddrKreg(MIB, get(X86::KXORQrr), X86::K0);
case X86::KSET1W: return Expand2AddrKreg(MIB, get(X86::KXNORWrr), X86::K0);
case X86::KSET1D: return Expand2AddrKreg(MIB, get(X86::KXNORDrr), X86::K0);
case X86::KSET1Q: return Expand2AddrKreg(MIB, get(X86::KXNORQrr), X86::K0);
case TargetOpcode::LOAD_STACK_GUARD:
expandLoadStackGuard(MIB, *this);
return true;
case X86::XOR64_FP:
case X86::XOR32_FP:
return expandXorFP(MIB, *this);
case X86::SHLDROT32ri: return expandSHXDROT(MIB, get(X86::SHLD32rri8));
case X86::SHLDROT64ri: return expandSHXDROT(MIB, get(X86::SHLD64rri8));
case X86::SHRDROT32ri: return expandSHXDROT(MIB, get(X86::SHRD32rri8));
case X86::SHRDROT64ri: return expandSHXDROT(MIB, get(X86::SHRD64rri8));
case X86::ADD8rr_DB: MIB->setDesc(get(X86::OR8rr)); break;
case X86::ADD16rr_DB: MIB->setDesc(get(X86::OR16rr)); break;
case X86::ADD32rr_DB: MIB->setDesc(get(X86::OR32rr)); break;
case X86::ADD64rr_DB: MIB->setDesc(get(X86::OR64rr)); break;
case X86::ADD8ri_DB: MIB->setDesc(get(X86::OR8ri)); break;
case X86::ADD16ri_DB: MIB->setDesc(get(X86::OR16ri)); break;
case X86::ADD32ri_DB: MIB->setDesc(get(X86::OR32ri)); break;
case X86::ADD64ri32_DB: MIB->setDesc(get(X86::OR64ri32)); break;
case X86::ADD16ri8_DB: MIB->setDesc(get(X86::OR16ri8)); break;
case X86::ADD32ri8_DB: MIB->setDesc(get(X86::OR32ri8)); break;
case X86::ADD64ri8_DB: MIB->setDesc(get(X86::OR64ri8)); break;
}
return false;
}
/// Return true for all instructions that only update
/// the first 32 or 64 bits of the destination register and leave the rest
/// unmodified. This can be used to avoid folding loads if the instructions
/// only update part of the destination register, and the non-updated part is
/// not needed, e.g. cvtss2sd, sqrtss. Unfolding the load from these
/// instructions breaks the partial register dependency and can improve
/// performance, e.g.:
///
/// movss (%rdi), %xmm0
/// cvtss2sd %xmm0, %xmm0
///
/// Instead of
/// cvtss2sd (%rdi), %xmm0
///
/// FIXME: This should be turned into a TSFlags.
///
static bool hasPartialRegUpdate(unsigned Opcode,
const X86Subtarget &Subtarget,
bool ForLoadFold = false) {
switch (Opcode) {
case X86::CVTSI2SSrr:
case X86::CVTSI2SSrm:
case X86::CVTSI642SSrr:
case X86::CVTSI642SSrm:
case X86::CVTSI2SDrr:
case X86::CVTSI2SDrm:
case X86::CVTSI642SDrr:
case X86::CVTSI642SDrm:
// Load folding won't affect the undef register update since the input is
// a GPR.
return !ForLoadFold;
case X86::CVTSD2SSrr:
case X86::CVTSD2SSrm:
case X86::CVTSS2SDrr:
case X86::CVTSS2SDrm:
case X86::MOVHPDrm:
case X86::MOVHPSrm:
case X86::MOVLPDrm:
case X86::MOVLPSrm:
case X86::RCPSSr:
case X86::RCPSSm:
case X86::RCPSSr_Int:
case X86::RCPSSm_Int:
case X86::ROUNDSDr:
case X86::ROUNDSDm:
case X86::ROUNDSSr:
case X86::ROUNDSSm:
case X86::RSQRTSSr:
case X86::RSQRTSSm:
case X86::RSQRTSSr_Int:
case X86::RSQRTSSm_Int:
case X86::SQRTSSr:
case X86::SQRTSSm:
case X86::SQRTSSr_Int:
case X86::SQRTSSm_Int:
case X86::SQRTSDr:
case X86::SQRTSDm:
case X86::SQRTSDr_Int:
case X86::SQRTSDm_Int:
return true;
// GPR
case X86::POPCNT32rm:
case X86::POPCNT32rr:
case X86::POPCNT64rm:
case X86::POPCNT64rr:
return Subtarget.hasPOPCNTFalseDeps();
case X86::LZCNT32rm:
case X86::LZCNT32rr:
case X86::LZCNT64rm:
case X86::LZCNT64rr:
case X86::TZCNT32rm:
case X86::TZCNT32rr:
case X86::TZCNT64rm:
case X86::TZCNT64rr:
return Subtarget.hasLZCNTFalseDeps();
}
return false;
}
/// Inform the BreakFalseDeps pass how many idle
/// instructions we would like before a partial register update.
unsigned X86InstrInfo::getPartialRegUpdateClearance(
const MachineInstr &MI, unsigned OpNum,
const TargetRegisterInfo *TRI) const {
if (OpNum != 0 || !hasPartialRegUpdate(MI.getOpcode(), Subtarget))
return 0;
// If MI is marked as reading Reg, the partial register update is wanted.
const MachineOperand &MO = MI.getOperand(0);
unsigned Reg = MO.getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
if (MO.readsReg() || MI.readsVirtualRegister(Reg))
return 0;
} else {
if (MI.readsRegister(Reg, TRI))
return 0;
}
// If any instructions in the clearance range are reading Reg, insert a
// dependency breaking instruction, which is inexpensive and is likely to
// be hidden in other instructions' cycles.
return PartialRegUpdateClearance;
}
// Return true for any instruction that copies the high bits of the first source
// operand into the unused high bits of the destination operand.
static bool hasUndefRegUpdate(unsigned Opcode, bool ForLoadFold = false) {
switch (Opcode) {
case X86::VCVTSI2SSrr:
case X86::VCVTSI2SSrm:
case X86::VCVTSI2SSrr_Int:
case X86::VCVTSI2SSrm_Int:
case X86::VCVTSI642SSrr:
case X86::VCVTSI642SSrm:
case X86::VCVTSI642SSrr_Int:
case X86::VCVTSI642SSrm_Int:
case X86::VCVTSI2SDrr:
case X86::VCVTSI2SDrm:
case X86::VCVTSI2SDrr_Int:
case X86::VCVTSI2SDrm_Int:
case X86::VCVTSI642SDrr:
case X86::VCVTSI642SDrm:
case X86::VCVTSI642SDrr_Int:
case X86::VCVTSI642SDrm_Int:
// AVX-512
case X86::VCVTSI2SSZrr:
case X86::VCVTSI2SSZrm:
case X86::VCVTSI2SSZrr_Int:
case X86::VCVTSI2SSZrrb_Int:
case X86::VCVTSI2SSZrm_Int:
case X86::VCVTSI642SSZrr:
case X86::VCVTSI642SSZrm:
case X86::VCVTSI642SSZrr_Int:
case X86::VCVTSI642SSZrrb_Int:
case X86::VCVTSI642SSZrm_Int:
case X86::VCVTSI2SDZrr:
case X86::VCVTSI2SDZrm:
case X86::VCVTSI2SDZrr_Int:
case X86::VCVTSI2SDZrm_Int:
case X86::VCVTSI642SDZrr:
case X86::VCVTSI642SDZrm:
case X86::VCVTSI642SDZrr_Int:
case X86::VCVTSI642SDZrrb_Int:
case X86::VCVTSI642SDZrm_Int:
case X86::VCVTUSI2SSZrr:
case X86::VCVTUSI2SSZrm:
case X86::VCVTUSI2SSZrr_Int:
case X86::VCVTUSI2SSZrrb_Int:
case X86::VCVTUSI2SSZrm_Int:
case X86::VCVTUSI642SSZrr:
case X86::VCVTUSI642SSZrm:
case X86::VCVTUSI642SSZrr_Int:
case X86::VCVTUSI642SSZrrb_Int:
case X86::VCVTUSI642SSZrm_Int:
case X86::VCVTUSI2SDZrr:
case X86::VCVTUSI2SDZrm:
case X86::VCVTUSI2SDZrr_Int:
case X86::VCVTUSI2SDZrm_Int:
case X86::VCVTUSI642SDZrr:
case X86::VCVTUSI642SDZrm:
case X86::VCVTUSI642SDZrr_Int:
case X86::VCVTUSI642SDZrrb_Int:
case X86::VCVTUSI642SDZrm_Int:
// Load folding won't affect the undef register update since the input is
// a GPR.
return !ForLoadFold;
case X86::VCVTSD2SSrr:
case X86::VCVTSD2SSrm:
case X86::VCVTSD2SSrr_Int:
case X86::VCVTSD2SSrm_Int:
case X86::VCVTSS2SDrr:
case X86::VCVTSS2SDrm:
case X86::VCVTSS2SDrr_Int:
case X86::VCVTSS2SDrm_Int:
case X86::VRCPSSr:
case X86::VRCPSSr_Int:
case X86::VRCPSSm:
case X86::VRCPSSm_Int:
case X86::VROUNDSDr:
case X86::VROUNDSDm:
case X86::VROUNDSDr_Int:
case X86::VROUNDSDm_Int:
case X86::VROUNDSSr:
case X86::VROUNDSSm:
case X86::VROUNDSSr_Int:
case X86::VROUNDSSm_Int:
case X86::VRSQRTSSr:
case X86::VRSQRTSSr_Int:
case X86::VRSQRTSSm:
case X86::VRSQRTSSm_Int:
case X86::VSQRTSSr:
case X86::VSQRTSSr_Int:
case X86::VSQRTSSm:
case X86::VSQRTSSm_Int:
case X86::VSQRTSDr:
case X86::VSQRTSDr_Int:
case X86::VSQRTSDm:
case X86::VSQRTSDm_Int:
// AVX-512
case X86::VCVTSD2SSZrr:
case X86::VCVTSD2SSZrr_Int:
case X86::VCVTSD2SSZrrb_Int:
case X86::VCVTSD2SSZrm:
case X86::VCVTSD2SSZrm_Int:
case X86::VCVTSS2SDZrr:
case X86::VCVTSS2SDZrr_Int:
case X86::VCVTSS2SDZrrb_Int:
case X86::VCVTSS2SDZrm:
case X86::VCVTSS2SDZrm_Int:
case X86::VGETEXPSDZr:
case X86::VGETEXPSDZrb:
case X86::VGETEXPSDZm:
case X86::VGETEXPSSZr:
case X86::VGETEXPSSZrb:
case X86::VGETEXPSSZm:
case X86::VGETMANTSDZrri:
case X86::VGETMANTSDZrrib:
case X86::VGETMANTSDZrmi:
case X86::VGETMANTSSZrri:
case X86::VGETMANTSSZrrib:
case X86::VGETMANTSSZrmi:
case X86::VRNDSCALESDZr:
case X86::VRNDSCALESDZr_Int:
case X86::VRNDSCALESDZrb_Int:
case X86::VRNDSCALESDZm:
case X86::VRNDSCALESDZm_Int:
case X86::VRNDSCALESSZr:
case X86::VRNDSCALESSZr_Int:
case X86::VRNDSCALESSZrb_Int:
case X86::VRNDSCALESSZm:
case X86::VRNDSCALESSZm_Int:
case X86::VRCP14SDZrr:
case X86::VRCP14SDZrm:
case X86::VRCP14SSZrr:
case X86::VRCP14SSZrm:
case X86::VRCP28SDZr:
case X86::VRCP28SDZrb:
case X86::VRCP28SDZm:
case X86::VRCP28SSZr:
case X86::VRCP28SSZrb:
case X86::VRCP28SSZm:
case X86::VREDUCESSZrmi:
case X86::VREDUCESSZrri:
case X86::VREDUCESSZrrib:
case X86::VRSQRT14SDZrr:
case X86::VRSQRT14SDZrm:
case X86::VRSQRT14SSZrr:
case X86::VRSQRT14SSZrm:
case X86::VRSQRT28SDZr:
case X86::VRSQRT28SDZrb:
case X86::VRSQRT28SDZm:
case X86::VRSQRT28SSZr:
case X86::VRSQRT28SSZrb:
case X86::VRSQRT28SSZm:
case X86::VSQRTSSZr:
case X86::VSQRTSSZr_Int:
case X86::VSQRTSSZrb_Int:
case X86::VSQRTSSZm:
case X86::VSQRTSSZm_Int:
case X86::VSQRTSDZr:
case X86::VSQRTSDZr_Int:
case X86::VSQRTSDZrb_Int:
case X86::VSQRTSDZm:
case X86::VSQRTSDZm_Int:
return true;
}
return false;
}
/// Inform the BreakFalseDeps pass how many idle instructions we would like
/// before certain undef register reads.
///
/// This catches the VCVTSI2SD family of instructions:
///
/// vcvtsi2sdq %rax, undef %xmm0, %xmm14
///
/// We should be careful *not* to catch VXOR idioms which are presumably
/// handled specially in the pipeline:
///
/// vxorps undef %xmm1, undef %xmm1, %xmm1
///
/// Like getPartialRegUpdateClearance, this makes a strong assumption that the
/// high bits that are passed-through are not live.
unsigned
X86InstrInfo::getUndefRegClearance(const MachineInstr &MI, unsigned &OpNum,
const TargetRegisterInfo *TRI) const {
if (!hasUndefRegUpdate(MI.getOpcode()))
return 0;
// Set the OpNum parameter to the first source operand.
OpNum = 1;
const MachineOperand &MO = MI.getOperand(OpNum);
if (MO.isUndef() && TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
return UndefRegClearance;
}
return 0;
}
void X86InstrInfo::breakPartialRegDependency(
MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const {
unsigned Reg = MI.getOperand(OpNum).getReg();
// If MI kills this register, the false dependence is already broken.
if (MI.killsRegister(Reg, TRI))
return;
if (X86::VR128RegClass.contains(Reg)) {
// These instructions are all floating point domain, so xorps is the best
// choice.
unsigned Opc = Subtarget.hasAVX() ? X86::VXORPSrr : X86::XORPSrr;
BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(Opc), Reg)
.addReg(Reg, RegState::Undef)
.addReg(Reg, RegState::Undef);
MI.addRegisterKilled(Reg, TRI, true);
} else if (X86::VR256RegClass.contains(Reg)) {
// Use vxorps to clear the full ymm register.
// It wants to read and write the xmm sub-register.
unsigned XReg = TRI->getSubReg(Reg, X86::sub_xmm);
BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::VXORPSrr), XReg)
.addReg(XReg, RegState::Undef)
.addReg(XReg, RegState::Undef)
.addReg(Reg, RegState::ImplicitDefine);
MI.addRegisterKilled(Reg, TRI, true);
} else if (X86::GR64RegClass.contains(Reg)) {
// Using XOR32rr because it has a shorter encoding and zeros the upper bits
// as well.
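// A sketch with hypothetical operands, breaking a false dependence on %rax:
//   xorl %eax, %eax   (implicitly zeros all 64 bits of %rax)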
unsigned XReg = TRI->getSubReg(Reg, X86::sub_32bit);
BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::XOR32rr), XReg)
.addReg(XReg, RegState::Undef)
.addReg(XReg, RegState::Undef)
.addReg(Reg, RegState::ImplicitDefine);
MI.addRegisterKilled(Reg, TRI, true);
} else if (X86::GR32RegClass.contains(Reg)) {
BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::XOR32rr), Reg)
.addReg(Reg, RegState::Undef)
.addReg(Reg, RegState::Undef);
MI.addRegisterKilled(Reg, TRI, true);
}
}
static void addOperands(MachineInstrBuilder &MIB, ArrayRef<MachineOperand> MOs,
int PtrOffset = 0) {
unsigned NumAddrOps = MOs.size();
if (NumAddrOps < 4) {
// FrameIndex only - add an immediate offset (whether it's zero or not).
for (unsigned i = 0; i != NumAddrOps; ++i)
MIB.add(MOs[i]);
addOffset(MIB, PtrOffset);
} else {
// General Memory Addressing - we need to add any offset to an existing
// offset.
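// The five operands follow the x86 memory operand layout: base register,
// scale, index register, displacement, segment register. Index 3
// (X86::AddrDisp) is the displacement that may need adjusting.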
assert(MOs.size() == 5 && "Unexpected memory operand list length");
for (unsigned i = 0; i != NumAddrOps; ++i) {
const MachineOperand &MO = MOs[i];
if (i == 3 && PtrOffset != 0) {
MIB.addDisp(MO, PtrOffset);
} else {
MIB.add(MO);
}
}
}
}
static void updateOperandRegConstraints(MachineFunction &MF,
MachineInstr &NewMI,
const TargetInstrInfo &TII) {
MachineRegisterInfo &MRI = MF.getRegInfo();
const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
for (int Idx : llvm::seq<int>(0, NewMI.getNumOperands())) {
MachineOperand &MO = NewMI.getOperand(Idx);
// We only need to update constraints on virtual register operands.
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
if (!TRI.isVirtualRegister(Reg))
continue;
auto *NewRC = MRI.constrainRegClass(
Reg, TII.getRegClass(NewMI.getDesc(), Idx, &TRI, MF));
if (!NewRC) {
LLVM_DEBUG(
dbgs() << "WARNING: Unable to update register constraint for operand "
<< Idx << " of instruction:\n";
NewMI.dump(); dbgs() << "\n");
}
}
}
static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
ArrayRef<MachineOperand> MOs,
MachineBasicBlock::iterator InsertPt,
MachineInstr &MI,
const TargetInstrInfo &TII) {
// Create the base instruction with the memory operand as the first part.
// Omit the implicit operands, something BuildMI can't do.
MachineInstr *NewMI =
MF.CreateMachineInstr(TII.get(Opcode), MI.getDebugLoc(), true);
MachineInstrBuilder MIB(MF, NewMI);
addOperands(MIB, MOs);
// Loop over the rest of the register/immediate (ri) operands, converting them over.
unsigned NumOps = MI.getDesc().getNumOperands() - 2;
for (unsigned i = 0; i != NumOps; ++i) {
MachineOperand &MO = MI.getOperand(i + 2);
MIB.add(MO);
}
for (unsigned i = NumOps + 2, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
MIB.add(MO);
}
updateOperandRegConstraints(MF, *NewMI, TII);
MachineBasicBlock *MBB = InsertPt->getParent();
MBB->insert(InsertPt, NewMI);
return MIB;
}
static MachineInstr *FuseInst(MachineFunction &MF, unsigned Opcode,
unsigned OpNo, ArrayRef<MachineOperand> MOs,
MachineBasicBlock::iterator InsertPt,
MachineInstr &MI, const TargetInstrInfo &TII,
int PtrOffset = 0) {
// Omit the implicit operands, something BuildMI can't do.
MachineInstr *NewMI =
MF.CreateMachineInstr(TII.get(Opcode), MI.getDebugLoc(), true);
MachineInstrBuilder MIB(MF, NewMI);
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
if (i == OpNo) {
assert(MO.isReg() && "Expected to fold into reg operand!");
addOperands(MIB, MOs, PtrOffset);
} else {
MIB.add(MO);
}
}
updateOperandRegConstraints(MF, *NewMI, TII);
MachineBasicBlock *MBB = InsertPt->getParent();
MBB->insert(InsertPt, NewMI);
return MIB;
}
static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
ArrayRef<MachineOperand> MOs,
MachineBasicBlock::iterator InsertPt,
MachineInstr &MI) {
MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
MI.getDebugLoc(), TII.get(Opcode));
addOperands(MIB, MOs);
return MIB.addImm(0);
}
MachineInstr *X86InstrInfo::foldMemoryOperandCustom(
MachineFunction &MF, MachineInstr &MI, unsigned OpNum,
ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
unsigned Size, unsigned Align) const {
switch (MI.getOpcode()) {
case X86::INSERTPSrr:
case X86::VINSERTPSrr:
case X86::VINSERTPSZrr:
// Attempt to convert the load of the inserted vector into a folded load
// of a single float.
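// The insertps immediate encodes [7:6] = source lane, [5:4] = destination
// lane and [3:0] = zero mask. A sketch with hypothetical operands, where
// %xmm1 holds a 16-byte load from (%rdi):
//   insertps $0xA0, %xmm1, %xmm0  ->  insertps $0x20, 8(%rdi), %xmm0
// The pointer is advanced by SrcIdx * 4 and the source lane field is
// cleared, since the memory form always reads a single float.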
if (OpNum == 2) {
unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm();
unsigned ZMask = Imm & 15;
unsigned DstIdx = (Imm >> 4) & 3;
unsigned SrcIdx = (Imm >> 6) & 3;
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
if ((Size == 0 || Size >= 16) && RCSize >= 16 && 4 <= Align) {
int PtrOffset = SrcIdx * 4;
unsigned NewImm = (DstIdx << 4) | ZMask;
unsigned NewOpCode =
(MI.getOpcode() == X86::VINSERTPSZrr) ? X86::VINSERTPSZrm :
(MI.getOpcode() == X86::VINSERTPSrr) ? X86::VINSERTPSrm :
X86::INSERTPSrm;
MachineInstr *NewMI =
FuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, PtrOffset);
NewMI->getOperand(NewMI->getNumOperands() - 1).setImm(NewImm);
return NewMI;
}
}
break;
case X86::MOVHLPSrr:
case X86::VMOVHLPSrr:
case X86::VMOVHLPSZrr:
// Move the upper 64 bits of the second operand to the lower 64 bits.
// To fold the load, adjust the pointer to the upper half and use (V)MOVLPS.
// TODO: In most cases AVX doesn't have an 8-byte alignment requirement.
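// A sketch with hypothetical operands, where %xmm1 holds a 16-byte load
// from (%rdi):
//   movhlps %xmm1, %xmm0  ->  movlps 8(%rdi), %xmm0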
if (OpNum == 2) {
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
if ((Size == 0 || Size >= 16) && RCSize >= 16 && 8 <= Align) {
unsigned NewOpCode =
(MI.getOpcode() == X86::VMOVHLPSZrr) ? X86::VMOVLPSZ128rm :
(MI.getOpcode() == X86::VMOVHLPSrr) ? X86::VMOVLPSrm :
X86::MOVLPSrm;
MachineInstr *NewMI =
FuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, 8);
return NewMI;
}
}
break;
case X86::UNPCKLPDrr:
// If we won't be able to fold this to the memory form of UNPCKL, use
// MOVHPD instead. Done as custom because we can't have this in the load
// table twice.
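// A sketch with hypothetical operands, for a (%rdi) that is not 16-byte
// aligned:
//   unpcklpd (%rdi), %xmm0   would fault (the memory form needs alignment)
//   movhpd   (%rdi), %xmm0   loads the same low 8 bytes into the high half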
if (OpNum == 2) {
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
if ((Size == 0 || Size >= 16) && RCSize >= 16 && Align < 16) {
MachineInstr *NewMI =
FuseInst(MF, X86::MOVHPDrm, OpNum, MOs, InsertPt, MI, *this);
return NewMI;
}
}
break;
}
return nullptr;
}
static bool shouldPreventUndefRegUpdateMemFold(MachineFunction &MF,
MachineInstr &MI) {
if (!hasUndefRegUpdate(MI.getOpcode(), /*ForLoadFold*/true) ||
!MI.getOperand(1).isReg())
return false;
// There are two cases we need to handle depending on where in the pipeline
// the folding attempt is being made.
// - The register has the undef flag set.
// - The register is produced by the IMPLICIT_DEF instruction.
if (MI.getOperand(1).isUndef())
return true;
MachineRegisterInfo &RegInfo = MF.getRegInfo();
MachineInstr *VRegDef = RegInfo.getUniqueVRegDef(MI.getOperand(1).getReg());
return VRegDef && VRegDef->isImplicitDef();
}
MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
MachineFunction &MF, MachineInstr &MI, unsigned OpNum,
ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
unsigned Size, unsigned Align, bool AllowCommute) const {
bool isSlowTwoMemOps = Subtarget.slowTwoMemOps();
bool isTwoAddrFold = false;
// For CPUs that favor the register form of a call or push,
// do not fold loads into calls or pushes, unless optimizing for size
// aggressively.
if (isSlowTwoMemOps && !MF.getFunction().hasMinSize() &&
(MI.getOpcode() == X86::CALL32r || MI.getOpcode() == X86::CALL64r ||
MI.getOpcode() == X86::PUSH16r || MI.getOpcode() == X86::PUSH32r ||
MI.getOpcode() == X86::PUSH64r))
return nullptr;
// Avoid partial and undef register update stalls unless optimizing for size.
if (!MF.getFunction().hasOptSize() &&
(hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) ||
shouldPreventUndefRegUpdateMemFold(MF, MI)))
return nullptr;
unsigned NumOps = MI.getDesc().getNumOperands();
bool isTwoAddr =
NumOps > 1 && MI.getDesc().getOperandConstraint(1, MCOI::TIED_TO) != -1;
// FIXME: AsmPrinter doesn't know how to handle
// X86II::MO_GOT_ABSOLUTE_ADDRESS after folding.
if (MI.getOpcode() == X86::ADD32ri &&
MI.getOperand(2).getTargetFlags() == X86II::MO_GOT_ABSOLUTE_ADDRESS)
return nullptr;
// GOTTPOFF relocation loads can only be folded into add instructions.
// FIXME: Need to exclude other relocations that only support specific
// instructions.
if (MOs.size() == X86::AddrNumOperands &&
MOs[X86::AddrDisp].getTargetFlags() == X86II::MO_GOTTPOFF &&
MI.getOpcode() != X86::ADD64rr)
return nullptr;
MachineInstr *NewMI = nullptr;
// Attempt to fold any custom cases we have.
if (MachineInstr *CustomMI =
foldMemoryOperandCustom(MF, MI, OpNum, MOs, InsertPt, Size, Align))
return CustomMI;
const X86MemoryFoldTableEntry *I = nullptr;
// Folding a memory location into the two-address part of a two-address
// instruction is different from folding it elsewhere. It requires
// replacing the *two* registers with the memory location.
if (isTwoAddr && NumOps >= 2 && OpNum < 2 && MI.getOperand(0).isReg() &&
MI.getOperand(1).isReg() &&
MI.getOperand(0).getReg() == MI.getOperand(1).getReg()) {
I = lookupTwoAddrFoldTable(MI.getOpcode());
isTwoAddrFold = true;
} else {
if (OpNum == 0) {
if (MI.getOpcode() == X86::MOV32r0) {
NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, InsertPt, MI);
if (NewMI)
return NewMI;
}
}
I = lookupFoldTable(MI.getOpcode(), OpNum);
}
if (I != nullptr) {
unsigned Opcode = I->DstOp;
unsigned MinAlign = (I->Flags & TB_ALIGN_MASK) >> TB_ALIGN_SHIFT;
if (Align < MinAlign)
return nullptr;
bool NarrowToMOV32rm = false;
if (Size) {
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum,
&RI, MF);
unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
if (Size < RCSize) {
// FIXME: Allow scalar intrinsic instructions like ADDSSrm_Int.
// Check if it's safe to fold the load. If the size of the object is
// narrower than the load width, then it's not.
if (Opcode != X86::MOV64rm || RCSize != 8 || Size != 4)
return nullptr;
// If this is a 64-bit load, but the spill slot is only 32 bits, then we can
// do a 32-bit load which is implicitly zero-extended. This is likely due to
// live interval analysis remat'ing a load from a stack slot.
if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg())
return nullptr;
Opcode = X86::MOV32rm;
NarrowToMOV32rm = true;
}
}
if (isTwoAddrFold)
NewMI = FuseTwoAddrInst(MF, Opcode, MOs, InsertPt, MI, *this);
else
NewMI = FuseInst(MF, Opcode, OpNum, MOs, InsertPt, MI, *this);
if (NarrowToMOV32rm) {
// This is the special case where we use a MOV32rm to load a 32-bit
// value and zero-extend the top bits. Change the destination register
// to a 32-bit one.
unsigned DstReg = NewMI->getOperand(0).getReg();
if (TargetRegisterInfo::isPhysicalRegister(DstReg))
NewMI->getOperand(0).setReg(RI.getSubReg(DstReg, X86::sub_32bit));
else
NewMI->getOperand(0).setSubReg(X86::sub_32bit);
}
return NewMI;
}
// If the instruction and target operand are commutable, commute the
// instruction and try again.
if (AllowCommute) {
unsigned CommuteOpIdx1 = OpNum, CommuteOpIdx2 = CommuteAnyOperandIndex;
if (findCommutedOpIndices(MI, CommuteOpIdx1, CommuteOpIdx2)) {
bool HasDef = MI.getDesc().getNumDefs();
Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
Register Reg1 = MI.getOperand(CommuteOpIdx1).getReg();
Register Reg2 = MI.getOperand(CommuteOpIdx2).getReg();
bool Tied1 =
0 == MI.getDesc().getOperandConstraint(CommuteOpIdx1, MCOI::TIED_TO);
bool Tied2 =
0 == MI.getDesc().getOperandConstraint(CommuteOpIdx2, MCOI::TIED_TO);
// If either of the commutable operands is tied to the destination
// then we cannot commute + fold.
if ((HasDef && Reg0 == Reg1 && Tied1) ||
(HasDef && Reg0 == Reg2 && Tied2))
return nullptr;
MachineInstr *CommutedMI =
commuteInstruction(MI, false, CommuteOpIdx1, CommuteOpIdx2);
if (!CommutedMI) {
// Unable to commute.
return nullptr;
}
if (CommutedMI != &MI) {
// New instruction. We can't fold from this.
CommutedMI->eraseFromParent();
return nullptr;
}
// Attempt to fold with the commuted version of the instruction.
NewMI = foldMemoryOperandImpl(MF, MI, CommuteOpIdx2, MOs, InsertPt,
Size, Align, /*AllowCommute=*/false);
if (NewMI)
return NewMI;
// Folding failed again - undo the commute before returning.
MachineInstr *UncommutedMI =
commuteInstruction(MI, false, CommuteOpIdx1, CommuteOpIdx2);
if (!UncommutedMI) {
// Unable to commute.
return nullptr;
}
if (UncommutedMI != &MI) {
// New instruction. It doesn't need to be kept.
UncommutedMI->eraseFromParent();
return nullptr;
}
// Return here to prevent duplicate fuse failure report.
return nullptr;
}
}
// No fusion
if (PrintFailedFusing && !MI.isCopy())
dbgs() << "We failed to fuse operand " << OpNum << " in " << MI;
return nullptr;
}
MachineInstr *
X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt,
int FrameIndex, LiveIntervals *LIS,
VirtRegMap *VRM) const {
// Check switch flag
if (NoFusing)
return nullptr;
// Avoid partial and undef register update stalls unless optimizing for size.
if (!MF.getFunction().hasOptSize() &&
(hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) ||
shouldPreventUndefRegUpdateMemFold(MF, MI)))
return nullptr;
// Don't fold subreg spills, or reloads that use a high subreg.
for (auto Op : Ops) {
MachineOperand &MO = MI.getOperand(Op);
auto SubReg = MO.getSubReg();
if (SubReg && (MO.isDef() || SubReg == X86::sub_8bit_hi))
return nullptr;
}
const MachineFrameInfo &MFI = MF.getFrameInfo();
unsigned Size = MFI.getObjectSize(FrameIndex);
unsigned Alignment = MFI.getObjectAlignment(FrameIndex);
// If the function stack isn't realigned we don't want to fold instructions
// that need increased alignment.
if (!RI.needsStackRealignment(MF))
Alignment =
std::min(Alignment, Subtarget.getFrameLowering()->getStackAlignment());
if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
unsigned NewOpc = 0;
unsigned RCSize = 0;
switch (MI.getOpcode()) {
default: return nullptr;
case X86::TEST8rr: NewOpc = X86::CMP8ri; RCSize = 1; break;
case X86::TEST16rr: NewOpc = X86::CMP16ri8; RCSize = 2; break;
case X86::TEST32rr: NewOpc = X86::CMP32ri8; RCSize = 4; break;
case X86::TEST64rr: NewOpc = X86::CMP64ri8; RCSize = 8; break;
}
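// A sketch with hypothetical operands: testl %eax, %eax, with %eax reloaded
// from a stack slot, becomes cmpl $0, <slot>(%rsp), so both register uses
// fold into a single memory operand.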
// Check if it's safe to fold the load. If the size of the object is
// narrower than the load width, then it's not.
if (Size < RCSize)
return nullptr;
// Change to CMPXXri r, 0 first.
MI.setDesc(get(NewOpc));
MI.getOperand(1).ChangeToImmediate(0);
} else if (Ops.size() != 1)
return nullptr;
return foldMemoryOperandImpl(MF, MI, Ops[0],
MachineOperand::CreateFI(FrameIndex), InsertPt,
Size, Alignment, /*AllowCommute=*/true);
}
/// Check if \p LoadMI is a partial register load that we can't fold into \p MI
/// because the latter uses contents that wouldn't be defined in the folded
/// version. For instance, this transformation isn't legal:
/// movss (%rdi), %xmm0
/// addps %xmm0, %xmm0
/// ->
/// addps (%rdi), %xmm0
///
/// But this one is:
/// movss (%rdi), %xmm0
/// addss %xmm0, %xmm0
/// ->
/// addss (%rdi), %xmm0
///
static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
const MachineInstr &UserMI,
const MachineFunction &MF) {
unsigned Opc = LoadMI.getOpcode();
unsigned UserOpc = UserMI.getOpcode();
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
const TargetRegisterClass *RC =
MF.getRegInfo().getRegClass(LoadMI.getOperand(0).getReg());
unsigned RegSize = TRI.getRegSizeInBits(*RC);
if ((Opc == X86::MOVSSrm || Opc == X86::VMOVSSrm || Opc == X86::VMOVSSZrm ||
Opc == X86::MOVSSrm_alt || Opc == X86::VMOVSSrm_alt ||
Opc == X86::VMOVSSZrm_alt) &&
RegSize > 32) {
// These instructions only load 32 bits; we can't fold them if the
// destination register is wider than 32 bits (4 bytes) and the user
// instruction isn't scalar (SS).
switch (UserOpc) {
case X86::ADDSSrr_Int: case X86::VADDSSrr_Int: case X86::VADDSSZrr_Int:
case X86::CMPSSrr_Int: case X86::VCMPSSrr_Int: case X86::VCMPSSZrr_Int:
case X86::DIVSSrr_Int: case X86::VDIVSSrr_Int: case X86::VDIVSSZrr_Int:
case X86::MAXSSrr_Int: case X86::VMAXSSrr_Int: case X86::VMAXSSZrr_Int:
case X86::MINSSrr_Int: case X86::VMINSSrr_Int: case X86::VMINSSZrr_Int:
case X86::MULSSrr_Int: case X86::VMULSSrr_Int: case X86::VMULSSZrr_Int:
case X86::SUBSSrr_Int: case X86::VSUBSSrr_Int: case X86::VSUBSSZrr_Int:
case X86::VADDSSZrr_Intk: case X86::VADDSSZrr_Intkz:
case X86::VCMPSSZrr_Intk:
case X86::VDIVSSZrr_Intk: case X86::VDIVSSZrr_Intkz:
case X86::VMAXSSZrr_Intk: case X86::VMAXSSZrr_Intkz:
case X86::VMINSSZrr_Intk: case X86::VMINSSZrr_Intkz:
case X86::VMULSSZrr_Intk: case X86::VMULSSZrr_Intkz:
case X86::VSUBSSZrr_Intk: case X86::VSUBSSZrr_Intkz:
case X86::VFMADDSS4rr_Int: case X86::VFNMADDSS4rr_Int:
case X86::VFMSUBSS4rr_Int: case X86::VFNMSUBSS4rr_Int:
case X86::VFMADD132SSr_Int: case X86::VFNMADD132SSr_Int:
case X86::VFMADD213SSr_Int: case X86::VFNMADD213SSr_Int:
case X86::VFMADD231SSr_Int: case X86::VFNMADD231SSr_Int:
case X86::VFMSUB132SSr_Int: case X86::VFNMSUB132SSr_Int:
case X86::VFMSUB213SSr_Int: case X86::VFNMSUB213SSr_Int:
case X86::VFMSUB231SSr_Int: case X86::VFNMSUB231SSr_Int:
case X86::VFMADD132SSZr_Int: case X86::VFNMADD132SSZr_Int:
case X86::VFMADD213SSZr_Int: case X86::VFNMADD213SSZr_Int:
case X86::VFMADD231SSZr_Int: case X86::VFNMADD231SSZr_Int:
case X86::VFMSUB132SSZr_Int: case X86::VFNMSUB132SSZr_Int:
case X86::VFMSUB213SSZr_Int: case X86::VFNMSUB213SSZr_Int:
case X86::VFMSUB231SSZr_Int: case X86::VFNMSUB231SSZr_Int:
case X86::VFMADD132SSZr_Intk: case X86::VFNMADD132SSZr_Intk:
case X86::VFMADD213SSZr_Intk: case X86::VFNMADD213SSZr_Intk:
case X86::VFMADD231SSZr_Intk: case X86::VFNMADD231SSZr_Intk:
case X86::VFMSUB132SSZr_Intk: case X86::VFNMSUB132SSZr_Intk:
case X86::VFMSUB213SSZr_Intk: case X86::VFNMSUB213SSZr_Intk:
case X86::VFMSUB231SSZr_Intk: case X86::VFNMSUB231SSZr_Intk:
case X86::VFMADD132SSZr_Intkz: case X86::VFNMADD132SSZr_Intkz:
case X86::VFMADD213SSZr_Intkz: case X86::VFNMADD213SSZr_Intkz:
case X86::VFMADD231SSZr_Intkz: case X86::VFNMADD231SSZr_Intkz:
case X86::VFMSUB132SSZr_Intkz: case X86::VFNMSUB132SSZr_Intkz:
case X86::VFMSUB213SSZr_Intkz: case X86::VFNMSUB213SSZr_Intkz:
case X86::VFMSUB231SSZr_Intkz: case X86::VFNMSUB231SSZr_Intkz:
return false;
default:
return true;
}
}
if ((Opc == X86::MOVSDrm || Opc == X86::VMOVSDrm || Opc == X86::VMOVSDZrm ||
Opc == X86::MOVSDrm_alt || Opc == X86::VMOVSDrm_alt ||
Opc == X86::VMOVSDZrm_alt) &&
RegSize > 64) {
// These instructions only load 64 bits; we can't fold them if the
// destination register is wider than 64 bits (8 bytes) and the user
// instruction isn't scalar (SD).
switch (UserOpc) {
case X86::ADDSDrr_Int: case X86::VADDSDrr_Int: case X86::VADDSDZrr_Int:
case X86::CMPSDrr_Int: case X86::VCMPSDrr_Int: case X86::VCMPSDZrr_Int:
case X86::DIVSDrr_Int: case X86::VDIVSDrr_Int: case X86::VDIVSDZrr_Int:
case X86::MAXSDrr_Int: case X86::VMAXSDrr_Int: case X86::VMAXSDZrr_Int:
case X86::MINSDrr_Int: case X86::VMINSDrr_Int: case X86::VMINSDZrr_Int:
case X86::MULSDrr_Int: case X86::VMULSDrr_Int: case X86::VMULSDZrr_Int:
case X86::SUBSDrr_Int: case X86::VSUBSDrr_Int: case X86::VSUBSDZrr_Int:
case X86::VADDSDZrr_Intk: case X86::VADDSDZrr_Intkz:
case X86::VCMPSDZrr_Intk:
case X86::VDIVSDZrr_Intk: case X86::VDIVSDZrr_Intkz:
case X86::VMAXSDZrr_Intk: case X86::VMAXSDZrr_Intkz:
case X86::VMINSDZrr_Intk: case X86::VMINSDZrr_Intkz:
case X86::VMULSDZrr_Intk: case X86::VMULSDZrr_Intkz:
case X86::VSUBSDZrr_Intk: case X86::VSUBSDZrr_Intkz:
case X86::VFMADDSD4rr_Int: case X86::VFNMADDSD4rr_Int:
case X86::VFMSUBSD4rr_Int: case X86::VFNMSUBSD4rr_Int:
case X86::VFMADD132SDr_Int: case X86::VFNMADD132SDr_Int:
case X86::VFMADD213SDr_Int: case X86::VFNMADD213SDr_Int:
case X86::VFMADD231SDr_Int: case X86::VFNMADD231SDr_Int:
case X86::VFMSUB132SDr_Int: case X86::VFNMSUB132SDr_Int:
case X86::VFMSUB213SDr_Int: case X86::VFNMSUB213SDr_Int:
case X86::VFMSUB231SDr_Int: case X86::VFNMSUB231SDr_Int:
case X86::VFMADD132SDZr_Int: case X86::VFNMADD132SDZr_Int:
case X86::VFMADD213SDZr_Int: case X86::VFNMADD213SDZr_Int:
case X86::VFMADD231SDZr_Int: case X86::VFNMADD231SDZr_Int:
case X86::VFMSUB132SDZr_Int: case X86::VFNMSUB132SDZr_Int:
case X86::VFMSUB213SDZr_Int: case X86::VFNMSUB213SDZr_Int:
case X86::VFMSUB231SDZr_Int: case X86::VFNMSUB231SDZr_Int:
case X86::VFMADD132SDZr_Intk: case X86::VFNMADD132SDZr_Intk:
case X86::VFMADD213SDZr_Intk: case X86::VFNMADD213SDZr_Intk:
case X86::VFMADD231SDZr_Intk: case X86::VFNMADD231SDZr_Intk:
case X86::VFMSUB132SDZr_Intk: case X86::VFNMSUB132SDZr_Intk:
case X86::VFMSUB213SDZr_Intk: case X86::VFNMSUB213SDZr_Intk:
case X86::VFMSUB231SDZr_Intk: case X86::VFNMSUB231SDZr_Intk:
case X86::VFMADD132SDZr_Intkz: case X86::VFNMADD132SDZr_Intkz:
case X86::VFMADD213SDZr_Intkz: case X86::VFNMADD213SDZr_Intkz:
case X86::VFMADD231SDZr_Intkz: case X86::VFNMADD231SDZr_Intkz:
case X86::VFMSUB132SDZr_Intkz: case X86::VFNMSUB132SDZr_Intkz:
case X86::VFMSUB213SDZr_Intkz: case X86::VFNMSUB213SDZr_Intkz:
case X86::VFMSUB231SDZr_Intkz: case X86::VFNMSUB231SDZr_Intkz:
return false;
default:
return true;
}
}
return false;
}
MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
LiveIntervals *LIS) const {
// TODO: Support the case where LoadMI loads a wide register, but MI
// only uses a subreg.
for (auto Op : Ops) {
if (MI.getOperand(Op).getSubReg())
return nullptr;
}
// If loading from a FrameIndex, fold directly from the FrameIndex.
unsigned NumOps = LoadMI.getDesc().getNumOperands();
int FrameIndex;
if (isLoadFromStackSlot(LoadMI, FrameIndex)) {
if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF))
return nullptr;
return foldMemoryOperandImpl(MF, MI, Ops, InsertPt, FrameIndex, LIS);
}
// Check switch flag
if (NoFusing) return nullptr;
// Avoid partial and undef register update stalls unless optimizing for size.
if (!MF.getFunction().hasOptSize() &&
(hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) ||
shouldPreventUndefRegUpdateMemFold(MF, MI)))
return nullptr;
// Determine the alignment of the load.
unsigned Alignment = 0;
if (LoadMI.hasOneMemOperand())
Alignment = (*LoadMI.memoperands_begin())->getAlignment();
else
switch (LoadMI.getOpcode()) {
case X86::AVX512_512_SET0:
case X86::AVX512_512_SETALLONES:
Alignment = 64;
break;
case X86::AVX2_SETALLONES:
case X86::AVX1_SETALLONES:
case X86::AVX_SET0:
case X86::AVX512_256_SET0:
Alignment = 32;
break;
case X86::V_SET0:
case X86::V_SETALLONES:
case X86::AVX512_128_SET0:
Alignment = 16;
break;
case X86::MMX_SET0:
case X86::FsFLD0SD:
case X86::AVX512_FsFLD0SD:
Alignment = 8;
break;
case X86::FsFLD0SS:
case X86::AVX512_FsFLD0SS:
Alignment = 4;
break;
default:
return nullptr;
}
if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
unsigned NewOpc = 0;
switch (MI.getOpcode()) {
default: return nullptr;
case X86::TEST8rr: NewOpc = X86::CMP8ri; break;
case X86::TEST16rr: NewOpc = X86::CMP16ri8; break;
case X86::TEST32rr: NewOpc = X86::CMP32ri8; break;
case X86::TEST64rr: NewOpc = X86::CMP64ri8; break;
}
// Change to CMPXXri r, 0 first.
MI.setDesc(get(NewOpc));
MI.getOperand(1).ChangeToImmediate(0);
} else if (Ops.size() != 1)
return nullptr;
// Make sure the subregisters match.
// Otherwise we risk changing the size of the load.
if (LoadMI.getOperand(0).getSubReg() != MI.getOperand(Ops[0]).getSubReg())
return nullptr;
SmallVector<MachineOperand,X86::AddrNumOperands> MOs;
switch (LoadMI.getOpcode()) {
case X86::MMX_SET0:
case X86::V_SET0:
case X86::V_SETALLONES:
case X86::AVX2_SETALLONES:
case X86::AVX1_SETALLONES:
case X86::AVX_SET0:
case X86::AVX512_128_SET0:
case X86::AVX512_256_SET0:
case X86::AVX512_512_SET0:
case X86::AVX512_512_SETALLONES:
case X86::FsFLD0SD:
case X86::AVX512_FsFLD0SD:
case X86::FsFLD0SS:
case X86::AVX512_FsFLD0SS: {
// Folding a V_SET0 or V_SETALLONES as a load, to ease register pressure.
// Create a constant-pool entry and operands to load from it.
// The medium and large code models can't fold loads this way.
if (MF.getTarget().getCodeModel() != CodeModel::Small &&
MF.getTarget().getCodeModel() != CodeModel::Kernel)
return nullptr;
// x86-32 PIC requires a PIC base register for constant pools.
unsigned PICBase = 0;
if (MF.getTarget().isPositionIndependent()) {
if (Subtarget.is64Bit())
PICBase = X86::RIP;
else
// FIXME: PICBase = getGlobalBaseReg(&MF);
// This doesn't work for several reasons.
// 1. GlobalBaseReg may have been spilled.
// 2. It may not be live at MI.
return nullptr;
}
// Create a constant-pool entry.
MachineConstantPool &MCP = *MF.getConstantPool();
Type *Ty;
unsigned Opc = LoadMI.getOpcode();
if (Opc == X86::FsFLD0SS || Opc == X86::AVX512_FsFLD0SS)
Ty = Type::getFloatTy(MF.getFunction().getContext());
else if (Opc == X86::FsFLD0SD || Opc == X86::AVX512_FsFLD0SD)
Ty = Type::getDoubleTy(MF.getFunction().getContext());
else if (Opc == X86::AVX512_512_SET0 || Opc == X86::AVX512_512_SETALLONES)
Ty = VectorType::get(Type::getInt32Ty(MF.getFunction().getContext()),16);
else if (Opc == X86::AVX2_SETALLONES || Opc == X86::AVX_SET0 ||
Opc == X86::AVX512_256_SET0 || Opc == X86::AVX1_SETALLONES)
Ty = VectorType::get(Type::getInt32Ty(MF.getFunction().getContext()), 8);
else if (Opc == X86::MMX_SET0)
Ty = VectorType::get(Type::getInt32Ty(MF.getFunction().getContext()), 2);
else
Ty = VectorType::get(Type::getInt32Ty(MF.getFunction().getContext()), 4);
bool IsAllOnes = (Opc == X86::V_SETALLONES || Opc == X86::AVX2_SETALLONES ||
Opc == X86::AVX512_512_SETALLONES ||
Opc == X86::AVX1_SETALLONES);
const Constant *C = IsAllOnes ? Constant::getAllOnesValue(Ty) :
Constant::getNullValue(Ty);
unsigned CPI = MCP.getConstantPoolIndex(C, Alignment);
// Create operands to load from the constant pool entry.
MOs.push_back(MachineOperand::CreateReg(PICBase, false));
MOs.push_back(MachineOperand::CreateImm(1));
MOs.push_back(MachineOperand::CreateReg(0, false));
MOs.push_back(MachineOperand::CreateCPI(CPI, 0));
MOs.push_back(MachineOperand::CreateReg(0, false));
break;
}
default: {
if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF))
return nullptr;
// Folding a normal load. Just copy the load's address operands.
MOs.append(LoadMI.operands_begin() + NumOps - X86::AddrNumOperands,
LoadMI.operands_begin() + NumOps);
break;
}
}
return foldMemoryOperandImpl(MF, MI, Ops[0], MOs, InsertPt,
/*Size=*/0, Alignment, /*AllowCommute=*/true);
}
static SmallVector<MachineMemOperand *, 2>
extractLoadMMOs(ArrayRef<MachineMemOperand *> MMOs, MachineFunction &MF) {
SmallVector<MachineMemOperand *, 2> LoadMMOs;
for (MachineMemOperand *MMO : MMOs) {
if (!MMO->isLoad())
continue;
if (!MMO->isStore()) {
// Reuse the MMO.
LoadMMOs.push_back(MMO);
} else {
// Clone the MMO and unset the store flag.
LoadMMOs.push_back(MF.getMachineMemOperand(
MMO, MMO->getFlags() & ~MachineMemOperand::MOStore));
}
}
return LoadMMOs;
}
static SmallVector<MachineMemOperand *, 2>
extractStoreMMOs(ArrayRef<MachineMemOperand *> MMOs, MachineFunction &MF) {
SmallVector<MachineMemOperand *, 2> StoreMMOs;
for (MachineMemOperand *MMO : MMOs) {
if (!MMO->isStore())
continue;
if (!MMO->isLoad()) {
// Reuse the MMO.
StoreMMOs.push_back(MMO);
} else {
// Clone the MMO and unset the load flag.
StoreMMOs.push_back(MF.getMachineMemOperand(
MMO, MMO->getFlags() & ~MachineMemOperand::MOLoad));
}
}
return StoreMMOs;
}
bool X86InstrInfo::unfoldMemoryOperand(
MachineFunction &MF, MachineInstr &MI, unsigned Reg, bool UnfoldLoad,
bool UnfoldStore, SmallVectorImpl<MachineInstr *> &NewMIs) const {
const X86MemoryFoldTableEntry *I = lookupUnfoldTable(MI.getOpcode());
if (I == nullptr)
return false;
unsigned Opc = I->DstOp;
unsigned Index = I->Flags & TB_INDEX_MASK;
bool FoldedLoad = I->Flags & TB_FOLDED_LOAD;
bool FoldedStore = I->Flags & TB_FOLDED_STORE;
if (UnfoldLoad && !FoldedLoad)
return false;
UnfoldLoad &= FoldedLoad;
if (UnfoldStore && !FoldedStore)
return false;
UnfoldStore &= FoldedStore;
const MCInstrDesc &MCID = get(Opc);
const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF);
// TODO: Check if 32-byte or greater accesses are slow too?
if (!MI.hasOneMemOperand() && RC == &X86::VR128RegClass &&
Subtarget.isUnalignedMem16Slow())
// Without memoperands, loadRegFromAddr and storeRegToAddr will
// conservatively assume the address is unaligned. That's bad for
// performance.
return false;
SmallVector<MachineOperand, X86::AddrNumOperands> AddrOps;
SmallVector<MachineOperand,2> BeforeOps;
SmallVector<MachineOperand,2> AfterOps;
SmallVector<MachineOperand,4> ImpOps;
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &Op = MI.getOperand(i);
if (i >= Index && i < Index + X86::AddrNumOperands)
AddrOps.push_back(Op);
else if (Op.isReg() && Op.isImplicit())
ImpOps.push_back(Op);
else if (i < Index)
BeforeOps.push_back(Op);
else if (i > Index)
AfterOps.push_back(Op);
}
// Emit the load instruction.
if (UnfoldLoad) {
auto MMOs = extractLoadMMOs(MI.memoperands(), MF);
loadRegFromAddr(MF, Reg, AddrOps, RC, MMOs, NewMIs);
if (UnfoldStore) {
// Address operands cannot be marked isKill.
for (unsigned i = 1; i != 1 + X86::AddrNumOperands; ++i) {
MachineOperand &MO = NewMIs[0]->getOperand(i);
if (MO.isReg())
MO.setIsKill(false);
}
}
}
// Emit the data processing instruction.
MachineInstr *DataMI = MF.CreateMachineInstr(MCID, MI.getDebugLoc(), true);
MachineInstrBuilder MIB(MF, DataMI);
if (FoldedStore)
MIB.addReg(Reg, RegState::Define);
for (MachineOperand &BeforeOp : BeforeOps)
MIB.add(BeforeOp);
if (FoldedLoad)
MIB.addReg(Reg);
for (MachineOperand &AfterOp : AfterOps)
MIB.add(AfterOp);
for (MachineOperand &ImpOp : ImpOps) {
MIB.addReg(ImpOp.getReg(),
getDefRegState(ImpOp.isDef()) |
RegState::Implicit |
getKillRegState(ImpOp.isKill()) |
getDeadRegState(ImpOp.isDead()) |
getUndefRegState(ImpOp.isUndef()));
}
// Change CMP32ri r, 0 back to TEST32rr r, r, etc.
switch (DataMI->getOpcode()) {
default: break;
case X86::CMP64ri32:
case X86::CMP64ri8:
case X86::CMP32ri:
case X86::CMP32ri8:
case X86::CMP16ri:
case X86::CMP16ri8:
case X86::CMP8ri: {
MachineOperand &MO0 = DataMI->getOperand(0);
MachineOperand &MO1 = DataMI->getOperand(1);
if (MO1.getImm() == 0) {
unsigned NewOpc;
switch (DataMI->getOpcode()) {
default: llvm_unreachable("Unreachable!");
case X86::CMP64ri8:
case X86::CMP64ri32: NewOpc = X86::TEST64rr; break;
case X86::CMP32ri8:
case X86::CMP32ri: NewOpc = X86::TEST32rr; break;
case X86::CMP16ri8:
case X86::CMP16ri: NewOpc = X86::TEST16rr; break;
case X86::CMP8ri: NewOpc = X86::TEST8rr; break;
}
DataMI->setDesc(get(NewOpc));
MO1.ChangeToRegister(MO0.getReg(), false);
}
}
}
NewMIs.push_back(DataMI);
// Emit the store instruction.
if (UnfoldStore) {
const TargetRegisterClass *DstRC = getRegClass(MCID, 0, &RI, MF);
auto MMOs = extractStoreMMOs(MI.memoperands(), MF);
storeRegToAddr(MF, Reg, true, AddrOps, DstRC, MMOs, NewMIs);
}
return true;
}
bool
X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
SmallVectorImpl<SDNode*> &NewNodes) const {
if (!N->isMachineOpcode())
return false;
const X86MemoryFoldTableEntry *I = lookupUnfoldTable(N->getMachineOpcode());
if (I == nullptr)
return false;
unsigned Opc = I->DstOp;
unsigned Index = I->Flags & TB_INDEX_MASK;
bool FoldedLoad = I->Flags & TB_FOLDED_LOAD;
bool FoldedStore = I->Flags & TB_FOLDED_STORE;
const MCInstrDesc &MCID = get(Opc);
MachineFunction &MF = DAG.getMachineFunction();
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF);
unsigned NumDefs = MCID.NumDefs;
std::vector<SDValue> AddrOps;
std::vector<SDValue> BeforeOps;
std::vector<SDValue> AfterOps;
SDLoc dl(N);
unsigned NumOps = N->getNumOperands();
for (unsigned i = 0; i != NumOps-1; ++i) {
SDValue Op = N->getOperand(i);
if (i >= Index-NumDefs && i < Index-NumDefs + X86::AddrNumOperands)
AddrOps.push_back(Op);
else if (i < Index-NumDefs)
BeforeOps.push_back(Op);
else if (i > Index-NumDefs)
AfterOps.push_back(Op);
}
SDValue Chain = N->getOperand(NumOps-1);
AddrOps.push_back(Chain);
// Emit the load instruction.
SDNode *Load = nullptr;
if (FoldedLoad) {
EVT VT = *TRI.legalclasstypes_begin(*RC);
auto MMOs = extractLoadMMOs(cast<MachineSDNode>(N)->memoperands(), MF);
if (MMOs.empty() && RC == &X86::VR128RegClass &&
Subtarget.isUnalignedMem16Slow())
// Do not introduce a slow unaligned load.
return false;
// FIXME: If a VR128 can have size 32, we should be checking if a 32-byte
// memory access is slow above.
unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
bool isAligned = !MMOs.empty() && MMOs.front()->getAlignment() >= Alignment;
Load = DAG.getMachineNode(getLoadRegOpcode(0, RC, isAligned, Subtarget), dl,
VT, MVT::Other, AddrOps);
NewNodes.push_back(Load);
// Preserve memory reference information.
DAG.setNodeMemRefs(cast<MachineSDNode>(Load), MMOs);
}
// Emit the data processing instruction.
std::vector<EVT> VTs;
const TargetRegisterClass *DstRC = nullptr;
if (MCID.getNumDefs() > 0) {
DstRC = getRegClass(MCID, 0, &RI, MF);
VTs.push_back(*TRI.legalclasstypes_begin(*DstRC));
}
for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
EVT VT = N->getValueType(i);
if (VT != MVT::Other && i >= (unsigned)MCID.getNumDefs())
VTs.push_back(VT);
}
if (Load)
BeforeOps.push_back(SDValue(Load, 0));
BeforeOps.insert(BeforeOps.end(), AfterOps.begin(), AfterOps.end());
// Change CMP32ri r, 0 back to TEST32rr r, r, etc.
switch (Opc) {
default: break;
case X86::CMP64ri32:
case X86::CMP64ri8:
case X86::CMP32ri:
case X86::CMP32ri8:
case X86::CMP16ri:
case X86::CMP16ri8:
case X86::CMP8ri:
if (isNullConstant(BeforeOps[1])) {
switch (Opc) {
default: llvm_unreachable("Unreachable!");
case X86::CMP64ri8:
case X86::CMP64ri32: Opc = X86::TEST64rr; break;
case X86::CMP32ri8:
case X86::CMP32ri: Opc = X86::TEST32rr; break;
case X86::CMP16ri8:
case X86::CMP16ri: Opc = X86::TEST16rr; break;
case X86::CMP8ri: Opc = X86::TEST8rr; break;
}
BeforeOps[1] = BeforeOps[0];
}
}
SDNode *NewNode = DAG.getMachineNode(Opc, dl, VTs, BeforeOps);
NewNodes.push_back(NewNode);
// Emit the store instruction.
if (FoldedStore) {
AddrOps.pop_back();
AddrOps.push_back(SDValue(NewNode, 0));
AddrOps.push_back(Chain);
auto MMOs = extractStoreMMOs(cast<MachineSDNode>(N)->memoperands(), MF);
if (MMOs.empty() && RC == &X86::VR128RegClass &&
Subtarget.isUnalignedMem16Slow())
// Do not introduce a slow unaligned store.
return false;
// FIXME: If a VR128 can have size 32, we should be checking if a 32-byte
// memory access is slow above.
unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
bool isAligned = !MMOs.empty() && MMOs.front()->getAlignment() >= Alignment;
SDNode *Store =
DAG.getMachineNode(getStoreRegOpcode(0, DstRC, isAligned, Subtarget),
dl, MVT::Other, AddrOps);
NewNodes.push_back(Store);
// Preserve memory reference information.
DAG.setNodeMemRefs(cast<MachineSDNode>(Store), MMOs);
}
return true;
}
unsigned X86InstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
bool UnfoldLoad, bool UnfoldStore,
unsigned *LoadRegIndex) const {
const X86MemoryFoldTableEntry *I = lookupUnfoldTable(Opc);
if (I == nullptr)
return 0;
bool FoldedLoad = I->Flags & TB_FOLDED_LOAD;
bool FoldedStore = I->Flags & TB_FOLDED_STORE;
if (UnfoldLoad && !FoldedLoad)
return 0;
if (UnfoldStore && !FoldedStore)
return 0;
if (LoadRegIndex)
*LoadRegIndex = I->Flags & TB_INDEX_MASK;
return I->DstOp;
}
bool
X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
int64_t &Offset1, int64_t &Offset2) const {
if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode())
return false;
unsigned Opc1 = Load1->getMachineOpcode();
unsigned Opc2 = Load2->getMachineOpcode();
switch (Opc1) {
default: return false;
case X86::MOV8rm:
case X86::MOV16rm:
case X86::MOV32rm:
case X86::MOV64rm:
case X86::LD_Fp32m:
case X86::LD_Fp64m:
case X86::LD_Fp80m:
case X86::MOVSSrm:
case X86::MOVSSrm_alt:
case X86::MOVSDrm:
case X86::MOVSDrm_alt:
case X86::MMX_MOVD64rm:
case X86::MMX_MOVQ64rm:
case X86::MOVAPSrm:
case X86::MOVUPSrm:
case X86::MOVAPDrm:
case X86::MOVUPDrm:
case X86::MOVDQArm:
case X86::MOVDQUrm:
// AVX load instructions
case X86::VMOVSSrm:
case X86::VMOVSSrm_alt:
case X86::VMOVSDrm:
case X86::VMOVSDrm_alt:
case X86::VMOVAPSrm:
case X86::VMOVUPSrm:
case X86::VMOVAPDrm:
case X86::VMOVUPDrm:
case X86::VMOVDQArm:
case X86::VMOVDQUrm:
case X86::VMOVAPSYrm:
case X86::VMOVUPSYrm:
case X86::VMOVAPDYrm:
case X86::VMOVUPDYrm:
case X86::VMOVDQAYrm:
case X86::VMOVDQUYrm:
// AVX512 load instructions
case X86::VMOVSSZrm:
case X86::VMOVSSZrm_alt:
case X86::VMOVSDZrm:
case X86::VMOVSDZrm_alt:
case X86::VMOVAPSZ128rm:
case X86::VMOVUPSZ128rm:
case X86::VMOVAPSZ128rm_NOVLX:
case X86::VMOVUPSZ128rm_NOVLX:
case X86::VMOVAPDZ128rm:
case X86::VMOVUPDZ128rm:
case X86::VMOVDQU8Z128rm:
case X86::VMOVDQU16Z128rm:
case X86::VMOVDQA32Z128rm:
case X86::VMOVDQU32Z128rm:
case X86::VMOVDQA64Z128rm:
case X86::VMOVDQU64Z128rm:
case X86::VMOVAPSZ256rm:
case X86::VMOVUPSZ256rm:
case X86::VMOVAPSZ256rm_NOVLX:
case X86::VMOVUPSZ256rm_NOVLX:
case X86::VMOVAPDZ256rm:
case X86::VMOVUPDZ256rm:
case X86::VMOVDQU8Z256rm:
case X86::VMOVDQU16Z256rm:
case X86::VMOVDQA32Z256rm:
case X86::VMOVDQU32Z256rm:
case X86::VMOVDQA64Z256rm:
case X86::VMOVDQU64Z256rm:
case X86::VMOVAPSZrm:
case X86::VMOVUPSZrm:
case X86::VMOVAPDZrm:
case X86::VMOVUPDZrm:
case X86::VMOVDQU8Zrm:
case X86::VMOVDQU16Zrm:
case X86::VMOVDQA32Zrm:
case X86::VMOVDQU32Zrm:
case X86::VMOVDQA64Zrm:
case X86::VMOVDQU64Zrm:
case X86::KMOVBkm:
case X86::KMOVWkm:
case X86::KMOVDkm:
case X86::KMOVQkm:
break;
}
switch (Opc2) {
default: return false;
case X86::MOV8rm:
case X86::MOV16rm:
case X86::MOV32rm:
case X86::MOV64rm:
case X86::LD_Fp32m:
case X86::LD_Fp64m:
case X86::LD_Fp80m:
case X86::MOVSSrm:
case X86::MOVSSrm_alt:
case X86::MOVSDrm:
case X86::MOVSDrm_alt:
case X86::MMX_MOVD64rm:
case X86::MMX_MOVQ64rm:
case X86::MOVAPSrm:
case X86::MOVUPSrm:
case X86::MOVAPDrm:
case X86::MOVUPDrm:
case X86::MOVDQArm:
case X86::MOVDQUrm:
// AVX load instructions
case X86::VMOVSSrm:
case X86::VMOVSSrm_alt:
case X86::VMOVSDrm:
case X86::VMOVSDrm_alt:
case X86::VMOVAPSrm:
case X86::VMOVUPSrm:
case X86::VMOVAPDrm:
case X86::VMOVUPDrm:
case X86::VMOVDQArm:
case X86::VMOVDQUrm:
case X86::VMOVAPSYrm:
case X86::VMOVUPSYrm:
case X86::VMOVAPDYrm:
case X86::VMOVUPDYrm:
case X86::VMOVDQAYrm:
case X86::VMOVDQUYrm:
// AVX512 load instructions
case X86::VMOVSSZrm:
case X86::VMOVSSZrm_alt:
case X86::VMOVSDZrm:
case X86::VMOVSDZrm_alt:
case X86::VMOVAPSZ128rm:
case X86::VMOVUPSZ128rm:
case X86::VMOVAPSZ128rm_NOVLX:
case X86::VMOVUPSZ128rm_NOVLX:
case X86::VMOVAPDZ128rm:
case X86::VMOVUPDZ128rm:
case X86::VMOVDQU8Z128rm:
case X86::VMOVDQU16Z128rm:
case X86::VMOVDQA32Z128rm:
case X86::VMOVDQU32Z128rm:
case X86::VMOVDQA64Z128rm:
case X86::VMOVDQU64Z128rm:
case X86::VMOVAPSZ256rm:
case X86::VMOVUPSZ256rm:
case X86::VMOVAPSZ256rm_NOVLX:
case X86::VMOVUPSZ256rm_NOVLX:
case X86::VMOVAPDZ256rm:
case X86::VMOVUPDZ256rm:
case X86::VMOVDQU8Z256rm:
case X86::VMOVDQU16Z256rm:
case X86::VMOVDQA32Z256rm:
case X86::VMOVDQU32Z256rm:
case X86::VMOVDQA64Z256rm:
case X86::VMOVDQU64Z256rm:
case X86::VMOVAPSZrm:
case X86::VMOVUPSZrm:
case X86::VMOVAPDZrm:
case X86::VMOVUPDZrm:
case X86::VMOVDQU8Zrm:
case X86::VMOVDQU16Zrm:
case X86::VMOVDQA32Zrm:
case X86::VMOVDQU32Zrm:
case X86::VMOVDQA64Zrm:
case X86::VMOVDQU64Zrm:
case X86::KMOVBkm:
case X86::KMOVWkm:
case X86::KMOVDkm:
case X86::KMOVQkm:
break;
}
// Lambda to check if both loads have the same value for an operand index.
auto HasSameOp = [&](int I) {
return Load1->getOperand(I) == Load2->getOperand(I);
};
// All operands except the displacement should match.
if (!HasSameOp(X86::AddrBaseReg) || !HasSameOp(X86::AddrScaleAmt) ||
!HasSameOp(X86::AddrIndexReg) || !HasSameOp(X86::AddrSegmentReg))
return false;
// The chain operand must be the same.
if (!HasSameOp(5))
return false;
// Now let's examine if the displacements are constants.
auto Disp1 = dyn_cast<ConstantSDNode>(Load1->getOperand(X86::AddrDisp));
auto Disp2 = dyn_cast<ConstantSDNode>(Load2->getOperand(X86::AddrDisp));
if (!Disp1 || !Disp2)
return false;
Offset1 = Disp1->getSExtValue();
Offset2 = Disp2->getSExtValue();
return true;
}
bool X86InstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
int64_t Offset1, int64_t Offset2,
unsigned NumLoads) const {
assert(Offset2 > Offset1);
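// Only cluster loads whose displacements lie within roughly 512 bytes of
// each other; (Offset2 - Offset1) / 8 > 64 rejects anything farther apart.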
if ((Offset2 - Offset1) / 8 > 64)
return false;
unsigned Opc1 = Load1->getMachineOpcode();
unsigned Opc2 = Load2->getMachineOpcode();
if (Opc1 != Opc2)
return false; // FIXME: overly conservative?
switch (Opc1) {
default: break;
case X86::LD_Fp32m:
case X86::LD_Fp64m:
case X86::LD_Fp80m:
case X86::MMX_MOVD64rm:
case X86::MMX_MOVQ64rm:
return false;
}
EVT VT = Load1->getValueType(0);
switch (VT.getSimpleVT().SimpleTy) {
default:
// XMM registers. In 64-bit mode we can be a bit more aggressive since we
// have 16 of them to play with.
if (Subtarget.is64Bit()) {
if (NumLoads >= 3)
return false;
} else if (NumLoads) {
return false;
}
break;
case MVT::i8:
case MVT::i16:
case MVT::i32:
case MVT::i64:
case MVT::f32:
case MVT::f64:
if (NumLoads)
return false;
break;
}
return true;
}
bool X86InstrInfo::
reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
assert(Cond.size() == 1 && "Invalid X86 branch condition!");
X86::CondCode CC = static_cast<X86::CondCode>(Cond[0].getImm());
Cond[0].setImm(GetOppositeBranchCondition(CC));
return false;
}
bool X86InstrInfo::
isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
// FIXME: Return false for x87 stack register classes for now. We can't
// allow any loads of these registers before FpGet_ST0_80.
return !(RC == &X86::CCRRegClass || RC == &X86::DFCCRRegClass ||
RC == &X86::RFP32RegClass || RC == &X86::RFP64RegClass ||
RC == &X86::RFP80RegClass);
}
/// Return a virtual register initialized with the global base register
/// value. Output instructions required to initialize the register in the
/// function entry block, if necessary.
///
/// TODO: Eliminate this and move the code to X86MachineFunctionInfo.
///
unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
assert((!Subtarget.is64Bit() ||
MF->getTarget().getCodeModel() == CodeModel::Medium ||
MF->getTarget().getCodeModel() == CodeModel::Large) &&
"X86-64 PIC uses RIP relative addressing");
X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
unsigned GlobalBaseReg = X86FI->getGlobalBaseReg();
if (GlobalBaseReg != 0)
return GlobalBaseReg;
// Create the register. The code to initialize it is inserted
// later, by the CGBR pass (below).
MachineRegisterInfo &RegInfo = MF->getRegInfo();
GlobalBaseReg = RegInfo.createVirtualRegister(
Subtarget.is64Bit() ? &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass);
X86FI->setGlobalBaseReg(GlobalBaseReg);
return GlobalBaseReg;
}
// These are the replaceable SSE instructions. Some of these have Int variants
// that we don't include here. We don't want to replace instructions selected
// by intrinsics.
static const uint16_t ReplaceableInstrs[][3] = {
//PackedSingle PackedDouble PackedInt
{ X86::MOVAPSmr, X86::MOVAPDmr, X86::MOVDQAmr },
{ X86::MOVAPSrm, X86::MOVAPDrm, X86::MOVDQArm },
{ X86::MOVAPSrr, X86::MOVAPDrr, X86::MOVDQArr },
{ X86::MOVUPSmr, X86::MOVUPDmr, X86::MOVDQUmr },
{ X86::MOVUPSrm, X86::MOVUPDrm, X86::MOVDQUrm },
{ X86::MOVLPSmr, X86::MOVLPDmr, X86::MOVPQI2QImr },
{ X86::MOVSDmr, X86::MOVSDmr, X86::MOVPQI2QImr },
{ X86::MOVSSmr, X86::MOVSSmr, X86::MOVPDI2DImr },
{ X86::MOVSDrm, X86::MOVSDrm, X86::MOVQI2PQIrm },
{ X86::MOVSDrm_alt,X86::MOVSDrm_alt,X86::MOVQI2PQIrm },
{ X86::MOVSSrm, X86::MOVSSrm, X86::MOVDI2PDIrm },
{ X86::MOVSSrm_alt,X86::MOVSSrm_alt,X86::MOVDI2PDIrm },
{ X86::MOVNTPSmr, X86::MOVNTPDmr, X86::MOVNTDQmr },
{ X86::ANDNPSrm, X86::ANDNPDrm, X86::PANDNrm },
{ X86::ANDNPSrr, X86::ANDNPDrr, X86::PANDNrr },
{ X86::ANDPSrm, X86::ANDPDrm, X86::PANDrm },
{ X86::ANDPSrr, X86::ANDPDrr, X86::PANDrr },
{ X86::ORPSrm, X86::ORPDrm, X86::PORrm },
{ X86::ORPSrr, X86::ORPDrr, X86::PORrr },
{ X86::XORPSrm, X86::XORPDrm, X86::PXORrm },
{ X86::XORPSrr, X86::XORPDrr, X86::PXORrr },
{ X86::UNPCKLPDrm, X86::UNPCKLPDrm, X86::PUNPCKLQDQrm },
{ X86::MOVLHPSrr, X86::UNPCKLPDrr, X86::PUNPCKLQDQrr },
{ X86::UNPCKHPDrm, X86::UNPCKHPDrm, X86::PUNPCKHQDQrm },
{ X86::UNPCKHPDrr, X86::UNPCKHPDrr, X86::PUNPCKHQDQrr },
{ X86::UNPCKLPSrm, X86::UNPCKLPSrm, X86::PUNPCKLDQrm },
{ X86::UNPCKLPSrr, X86::UNPCKLPSrr, X86::PUNPCKLDQrr },
{ X86::UNPCKHPSrm, X86::UNPCKHPSrm, X86::PUNPCKHDQrm },
{ X86::UNPCKHPSrr, X86::UNPCKHPSrr, X86::PUNPCKHDQrr },
{ X86::EXTRACTPSmr, X86::EXTRACTPSmr, X86::PEXTRDmr },
{ X86::EXTRACTPSrr, X86::EXTRACTPSrr, X86::PEXTRDrr },
// AVX 128-bit support
{ X86::VMOVAPSmr, X86::VMOVAPDmr, X86::VMOVDQAmr },
{ X86::VMOVAPSrm, X86::VMOVAPDrm, X86::VMOVDQArm },
{ X86::VMOVAPSrr, X86::VMOVAPDrr, X86::VMOVDQArr },
{ X86::VMOVUPSmr, X86::VMOVUPDmr, X86::VMOVDQUmr },
{ X86::VMOVUPSrm, X86::VMOVUPDrm, X86::VMOVDQUrm },
{ X86::VMOVLPSmr, X86::VMOVLPDmr, X86::VMOVPQI2QImr },
{ X86::VMOVSDmr, X86::VMOVSDmr, X86::VMOVPQI2QImr },
{ X86::VMOVSSmr, X86::VMOVSSmr, X86::VMOVPDI2DImr },
{ X86::VMOVSDrm, X86::VMOVSDrm, X86::VMOVQI2PQIrm },
{ X86::VMOVSDrm_alt,X86::VMOVSDrm_alt,X86::VMOVQI2PQIrm },
{ X86::VMOVSSrm, X86::VMOVSSrm, X86::VMOVDI2PDIrm },
{ X86::VMOVSSrm_alt,X86::VMOVSSrm_alt,X86::VMOVDI2PDIrm },
{ X86::VMOVNTPSmr, X86::VMOVNTPDmr, X86::VMOVNTDQmr },
{ X86::VANDNPSrm, X86::VANDNPDrm, X86::VPANDNrm },
{ X86::VANDNPSrr, X86::VANDNPDrr, X86::VPANDNrr },
{ X86::VANDPSrm, X86::VANDPDrm, X86::VPANDrm },
{ X86::VANDPSrr, X86::VANDPDrr, X86::VPANDrr },
{ X86::VORPSrm, X86::VORPDrm, X86::VPORrm },
{ X86::VORPSrr, X86::VORPDrr, X86::VPORrr },
{ X86::VXORPSrm, X86::VXORPDrm, X86::VPXORrm },
{ X86::VXORPSrr, X86::VXORPDrr, X86::VPXORrr },
{ X86::VUNPCKLPDrm, X86::VUNPCKLPDrm, X86::VPUNPCKLQDQrm },
{ X86::VMOVLHPSrr, X86::VUNPCKLPDrr, X86::VPUNPCKLQDQrr },
{ X86::VUNPCKHPDrm, X86::VUNPCKHPDrm, X86::VPUNPCKHQDQrm },
{ X86::VUNPCKHPDrr, X86::VUNPCKHPDrr, X86::VPUNPCKHQDQrr },
{ X86::VUNPCKLPSrm, X86::VUNPCKLPSrm, X86::VPUNPCKLDQrm },
{ X86::VUNPCKLPSrr, X86::VUNPCKLPSrr, X86::VPUNPCKLDQrr },
{ X86::VUNPCKHPSrm, X86::VUNPCKHPSrm, X86::VPUNPCKHDQrm },
{ X86::VUNPCKHPSrr, X86::VUNPCKHPSrr, X86::VPUNPCKHDQrr },
{ X86::VEXTRACTPSmr, X86::VEXTRACTPSmr, X86::VPEXTRDmr },
{ X86::VEXTRACTPSrr, X86::VEXTRACTPSrr, X86::VPEXTRDrr },
// AVX 256-bit support
{ X86::VMOVAPSYmr, X86::VMOVAPDYmr, X86::VMOVDQAYmr },
{ X86::VMOVAPSYrm, X86::VMOVAPDYrm, X86::VMOVDQAYrm },
{ X86::VMOVAPSYrr, X86::VMOVAPDYrr, X86::VMOVDQAYrr },
{ X86::VMOVUPSYmr, X86::VMOVUPDYmr, X86::VMOVDQUYmr },
{ X86::VMOVUPSYrm, X86::VMOVUPDYrm, X86::VMOVDQUYrm },
{ X86::VMOVNTPSYmr, X86::VMOVNTPDYmr, X86::VMOVNTDQYmr },
{ X86::VPERMPSYrm, X86::VPERMPSYrm, X86::VPERMDYrm },
{ X86::VPERMPSYrr, X86::VPERMPSYrr, X86::VPERMDYrr },
{ X86::VPERMPDYmi, X86::VPERMPDYmi, X86::VPERMQYmi },
{ X86::VPERMPDYri, X86::VPERMPDYri, X86::VPERMQYri },
// AVX512 support
{ X86::VMOVLPSZ128mr, X86::VMOVLPDZ128mr, X86::VMOVPQI2QIZmr },
{ X86::VMOVNTPSZ128mr, X86::VMOVNTPDZ128mr, X86::VMOVNTDQZ128mr },
{ X86::VMOVNTPSZ256mr, X86::VMOVNTPDZ256mr, X86::VMOVNTDQZ256mr },
{ X86::VMOVNTPSZmr, X86::VMOVNTPDZmr, X86::VMOVNTDQZmr },
{ X86::VMOVSDZmr, X86::VMOVSDZmr, X86::VMOVPQI2QIZmr },
{ X86::VMOVSSZmr, X86::VMOVSSZmr, X86::VMOVPDI2DIZmr },
{ X86::VMOVSDZrm, X86::VMOVSDZrm, X86::VMOVQI2PQIZrm },
{ X86::VMOVSDZrm_alt, X86::VMOVSDZrm_alt, X86::VMOVQI2PQIZrm },
{ X86::VMOVSSZrm, X86::VMOVSSZrm, X86::VMOVDI2PDIZrm },
{ X86::VMOVSSZrm_alt, X86::VMOVSSZrm_alt, X86::VMOVDI2PDIZrm },
{ X86::VBROADCASTSSZ128r, X86::VBROADCASTSSZ128r, X86::VPBROADCASTDZ128r },
{ X86::VBROADCASTSSZ128m, X86::VBROADCASTSSZ128m, X86::VPBROADCASTDZ128m },
{ X86::VBROADCASTSSZ256r, X86::VBROADCASTSSZ256r, X86::VPBROADCASTDZ256r },
{ X86::VBROADCASTSSZ256m, X86::VBROADCASTSSZ256m, X86::VPBROADCASTDZ256m },
{ X86::VBROADCASTSSZr, X86::VBROADCASTSSZr, X86::VPBROADCASTDZr },
{ X86::VBROADCASTSSZm, X86::VBROADCASTSSZm, X86::VPBROADCASTDZm },
{ X86::VMOVDDUPZ128rr, X86::VMOVDDUPZ128rr, X86::VPBROADCASTQZ128r },
{ X86::VMOVDDUPZ128rm, X86::VMOVDDUPZ128rm, X86::VPBROADCASTQZ128m },
{ X86::VBROADCASTSDZ256r, X86::VBROADCASTSDZ256r, X86::VPBROADCASTQZ256r },
{ X86::VBROADCASTSDZ256m, X86::VBROADCASTSDZ256m, X86::VPBROADCASTQZ256m },
{ X86::VBROADCASTSDZr, X86::VBROADCASTSDZr, X86::VPBROADCASTQZr },
{ X86::VBROADCASTSDZm, X86::VBROADCASTSDZm, X86::VPBROADCASTQZm },
{ X86::VINSERTF32x4Zrr, X86::VINSERTF32x4Zrr, X86::VINSERTI32x4Zrr },
{ X86::VINSERTF32x4Zrm, X86::VINSERTF32x4Zrm, X86::VINSERTI32x4Zrm },
{ X86::VINSERTF32x8Zrr, X86::VINSERTF32x8Zrr, X86::VINSERTI32x8Zrr },
{ X86::VINSERTF32x8Zrm, X86::VINSERTF32x8Zrm, X86::VINSERTI32x8Zrm },
{ X86::VINSERTF64x2Zrr, X86::VINSERTF64x2Zrr, X86::VINSERTI64x2Zrr },
{ X86::VINSERTF64x2Zrm, X86::VINSERTF64x2Zrm, X86::VINSERTI64x2Zrm },
{ X86::VINSERTF64x4Zrr, X86::VINSERTF64x4Zrr, X86::VINSERTI64x4Zrr },
{ X86::VINSERTF64x4Zrm, X86::VINSERTF64x4Zrm, X86::VINSERTI64x4Zrm },
{ X86::VINSERTF32x4Z256rr,X86::VINSERTF32x4Z256rr,X86::VINSERTI32x4Z256rr },
{ X86::VINSERTF32x4Z256rm,X86::VINSERTF32x4Z256rm,X86::VINSERTI32x4Z256rm },
{ X86::VINSERTF64x2Z256rr,X86::VINSERTF64x2Z256rr,X86::VINSERTI64x2Z256rr },
{ X86::VINSERTF64x2Z256rm,X86::VINSERTF64x2Z256rm,X86::VINSERTI64x2Z256rm },
{ X86::VEXTRACTF32x4Zrr, X86::VEXTRACTF32x4Zrr, X86::VEXTRACTI32x4Zrr },
{ X86::VEXTRACTF32x4Zmr, X86::VEXTRACTF32x4Zmr, X86::VEXTRACTI32x4Zmr },
{ X86::VEXTRACTF32x8Zrr, X86::VEXTRACTF32x8Zrr, X86::VEXTRACTI32x8Zrr },
{ X86::VEXTRACTF32x8Zmr, X86::VEXTRACTF32x8Zmr, X86::VEXTRACTI32x8Zmr },
{ X86::VEXTRACTF64x2Zrr, X86::VEXTRACTF64x2Zrr, X86::VEXTRACTI64x2Zrr },
{ X86::VEXTRACTF64x2Zmr, X86::VEXTRACTF64x2Zmr, X86::VEXTRACTI64x2Zmr },
{ X86::VEXTRACTF64x4Zrr, X86::VEXTRACTF64x4Zrr, X86::VEXTRACTI64x4Zrr },
{ X86::VEXTRACTF64x4Zmr, X86::VEXTRACTF64x4Zmr, X86::VEXTRACTI64x4Zmr },
{ X86::VEXTRACTF32x4Z256rr,X86::VEXTRACTF32x4Z256rr,X86::VEXTRACTI32x4Z256rr },
{ X86::VEXTRACTF32x4Z256mr,X86::VEXTRACTF32x4Z256mr,X86::VEXTRACTI32x4Z256mr },
{ X86::VEXTRACTF64x2Z256rr,X86::VEXTRACTF64x2Z256rr,X86::VEXTRACTI64x2Z256rr },
{ X86::VEXTRACTF64x2Z256mr,X86::VEXTRACTF64x2Z256mr,X86::VEXTRACTI64x2Z256mr },
{ X86::VPERMILPSmi, X86::VPERMILPSmi, X86::VPSHUFDmi },
{ X86::VPERMILPSri, X86::VPERMILPSri, X86::VPSHUFDri },
{ X86::VPERMILPSZ128mi, X86::VPERMILPSZ128mi, X86::VPSHUFDZ128mi },
{ X86::VPERMILPSZ128ri, X86::VPERMILPSZ128ri, X86::VPSHUFDZ128ri },
{ X86::VPERMILPSZ256mi, X86::VPERMILPSZ256mi, X86::VPSHUFDZ256mi },
{ X86::VPERMILPSZ256ri, X86::VPERMILPSZ256ri, X86::VPSHUFDZ256ri },
{ X86::VPERMILPSZmi, X86::VPERMILPSZmi, X86::VPSHUFDZmi },
{ X86::VPERMILPSZri, X86::VPERMILPSZri, X86::VPSHUFDZri },
{ X86::VPERMPSZ256rm, X86::VPERMPSZ256rm, X86::VPERMDZ256rm },
{ X86::VPERMPSZ256rr, X86::VPERMPSZ256rr, X86::VPERMDZ256rr },
{ X86::VPERMPDZ256mi, X86::VPERMPDZ256mi, X86::VPERMQZ256mi },
{ X86::VPERMPDZ256ri, X86::VPERMPDZ256ri, X86::VPERMQZ256ri },
{ X86::VPERMPDZ256rm, X86::VPERMPDZ256rm, X86::VPERMQZ256rm },
{ X86::VPERMPDZ256rr, X86::VPERMPDZ256rr, X86::VPERMQZ256rr },
{ X86::VPERMPSZrm, X86::VPERMPSZrm, X86::VPERMDZrm },
{ X86::VPERMPSZrr, X86::VPERMPSZrr, X86::VPERMDZrr },
{ X86::VPERMPDZmi, X86::VPERMPDZmi, X86::VPERMQZmi },
{ X86::VPERMPDZri, X86::VPERMPDZri, X86::VPERMQZri },
{ X86::VPERMPDZrm, X86::VPERMPDZrm, X86::VPERMQZrm },
{ X86::VPERMPDZrr, X86::VPERMPDZrr, X86::VPERMQZrr },
{ X86::VUNPCKLPDZ256rm, X86::VUNPCKLPDZ256rm, X86::VPUNPCKLQDQZ256rm },
{ X86::VUNPCKLPDZ256rr, X86::VUNPCKLPDZ256rr, X86::VPUNPCKLQDQZ256rr },
{ X86::VUNPCKHPDZ256rm, X86::VUNPCKHPDZ256rm, X86::VPUNPCKHQDQZ256rm },
{ X86::VUNPCKHPDZ256rr, X86::VUNPCKHPDZ256rr, X86::VPUNPCKHQDQZ256rr },
{ X86::VUNPCKLPSZ256rm, X86::VUNPCKLPSZ256rm, X86::VPUNPCKLDQZ256rm },
{ X86::VUNPCKLPSZ256rr, X86::VUNPCKLPSZ256rr, X86::VPUNPCKLDQZ256rr },
{ X86::VUNPCKHPSZ256rm, X86::VUNPCKHPSZ256rm, X86::VPUNPCKHDQZ256rm },
{ X86::VUNPCKHPSZ256rr, X86::VUNPCKHPSZ256rr, X86::VPUNPCKHDQZ256rr },
{ X86::VUNPCKLPDZ128rm, X86::VUNPCKLPDZ128rm, X86::VPUNPCKLQDQZ128rm },
{ X86::VMOVLHPSZrr, X86::VUNPCKLPDZ128rr, X86::VPUNPCKLQDQZ128rr },
{ X86::VUNPCKHPDZ128rm, X86::VUNPCKHPDZ128rm, X86::VPUNPCKHQDQZ128rm },
{ X86::VUNPCKHPDZ128rr, X86::VUNPCKHPDZ128rr, X86::VPUNPCKHQDQZ128rr },
{ X86::VUNPCKLPSZ128rm, X86::VUNPCKLPSZ128rm, X86::VPUNPCKLDQZ128rm },
{ X86::VUNPCKLPSZ128rr, X86::VUNPCKLPSZ128rr, X86::VPUNPCKLDQZ128rr },
{ X86::VUNPCKHPSZ128rm, X86::VUNPCKHPSZ128rm, X86::VPUNPCKHDQZ128rm },
{ X86::VUNPCKHPSZ128rr, X86::VUNPCKHPSZ128rr, X86::VPUNPCKHDQZ128rr },
{ X86::VUNPCKLPDZrm, X86::VUNPCKLPDZrm, X86::VPUNPCKLQDQZrm },
{ X86::VUNPCKLPDZrr, X86::VUNPCKLPDZrr, X86::VPUNPCKLQDQZrr },
{ X86::VUNPCKHPDZrm, X86::VUNPCKHPDZrm, X86::VPUNPCKHQDQZrm },
{ X86::VUNPCKHPDZrr, X86::VUNPCKHPDZrr, X86::VPUNPCKHQDQZrr },
{ X86::VUNPCKLPSZrm, X86::VUNPCKLPSZrm, X86::VPUNPCKLDQZrm },
{ X86::VUNPCKLPSZrr, X86::VUNPCKLPSZrr, X86::VPUNPCKLDQZrr },
{ X86::VUNPCKHPSZrm, X86::VUNPCKHPSZrm, X86::VPUNPCKHDQZrm },
{ X86::VUNPCKHPSZrr, X86::VUNPCKHPSZrr, X86::VPUNPCKHDQZrr },
{ X86::VEXTRACTPSZmr, X86::VEXTRACTPSZmr, X86::VPEXTRDZmr },
{ X86::VEXTRACTPSZrr, X86::VEXTRACTPSZrr, X86::VPEXTRDZrr },
};
static const uint16_t ReplaceableInstrsAVX2[][3] = {
//PackedSingle PackedDouble PackedInt
{ X86::VANDNPSYrm, X86::VANDNPDYrm, X86::VPANDNYrm },
{ X86::VANDNPSYrr, X86::VANDNPDYrr, X86::VPANDNYrr },
{ X86::VANDPSYrm, X86::VANDPDYrm, X86::VPANDYrm },
{ X86::VANDPSYrr, X86::VANDPDYrr, X86::VPANDYrr },
{ X86::VORPSYrm, X86::VORPDYrm, X86::VPORYrm },
{ X86::VORPSYrr, X86::VORPDYrr, X86::VPORYrr },
{ X86::VXORPSYrm, X86::VXORPDYrm, X86::VPXORYrm },
{ X86::VXORPSYrr, X86::VXORPDYrr, X86::VPXORYrr },
{ X86::VPERM2F128rm, X86::VPERM2F128rm, X86::VPERM2I128rm },
{ X86::VPERM2F128rr, X86::VPERM2F128rr, X86::VPERM2I128rr },
{ X86::VBROADCASTSSrm, X86::VBROADCASTSSrm, X86::VPBROADCASTDrm},
{ X86::VBROADCASTSSrr, X86::VBROADCASTSSrr, X86::VPBROADCASTDrr},
{ X86::VMOVDDUPrm, X86::VMOVDDUPrm, X86::VPBROADCASTQrm},
{ X86::VMOVDDUPrr, X86::VMOVDDUPrr, X86::VPBROADCASTQrr},
{ X86::VBROADCASTSSYrr, X86::VBROADCASTSSYrr, X86::VPBROADCASTDYrr},
{ X86::VBROADCASTSSYrm, X86::VBROADCASTSSYrm, X86::VPBROADCASTDYrm},
{ X86::VBROADCASTSDYrr, X86::VBROADCASTSDYrr, X86::VPBROADCASTQYrr},
{ X86::VBROADCASTSDYrm, X86::VBROADCASTSDYrm, X86::VPBROADCASTQYrm},
{ X86::VBROADCASTF128, X86::VBROADCASTF128, X86::VBROADCASTI128 },
{ X86::VBLENDPSYrri, X86::VBLENDPSYrri, X86::VPBLENDDYrri },
{ X86::VBLENDPSYrmi, X86::VBLENDPSYrmi, X86::VPBLENDDYrmi },
{ X86::VPERMILPSYmi, X86::VPERMILPSYmi, X86::VPSHUFDYmi },
{ X86::VPERMILPSYri, X86::VPERMILPSYri, X86::VPSHUFDYri },
{ X86::VUNPCKLPDYrm, X86::VUNPCKLPDYrm, X86::VPUNPCKLQDQYrm },
{ X86::VUNPCKLPDYrr, X86::VUNPCKLPDYrr, X86::VPUNPCKLQDQYrr },
{ X86::VUNPCKHPDYrm, X86::VUNPCKHPDYrm, X86::VPUNPCKHQDQYrm },
{ X86::VUNPCKHPDYrr, X86::VUNPCKHPDYrr, X86::VPUNPCKHQDQYrr },
{ X86::VUNPCKLPSYrm, X86::VUNPCKLPSYrm, X86::VPUNPCKLDQYrm },
{ X86::VUNPCKLPSYrr, X86::VUNPCKLPSYrr, X86::VPUNPCKLDQYrr },
{ X86::VUNPCKHPSYrm, X86::VUNPCKHPSYrm, X86::VPUNPCKHDQYrm },
{ X86::VUNPCKHPSYrr, X86::VUNPCKHPSYrr, X86::VPUNPCKHDQYrr },
};
static const uint16_t ReplaceableInstrsFP[][3] = {
//PackedSingle PackedDouble (no PackedInt equivalent)
{ X86::MOVLPSrm, X86::MOVLPDrm, X86::INSTRUCTION_LIST_END },
{ X86::MOVHPSrm, X86::MOVHPDrm, X86::INSTRUCTION_LIST_END },
{ X86::MOVHPSmr, X86::MOVHPDmr, X86::INSTRUCTION_LIST_END },
{ X86::VMOVLPSrm, X86::VMOVLPDrm, X86::INSTRUCTION_LIST_END },
{ X86::VMOVHPSrm, X86::VMOVHPDrm, X86::INSTRUCTION_LIST_END },
{ X86::VMOVHPSmr, X86::VMOVHPDmr, X86::INSTRUCTION_LIST_END },
{ X86::VMOVLPSZ128rm, X86::VMOVLPDZ128rm, X86::INSTRUCTION_LIST_END },
{ X86::VMOVHPSZ128rm, X86::VMOVHPDZ128rm, X86::INSTRUCTION_LIST_END },
{ X86::VMOVHPSZ128mr, X86::VMOVHPDZ128mr, X86::INSTRUCTION_LIST_END },
};
static const uint16_t ReplaceableInstrsAVX2InsertExtract[][3] = {
//PackedSingle PackedDouble PackedInt
{ X86::VEXTRACTF128mr, X86::VEXTRACTF128mr, X86::VEXTRACTI128mr },
{ X86::VEXTRACTF128rr, X86::VEXTRACTF128rr, X86::VEXTRACTI128rr },
{ X86::VINSERTF128rm, X86::VINSERTF128rm, X86::VINSERTI128rm },
{ X86::VINSERTF128rr, X86::VINSERTF128rr, X86::VINSERTI128rr },
};
static const uint16_t ReplaceableInstrsAVX512[][4] = {
// Two integer columns for 64-bit and 32-bit elements.
//PackedSingle PackedDouble PackedInt PackedInt
{ X86::VMOVAPSZ128mr, X86::VMOVAPDZ128mr, X86::VMOVDQA64Z128mr, X86::VMOVDQA32Z128mr },
{ X86::VMOVAPSZ128rm, X86::VMOVAPDZ128rm, X86::VMOVDQA64Z128rm, X86::VMOVDQA32Z128rm },
{ X86::VMOVAPSZ128rr, X86::VMOVAPDZ128rr, X86::VMOVDQA64Z128rr, X86::VMOVDQA32Z128rr },
{ X86::VMOVUPSZ128mr, X86::VMOVUPDZ128mr, X86::VMOVDQU64Z128mr, X86::VMOVDQU32Z128mr },
{ X86::VMOVUPSZ128rm, X86::VMOVUPDZ128rm, X86::VMOVDQU64Z128rm, X86::VMOVDQU32Z128rm },
{ X86::VMOVAPSZ256mr, X86::VMOVAPDZ256mr, X86::VMOVDQA64Z256mr, X86::VMOVDQA32Z256mr },
{ X86::VMOVAPSZ256rm, X86::VMOVAPDZ256rm, X86::VMOVDQA64Z256rm, X86::VMOVDQA32Z256rm },
{ X86::VMOVAPSZ256rr, X86::VMOVAPDZ256rr, X86::VMOVDQA64Z256rr, X86::VMOVDQA32Z256rr },
{ X86::VMOVUPSZ256mr, X86::VMOVUPDZ256mr, X86::VMOVDQU64Z256mr, X86::VMOVDQU32Z256mr },
{ X86::VMOVUPSZ256rm, X86::VMOVUPDZ256rm, X86::VMOVDQU64Z256rm, X86::VMOVDQU32Z256rm },
{ X86::VMOVAPSZmr, X86::VMOVAPDZmr, X86::VMOVDQA64Zmr, X86::VMOVDQA32Zmr },
{ X86::VMOVAPSZrm, X86::VMOVAPDZrm, X86::VMOVDQA64Zrm, X86::VMOVDQA32Zrm },
{ X86::VMOVAPSZrr, X86::VMOVAPDZrr, X86::VMOVDQA64Zrr, X86::VMOVDQA32Zrr },
{ X86::VMOVUPSZmr, X86::VMOVUPDZmr, X86::VMOVDQU64Zmr, X86::VMOVDQU32Zmr },
{ X86::VMOVUPSZrm, X86::VMOVUPDZrm, X86::VMOVDQU64Zrm, X86::VMOVDQU32Zrm },
};
static const uint16_t ReplaceableInstrsAVX512DQ[][4] = {
// Two integer columns for 64-bit and 32-bit elements.
//PackedSingle PackedDouble PackedInt PackedInt
{ X86::VANDNPSZ128rm, X86::VANDNPDZ128rm, X86::VPANDNQZ128rm, X86::VPANDNDZ128rm },
{ X86::VANDNPSZ128rr, X86::VANDNPDZ128rr, X86::VPANDNQZ128rr, X86::VPANDNDZ128rr },
{ X86::VANDPSZ128rm, X86::VANDPDZ128rm, X86::VPANDQZ128rm, X86::VPANDDZ128rm },
{ X86::VANDPSZ128rr, X86::VANDPDZ128rr, X86::VPANDQZ128rr, X86::VPANDDZ128rr },
{ X86::VORPSZ128rm, X86::VORPDZ128rm, X86::VPORQZ128rm, X86::VPORDZ128rm },
{ X86::VORPSZ128rr, X86::VORPDZ128rr, X86::VPORQZ128rr, X86::VPORDZ128rr },
{ X86::VXORPSZ128rm, X86::VXORPDZ128rm, X86::VPXORQZ128rm, X86::VPXORDZ128rm },
{ X86::VXORPSZ128rr, X86::VXORPDZ128rr, X86::VPXORQZ128rr, X86::VPXORDZ128rr },
{ X86::VANDNPSZ256rm, X86::VANDNPDZ256rm, X86::VPANDNQZ256rm, X86::VPANDNDZ256rm },
{ X86::VANDNPSZ256rr, X86::VANDNPDZ256rr, X86::VPANDNQZ256rr, X86::VPANDNDZ256rr },
{ X86::VANDPSZ256rm, X86::VANDPDZ256rm, X86::VPANDQZ256rm, X86::VPANDDZ256rm },
{ X86::VANDPSZ256rr, X86::VANDPDZ256rr, X86::VPANDQZ256rr, X86::VPANDDZ256rr },
{ X86::VORPSZ256rm, X86::VORPDZ256rm, X86::VPORQZ256rm, X86::VPORDZ256rm },
{ X86::VORPSZ256rr, X86::VORPDZ256rr, X86::VPORQZ256rr, X86::VPORDZ256rr },
{ X86::VXORPSZ256rm, X86::VXORPDZ256rm, X86::VPXORQZ256rm, X86::VPXORDZ256rm },
{ X86::VXORPSZ256rr, X86::VXORPDZ256rr, X86::VPXORQZ256rr, X86::VPXORDZ256rr },
{ X86::VANDNPSZrm, X86::VANDNPDZrm, X86::VPANDNQZrm, X86::VPANDNDZrm },
{ X86::VANDNPSZrr, X86::VANDNPDZrr, X86::VPANDNQZrr, X86::VPANDNDZrr },
{ X86::VANDPSZrm, X86::VANDPDZrm, X86::VPANDQZrm, X86::VPANDDZrm },
{ X86::VANDPSZrr, X86::VANDPDZrr, X86::VPANDQZrr, X86::VPANDDZrr },
{ X86::VORPSZrm, X86::VORPDZrm, X86::VPORQZrm, X86::VPORDZrm },
{ X86::VORPSZrr, X86::VORPDZrr, X86::VPORQZrr, X86::VPORDZrr },
{ X86::VXORPSZrm, X86::VXORPDZrm, X86::VPXORQZrm, X86::VPXORDZrm },
{ X86::VXORPSZrr, X86::VXORPDZrr, X86::VPXORQZrr, X86::VPXORDZrr },
};
static const uint16_t ReplaceableInstrsAVX512DQMasked[][4] = {
// Two integer columns for 64-bit and 32-bit elements.
//PackedSingle PackedDouble
//PackedInt PackedInt
{ X86::VANDNPSZ128rmk, X86::VANDNPDZ128rmk,
X86::VPANDNQZ128rmk, X86::VPANDNDZ128rmk },
{ X86::VANDNPSZ128rmkz, X86::VANDNPDZ128rmkz,
X86::VPANDNQZ128rmkz, X86::VPANDNDZ128rmkz },
{ X86::VANDNPSZ128rrk, X86::VANDNPDZ128rrk,
X86::VPANDNQZ128rrk, X86::VPANDNDZ128rrk },
{ X86::VANDNPSZ128rrkz, X86::VANDNPDZ128rrkz,
X86::VPANDNQZ128rrkz, X86::VPANDNDZ128rrkz },
{ X86::VANDPSZ128rmk, X86::VANDPDZ128rmk,
X86::VPANDQZ128rmk, X86::VPANDDZ128rmk },
{ X86::VANDPSZ128rmkz, X86::VANDPDZ128rmkz,
X86::VPANDQZ128rmkz, X86::VPANDDZ128rmkz },
{ X86::VANDPSZ128rrk, X86::VANDPDZ128rrk,
X86::VPANDQZ128rrk, X86::VPANDDZ128rrk },
{ X86::VANDPSZ128rrkz, X86::VANDPDZ128rrkz,
X86::VPANDQZ128rrkz, X86::VPANDDZ128rrkz },
{ X86::VORPSZ128rmk, X86::VORPDZ128rmk,
X86::VPORQZ128rmk, X86::VPORDZ128rmk },
{ X86::VORPSZ128rmkz, X86::VORPDZ128rmkz,
X86::VPORQZ128rmkz, X86::VPORDZ128rmkz },
{ X86::VORPSZ128rrk, X86::VORPDZ128rrk,
X86::VPORQZ128rrk, X86::VPORDZ128rrk },
{ X86::VORPSZ128rrkz, X86::VORPDZ128rrkz,
X86::VPORQZ128rrkz, X86::VPORDZ128rrkz },
{ X86::VXORPSZ128rmk, X86::VXORPDZ128rmk,
X86::VPXORQZ128rmk, X86::VPXORDZ128rmk },
{ X86::VXORPSZ128rmkz, X86::VXORPDZ128rmkz,
X86::VPXORQZ128rmkz, X86::VPXORDZ128rmkz },
{ X86::VXORPSZ128rrk, X86::VXORPDZ128rrk,
X86::VPXORQZ128rrk, X86::VPXORDZ128rrk },
{ X86::VXORPSZ128rrkz, X86::VXORPDZ128rrkz,
X86::VPXORQZ128rrkz, X86::VPXORDZ128rrkz },
{ X86::VANDNPSZ256rmk, X86::VANDNPDZ256rmk,
X86::VPANDNQZ256rmk, X86::VPANDNDZ256rmk },
{ X86::VANDNPSZ256rmkz, X86::VANDNPDZ256rmkz,
X86::VPANDNQZ256rmkz, X86::VPANDNDZ256rmkz },
{ X86::VANDNPSZ256rrk, X86::VANDNPDZ256rrk,
X86::VPANDNQZ256rrk, X86::VPANDNDZ256rrk },
{ X86::VANDNPSZ256rrkz, X86::VANDNPDZ256rrkz,
X86::VPANDNQZ256rrkz, X86::VPANDNDZ256rrkz },
{ X86::VANDPSZ256rmk, X86::VANDPDZ256rmk,
X86::VPANDQZ256rmk, X86::VPANDDZ256rmk },
{ X86::VANDPSZ256rmkz, X86::VANDPDZ256rmkz,
X86::VPANDQZ256rmkz, X86::VPANDDZ256rmkz },
{ X86::VANDPSZ256rrk, X86::VANDPDZ256rrk,
X86::VPANDQZ256rrk, X86::VPANDDZ256rrk },
{ X86::VANDPSZ256rrkz, X86::VANDPDZ256rrkz,
X86::VPANDQZ256rrkz, X86::VPANDDZ256rrkz },
{ X86::VORPSZ256rmk, X86::VORPDZ256rmk,
X86::VPORQZ256rmk, X86::VPORDZ256rmk },
{ X86::VORPSZ256rmkz, X86::VORPDZ256rmkz,
X86::VPORQZ256rmkz, X86::VPORDZ256rmkz },
{ X86::VORPSZ256rrk, X86::VORPDZ256rrk,
X86::VPORQZ256rrk, X86::VPORDZ256rrk },
{ X86::VORPSZ256rrkz, X86::VORPDZ256rrkz,
X86::VPORQZ256rrkz, X86::VPORDZ256rrkz },
{ X86::VXORPSZ256rmk, X86::VXORPDZ256rmk,
X86::VPXORQZ256rmk, X86::VPXORDZ256rmk },
{ X86::VXORPSZ256rmkz, X86::VXORPDZ256rmkz,
X86::VPXORQZ256rmkz, X86::VPXORDZ256rmkz },
{ X86::VXORPSZ256rrk, X86::VXORPDZ256rrk,
X86::VPXORQZ256rrk, X86::VPXORDZ256rrk },
{ X86::VXORPSZ256rrkz, X86::VXORPDZ256rrkz,
X86::VPXORQZ256rrkz, X86::VPXORDZ256rrkz },
{ X86::VANDNPSZrmk, X86::VANDNPDZrmk,
X86::VPANDNQZrmk, X86::VPANDNDZrmk },
{ X86::VANDNPSZrmkz, X86::VANDNPDZrmkz,
X86::VPANDNQZrmkz, X86::VPANDNDZrmkz },
{ X86::VANDNPSZrrk, X86::VANDNPDZrrk,
X86::VPANDNQZrrk, X86::VPANDNDZrrk },
{ X86::VANDNPSZrrkz, X86::VANDNPDZrrkz,
X86::VPANDNQZrrkz, X86::VPANDNDZrrkz },
{ X86::VANDPSZrmk, X86::VANDPDZrmk,
X86::VPANDQZrmk, X86::VPANDDZrmk },
{ X86::VANDPSZrmkz, X86::VANDPDZrmkz,
X86::VPANDQZrmkz, X86::VPANDDZrmkz },
{ X86::VANDPSZrrk, X86::VANDPDZrrk,
X86::VPANDQZrrk, X86::VPANDDZrrk },
{ X86::VANDPSZrrkz, X86::VANDPDZrrkz,
X86::VPANDQZrrkz, X86::VPANDDZrrkz },
{ X86::VORPSZrmk, X86::VORPDZrmk,
X86::VPORQZrmk, X86::VPORDZrmk },
{ X86::VORPSZrmkz, X86::VORPDZrmkz,
X86::VPORQZrmkz, X86::VPORDZrmkz },
{ X86::VORPSZrrk, X86::VORPDZrrk,
X86::VPORQZrrk, X86::VPORDZrrk },
{ X86::VORPSZrrkz, X86::VORPDZrrkz,
X86::VPORQZrrkz, X86::VPORDZrrkz },
{ X86::VXORPSZrmk, X86::VXORPDZrmk,
X86::VPXORQZrmk, X86::VPXORDZrmk },
{ X86::VXORPSZrmkz, X86::VXORPDZrmkz,
X86::VPXORQZrmkz, X86::VPXORDZrmkz },
{ X86::VXORPSZrrk, X86::VXORPDZrrk,
X86::VPXORQZrrk, X86::VPXORDZrrk },
{ X86::VXORPSZrrkz, X86::VXORPDZrrkz,
X86::VPXORQZrrkz, X86::VPXORDZrrkz },
// Broadcast loads can be handled the same as masked operations to avoid
// changing element size.
{ X86::VANDNPSZ128rmb, X86::VANDNPDZ128rmb,
X86::VPANDNQZ128rmb, X86::VPANDNDZ128rmb },
{ X86::VANDPSZ128rmb, X86::VANDPDZ128rmb,
X86::VPANDQZ128rmb, X86::VPANDDZ128rmb },
{ X86::VORPSZ128rmb, X86::VORPDZ128rmb,
X86::VPORQZ128rmb, X86::VPORDZ128rmb },
{ X86::VXORPSZ128rmb, X86::VXORPDZ128rmb,
X86::VPXORQZ128rmb, X86::VPXORDZ128rmb },
{ X86::VANDNPSZ256rmb, X86::VANDNPDZ256rmb,
X86::VPANDNQZ256rmb, X86::VPANDNDZ256rmb },
{ X86::VANDPSZ256rmb, X86::VANDPDZ256rmb,
X86::VPANDQZ256rmb, X86::VPANDDZ256rmb },
{ X86::VORPSZ256rmb, X86::VORPDZ256rmb,
X86::VPORQZ256rmb, X86::VPORDZ256rmb },
{ X86::VXORPSZ256rmb, X86::VXORPDZ256rmb,
X86::VPXORQZ256rmb, X86::VPXORDZ256rmb },
{ X86::VANDNPSZrmb, X86::VANDNPDZrmb,
X86::VPANDNQZrmb, X86::VPANDNDZrmb },
{ X86::VANDPSZrmb, X86::VANDPDZrmb,
X86::VPANDQZrmb, X86::VPANDDZrmb },
{ X86::VORPSZrmb, X86::VORPDZrmb,
X86::VPORQZrmb, X86::VPORDZrmb },
{ X86::VXORPSZrmb, X86::VXORPDZrmb,
X86::VPXORQZrmb, X86::VPXORDZrmb },
{ X86::VANDNPSZ128rmbk, X86::VANDNPDZ128rmbk,
X86::VPANDNQZ128rmbk, X86::VPANDNDZ128rmbk },
{ X86::VANDPSZ128rmbk, X86::VANDPDZ128rmbk,
X86::VPANDQZ128rmbk, X86::VPANDDZ128rmbk },
{ X86::VORPSZ128rmbk, X86::VORPDZ128rmbk,
X86::VPORQZ128rmbk, X86::VPORDZ128rmbk },
{ X86::VXORPSZ128rmbk, X86::VXORPDZ128rmbk,
X86::VPXORQZ128rmbk, X86::VPXORDZ128rmbk },
{ X86::VANDNPSZ256rmbk, X86::VANDNPDZ256rmbk,
X86::VPANDNQZ256rmbk, X86::VPANDNDZ256rmbk },
{ X86::VANDPSZ256rmbk, X86::VANDPDZ256rmbk,
X86::VPANDQZ256rmbk, X86::VPANDDZ256rmbk },
{ X86::VORPSZ256rmbk, X86::VORPDZ256rmbk,
X86::VPORQZ256rmbk, X86::VPORDZ256rmbk },
{ X86::VXORPSZ256rmbk, X86::VXORPDZ256rmbk,
X86::VPXORQZ256rmbk, X86::VPXORDZ256rmbk },
{ X86::VANDNPSZrmbk, X86::VANDNPDZrmbk,
X86::VPANDNQZrmbk, X86::VPANDNDZrmbk },
{ X86::VANDPSZrmbk, X86::VANDPDZrmbk,
X86::VPANDQZrmbk, X86::VPANDDZrmbk },
{ X86::VORPSZrmbk, X86::VORPDZrmbk,
X86::VPORQZrmbk, X86::VPORDZrmbk },
{ X86::VXORPSZrmbk, X86::VXORPDZrmbk,
X86::VPXORQZrmbk, X86::VPXORDZrmbk },
{ X86::VANDNPSZ128rmbkz,X86::VANDNPDZ128rmbkz,
X86::VPANDNQZ128rmbkz,X86::VPANDNDZ128rmbkz},
{ X86::VANDPSZ128rmbkz, X86::VANDPDZ128rmbkz,
X86::VPANDQZ128rmbkz, X86::VPANDDZ128rmbkz },
{ X86::VORPSZ128rmbkz, X86::VORPDZ128rmbkz,
X86::VPORQZ128rmbkz, X86::VPORDZ128rmbkz },
{ X86::VXORPSZ128rmbkz, X86::VXORPDZ128rmbkz,
X86::VPXORQZ128rmbkz, X86::VPXORDZ128rmbkz },
{ X86::VANDNPSZ256rmbkz,X86::VANDNPDZ256rmbkz,
X86::VPANDNQZ256rmbkz,X86::VPANDNDZ256rmbkz},
{ X86::VANDPSZ256rmbkz, X86::VANDPDZ256rmbkz,
X86::VPANDQZ256rmbkz, X86::VPANDDZ256rmbkz },
{ X86::VORPSZ256rmbkz, X86::VORPDZ256rmbkz,
X86::VPORQZ256rmbkz, X86::VPORDZ256rmbkz },
{ X86::VXORPSZ256rmbkz, X86::VXORPDZ256rmbkz,
X86::VPXORQZ256rmbkz, X86::VPXORDZ256rmbkz },
{ X86::VANDNPSZrmbkz, X86::VANDNPDZrmbkz,
X86::VPANDNQZrmbkz, X86::VPANDNDZrmbkz },
{ X86::VANDPSZrmbkz, X86::VANDPDZrmbkz,
X86::VPANDQZrmbkz, X86::VPANDDZrmbkz },
{ X86::VORPSZrmbkz, X86::VORPDZrmbkz,
X86::VPORQZrmbkz, X86::VPORDZrmbkz },
{ X86::VXORPSZrmbkz, X86::VXORPDZrmbkz,
X86::VPXORQZrmbkz, X86::VPXORDZrmbkz },
};
// NOTE: These should only be used by the custom domain methods.
static const uint16_t ReplaceableBlendInstrs[][3] = {
//PackedSingle PackedDouble PackedInt
{ X86::BLENDPSrmi, X86::BLENDPDrmi, X86::PBLENDWrmi },
{ X86::BLENDPSrri, X86::BLENDPDrri, X86::PBLENDWrri },
{ X86::VBLENDPSrmi, X86::VBLENDPDrmi, X86::VPBLENDWrmi },
{ X86::VBLENDPSrri, X86::VBLENDPDrri, X86::VPBLENDWrri },
{ X86::VBLENDPSYrmi, X86::VBLENDPDYrmi, X86::VPBLENDWYrmi },
{ X86::VBLENDPSYrri, X86::VBLENDPDYrri, X86::VPBLENDWYrri },
};
static const uint16_t ReplaceableBlendAVX2Instrs[][3] = {
//PackedSingle PackedDouble PackedInt
{ X86::VBLENDPSrmi, X86::VBLENDPDrmi, X86::VPBLENDDrmi },
{ X86::VBLENDPSrri, X86::VBLENDPDrri, X86::VPBLENDDrri },
{ X86::VBLENDPSYrmi, X86::VBLENDPDYrmi, X86::VPBLENDDYrmi },
{ X86::VBLENDPSYrri, X86::VBLENDPDYrri, X86::VPBLENDDYrri },
};
// Special table for changing EVEX logic instructions to VEX.
// TODO: Should we run EVEX->VEX earlier?
static const uint16_t ReplaceableCustomAVX512LogicInstrs[][4] = {
// Two integer columns for 64-bit and 32-bit elements.
//PackedSingle PackedDouble PackedInt PackedInt
{ X86::VANDNPSrm, X86::VANDNPDrm, X86::VPANDNQZ128rm, X86::VPANDNDZ128rm },
{ X86::VANDNPSrr, X86::VANDNPDrr, X86::VPANDNQZ128rr, X86::VPANDNDZ128rr },
{ X86::VANDPSrm, X86::VANDPDrm, X86::VPANDQZ128rm, X86::VPANDDZ128rm },
{ X86::VANDPSrr, X86::VANDPDrr, X86::VPANDQZ128rr, X86::VPANDDZ128rr },
{ X86::VORPSrm, X86::VORPDrm, X86::VPORQZ128rm, X86::VPORDZ128rm },
{ X86::VORPSrr, X86::VORPDrr, X86::VPORQZ128rr, X86::VPORDZ128rr },
{ X86::VXORPSrm, X86::VXORPDrm, X86::VPXORQZ128rm, X86::VPXORDZ128rm },
{ X86::VXORPSrr, X86::VXORPDrr, X86::VPXORQZ128rr, X86::VPXORDZ128rr },
{ X86::VANDNPSYrm, X86::VANDNPDYrm, X86::VPANDNQZ256rm, X86::VPANDNDZ256rm },
{ X86::VANDNPSYrr, X86::VANDNPDYrr, X86::VPANDNQZ256rr, X86::VPANDNDZ256rr },
{ X86::VANDPSYrm, X86::VANDPDYrm, X86::VPANDQZ256rm, X86::VPANDDZ256rm },
{ X86::VANDPSYrr, X86::VANDPDYrr, X86::VPANDQZ256rr, X86::VPANDDZ256rr },
{ X86::VORPSYrm, X86::VORPDYrm, X86::VPORQZ256rm, X86::VPORDZ256rm },
{ X86::VORPSYrr, X86::VORPDYrr, X86::VPORQZ256rr, X86::VPORDZ256rr },
{ X86::VXORPSYrm, X86::VXORPDYrm, X86::VPXORQZ256rm, X86::VPXORDZ256rm },
{ X86::VXORPSYrr, X86::VXORPDYrr, X86::VPXORQZ256rr, X86::VPXORDZ256rr },
};
// FIXME: Some shuffle and unpack instructions have equivalents in different
// domains, but they require a bit more work than just switching opcodes.
static const uint16_t *lookup(unsigned opcode, unsigned domain,
ArrayRef<uint16_t[3]> Table) {
for (const uint16_t (&Row)[3] : Table)
if (Row[domain-1] == opcode)
return Row;
return nullptr;
}
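// For example, lookup(X86::MOVAPDrm, /*domain=*/2, ReplaceableInstrs) finds
// the row { MOVAPSrm, MOVAPDrm, MOVDQArm }; indexing that row with
// Domain - 1 then gives the equivalent opcode in the requested domain.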
static const uint16_t *lookupAVX512(unsigned opcode, unsigned domain,
ArrayRef<uint16_t[4]> Table) {
// If this is the integer domain make sure to check both integer columns.
for (const uint16_t (&Row)[4] : Table)
if (Row[domain-1] == opcode || (domain == 3 && Row[3] == opcode))
return Row;
return nullptr;
}
// Helper to attempt to widen/narrow blend masks.
static bool AdjustBlendMask(unsigned OldMask, unsigned OldWidth,
unsigned NewWidth, unsigned *pNewMask = nullptr) {
assert(((OldWidth % NewWidth) == 0 || (NewWidth % OldWidth) == 0) &&
"Illegal blend mask scale");
unsigned NewMask = 0;
if ((OldWidth % NewWidth) == 0) {
unsigned Scale = OldWidth / NewWidth;
unsigned SubMask = (1u << Scale) - 1;
for (unsigned i = 0; i != NewWidth; ++i) {
unsigned Sub = (OldMask >> (i * Scale)) & SubMask;
if (Sub == SubMask)
NewMask |= (1u << i);
else if (Sub != 0x0)
return false;
}
} else {
unsigned Scale = NewWidth / OldWidth;
unsigned SubMask = (1u << Scale) - 1;
for (unsigned i = 0; i != OldWidth; ++i) {
if (OldMask & (1 << i)) {
NewMask |= (SubMask << (i * Scale));
}
}
}
if (pNewMask)
*pNewMask = NewMask;
return true;
}
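// Worked example for AdjustBlendMask: narrowing OldMask = 0b1100 from
// OldWidth = 4 to NewWidth = 2 uses Scale = 2 and SubMask = 0b11; the low
// pair 0b00 is skipped and the fully-set high pair yields NewMask = 0b10.
// Widening 0b10 from width 2 to width 4 expands the set bit into
// SubMask << 2, restoring 0b1100. A partially-set pair (e.g. OldMask =
// 0b0100 narrowed from width 4 to 2) cannot be represented, so the
// function returns false.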
uint16_t X86InstrInfo::getExecutionDomainCustom(const MachineInstr &MI) const {
unsigned Opcode = MI.getOpcode();
unsigned NumOperands = MI.getDesc().getNumOperands();
auto GetBlendDomains = [&](unsigned ImmWidth, bool Is256) {
uint16_t validDomains = 0;
if (MI.getOperand(NumOperands - 1).isImm()) {
unsigned Imm = MI.getOperand(NumOperands - 1).getImm();
if (AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4))
validDomains |= 0x2; // PackedSingle
if (AdjustBlendMask(Imm, ImmWidth, Is256 ? 4 : 2))
validDomains |= 0x4; // PackedDouble
if (!Is256 || Subtarget.hasAVX2())
validDomains |= 0x8; // PackedInt
}
return validDomains;
};
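// The returned bitmask uses bit N for domain N (1 = PackedSingle,
// 2 = PackedDouble, 3 = PackedInt). For example, BLENDPDrri with Imm = 0b01
// widens cleanly to the 4-bit PS mask 0b0011 and trivially to the 2-bit PD
// mask, and 128-bit blends always allow the integer domain, so
// GetBlendDomains(2, false) returns 0x2 | 0x4 | 0x8 = 0xe.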
switch (Opcode) {
case X86::BLENDPDrmi:
case X86::BLENDPDrri:
case X86::VBLENDPDrmi:
case X86::VBLENDPDrri:
return GetBlendDomains(2, false);
case X86::VBLENDPDYrmi:
case X86::VBLENDPDYrri:
return GetBlendDomains(4, true);
case X86::BLENDPSrmi:
case X86::BLENDPSrri:
case X86::VBLENDPSrmi:
case X86::VBLENDPSrri:
case X86::VPBLENDDrmi:
case X86::VPBLENDDrri:
return GetBlendDomains(4, false);
case X86::VBLENDPSYrmi:
case X86::VBLENDPSYrri:
case X86::VPBLENDDYrmi:
case X86::VPBLENDDYrri:
return GetBlendDomains(8, true);
case X86::PBLENDWrmi:
case X86::PBLENDWrri:
case X86::VPBLENDWrmi:
case X86::VPBLENDWrri:
// Treat VPBLENDWY as a 128-bit vector as it repeats the lo/hi masks.
case X86::VPBLENDWYrmi:
case X86::VPBLENDWYrri:
return GetBlendDomains(8, false);
case X86::VPANDDZ128rr: case X86::VPANDDZ128rm:
case X86::VPANDDZ256rr: case X86::VPANDDZ256rm:
case X86::VPANDQZ128rr: case X86::VPANDQZ128rm:
case X86::VPANDQZ256rr: case X86::VPANDQZ256rm:
case X86::VPANDNDZ128rr: case X86::VPANDNDZ128rm:
case X86::VPANDNDZ256rr: case X86::VPANDNDZ256rm:
case X86::VPANDNQZ128rr: case X86::VPANDNQZ128rm:
case X86::VPANDNQZ256rr: case X86::VPANDNQZ256rm:
case X86::VPORDZ128rr: case X86::VPORDZ128rm:
case X86::VPORDZ256rr: case X86::VPORDZ256rm:
case X86::VPORQZ128rr: case X86::VPORQZ128rm:
case X86::VPORQZ256rr: case X86::VPORQZ256rm:
case X86::VPXORDZ128rr: case X86::VPXORDZ128rm:
case X86::VPXORDZ256rr: case X86::VPXORDZ256rm:
case X86::VPXORQZ128rr: case X86::VPXORQZ128rm:
case X86::VPXORQZ256rr: case X86::VPXORQZ256rm:
// If we don't have DQI, see if we can still switch from an EVEX integer
// instruction to a VEX floating point instruction.
if (Subtarget.hasDQI())
return 0;
if (RI.getEncodingValue(MI.getOperand(0).getReg()) >= 16)
return 0;
if (RI.getEncodingValue(MI.getOperand(1).getReg()) >= 16)
return 0;
// Register forms will have 3 operands. Memory forms will have more.
if (NumOperands == 3 &&
RI.getEncodingValue(MI.getOperand(2).getReg()) >= 16)
return 0;
// All domains are valid.
return 0xe;
case X86::MOVHLPSrr:
// We can swap domains when both inputs are the same register.
// FIXME: This doesn't catch all the cases we would like. If the input
// register isn't KILLed by the instruction, the two address instruction
// pass puts a COPY on one input. The other input uses the original
// register. This prevents the same physical register from being used by
// both inputs.
if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg() &&
MI.getOperand(0).getSubReg() == 0 &&
MI.getOperand(1).getSubReg() == 0 &&
MI.getOperand(2).getSubReg() == 0)
return 0x6;
return 0;
case X86::SHUFPDrri:
return 0x6;
}
return 0;
}
bool X86InstrInfo::setExecutionDomainCustom(MachineInstr &MI,
unsigned Domain) const {
assert(Domain > 0 && Domain < 4 && "Invalid execution domain");
uint16_t dom = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
assert(dom && "Not an SSE instruction");
unsigned Opcode = MI.getOpcode();
unsigned NumOperands = MI.getDesc().getNumOperands();
auto SetBlendDomain = [&](unsigned ImmWidth, bool Is256) {
if (MI.getOperand(NumOperands - 1).isImm()) {
unsigned Imm = MI.getOperand(NumOperands - 1).getImm() & 255;
Imm = (ImmWidth == 16 ? ((Imm << 8) | Imm) : Imm);
unsigned NewImm = Imm;
const uint16_t *table = lookup(Opcode, dom, ReplaceableBlendInstrs);
if (!table)
table = lookup(Opcode, dom, ReplaceableBlendAVX2Instrs);
if (Domain == 1) { // PackedSingle
AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4, &NewImm);
} else if (Domain == 2) { // PackedDouble
AdjustBlendMask(Imm, ImmWidth, Is256 ? 4 : 2, &NewImm);
} else if (Domain == 3) { // PackedInt
if (Subtarget.hasAVX2()) {
// If we are already VPBLENDW use that, else use VPBLENDD.
if ((ImmWidth / (Is256 ? 2 : 1)) != 8) {
table = lookup(Opcode, dom, ReplaceableBlendAVX2Instrs);
AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4, &NewImm);
}
} else {
assert(!Is256 && "128-bit vector expected");
AdjustBlendMask(Imm, ImmWidth, 8, &NewImm);
}
}
assert(table && table[Domain - 1] && "Unknown domain op");
MI.setDesc(get(table[Domain - 1]));
MI.getOperand(NumOperands - 1).setImm(NewImm & 255);
}
return true;
};
switch (Opcode) {
case X86::BLENDPDrmi:
case X86::BLENDPDrri:
case X86::VBLENDPDrmi:
case X86::VBLENDPDrri:
return SetBlendDomain(2, false);
case X86::VBLENDPDYrmi:
case X86::VBLENDPDYrri:
return SetBlendDomain(4, true);
case X86::BLENDPSrmi:
case X86::BLENDPSrri:
case X86::VBLENDPSrmi:
case X86::VBLENDPSrri:
case X86::VPBLENDDrmi:
case X86::VPBLENDDrri:
return SetBlendDomain(4, false);
case X86::VBLENDPSYrmi:
case X86::VBLENDPSYrri:
case X86::VPBLENDDYrmi:
case X86::VPBLENDDYrri:
return SetBlendDomain(8, true);
case X86::PBLENDWrmi:
case X86::PBLENDWrri:
case X86::VPBLENDWrmi:
case X86::VPBLENDWrri:
return SetBlendDomain(8, false);
case X86::VPBLENDWYrmi:
case X86::VPBLENDWYrri:
return SetBlendDomain(16, true);
case X86::VPANDDZ128rr: case X86::VPANDDZ128rm:
case X86::VPANDDZ256rr: case X86::VPANDDZ256rm:
case X86::VPANDQZ128rr: case X86::VPANDQZ128rm:
case X86::VPANDQZ256rr: case X86::VPANDQZ256rm:
case X86::VPANDNDZ128rr: case X86::VPANDNDZ128rm:
case X86::VPANDNDZ256rr: case X86::VPANDNDZ256rm:
case X86::VPANDNQZ128rr: case X86::VPANDNQZ128rm:
case X86::VPANDNQZ256rr: case X86::VPANDNQZ256rm:
case X86::VPORDZ128rr: case X86::VPORDZ128rm:
case X86::VPORDZ256rr: case X86::VPORDZ256rm:
case X86::VPORQZ128rr: case X86::VPORQZ128rm:
case X86::VPORQZ256rr: case X86::VPORQZ256rm:
case X86::VPXORDZ128rr: case X86::VPXORDZ128rm:
case X86::VPXORDZ256rr: case X86::VPXORDZ256rm:
case X86::VPXORQZ128rr: case X86::VPXORQZ128rm:
case X86::VPXORQZ256rr: case X86::VPXORQZ256rm: {
// Without DQI, convert EVEX instructions to VEX instructions.
if (Subtarget.hasDQI())
return false;
const uint16_t *table = lookupAVX512(MI.getOpcode(), dom,
ReplaceableCustomAVX512LogicInstrs);
assert(table && "Instruction not found in table?");
// Don't change integer Q instructions to D instructions, and use D
// instructions if we started with a PS instruction.
if (Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
Domain = 4;
MI.setDesc(get(table[Domain - 1]));
return true;
}
case X86::UNPCKHPDrr:
case X86::MOVHLPSrr:
// We just need to commute the instruction, which will switch the domains.
if (Domain != dom && Domain != 3 &&
MI.getOperand(1).getReg() == MI.getOperand(2).getReg() &&
MI.getOperand(0).getSubReg() == 0 &&
MI.getOperand(1).getSubReg() == 0 &&
MI.getOperand(2).getSubReg() == 0) {
commuteInstruction(MI, false);
return true;
}
// We must always return true for MOVHLPSrr.
if (Opcode == X86::MOVHLPSrr)
return true;
break;
case X86::SHUFPDrri: {
if (Domain == 1) {
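// SHUFPS encodes four 2-bit dword selectors: the low two pick from src1,
// the high two from src2. The base immediate 0x44 selects dwords {0,1}
// from each source (both low qwords), matching SHUFPD with Imm = 0.
// OR-ing in 0x0a retargets the src1 selectors to dwords {2,3} (its high
// qword), and 0xa0 does the same for the src2 selectors.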
unsigned Imm = MI.getOperand(3).getImm();
unsigned NewImm = 0x44;
if (Imm & 1) NewImm |= 0x0a;
if (Imm & 2) NewImm |= 0xa0;
MI.getOperand(3).setImm(NewImm);
MI.setDesc(get(X86::SHUFPSrri));
}
return true;
}
}
return false;
}
std::pair<uint16_t, uint16_t>
X86InstrInfo::getExecutionDomain(const MachineInstr &MI) const {
uint16_t domain = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
unsigned opcode = MI.getOpcode();
uint16_t validDomains = 0;
if (domain) {
// Attempt to match for custom instructions.
validDomains = getExecutionDomainCustom(MI);
if (validDomains)
return std::make_pair(domain, validDomains);
if (lookup(opcode, domain, ReplaceableInstrs)) {
validDomains = 0xe;
} else if (lookup(opcode, domain, ReplaceableInstrsAVX2)) {
validDomains = Subtarget.hasAVX2() ? 0xe : 0x6;
} else if (lookup(opcode, domain, ReplaceableInstrsFP)) {
validDomains = 0x6;
} else if (lookup(opcode, domain, ReplaceableInstrsAVX2InsertExtract)) {
// Insert/extract instructions should only affect the domain if AVX2
// is enabled.
if (!Subtarget.hasAVX2())
return std::make_pair(0, 0);
validDomains = 0xe;
} else if (lookupAVX512(opcode, domain, ReplaceableInstrsAVX512)) {
validDomains = 0xe;
} else if (Subtarget.hasDQI() && lookupAVX512(opcode, domain,
ReplaceableInstrsAVX512DQ)) {
validDomains = 0xe;
} else if (Subtarget.hasDQI()) {
if (const uint16_t *table = lookupAVX512(opcode, domain,
ReplaceableInstrsAVX512DQMasked)) {
if (domain == 1 || (domain == 3 && table[3] == opcode))
validDomains = 0xa;
else
validDomains = 0xc;
}
}
}
return std::make_pair(domain, validDomains);
}
void X86InstrInfo::setExecutionDomain(MachineInstr &MI, unsigned Domain) const {
assert(Domain > 0 && Domain < 4 && "Invalid execution domain");
uint16_t dom = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
assert(dom && "Not an SSE instruction");
// Attempt to match for custom instructions.
if (setExecutionDomainCustom(MI, Domain))
return;
const uint16_t *table = lookup(MI.getOpcode(), dom, ReplaceableInstrs);
if (!table) { // try the AVX2 table
assert((Subtarget.hasAVX2() || Domain < 3) &&
"256-bit vector operations only available in AVX2");
table = lookup(MI.getOpcode(), dom, ReplaceableInstrsAVX2);
}
if (!table) { // try the FP table
table = lookup(MI.getOpcode(), dom, ReplaceableInstrsFP);
assert((!table || Domain < 3) &&
"Can only select PackedSingle or PackedDouble");
}
if (!table) { // try the AVX2 insert/extract table
assert(Subtarget.hasAVX2() &&
"256-bit insert/extract only available in AVX2");
table = lookup(MI.getOpcode(), dom, ReplaceableInstrsAVX2InsertExtract);
}
if (!table) { // try the AVX512 table
assert(Subtarget.hasAVX512() && "Requires AVX-512");
table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512);
// Don't change integer Q instructions to D instructions.
if (table && Domain == 3 && table[3] == MI.getOpcode())
Domain = 4;
}
if (!table) { // try the AVX512DQ table
assert((Subtarget.hasDQI() || Domain >= 3) && "Requires AVX-512DQ");
table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512DQ);
// Don't change integer Q instructions to D instructions, and use D
// instructions if we started with a PS instruction.
if (table && Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
Domain = 4;
}
if (!table) { // try the AVX512DQMasked table
assert((Subtarget.hasDQI() || Domain >= 3) && "Requires AVX-512DQ");
table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512DQMasked);
if (table && Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
Domain = 4;
}
assert(table && "Cannot change domain");
MI.setDesc(get(table[Domain - 1]));
}
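// Example: calling setExecutionDomain on X86::PXORrr with Domain = 1
// (PackedSingle) finds the ReplaceableInstrs row
// { XORPSrr, XORPDrr, PXORrr } and rewrites the instruction to XORPSrr.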
/// Set NopInst to the target's canonical noop instruction.
void X86InstrInfo::getNoop(MCInst &NopInst) const {
NopInst.setOpcode(X86::NOOP);
}
bool X86InstrInfo::isHighLatencyDef(int opc) const {
switch (opc) {
default: return false;
case X86::DIVPDrm:
case X86::DIVPDrr:
case X86::DIVPSrm:
case X86::DIVPSrr:
case X86::DIVSDrm:
case X86::DIVSDrm_Int:
case X86::DIVSDrr:
case X86::DIVSDrr_Int:
case X86::DIVSSrm:
case X86::DIVSSrm_Int:
case X86::DIVSSrr:
case X86::DIVSSrr_Int:
case X86::SQRTPDm:
case X86::SQRTPDr:
case X86::SQRTPSm:
case X86::SQRTPSr:
case X86::SQRTSDm:
case X86::SQRTSDm_Int:
case X86::SQRTSDr:
case X86::SQRTSDr_Int:
case X86::SQRTSSm:
case X86::SQRTSSm_Int:
case X86::SQRTSSr:
case X86::SQRTSSr_Int:
// AVX instructions with high latency
case X86::VDIVPDrm:
case X86::VDIVPDrr:
case X86::VDIVPDYrm:
case X86::VDIVPDYrr:
case X86::VDIVPSrm:
case X86::VDIVPSrr:
case X86::VDIVPSYrm:
case X86::VDIVPSYrr:
case X86::VDIVSDrm:
case X86::VDIVSDrm_Int:
case X86::VDIVSDrr:
case X86::VDIVSDrr_Int:
case X86::VDIVSSrm:
case X86::VDIVSSrm_Int:
case X86::VDIVSSrr:
case X86::VDIVSSrr_Int:
case X86::VSQRTPDm:
case X86::VSQRTPDr:
case X86::VSQRTPDYm:
case X86::VSQRTPDYr:
case X86::VSQRTPSm:
case X86::VSQRTPSr:
case X86::VSQRTPSYm:
case X86::VSQRTPSYr:
case X86::VSQRTSDm:
case X86::VSQRTSDm_Int:
case X86::VSQRTSDr:
case X86::VSQRTSDr_Int:
case X86::VSQRTSSm:
case X86::VSQRTSSm_Int:
case X86::VSQRTSSr:
case X86::VSQRTSSr_Int:
// AVX512 instructions with high latency
case X86::VDIVPDZ128rm:
case X86::VDIVPDZ128rmb:
case X86::VDIVPDZ128rmbk:
case X86::VDIVPDZ128rmbkz:
case X86::VDIVPDZ128rmk:
case X86::VDIVPDZ128rmkz:
case X86::VDIVPDZ128rr:
case X86::VDIVPDZ128rrk:
case X86::VDIVPDZ128rrkz:
case X86::VDIVPDZ256rm:
case X86::VDIVPDZ256rmb:
case X86::VDIVPDZ256rmbk:
case X86::VDIVPDZ256rmbkz:
case X86::VDIVPDZ256rmk:
case X86::VDIVPDZ256rmkz:
case X86::VDIVPDZ256rr:
case X86::VDIVPDZ256rrk:
case X86::VDIVPDZ256rrkz:
case X86::VDIVPDZrrb:
case X86::VDIVPDZrrbk:
case X86::VDIVPDZrrbkz:
case X86::VDIVPDZrm:
case X86::VDIVPDZrmb:
case X86::VDIVPDZrmbk:
case X86::VDIVPDZrmbkz:
case X86::VDIVPDZrmk:
case X86::VDIVPDZrmkz:
case X86::VDIVPDZrr:
case X86::VDIVPDZrrk:
case X86::VDIVPDZrrkz:
case X86::VDIVPSZ128rm:
case X86::VDIVPSZ128rmb:
case X86::VDIVPSZ128rmbk:
case X86::VDIVPSZ128rmbkz:
case X86::VDIVPSZ128rmk:
case X86::VDIVPSZ128rmkz:
case X86::VDIVPSZ128rr:
case X86::VDIVPSZ128rrk:
case X86::VDIVPSZ128rrkz:
case X86::VDIVPSZ256rm:
case X86::VDIVPSZ256rmb:
case X86::VDIVPSZ256rmbk:
case X86::VDIVPSZ256rmbkz:
case X86::VDIVPSZ256rmk:
case X86::VDIVPSZ256rmkz:
case X86::VDIVPSZ256rr:
case X86::VDIVPSZ256rrk:
case X86::VDIVPSZ256rrkz:
case X86::VDIVPSZrrb:
case X86::VDIVPSZrrbk:
case X86::VDIVPSZrrbkz:
case X86::VDIVPSZrm:
case X86::VDIVPSZrmb:
case X86::VDIVPSZrmbk:
case X86::VDIVPSZrmbkz:
case X86::VDIVPSZrmk:
case X86::VDIVPSZrmkz:
case X86::VDIVPSZrr:
case X86::VDIVPSZrrk:
case X86::VDIVPSZrrkz:
case X86::VDIVSDZrm:
case X86::VDIVSDZrr:
case X86::VDIVSDZrm_Int:
case X86::VDIVSDZrm_Intk:
case X86::VDIVSDZrm_Intkz:
case X86::VDIVSDZrr_Int:
case X86::VDIVSDZrr_Intk:
case X86::VDIVSDZrr_Intkz:
case X86::VDIVSDZrrb_Int:
case X86::VDIVSDZrrb_Intk:
case X86::VDIVSDZrrb_Intkz:
case X86::VDIVSSZrm:
case X86::VDIVSSZrr:
case X86::VDIVSSZrm_Int:
case X86::VDIVSSZrm_Intk:
case X86::VDIVSSZrm_Intkz:
case X86::VDIVSSZrr_Int:
case X86::VDIVSSZrr_Intk:
case X86::VDIVSSZrr_Intkz:
case X86::VDIVSSZrrb_Int:
case X86::VDIVSSZrrb_Intk:
case X86::VDIVSSZrrb_Intkz:
case X86::VSQRTPDZ128m:
case X86::VSQRTPDZ128mb:
case X86::VSQRTPDZ128mbk:
case X86::VSQRTPDZ128mbkz:
case X86::VSQRTPDZ128mk:
case X86::VSQRTPDZ128mkz:
case X86::VSQRTPDZ128r:
case X86::VSQRTPDZ128rk:
case X86::VSQRTPDZ128rkz:
case X86::VSQRTPDZ256m:
case X86::VSQRTPDZ256mb:
case X86::VSQRTPDZ256mbk:
case X86::VSQRTPDZ256mbkz:
case X86::VSQRTPDZ256mk:
case X86::VSQRTPDZ256mkz:
case X86::VSQRTPDZ256r:
case X86::VSQRTPDZ256rk:
case X86::VSQRTPDZ256rkz:
case X86::VSQRTPDZm:
case X86::VSQRTPDZmb:
case X86::VSQRTPDZmbk:
case X86::VSQRTPDZmbkz:
case X86::VSQRTPDZmk:
case X86::VSQRTPDZmkz:
case X86::VSQRTPDZr:
case X86::VSQRTPDZrb:
case X86::VSQRTPDZrbk:
case X86::VSQRTPDZrbkz:
case X86::VSQRTPDZrk:
case X86::VSQRTPDZrkz:
case X86::VSQRTPSZ128m:
case X86::VSQRTPSZ128mb:
case X86::VSQRTPSZ128mbk:
case X86::VSQRTPSZ128mbkz:
case X86::VSQRTPSZ128mk:
case X86::VSQRTPSZ128mkz:
case X86::VSQRTPSZ128r:
case X86::VSQRTPSZ128rk:
case X86::VSQRTPSZ128rkz:
case X86::VSQRTPSZ256m:
case X86::VSQRTPSZ256mb:
case X86::VSQRTPSZ256mbk:
case X86::VSQRTPSZ256mbkz:
case X86::VSQRTPSZ256mk:
case X86::VSQRTPSZ256mkz:
case X86::VSQRTPSZ256r:
case X86::VSQRTPSZ256rk:
case X86::VSQRTPSZ256rkz:
case X86::VSQRTPSZm:
case X86::VSQRTPSZmb:
case X86::VSQRTPSZmbk:
case X86::VSQRTPSZmbkz:
case X86::VSQRTPSZmk:
case X86::VSQRTPSZmkz:
case X86::VSQRTPSZr:
case X86::VSQRTPSZrb:
case X86::VSQRTPSZrbk:
case X86::VSQRTPSZrbkz:
case X86::VSQRTPSZrk:
case X86::VSQRTPSZrkz:
case X86::VSQRTSDZm:
case X86::VSQRTSDZm_Int:
case X86::VSQRTSDZm_Intk:
case X86::VSQRTSDZm_Intkz:
case X86::VSQRTSDZr:
case X86::VSQRTSDZr_Int:
case X86::VSQRTSDZr_Intk:
case X86::VSQRTSDZr_Intkz:
case X86::VSQRTSDZrb_Int:
case X86::VSQRTSDZrb_Intk:
case X86::VSQRTSDZrb_Intkz:
case X86::VSQRTSSZm:
case X86::VSQRTSSZm_Int:
case X86::VSQRTSSZm_Intk:
case X86::VSQRTSSZm_Intkz:
case X86::VSQRTSSZr:
case X86::VSQRTSSZr_Int:
case X86::VSQRTSSZr_Intk:
case X86::VSQRTSSZr_Intkz:
case X86::VSQRTSSZrb_Int:
case X86::VSQRTSSZrb_Intk:
case X86::VSQRTSSZrb_Intkz:
case X86::VGATHERDPDYrm:
case X86::VGATHERDPDZ128rm:
case X86::VGATHERDPDZ256rm:
case X86::VGATHERDPDZrm:
case X86::VGATHERDPDrm:
case X86::VGATHERDPSYrm:
case X86::VGATHERDPSZ128rm:
case X86::VGATHERDPSZ256rm:
case X86::VGATHERDPSZrm:
case X86::VGATHERDPSrm:
case X86::VGATHERPF0DPDm:
case X86::VGATHERPF0DPSm:
case X86::VGATHERPF0QPDm:
case X86::VGATHERPF0QPSm:
case X86::VGATHERPF1DPDm:
case X86::VGATHERPF1DPSm:
case X86::VGATHERPF1QPDm:
case X86::VGATHERPF1QPSm:
case X86::VGATHERQPDYrm:
case X86::VGATHERQPDZ128rm:
case X86::VGATHERQPDZ256rm:
case X86::VGATHERQPDZrm:
case X86::VGATHERQPDrm:
case X86::VGATHERQPSYrm:
case X86::VGATHERQPSZ128rm:
case X86::VGATHERQPSZ256rm:
case X86::VGATHERQPSZrm:
case X86::VGATHERQPSrm:
case X86::VPGATHERDDYrm:
case X86::VPGATHERDDZ128rm:
case X86::VPGATHERDDZ256rm:
case X86::VPGATHERDDZrm:
case X86::VPGATHERDDrm:
case X86::VPGATHERDQYrm:
case X86::VPGATHERDQZ128rm:
case X86::VPGATHERDQZ256rm:
case X86::VPGATHERDQZrm:
case X86::VPGATHERDQrm:
case X86::VPGATHERQDYrm:
case X86::VPGATHERQDZ128rm:
case X86::VPGATHERQDZ256rm:
case X86::VPGATHERQDZrm:
case X86::VPGATHERQDrm:
case X86::VPGATHERQQYrm:
case X86::VPGATHERQQZ128rm:
case X86::VPGATHERQQZ256rm:
case X86::VPGATHERQQZrm:
case X86::VPGATHERQQrm:
case X86::VSCATTERDPDZ128mr:
case X86::VSCATTERDPDZ256mr:
case X86::VSCATTERDPDZmr:
case X86::VSCATTERDPSZ128mr:
case X86::VSCATTERDPSZ256mr:
case X86::VSCATTERDPSZmr:
case X86::VSCATTERPF0DPDm:
case X86::VSCATTERPF0DPSm:
case X86::VSCATTERPF0QPDm:
case X86::VSCATTERPF0QPSm:
case X86::VSCATTERPF1DPDm:
case X86::VSCATTERPF1DPSm:
case X86::VSCATTERPF1QPDm:
case X86::VSCATTERPF1QPSm:
case X86::VSCATTERQPDZ128mr:
case X86::VSCATTERQPDZ256mr:
case X86::VSCATTERQPDZmr:
case X86::VSCATTERQPSZ128mr:
case X86::VSCATTERQPSZ256mr:
case X86::VSCATTERQPSZmr:
case X86::VPSCATTERDDZ128mr:
case X86::VPSCATTERDDZ256mr:
case X86::VPSCATTERDDZmr:
case X86::VPSCATTERDQZ128mr:
case X86::VPSCATTERDQZ256mr:
case X86::VPSCATTERDQZmr:
case X86::VPSCATTERQDZ128mr:
case X86::VPSCATTERQDZ256mr:
case X86::VPSCATTERQDZmr:
case X86::VPSCATTERQQZ128mr:
case X86::VPSCATTERQQZ256mr:
case X86::VPSCATTERQQZmr:
return true;
}
}
bool X86InstrInfo::hasHighOperandLatency(const TargetSchedModel &SchedModel,
const MachineRegisterInfo *MRI,
const MachineInstr &DefMI,
unsigned DefIdx,
const MachineInstr &UseMI,
unsigned UseIdx) const {
return isHighLatencyDef(DefMI.getOpcode());
}
bool X86InstrInfo::hasReassociableOperands(const MachineInstr &Inst,
const MachineBasicBlock *MBB) const {
assert((Inst.getNumOperands() == 3 || Inst.getNumOperands() == 4) &&
"Reassociation needs binary operators");
// Integer binary math/logic instructions have a third source operand:
// the EFLAGS register. That operand must be both defined here and never
// used; i.e., it must be dead. If the EFLAGS operand is live, then we
// cannot change anything because rearranging the operands could affect other
// instructions that depend on the exact status flags (zero, sign, etc.)
// that are set by using these particular operands with this operation.
if (Inst.getNumOperands() == 4) {
assert(Inst.getOperand(3).isReg() &&
Inst.getOperand(3).getReg() == X86::EFLAGS &&
"Unexpected operand in reassociable instruction");
if (!Inst.getOperand(3).isDead())
return false;
}
return TargetInstrInfo::hasReassociableOperands(Inst, MBB);
}
// TODO: There are many more machine instruction opcodes to match:
// 1. Other data types (integer, vectors)
// 2. Other math / logic operations (xor, or)
// 3. Other forms of the same operation (intrinsics and other variants)
bool X86InstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst) const {
switch (Inst.getOpcode()) {
case X86::AND8rr:
case X86::AND16rr:
case X86::AND32rr:
case X86::AND64rr:
case X86::OR8rr:
case X86::OR16rr:
case X86::OR32rr:
case X86::OR64rr:
case X86::XOR8rr:
case X86::XOR16rr:
case X86::XOR32rr:
case X86::XOR64rr:
case X86::IMUL16rr:
case X86::IMUL32rr:
case X86::IMUL64rr:
case X86::PANDrr:
case X86::PORrr:
case X86::PXORrr:
case X86::ANDPDrr:
case X86::ANDPSrr:
case X86::ORPDrr:
case X86::ORPSrr:
case X86::XORPDrr:
case X86::XORPSrr:
case X86::PADDBrr:
case X86::PADDWrr:
case X86::PADDDrr:
case X86::PADDQrr:
case X86::PMULLWrr:
case X86::PMULLDrr:
case X86::PMAXSBrr:
case X86::PMAXSDrr:
case X86::PMAXSWrr:
case X86::PMAXUBrr:
case X86::PMAXUDrr:
case X86::PMAXUWrr:
case X86::PMINSBrr:
case X86::PMINSDrr:
case X86::PMINSWrr:
case X86::PMINUBrr:
case X86::PMINUDrr:
case X86::PMINUWrr:
case X86::VPANDrr:
case X86::VPANDYrr:
case X86::VPANDDZ128rr:
case X86::VPANDDZ256rr:
case X86::VPANDDZrr:
case X86::VPANDQZ128rr:
case X86::VPANDQZ256rr:
case X86::VPANDQZrr:
case X86::VPORrr:
case X86::VPORYrr:
case X86::VPORDZ128rr:
case X86::VPORDZ256rr:
case X86::VPORDZrr:
case X86::VPORQZ128rr:
case X86::VPORQZ256rr:
case X86::VPORQZrr:
case X86::VPXORrr:
case X86::VPXORYrr:
case X86::VPXORDZ128rr:
case X86::VPXORDZ256rr:
case X86::VPXORDZrr:
case X86::VPXORQZ128rr:
case X86::VPXORQZ256rr:
case X86::VPXORQZrr:
case X86::VANDPDrr:
case X86::VANDPSrr:
case X86::VANDPDYrr:
case X86::VANDPSYrr:
case X86::VANDPDZ128rr:
case X86::VANDPSZ128rr:
case X86::VANDPDZ256rr:
case X86::VANDPSZ256rr:
case X86::VANDPDZrr:
case X86::VANDPSZrr:
case X86::VORPDrr:
case X86::VORPSrr:
case X86::VORPDYrr:
case X86::VORPSYrr:
case X86::VORPDZ128rr:
case X86::VORPSZ128rr:
case X86::VORPDZ256rr:
case X86::VORPSZ256rr:
case X86::VORPDZrr:
case X86::VORPSZrr:
case X86::VXORPDrr:
case X86::VXORPSrr:
case X86::VXORPDYrr:
case X86::VXORPSYrr:
case X86::VXORPDZ128rr:
case X86::VXORPSZ128rr:
case X86::VXORPDZ256rr:
case X86::VXORPSZ256rr:
case X86::VXORPDZrr:
case X86::VXORPSZrr:
case X86::KADDBrr:
case X86::KADDWrr:
case X86::KADDDrr:
case X86::KADDQrr:
case X86::KANDBrr:
case X86::KANDWrr:
case X86::KANDDrr:
case X86::KANDQrr:
case X86::KORBrr:
case X86::KORWrr:
case X86::KORDrr:
case X86::KORQrr:
case X86::KXORBrr:
case X86::KXORWrr:
case X86::KXORDrr:
case X86::KXORQrr:
case X86::VPADDBrr:
case X86::VPADDWrr:
case X86::VPADDDrr:
case X86::VPADDQrr:
case X86::VPADDBYrr:
case X86::VPADDWYrr:
case X86::VPADDDYrr:
case X86::VPADDQYrr:
case X86::VPADDBZ128rr:
case X86::VPADDWZ128rr:
case X86::VPADDDZ128rr:
case X86::VPADDQZ128rr:
case X86::VPADDBZ256rr:
case X86::VPADDWZ256rr:
case X86::VPADDDZ256rr:
case X86::VPADDQZ256rr:
case X86::VPADDBZrr:
case X86::VPADDWZrr:
case X86::VPADDDZrr:
case X86::VPADDQZrr:
case X86::VPMULLWrr:
case X86::VPMULLWYrr:
case X86::VPMULLWZ128rr:
case X86::VPMULLWZ256rr:
case X86::VPMULLWZrr:
case X86::VPMULLDrr:
case X86::VPMULLDYrr:
case X86::VPMULLDZ128rr:
case X86::VPMULLDZ256rr:
case X86::VPMULLDZrr:
case X86::VPMULLQZ128rr:
case X86::VPMULLQZ256rr:
case X86::VPMULLQZrr:
case X86::VPMAXSBrr:
case X86::VPMAXSBYrr:
case X86::VPMAXSBZ128rr:
case X86::VPMAXSBZ256rr:
case X86::VPMAXSBZrr:
case X86::VPMAXSDrr:
case X86::VPMAXSDYrr:
case X86::VPMAXSDZ128rr:
case X86::VPMAXSDZ256rr:
case X86::VPMAXSDZrr:
case X86::VPMAXSQZ128rr:
case X86::VPMAXSQZ256rr:
case X86::VPMAXSQZrr:
case X86::VPMAXSWrr:
case X86::VPMAXSWYrr:
case X86::VPMAXSWZ128rr:
case X86::VPMAXSWZ256rr:
case X86::VPMAXSWZrr:
case X86::VPMAXUBrr:
case X86::VPMAXUBYrr:
case X86::VPMAXUBZ128rr:
case X86::VPMAXUBZ256rr:
case X86::VPMAXUBZrr:
case X86::VPMAXUDrr:
case X86::VPMAXUDYrr:
case X86::VPMAXUDZ128rr:
case X86::VPMAXUDZ256rr:
case X86::VPMAXUDZrr:
case X86::VPMAXUQZ128rr:
case X86::VPMAXUQZ256rr:
case X86::VPMAXUQZrr:
case X86::VPMAXUWrr:
case X86::VPMAXUWYrr:
case X86::VPMAXUWZ128rr:
case X86::VPMAXUWZ256rr:
case X86::VPMAXUWZrr:
case X86::VPMINSBrr:
case X86::VPMINSBYrr:
case X86::VPMINSBZ128rr:
case X86::VPMINSBZ256rr:
case X86::VPMINSBZrr:
case X86::VPMINSDrr:
case X86::VPMINSDYrr:
case X86::VPMINSDZ128rr:
case X86::VPMINSDZ256rr:
case X86::VPMINSDZrr:
case X86::VPMINSQZ128rr:
case X86::VPMINSQZ256rr:
case X86::VPMINSQZrr:
case X86::VPMINSWrr:
case X86::VPMINSWYrr:
case X86::VPMINSWZ128rr:
case X86::VPMINSWZ256rr:
case X86::VPMINSWZrr:
case X86::VPMINUBrr:
case X86::VPMINUBYrr:
case X86::VPMINUBZ128rr:
case X86::VPMINUBZ256rr:
case X86::VPMINUBZrr:
case X86::VPMINUDrr:
case X86::VPMINUDYrr:
case X86::VPMINUDZ128rr:
case X86::VPMINUDZ256rr:
case X86::VPMINUDZrr:
case X86::VPMINUQZ128rr:
case X86::VPMINUQZ256rr:
case X86::VPMINUQZrr:
case X86::VPMINUWrr:
case X86::VPMINUWYrr:
case X86::VPMINUWZ128rr:
case X86::VPMINUWZ256rr:
case X86::VPMINUWZrr:
// Normal min/max instructions are not commutative because of NaN and signed
// zero semantics, but these are. Thus, there's no need to check for global
// relaxed math; the instructions themselves have the properties we need.
case X86::MAXCPDrr:
case X86::MAXCPSrr:
case X86::MAXCSDrr:
case X86::MAXCSSrr:
case X86::MINCPDrr:
case X86::MINCPSrr:
case X86::MINCSDrr:
case X86::MINCSSrr:
case X86::VMAXCPDrr:
case X86::VMAXCPSrr:
case X86::VMAXCPDYrr:
case X86::VMAXCPSYrr:
case X86::VMAXCPDZ128rr:
case X86::VMAXCPSZ128rr:
case X86::VMAXCPDZ256rr:
case X86::VMAXCPSZ256rr:
case X86::VMAXCPDZrr:
case X86::VMAXCPSZrr:
case X86::VMAXCSDrr:
case X86::VMAXCSSrr:
case X86::VMAXCSDZrr:
case X86::VMAXCSSZrr:
case X86::VMINCPDrr:
case X86::VMINCPSrr:
case X86::VMINCPDYrr:
case X86::VMINCPSYrr:
case X86::VMINCPDZ128rr:
case X86::VMINCPSZ128rr:
case X86::VMINCPDZ256rr:
case X86::VMINCPSZ256rr:
case X86::VMINCPDZrr:
case X86::VMINCPSZrr:
case X86::VMINCSDrr:
case X86::VMINCSSrr:
case X86::VMINCSDZrr:
case X86::VMINCSSZrr:
return true;
case X86::ADDPDrr:
case X86::ADDPSrr:
case X86::ADDSDrr:
case X86::ADDSSrr:
case X86::MULPDrr:
case X86::MULPSrr:
case X86::MULSDrr:
case X86::MULSSrr:
case X86::VADDPDrr:
case X86::VADDPSrr:
case X86::VADDPDYrr:
case X86::VADDPSYrr:
case X86::VADDPDZ128rr:
case X86::VADDPSZ128rr:
case X86::VADDPDZ256rr:
case X86::VADDPSZ256rr:
case X86::VADDPDZrr:
case X86::VADDPSZrr:
case X86::VADDSDrr:
case X86::VADDSSrr:
case X86::VADDSDZrr:
case X86::VADDSSZrr:
case X86::VMULPDrr:
case X86::VMULPSrr:
case X86::VMULPDYrr:
case X86::VMULPSYrr:
case X86::VMULPDZ128rr:
case X86::VMULPSZ128rr:
case X86::VMULPDZ256rr:
case X86::VMULPSZ256rr:
case X86::VMULPDZrr:
case X86::VMULPSZrr:
case X86::VMULSDrr:
case X86::VMULSSrr:
case X86::VMULSDZrr:
case X86::VMULSSZrr:
return Inst.getParent()->getParent()->getTarget().Options.UnsafeFPMath;
default:
return false;
}
}
/// This is an architecture-specific helper function of reassociateOps.
/// Set special operand attributes for new instructions after reassociation.
void X86InstrInfo::setSpecialOperandAttr(MachineInstr &OldMI1,
MachineInstr &OldMI2,
MachineInstr &NewMI1,
MachineInstr &NewMI2) const {
// Integer instructions define an implicit EFLAGS operand (a dead implicit
// def) as the third source (fourth total) operand.
if (OldMI1.getNumOperands() != 4 || OldMI2.getNumOperands() != 4)
return;
assert(NewMI1.getNumOperands() == 4 && NewMI2.getNumOperands() == 4 &&
"Unexpected instruction type for reassociation");
MachineOperand &OldOp1 = OldMI1.getOperand(3);
MachineOperand &OldOp2 = OldMI2.getOperand(3);
MachineOperand &NewOp1 = NewMI1.getOperand(3);
MachineOperand &NewOp2 = NewMI2.getOperand(3);
assert(OldOp1.isReg() && OldOp1.getReg() == X86::EFLAGS && OldOp1.isDead() &&
"Must have dead EFLAGS operand in reassociable instruction");
assert(OldOp2.isReg() && OldOp2.getReg() == X86::EFLAGS && OldOp2.isDead() &&
"Must have dead EFLAGS operand in reassociable instruction");
(void)OldOp1;
(void)OldOp2;
assert(NewOp1.isReg() && NewOp1.getReg() == X86::EFLAGS &&
"Unexpected operand in reassociable instruction");
assert(NewOp2.isReg() && NewOp2.getReg() == X86::EFLAGS &&
"Unexpected operand in reassociable instruction");
// Mark the new EFLAGS operands as dead to be helpful to subsequent iterations
// of this pass or other passes. The EFLAGS operands must be dead in these new
// instructions because the EFLAGS operands in the original instructions must
// be dead in order for reassociation to occur.
NewOp1.setIsDead();
NewOp2.setIsDead();
}
std::pair<unsigned, unsigned>
X86InstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
return std::make_pair(TF, 0u);
}
ArrayRef<std::pair<unsigned, const char *>>
X86InstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
using namespace X86II;
static const std::pair<unsigned, const char *> TargetFlags[] = {
{MO_GOT_ABSOLUTE_ADDRESS, "x86-got-absolute-address"},
{MO_PIC_BASE_OFFSET, "x86-pic-base-offset"},
{MO_GOT, "x86-got"},
{MO_GOTOFF, "x86-gotoff"},
{MO_GOTPCREL, "x86-gotpcrel"},
{MO_PLT, "x86-plt"},
{MO_TLSGD, "x86-tlsgd"},
{MO_TLSLD, "x86-tlsld"},
{MO_TLSLDM, "x86-tlsldm"},
{MO_GOTTPOFF, "x86-gottpoff"},
{MO_INDNTPOFF, "x86-indntpoff"},
{MO_TPOFF, "x86-tpoff"},
{MO_DTPOFF, "x86-dtpoff"},
{MO_NTPOFF, "x86-ntpoff"},
{MO_GOTNTPOFF, "x86-gotntpoff"},
{MO_DLLIMPORT, "x86-dllimport"},
{MO_DARWIN_NONLAZY, "x86-darwin-nonlazy"},
{MO_DARWIN_NONLAZY_PIC_BASE, "x86-darwin-nonlazy-pic-base"},
{MO_TLVP, "x86-tlvp"},
{MO_TLVP_PIC_BASE, "x86-tlvp-pic-base"},
{MO_SECREL, "x86-secrel"},
{MO_COFFSTUB, "x86-coffstub"}};
return makeArrayRef(TargetFlags);
}
namespace {
/// Create Global Base Reg pass. This initializes the PIC
/// global base register for x86-32.
struct CGBR : public MachineFunctionPass {
static char ID;
CGBR() : MachineFunctionPass(ID) {}
bool runOnMachineFunction(MachineFunction &MF) override {
const X86TargetMachine *TM =
static_cast<const X86TargetMachine *>(&MF.getTarget());
const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
// Don't do anything in the 64-bit small and kernel code models. They use
// RIP-relative addressing for everything.
if (STI.is64Bit() && (TM->getCodeModel() == CodeModel::Small ||
TM->getCodeModel() == CodeModel::Kernel))
return false;
// Only emit a global base reg in PIC mode.
if (!TM->isPositionIndependent())
return false;
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
unsigned GlobalBaseReg = X86FI->getGlobalBaseReg();
// If we didn't need a GlobalBaseReg, don't insert code.
if (GlobalBaseReg == 0)
return false;
// Insert the code that sets GlobalBaseReg into the first MBB of the function.
MachineBasicBlock &FirstMBB = MF.front();
MachineBasicBlock::iterator MBBI = FirstMBB.begin();
DebugLoc DL = FirstMBB.findDebugLoc(MBBI);
MachineRegisterInfo &RegInfo = MF.getRegInfo();
const X86InstrInfo *TII = STI.getInstrInfo();
unsigned PC;
if (STI.isPICStyleGOT())
PC = RegInfo.createVirtualRegister(&X86::GR32RegClass);
else
PC = GlobalBaseReg;
if (STI.is64Bit()) {
if (TM->getCodeModel() == CodeModel::Medium) {
// In the medium code model, use a RIP-relative LEA to materialize the
// GOT.
BuildMI(FirstMBB, MBBI, DL, TII->get(X86::LEA64r), PC)
.addReg(X86::RIP)
.addImm(0)
.addReg(0)
.addExternalSymbol("_GLOBAL_OFFSET_TABLE_")
.addReg(0);
} else if (TM->getCodeModel() == CodeModel::Large) {
// In the large code model, we are aiming for this code, though the
// register allocation may vary:
// leaq .LN$pb(%rip), %rax
// movq $_GLOBAL_OFFSET_TABLE_ - .LN$pb, %rcx
// addq %rcx, %rax
// RAX now holds address of _GLOBAL_OFFSET_TABLE_.
unsigned PBReg = RegInfo.createVirtualRegister(&X86::GR64RegClass);
unsigned GOTReg = RegInfo.createVirtualRegister(&X86::GR64RegClass);
BuildMI(FirstMBB, MBBI, DL, TII->get(X86::LEA64r), PBReg)
.addReg(X86::RIP)
.addImm(0)
.addReg(0)
.addSym(MF.getPICBaseSymbol())
.addReg(0);
std::prev(MBBI)->setPreInstrSymbol(MF, MF.getPICBaseSymbol());
BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOV64ri), GOTReg)
.addExternalSymbol("_GLOBAL_OFFSET_TABLE_",
X86II::MO_PIC_BASE_OFFSET);
BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD64rr), PC)
.addReg(PBReg, RegState::Kill)
.addReg(GOTReg, RegState::Kill);
} else {
llvm_unreachable("unexpected code model");
}
} else {
// The operand of MovePCtoStack is completely ignored by the asm printer. It's
// only used in JIT code emission as a displacement to the pc.
BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVPC32r), PC).addImm(0);
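// (MOVPC32r itself lowers to a "call next-instruction; pop PC" pair, leaving
// the address of the pic label in PC.)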
// If we're using vanilla 'GOT' PIC style, we should use relative
// addressing not to the pc, but to the _GLOBAL_OFFSET_TABLE_ external symbol.
if (STI.isPICStyleGOT()) {
// Generate addl $__GLOBAL_OFFSET_TABLE_ + [.-piclabel],
// %some_register
BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg)
.addReg(PC)
.addExternalSymbol("_GLOBAL_OFFSET_TABLE_",
X86II::MO_GOT_ABSOLUTE_ADDRESS);
}
}
return true;
}
StringRef getPassName() const override {
return "X86 PIC Global Base Reg Initialization";
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
MachineFunctionPass::getAnalysisUsage(AU);
}
};
}
char CGBR::ID = 0;
FunctionPass*
llvm::createX86GlobalBaseRegPass() { return new CGBR(); }
namespace {
struct LDTLSCleanup : public MachineFunctionPass {
static char ID;
LDTLSCleanup() : MachineFunctionPass(ID) {}
bool runOnMachineFunction(MachineFunction &MF) override {
if (skipFunction(MF.getFunction()))
return false;
X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
if (MFI->getNumLocalDynamicTLSAccesses() < 2) {
// No point folding accesses if there aren't at least two.
return false;
}
MachineDominatorTree *DT = &getAnalysis<MachineDominatorTree>();
return VisitNode(DT->getRootNode(), 0);
}
// Visit the dominator subtree rooted at Node in pre-order.
// If TLSBaseAddrReg is nonzero, then use it to replace any
// TLS_base_addr instructions. Otherwise, create the register
// when the first such instruction is seen, and then use it
// as we encounter more instructions.
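// Pre-order matters here: a base address materialized in a dominating block
// is available in every block it dominates, so later TLS_base_addr
// instructions can be replaced with a plain register copy.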
bool VisitNode(MachineDomTreeNode *Node, unsigned TLSBaseAddrReg) {
MachineBasicBlock *BB = Node->getBlock();
bool Changed = false;
// Traverse the current block.
for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;
++I) {
switch (I->getOpcode()) {
case X86::TLS_base_addr32:
case X86::TLS_base_addr64:
if (TLSBaseAddrReg)
I = ReplaceTLSBaseAddrCall(*I, TLSBaseAddrReg);
else
I = SetRegister(*I, &TLSBaseAddrReg);
Changed = true;
break;
default:
break;
}
}
// Visit the children of this block in the dominator tree.
for (MachineDomTreeNode::iterator I = Node->begin(), E = Node->end();
I != E; ++I) {
Changed |= VisitNode(*I, TLSBaseAddrReg);
}
return Changed;
}
// Replace the TLS_base_addr instruction I with a copy from
// TLSBaseAddrReg, returning the new instruction.
MachineInstr *ReplaceTLSBaseAddrCall(MachineInstr &I,
unsigned TLSBaseAddrReg) {
MachineFunction *MF = I.getParent()->getParent();
const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>();
const bool is64Bit = STI.is64Bit();
const X86InstrInfo *TII = STI.getInstrInfo();
// Insert a Copy from TLSBaseAddrReg to RAX/EAX.
MachineInstr *Copy =
BuildMI(*I.getParent(), I, I.getDebugLoc(),
TII->get(TargetOpcode::COPY), is64Bit ? X86::RAX : X86::EAX)
.addReg(TLSBaseAddrReg);
// Erase the TLS_base_addr instruction.
I.eraseFromParent();
return Copy;
}
// Create a virtual register in *TLSBaseAddrReg, and populate it by
// inserting a copy instruction after I. Returns the new instruction.
MachineInstr *SetRegister(MachineInstr &I, unsigned *TLSBaseAddrReg) {
MachineFunction *MF = I.getParent()->getParent();
const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>();
const bool is64Bit = STI.is64Bit();
const X86InstrInfo *TII = STI.getInstrInfo();
// Create a virtual register for the TLS base address.
MachineRegisterInfo &RegInfo = MF->getRegInfo();
*TLSBaseAddrReg = RegInfo.createVirtualRegister(is64Bit
? &X86::GR64RegClass
: &X86::GR32RegClass);
// Insert a copy from RAX/EAX to TLSBaseAddrReg.
MachineInstr *Next = I.getNextNode();
MachineInstr *Copy =
BuildMI(*I.getParent(), Next, I.getDebugLoc(),
TII->get(TargetOpcode::COPY), *TLSBaseAddrReg)
.addReg(is64Bit ? X86::RAX : X86::EAX);
return Copy;
}
StringRef getPassName() const override {
return "Local Dynamic TLS Access Clean-up";
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
AU.addRequired<MachineDominatorTree>();
MachineFunctionPass::getAnalysisUsage(AU);
}
};
}
char LDTLSCleanup::ID = 0;
FunctionPass*
llvm::createCleanupLocalDynamicTLSPass() { return new LDTLSCleanup(); }
/// Constants defining how certain sequences should be outlined.
///
/// \p MachineOutlinerDefault implies that the function is called with a call
/// instruction, and a return must be emitted for the outlined function frame.
///
/// That is,
///
/// I1                                 OUTLINED_FUNCTION:
/// I2 --> call OUTLINED_FUNCTION       I1
/// I3                                  I2
///                                     I3
///                                     ret
///
/// * Call construction overhead: 1 (call instruction)
/// * Frame construction overhead: 1 (return instruction)
///
/// \p MachineOutlinerTailCall implies that the function is being tail called.
/// A jump is emitted instead of a call, and the return is already present in
/// the outlined sequence. That is,
///
/// I1                                OUTLINED_FUNCTION:
/// I2 --> jmp OUTLINED_FUNCTION       I1
/// ret                                I2
///                                    ret
///
/// * Call construction overhead: 1 (jump instruction)
/// * Frame construction overhead: 0 (don't need to return)
///
enum MachineOutlinerClass {
MachineOutlinerDefault,
MachineOutlinerTailCall
};
outliner::OutlinedFunction X86InstrInfo::getOutliningCandidateInfo(
std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
unsigned SequenceSize =
std::accumulate(RepeatedSequenceLocs[0].front(),
std::next(RepeatedSequenceLocs[0].back()), 0,
[](unsigned Sum, const MachineInstr &MI) {
// FIXME: x86 doesn't implement getInstSizeInBytes, so
// we can't tell the cost. Just assume each instruction
// is one byte.
if (MI.isDebugInstr() || MI.isKill())
return Sum;
return Sum + 1;
});
// FIXME: Use real size in bytes for call and ret instructions.
if (RepeatedSequenceLocs[0].back()->isTerminator()) {
for (outliner::Candidate &C : RepeatedSequenceLocs)
C.setCallInfo(MachineOutlinerTailCall, 1);
return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
0, // Number of bytes to emit frame.
MachineOutlinerTailCall // Type of frame.
);
}
for (outliner::Candidate &C : RepeatedSequenceLocs)
C.setCallInfo(MachineOutlinerDefault, 1);
return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize, 1,
MachineOutlinerDefault);
}
bool X86InstrInfo::isFunctionSafeToOutlineFrom(MachineFunction &MF,
bool OutlineFromLinkOnceODRs) const {
const Function &F = MF.getFunction();
// Does the function use a red zone? If it does, then we can't risk messing
// with the stack.
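// (Calling an outlined function pushes a return address just below RSP,
// which would clobber anything the function keeps in its 128-byte red zone.)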
if (Subtarget.getFrameLowering()->has128ByteRedZone(MF)) {
// It could have a red zone. If it does, then we don't want to touch it.
const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
if (!X86FI || X86FI->getUsesRedZone())
return false;
}
// If we *don't* want to outline from things that could potentially be
// deduped, then return false.
if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
return false;
// This function is viable for outlining, so return true.
return true;
}
outliner::InstrType
X86InstrInfo::getOutliningType(MachineBasicBlock::iterator &MIT, unsigned Flags) const {
MachineInstr &MI = *MIT;
// Don't allow debug values to impact outlining type.
if (MI.isDebugInstr() || MI.isIndirectDebugValue())
return outliner::InstrType::Invisible;
// At this point, KILL instructions don't really tell us much so we can go
// ahead and skip over them.
if (MI.isKill())
return outliner::InstrType::Invisible;
// Is this a tail call? If yes, we can outline as a tail call.
if (isTailCall(MI))
return outliner::InstrType::Legal;
// Is this the terminator of a basic block?
if (MI.isTerminator() || MI.isReturn()) {
// Does its parent have any successors in its MachineFunction?
if (MI.getParent()->succ_empty())
return outliner::InstrType::Legal;
// It does, so we can't tail call it.
return outliner::InstrType::Illegal;
}
// Don't outline anything that modifies or reads from the stack pointer.
//
// FIXME: There are instructions which are being manually built without
// explicit uses/defs so we also have to check the MCInstrDesc. We should be
// able to remove the extra checks once those are fixed up. For example,
// sometimes we might get something like %rax = POP64r 1. This won't be
// caught by modifiesRegister or readsRegister even though the instruction
// really ought to be formed so that modifiesRegister/readsRegister would
// catch it.
if (MI.modifiesRegister(X86::RSP, &RI) || MI.readsRegister(X86::RSP, &RI) ||
MI.getDesc().hasImplicitUseOfPhysReg(X86::RSP) ||
MI.getDesc().hasImplicitDefOfPhysReg(X86::RSP))
return outliner::InstrType::Illegal;
// Outlined calls change the instruction pointer, so don't read from it.
if (MI.readsRegister(X86::RIP, &RI) ||
MI.getDesc().hasImplicitUseOfPhysReg(X86::RIP) ||
MI.getDesc().hasImplicitDefOfPhysReg(X86::RIP))
return outliner::InstrType::Illegal;
// Position instructions (labels, CFI directives) can't safely be outlined.
if (MI.isPosition())
return outliner::InstrType::Illegal;
// Make sure none of the operands of this instruction do anything tricky.
for (const MachineOperand &MOP : MI.operands())
if (MOP.isCPI() || MOP.isJTI() || MOP.isCFIIndex() || MOP.isFI() ||
MOP.isTargetIndex())
return outliner::InstrType::Illegal;
return outliner::InstrType::Legal;
}
void X86InstrInfo::buildOutlinedFrame(MachineBasicBlock &MBB,
MachineFunction &MF,
const outliner::OutlinedFunction &OF)
const {
// If we're a tail call, we already have a return, so don't do anything.
if (OF.FrameConstructionID == MachineOutlinerTailCall)
return;
// We're a normal call, so our sequence doesn't have a return instruction.
// Add it in.
MachineInstr *retq = BuildMI(MF, DebugLoc(), get(X86::RETQ));
MBB.insert(MBB.end(), retq);
}
MachineBasicBlock::iterator
X86InstrInfo::insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
MachineBasicBlock::iterator &It,
MachineFunction &MF,
const outliner::Candidate &C) const {
// Is it a tail call?
if (C.CallConstructionID == MachineOutlinerTailCall) {
// Yes, just insert a JMP.
It = MBB.insert(It,
BuildMI(MF, DebugLoc(), get(X86::TAILJMPd64))
.addGlobalAddress(M.getNamedValue(MF.getName())));
} else {
// No, insert a call.
It = MBB.insert(It,
BuildMI(MF, DebugLoc(), get(X86::CALL64pcrel32))
.addGlobalAddress(M.getNamedValue(MF.getName())));
}
return It;
}
#define GET_INSTRINFO_HELPERS
#include "X86GenInstrInfo.inc"
Index: stable/12/contrib/llvm-project/llvm/lib/Target/X86/X86MachineFunctionInfo.h
===================================================================
--- stable/12/contrib/llvm-project/llvm/lib/Target/X86/X86MachineFunctionInfo.h (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/lib/Target/X86/X86MachineFunctionInfo.h (revision 356465)
@@ -1,184 +1,192 @@
//===-- X86MachineFunctionInfo.h - X86 machine function info ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares X86-specific per-machine-function information.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_X86_X86MACHINEFUNCTIONINFO_H
#define LLVM_LIB_TARGET_X86_X86MACHINEFUNCTIONINFO_H
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/Support/MachineValueType.h"
namespace llvm {
/// X86MachineFunctionInfo - This class is derived from MachineFunctionInfo and
/// contains private X86 target-specific information for each MachineFunction.
class X86MachineFunctionInfo : public MachineFunctionInfo {
virtual void anchor();
/// ForceFramePointer - True if the function is required to use a frame
/// pointer for reasons other than it containing dynamic allocation or
/// FP elimination being turned off. For example, Cygwin's main function
/// contains stack pointer re-alignment code which requires FP.
bool ForceFramePointer = false;
/// RestoreBasePointerOffset - Non-zero if the function has a base pointer
/// and makes a call to llvm.eh.sjlj.setjmp. When non-zero, the value is a
/// displacement from the frame pointer to a slot where the base pointer
/// is stashed.
signed char RestoreBasePointerOffset = 0;
+ /// WinEHXMMSlotInfo - Slot information of XMM registers in the stack frame
+ /// in bytes.
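+ /// (Used by getWin64EHFrameIndexRef when eliminateFrameIndex resolves a
+ /// frame index inside a Win64 EH funclet; see X86RegisterInfo.cpp.)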
+ DenseMap<int, unsigned> WinEHXMMSlotInfo;
+
/// CalleeSavedFrameSize - Size of the callee-saved register portion of the
/// stack frame in bytes.
unsigned CalleeSavedFrameSize = 0;
/// BytesToPopOnReturn - Number of bytes function pops on return (in addition
/// to the space used by the return address).
/// Used on the Windows platform for stdcall & fastcall name decoration.
unsigned BytesToPopOnReturn = 0;
/// ReturnAddrIndex - FrameIndex for return slot.
int ReturnAddrIndex = 0;
/// FrameAddrIndex - FrameIndex for the frame-address slot, used when
/// lowering llvm.frameaddress.
int FrameAddrIndex = 0;
/// TailCallReturnAddrDelta - The number of bytes by which return address
/// stack slot is moved as the result of tail call optimization.
int TailCallReturnAddrDelta = 0;
/// SRetReturnReg - Some subtargets require that sret lowering includes
/// returning the value of the returned struct in a register. This field
/// holds the virtual register into which the sret argument is passed.
unsigned SRetReturnReg = 0;
/// GlobalBaseReg - keeps track of the virtual register initialized for
/// use as the global base register. This is used in some PIC relocation
/// models.
unsigned GlobalBaseReg = 0;
/// VarArgsFrameIndex - FrameIndex for start of varargs area.
int VarArgsFrameIndex = 0;
/// RegSaveFrameIndex - X86-64 vararg func register save area.
int RegSaveFrameIndex = 0;
/// VarArgsGPOffset - X86-64 vararg func int reg offset.
unsigned VarArgsGPOffset = 0;
/// VarArgsFPOffset - X86-64 vararg func fp reg offset.
unsigned VarArgsFPOffset = 0;
/// ArgumentStackSize - The number of stack bytes consumed by the arguments
/// passed on the stack.
unsigned ArgumentStackSize = 0;
/// NumLocalDynamics - Number of local-dynamic TLS accesses.
unsigned NumLocalDynamics = 0;
/// HasPushSequences - Keeps track of whether this function uses sequences
/// of pushes to pass function parameters.
bool HasPushSequences = false;
/// True if the function recovers from an SEH exception, and therefore needs
/// to spill and restore the frame pointer.
bool HasSEHFramePtrSave = false;
/// The frame index of a stack object containing the original frame pointer
/// used to address arguments in a function using a base pointer.
int SEHFramePtrSaveIndex = 0;
/// True if this function has a subset of CSRs that is handled explicitly via
/// copies.
bool IsSplitCSR = false;
/// True if this function uses the red zone.
bool UsesRedZone = false;
/// True if this function has WIN_ALLOCA instructions.
bool HasWinAlloca = false;
private:
/// ForwardedMustTailRegParms - A list of virtual and physical registers
/// that must be forwarded to every musttail call.
SmallVector<ForwardedRegister, 1> ForwardedMustTailRegParms;
public:
X86MachineFunctionInfo() = default;
explicit X86MachineFunctionInfo(MachineFunction &MF) {}
bool getForceFramePointer() const { return ForceFramePointer;}
void setForceFramePointer(bool forceFP) { ForceFramePointer = forceFP; }
bool getHasPushSequences() const { return HasPushSequences; }
void setHasPushSequences(bool HasPush) { HasPushSequences = HasPush; }
bool getRestoreBasePointer() const { return RestoreBasePointerOffset!=0; }
void setRestoreBasePointer(const MachineFunction *MF);
int getRestoreBasePointerOffset() const {return RestoreBasePointerOffset; }
+
+ DenseMap<int, unsigned>& getWinEHXMMSlotInfo() { return WinEHXMMSlotInfo; }
+ const DenseMap<int, unsigned>& getWinEHXMMSlotInfo() const {
+ return WinEHXMMSlotInfo;
+ }
unsigned getCalleeSavedFrameSize() const { return CalleeSavedFrameSize; }
void setCalleeSavedFrameSize(unsigned bytes) { CalleeSavedFrameSize = bytes; }
unsigned getBytesToPopOnReturn() const { return BytesToPopOnReturn; }
void setBytesToPopOnReturn (unsigned bytes) { BytesToPopOnReturn = bytes;}
int getRAIndex() const { return ReturnAddrIndex; }
void setRAIndex(int Index) { ReturnAddrIndex = Index; }
int getFAIndex() const { return FrameAddrIndex; }
void setFAIndex(int Index) { FrameAddrIndex = Index; }
int getTCReturnAddrDelta() const { return TailCallReturnAddrDelta; }
void setTCReturnAddrDelta(int delta) {TailCallReturnAddrDelta = delta;}
unsigned getSRetReturnReg() const { return SRetReturnReg; }
void setSRetReturnReg(unsigned Reg) { SRetReturnReg = Reg; }
unsigned getGlobalBaseReg() const { return GlobalBaseReg; }
void setGlobalBaseReg(unsigned Reg) { GlobalBaseReg = Reg; }
int getVarArgsFrameIndex() const { return VarArgsFrameIndex; }
void setVarArgsFrameIndex(int Idx) { VarArgsFrameIndex = Idx; }
int getRegSaveFrameIndex() const { return RegSaveFrameIndex; }
void setRegSaveFrameIndex(int Idx) { RegSaveFrameIndex = Idx; }
unsigned getVarArgsGPOffset() const { return VarArgsGPOffset; }
void setVarArgsGPOffset(unsigned Offset) { VarArgsGPOffset = Offset; }
unsigned getVarArgsFPOffset() const { return VarArgsFPOffset; }
void setVarArgsFPOffset(unsigned Offset) { VarArgsFPOffset = Offset; }
unsigned getArgumentStackSize() const { return ArgumentStackSize; }
void setArgumentStackSize(unsigned size) { ArgumentStackSize = size; }
unsigned getNumLocalDynamicTLSAccesses() const { return NumLocalDynamics; }
void incNumLocalDynamicTLSAccesses() { ++NumLocalDynamics; }
bool getHasSEHFramePtrSave() const { return HasSEHFramePtrSave; }
void setHasSEHFramePtrSave(bool V) { HasSEHFramePtrSave = V; }
int getSEHFramePtrSaveIndex() const { return SEHFramePtrSaveIndex; }
void setSEHFramePtrSaveIndex(int Index) { SEHFramePtrSaveIndex = Index; }
SmallVectorImpl<ForwardedRegister> &getForwardedMustTailRegParms() {
return ForwardedMustTailRegParms;
}
bool isSplitCSR() const { return IsSplitCSR; }
void setIsSplitCSR(bool s) { IsSplitCSR = s; }
bool getUsesRedZone() const { return UsesRedZone; }
void setUsesRedZone(bool V) { UsesRedZone = V; }
bool hasWinAlloca() const { return HasWinAlloca; }
void setHasWinAlloca(bool v) { HasWinAlloca = v; }
};
} // End llvm namespace
#endif
Index: stable/12/contrib/llvm-project/llvm/lib/Target/X86/X86RegisterInfo.cpp
===================================================================
--- stable/12/contrib/llvm-project/llvm/lib/Target/X86/X86RegisterInfo.cpp (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/lib/Target/X86/X86RegisterInfo.cpp (revision 356465)
@@ -1,789 +1,806 @@
//===-- X86RegisterInfo.cpp - X86 Register Information --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//
#include "X86RegisterInfo.h"
#include "X86FrameLowering.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;
#define GET_REGINFO_TARGET_DESC
#include "X86GenRegisterInfo.inc"
static cl::opt<bool>
EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true),
cl::desc("Enable use of a base pointer for complex stack frames"));
X86RegisterInfo::X86RegisterInfo(const Triple &TT)
: X86GenRegisterInfo((TT.isArch64Bit() ? X86::RIP : X86::EIP),
X86_MC::getDwarfRegFlavour(TT, false),
X86_MC::getDwarfRegFlavour(TT, true),
(TT.isArch64Bit() ? X86::RIP : X86::EIP)) {
X86_MC::initLLVMToSEHAndCVRegMapping(this);
// Cache some information.
Is64Bit = TT.isArch64Bit();
IsWin64 = Is64Bit && TT.isOSWindows();
// Use a callee-saved register as the base pointer. These registers must
// not conflict with any ABI requirements. For example, in 32-bit mode, PIC
// requires the GOT pointer to be in EBX before function calls via the PLT.
if (Is64Bit) {
SlotSize = 8;
// This matches the simplified 32-bit pointer code in the data layout
// computation.
// FIXME: Should use the data layout?
bool Use64BitReg = TT.getEnvironment() != Triple::GNUX32;
StackPtr = Use64BitReg ? X86::RSP : X86::ESP;
FramePtr = Use64BitReg ? X86::RBP : X86::EBP;
BasePtr = Use64BitReg ? X86::RBX : X86::EBX;
} else {
SlotSize = 4;
StackPtr = X86::ESP;
FramePtr = X86::EBP;
BasePtr = X86::ESI;
}
}
bool
X86RegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
// ExecutionDomainFix, BreakFalseDeps and PostRAScheduler require liveness.
return true;
}
int
X86RegisterInfo::getSEHRegNum(unsigned i) const {
return getEncodingValue(i);
}
const TargetRegisterClass *
X86RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
unsigned Idx) const {
// The sub_8bit sub-register index is more constrained in 32-bit mode.
// It behaves just like the sub_8bit_hi index.
if (!Is64Bit && Idx == X86::sub_8bit)
Idx = X86::sub_8bit_hi;
// Forward to TableGen's default version.
return X86GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}
const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
const TargetRegisterClass *B,
unsigned SubIdx) const {
// The sub_8bit sub-register index is more constrained in 32-bit mode.
if (!Is64Bit && SubIdx == X86::sub_8bit) {
A = X86GenRegisterInfo::getSubClassWithSubReg(A, X86::sub_8bit_hi);
if (!A)
return nullptr;
}
return X86GenRegisterInfo::getMatchingSuperRegClass(A, B, SubIdx);
}
const TargetRegisterClass *
X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
const MachineFunction &MF) const {
// Don't allow super-classes of GR8_NOREX. This class is only used after
// extracting sub_8bit_hi sub-registers. The H sub-registers cannot be copied
// to the full GR8 register class in 64-bit mode, so we cannot allow the
// register class inflation.
//
// The GR8_NOREX class is always used in a way that won't be constrained to a
// sub-class, so sub-classes like GR8_ABCD_L are allowed to expand to the
// full GR8 class.
if (RC == &X86::GR8_NOREXRegClass)
return RC;
const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
const TargetRegisterClass *Super = RC;
TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
do {
switch (Super->getID()) {
case X86::FR32RegClassID:
case X86::FR64RegClassID:
// If AVX-512 isn't supported we should only inflate to these classes.
if (!Subtarget.hasAVX512() &&
getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
return Super;
break;
case X86::VR128RegClassID:
case X86::VR256RegClassID:
// If VLX isn't supported we should only inflate to these classes.
if (!Subtarget.hasVLX() &&
getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
return Super;
break;
case X86::VR128XRegClassID:
case X86::VR256XRegClassID:
// If VLX isn't supported we shouldn't inflate to these classes.
if (Subtarget.hasVLX() &&
getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
return Super;
break;
case X86::FR32XRegClassID:
case X86::FR64XRegClassID:
// If AVX-512 isn't supported we shouldn't inflate to these classes.
if (Subtarget.hasAVX512() &&
getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
return Super;
break;
case X86::GR8RegClassID:
case X86::GR16RegClassID:
case X86::GR32RegClassID:
case X86::GR64RegClassID:
case X86::RFP32RegClassID:
case X86::RFP64RegClassID:
case X86::RFP80RegClassID:
case X86::VR512_0_15RegClassID:
case X86::VR512RegClassID:
// Don't return a super-class that would shrink the spill size.
// That can happen with the vector and float classes.
if (getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
return Super;
}
Super = *I++;
} while (Super);
return RC;
}
const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(const MachineFunction &MF,
unsigned Kind) const {
const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
switch (Kind) {
default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
case 0: // Normal GPRs.
if (Subtarget.isTarget64BitLP64())
return &X86::GR64RegClass;
// If the target is 64-bit but we have been told to use 32-bit addresses,
// we can still use a 64-bit register as long as we know the high bits
// are zero.
// Reflect that in the returned register class.
if (Is64Bit) {
// When the target also allows a 64-bit frame pointer and we do have a
// frame, it is fine to use it for address accesses as well.
const X86FrameLowering *TFI = getFrameLowering(MF);
return TFI->hasFP(MF) && TFI->Uses64BitFramePtr
? &X86::LOW32_ADDR_ACCESS_RBPRegClass
: &X86::LOW32_ADDR_ACCESSRegClass;
}
return &X86::GR32RegClass;
case 1: // Normal GPRs except the stack pointer (for encoding reasons).
if (Subtarget.isTarget64BitLP64())
return &X86::GR64_NOSPRegClass;
// NOSP does not contain RIP, so no special case here.
return &X86::GR32_NOSPRegClass;
case 2: // NOREX GPRs.
if (Subtarget.isTarget64BitLP64())
return &X86::GR64_NOREXRegClass;
return &X86::GR32_NOREXRegClass;
case 3: // NOREX GPRs except the stack pointer (for encoding reasons).
if (Subtarget.isTarget64BitLP64())
return &X86::GR64_NOREX_NOSPRegClass;
// NOSP does not contain RIP, so no special case here.
return &X86::GR32_NOREX_NOSPRegClass;
case 4: // Available for tailcall (not callee-saved GPRs).
return getGPRsForTailCall(MF);
}
}
bool X86RegisterInfo::shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
unsigned DefSubReg,
const TargetRegisterClass *SrcRC,
unsigned SrcSubReg) const {
// Prevent rewriting a copy where the destination size is larger than the
// input size. See PR41619.
// FIXME: Should this be factored into the base implementation somehow?
if (DefRC->hasSuperClassEq(&X86::GR64RegClass) && DefSubReg == 0 &&
SrcRC->hasSuperClassEq(&X86::GR64RegClass) && SrcSubReg == X86::sub_32bit)
return false;
return TargetRegisterInfo::shouldRewriteCopySrc(DefRC, DefSubReg,
SrcRC, SrcSubReg);
}
const TargetRegisterClass *
X86RegisterInfo::getGPRsForTailCall(const MachineFunction &MF) const {
const Function &F = MF.getFunction();
if (IsWin64 || (F.getCallingConv() == CallingConv::Win64))
return &X86::GR64_TCW64RegClass;
else if (Is64Bit)
return &X86::GR64_TCRegClass;
bool hasHipeCC = (F.getCallingConv() == CallingConv::HiPE);
if (hasHipeCC)
return &X86::GR32RegClass;
return &X86::GR32_TCRegClass;
}
const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
if (RC == &X86::CCRRegClass) {
if (Is64Bit)
return &X86::GR64RegClass;
else
return &X86::GR32RegClass;
}
return RC;
}
unsigned
X86RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
MachineFunction &MF) const {
const X86FrameLowering *TFI = getFrameLowering(MF);
unsigned FPDiff = TFI->hasFP(MF) ? 1 : 0;
switch (RC->getID()) {
default:
return 0;
case X86::GR32RegClassID:
return 4 - FPDiff;
case X86::GR64RegClassID:
return 12 - FPDiff;
case X86::VR128RegClassID:
return Is64Bit ? 10 : 4;
case X86::VR64RegClassID:
return 4;
}
}
const MCPhysReg *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
assert(MF && "MachineFunction required");
const X86Subtarget &Subtarget = MF->getSubtarget<X86Subtarget>();
const Function &F = MF->getFunction();
bool HasSSE = Subtarget.hasSSE1();
bool HasAVX = Subtarget.hasAVX();
bool HasAVX512 = Subtarget.hasAVX512();
bool CallsEHReturn = MF->callsEHReturn();
CallingConv::ID CC = F.getCallingConv();
// If the no_caller_saved_registers attribute is present, switch to the
// X86_INTR calling convention, because it has the CSR list we need.
if (MF->getFunction().hasFnAttribute("no_caller_saved_registers"))
CC = CallingConv::X86_INTR;
switch (CC) {
case CallingConv::GHC:
case CallingConv::HiPE:
return CSR_NoRegs_SaveList;
case CallingConv::AnyReg:
if (HasAVX)
return CSR_64_AllRegs_AVX_SaveList;
return CSR_64_AllRegs_SaveList;
case CallingConv::PreserveMost:
return CSR_64_RT_MostRegs_SaveList;
case CallingConv::PreserveAll:
if (HasAVX)
return CSR_64_RT_AllRegs_AVX_SaveList;
return CSR_64_RT_AllRegs_SaveList;
case CallingConv::CXX_FAST_TLS:
if (Is64Bit)
return MF->getInfo<X86MachineFunctionInfo>()->isSplitCSR() ?
CSR_64_CXX_TLS_Darwin_PE_SaveList : CSR_64_TLS_Darwin_SaveList;
break;
case CallingConv::Intel_OCL_BI: {
if (HasAVX512 && IsWin64)
return CSR_Win64_Intel_OCL_BI_AVX512_SaveList;
if (HasAVX512 && Is64Bit)
return CSR_64_Intel_OCL_BI_AVX512_SaveList;
if (HasAVX && IsWin64)
return CSR_Win64_Intel_OCL_BI_AVX_SaveList;
if (HasAVX && Is64Bit)
return CSR_64_Intel_OCL_BI_AVX_SaveList;
if (!HasAVX && !IsWin64 && Is64Bit)
return CSR_64_Intel_OCL_BI_SaveList;
break;
}
case CallingConv::HHVM:
return CSR_64_HHVM_SaveList;
case CallingConv::X86_RegCall:
if (Is64Bit) {
if (IsWin64) {
return (HasSSE ? CSR_Win64_RegCall_SaveList :
CSR_Win64_RegCall_NoSSE_SaveList);
} else {
return (HasSSE ? CSR_SysV64_RegCall_SaveList :
CSR_SysV64_RegCall_NoSSE_SaveList);
}
} else {
return (HasSSE ? CSR_32_RegCall_SaveList :
CSR_32_RegCall_NoSSE_SaveList);
}
case CallingConv::Cold:
if (Is64Bit)
return CSR_64_MostRegs_SaveList;
break;
case CallingConv::Win64:
if (!HasSSE)
return CSR_Win64_NoSSE_SaveList;
return CSR_Win64_SaveList;
case CallingConv::X86_64_SysV:
if (CallsEHReturn)
return CSR_64EHRet_SaveList;
return CSR_64_SaveList;
case CallingConv::X86_INTR:
if (Is64Bit) {
if (HasAVX512)
return CSR_64_AllRegs_AVX512_SaveList;
if (HasAVX)
return CSR_64_AllRegs_AVX_SaveList;
if (HasSSE)
return CSR_64_AllRegs_SaveList;
return CSR_64_AllRegs_NoSSE_SaveList;
} else {
if (HasAVX512)
return CSR_32_AllRegs_AVX512_SaveList;
if (HasAVX)
return CSR_32_AllRegs_AVX_SaveList;
if (HasSSE)
return CSR_32_AllRegs_SSE_SaveList;
return CSR_32_AllRegs_SaveList;
}
default:
break;
}
if (Is64Bit) {
bool IsSwiftCC = Subtarget.getTargetLowering()->supportSwiftError() &&
F.getAttributes().hasAttrSomewhere(Attribute::SwiftError);
if (IsSwiftCC)
return IsWin64 ? CSR_Win64_SwiftError_SaveList
: CSR_64_SwiftError_SaveList;
if (IsWin64)
return HasSSE ? CSR_Win64_SaveList : CSR_Win64_NoSSE_SaveList;
if (CallsEHReturn)
return CSR_64EHRet_SaveList;
return CSR_64_SaveList;
}
return CallsEHReturn ? CSR_32EHRet_SaveList : CSR_32_SaveList;
}
const MCPhysReg *X86RegisterInfo::getCalleeSavedRegsViaCopy(
const MachineFunction *MF) const {
assert(MF && "Invalid MachineFunction pointer.");
if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
MF->getInfo<X86MachineFunctionInfo>()->isSplitCSR())
return CSR_64_CXX_TLS_Darwin_ViaCopy_SaveList;
return nullptr;
}
const uint32_t *
X86RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
CallingConv::ID CC) const {
const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
bool HasSSE = Subtarget.hasSSE1();
bool HasAVX = Subtarget.hasAVX();
bool HasAVX512 = Subtarget.hasAVX512();
switch (CC) {
case CallingConv::GHC:
case CallingConv::HiPE:
return CSR_NoRegs_RegMask;
case CallingConv::AnyReg:
if (HasAVX)
return CSR_64_AllRegs_AVX_RegMask;
return CSR_64_AllRegs_RegMask;
case CallingConv::PreserveMost:
return CSR_64_RT_MostRegs_RegMask;
case CallingConv::PreserveAll:
if (HasAVX)
return CSR_64_RT_AllRegs_AVX_RegMask;
return CSR_64_RT_AllRegs_RegMask;
case CallingConv::CXX_FAST_TLS:
if (Is64Bit)
return CSR_64_TLS_Darwin_RegMask;
break;
case CallingConv::Intel_OCL_BI: {
if (HasAVX512 && IsWin64)
return CSR_Win64_Intel_OCL_BI_AVX512_RegMask;
if (HasAVX512 && Is64Bit)
return CSR_64_Intel_OCL_BI_AVX512_RegMask;
if (HasAVX && IsWin64)
return CSR_Win64_Intel_OCL_BI_AVX_RegMask;
if (HasAVX && Is64Bit)
return CSR_64_Intel_OCL_BI_AVX_RegMask;
if (!HasAVX && !IsWin64 && Is64Bit)
return CSR_64_Intel_OCL_BI_RegMask;
break;
}
case CallingConv::HHVM:
return CSR_64_HHVM_RegMask;
case CallingConv::X86_RegCall:
if (Is64Bit) {
if (IsWin64) {
return (HasSSE ? CSR_Win64_RegCall_RegMask :
CSR_Win64_RegCall_NoSSE_RegMask);
} else {
return (HasSSE ? CSR_SysV64_RegCall_RegMask :
CSR_SysV64_RegCall_NoSSE_RegMask);
}
} else {
return (HasSSE ? CSR_32_RegCall_RegMask :
CSR_32_RegCall_NoSSE_RegMask);
}
case CallingConv::Cold:
if (Is64Bit)
return CSR_64_MostRegs_RegMask;
break;
case CallingConv::Win64:
return CSR_Win64_RegMask;
case CallingConv::X86_64_SysV:
return CSR_64_RegMask;
case CallingConv::X86_INTR:
if (Is64Bit) {
if (HasAVX512)
return CSR_64_AllRegs_AVX512_RegMask;
if (HasAVX)
return CSR_64_AllRegs_AVX_RegMask;
if (HasSSE)
return CSR_64_AllRegs_RegMask;
return CSR_64_AllRegs_NoSSE_RegMask;
} else {
if (HasAVX512)
return CSR_32_AllRegs_AVX512_RegMask;
if (HasAVX)
return CSR_32_AllRegs_AVX_RegMask;
if (HasSSE)
return CSR_32_AllRegs_SSE_RegMask;
return CSR_32_AllRegs_RegMask;
}
default:
break;
}
// Unlike getCalleeSavedRegs(), we don't have MMI so we can't check
// callsEHReturn().
if (Is64Bit) {
const Function &F = MF.getFunction();
bool IsSwiftCC = Subtarget.getTargetLowering()->supportSwiftError() &&
F.getAttributes().hasAttrSomewhere(Attribute::SwiftError);
if (IsSwiftCC)
return IsWin64 ? CSR_Win64_SwiftError_RegMask : CSR_64_SwiftError_RegMask;
return IsWin64 ? CSR_Win64_RegMask : CSR_64_RegMask;
}
return CSR_32_RegMask;
}
const uint32_t*
X86RegisterInfo::getNoPreservedMask() const {
return CSR_NoRegs_RegMask;
}
const uint32_t *X86RegisterInfo::getDarwinTLSCallPreservedMask() const {
return CSR_64_TLS_Darwin_RegMask;
}
BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
BitVector Reserved(getNumRegs());
const X86FrameLowering *TFI = getFrameLowering(MF);
// Set the floating point control register as reserved.
Reserved.set(X86::FPCW);
// Set the stack-pointer register and its aliases as reserved.
for (MCSubRegIterator I(X86::RSP, this, /*IncludeSelf=*/true); I.isValid();
++I)
Reserved.set(*I);
// Set the Shadow Stack Pointer as reserved.
Reserved.set(X86::SSP);
// Set the instruction pointer register and its aliases as reserved.
for (MCSubRegIterator I(X86::RIP, this, /*IncludeSelf=*/true); I.isValid();
++I)
Reserved.set(*I);
// Set the frame-pointer register and its aliases as reserved if needed.
if (TFI->hasFP(MF)) {
for (MCSubRegIterator I(X86::RBP, this, /*IncludeSelf=*/true); I.isValid();
++I)
Reserved.set(*I);
}
// Set the base-pointer register and its aliases as reserved if needed.
if (hasBasePointer(MF)) {
CallingConv::ID CC = MF.getFunction().getCallingConv();
const uint32_t *RegMask = getCallPreservedMask(MF, CC);
if (MachineOperand::clobbersPhysReg(RegMask, getBaseRegister()))
report_fatal_error(
"Stack realignment in presence of dynamic allocas is not supported with "
"this calling convention.");
unsigned BasePtr = getX86SubSuperRegister(getBaseRegister(), 64);
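// Reserving the 64-bit super-register lets the sub-register walk below mark
// every alias (e.g. RBX, EBX, BX and BL).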
for (MCSubRegIterator I(BasePtr, this, /*IncludeSelf=*/true);
I.isValid(); ++I)
Reserved.set(*I);
}
// Mark the segment registers as reserved.
Reserved.set(X86::CS);
Reserved.set(X86::SS);
Reserved.set(X86::DS);
Reserved.set(X86::ES);
Reserved.set(X86::FS);
Reserved.set(X86::GS);
// Mark the floating point stack registers as reserved.
for (unsigned n = 0; n != 8; ++n)
Reserved.set(X86::ST0 + n);
// Reserve the registers that only exist in 64-bit mode.
if (!Is64Bit) {
// These 8-bit registers are part of the x86-64 extension even though their
// super-registers are the old 32-bit ones.
Reserved.set(X86::SIL);
Reserved.set(X86::DIL);
Reserved.set(X86::BPL);
Reserved.set(X86::SPL);
Reserved.set(X86::SIH);
Reserved.set(X86::DIH);
Reserved.set(X86::BPH);
Reserved.set(X86::SPH);
for (unsigned n = 0; n != 8; ++n) {
// R8, R9, ...
for (MCRegAliasIterator AI(X86::R8 + n, this, true); AI.isValid(); ++AI)
Reserved.set(*AI);
// XMM8, XMM9, ...
for (MCRegAliasIterator AI(X86::XMM8 + n, this, true); AI.isValid(); ++AI)
Reserved.set(*AI);
}
}
if (!Is64Bit || !MF.getSubtarget<X86Subtarget>().hasAVX512()) {
for (unsigned n = 16; n != 32; ++n) {
for (MCRegAliasIterator AI(X86::XMM0 + n, this, true); AI.isValid(); ++AI)
Reserved.set(*AI);
}
}
assert(checkAllSuperRegsMarked(Reserved,
{X86::SIL, X86::DIL, X86::BPL, X86::SPL,
X86::SIH, X86::DIH, X86::BPH, X86::SPH}));
return Reserved;
}
void X86RegisterInfo::adjustStackMapLiveOutMask(uint32_t *Mask) const {
// Check if the EFLAGS register is marked as live-out. This shouldn't happen,
// because the calling convention defines the EFLAGS register as NOT
// preserved.
//
// Unfortunately, EFLAGS shows up as live-out after branch folding. Add an
// assert to track this, and clear the register afterwards to avoid
// unnecessary crashes during release builds.
assert(!(Mask[X86::EFLAGS / 32] & (1U << (X86::EFLAGS % 32))) &&
"EFLAGS are not live-out from a patchpoint.");
// Also clean other registers that don't need preserving (IP).
for (auto Reg : {X86::EFLAGS, X86::RIP, X86::EIP, X86::IP})
Mask[Reg / 32] &= ~(1U << (Reg % 32));
}
//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//
static bool CantUseSP(const MachineFrameInfo &MFI) {
return MFI.hasVarSizedObjects() || MFI.hasOpaqueSPAdjustment();
}
bool X86RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
const MachineFrameInfo &MFI = MF.getFrameInfo();
if (!EnableBasePointer)
return false;
// When we need stack realignment, we can't address the stack from the frame
// pointer. When we have dynamic allocas or stack-adjusting inline asm, we
// can't address variables from the stack pointer. MS inline asm can
// reference locals while also adjusting the stack pointer. When we can't
// use both the SP and the FP, we need a separate base pointer register.
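// A typical example is a function that over-aligns a local (forcing stack
// realignment, so the FP can't address locals) and also calls alloca (so the
// SP keeps moving).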
bool CantUseFP = needsStackRealignment(MF);
return CantUseFP && CantUseSP(MFI);
}
bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
if (!TargetRegisterInfo::canRealignStack(MF))
return false;
const MachineFrameInfo &MFI = MF.getFrameInfo();
const MachineRegisterInfo *MRI = &MF.getRegInfo();
// Stack realignment requires a frame pointer. If we already started
// register allocation with frame pointer elimination, it is too late now.
if (!MRI->canReserveReg(FramePtr))
return false;
// If a base pointer is necessary, check that it isn't too late to reserve
// it.
if (CantUseSP(MFI))
return MRI->canReserveReg(BasePtr);
return true;
}
bool X86RegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
unsigned Reg, int &FrameIdx) const {
// Since X86 defines assignCalleeSavedSpillSlots, which always returns true,
// this function is neither used nor tested.
llvm_unreachable("Unused function on X86. Otherwise need a test case.");
}
// tryOptimizeLEAtoMOV - helper function that tries to replace a LEA instruction
// of the form 'lea (%esp), %ebx' --> 'mov %esp, %ebx'.
// TODO: In this case we should really be trying first to entirely eliminate
// this instruction, which is a plain copy.
static bool tryOptimizeLEAtoMOV(MachineBasicBlock::iterator II) {
MachineInstr &MI = *II;
unsigned Opc = II->getOpcode();
// Check if this is a LEA of the form 'lea (%esp), %ebx'
if ((Opc != X86::LEA32r && Opc != X86::LEA64r && Opc != X86::LEA64_32r) ||
MI.getOperand(2).getImm() != 1 ||
MI.getOperand(3).getReg() != X86::NoRegister ||
MI.getOperand(4).getImm() != 0 ||
MI.getOperand(5).getReg() != X86::NoRegister)
return false;
unsigned BasePtr = MI.getOperand(1).getReg();
// In X32 mode, ensure the base pointer is a 32-bit operand, so the LEA will
// be replaced with a 32-bit-operand MOV, which implicitly zeroes the upper
// 32 bits of the super-register.
if (Opc == X86::LEA64_32r)
BasePtr = getX86SubSuperRegister(BasePtr, 32);
unsigned NewDestReg = MI.getOperand(0).getReg();
const X86InstrInfo *TII =
MI.getParent()->getParent()->getSubtarget<X86Subtarget>().getInstrInfo();
TII->copyPhysReg(*MI.getParent(), II, MI.getDebugLoc(), NewDestReg, BasePtr,
MI.getOperand(1).isKill());
MI.eraseFromParent();
return true;
}
+static bool isFuncletReturnInstr(MachineInstr &MI) {
+ switch (MI.getOpcode()) {
+ case X86::CATCHRET:
+ case X86::CLEANUPRET:
+ return true;
+ default:
+ return false;
+ }
+ llvm_unreachable("impossible");
+}
+
void
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
int SPAdj, unsigned FIOperandNum,
RegScavenger *RS) const {
MachineInstr &MI = *II;
- MachineFunction &MF = *MI.getParent()->getParent();
+ MachineBasicBlock &MBB = *MI.getParent();
+ MachineFunction &MF = *MBB.getParent();
+ MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
+ bool IsEHFuncletEpilogue = MBBI == MBB.end() ? false
+ : isFuncletReturnInstr(*MBBI);
const X86FrameLowering *TFI = getFrameLowering(MF);
int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
// Determine base register and offset.
int FIOffset;
unsigned BasePtr;
if (MI.isReturn()) {
assert((!needsStackRealignment(MF) ||
MF.getFrameInfo().isFixedObjectIndex(FrameIndex)) &&
"Return instruction can only reference SP relative frame objects");
FIOffset = TFI->getFrameIndexReferenceSP(MF, FrameIndex, BasePtr, 0);
+ } else if (TFI->Is64Bit && (MBB.isEHFuncletEntry() || IsEHFuncletEpilogue)) {
+ FIOffset = TFI->getWin64EHFrameIndexRef(MF, FrameIndex, BasePtr);
} else {
FIOffset = TFI->getFrameIndexReference(MF, FrameIndex, BasePtr);
}
// LOCAL_ESCAPE uses a single offset, with no register. It only works in the
// simple FP case, and doesn't work with stack realignment. On 32-bit, the
// offset is from the traditional base pointer location. On 64-bit, the
// offset is from the SP at the end of the prologue, not the FP location. This
// matches the behavior of llvm.frameaddress.
unsigned Opc = MI.getOpcode();
if (Opc == TargetOpcode::LOCAL_ESCAPE) {
MachineOperand &FI = MI.getOperand(FIOperandNum);
FI.ChangeToImmediate(FIOffset);
return;
}
// For LEA64_32r, when BasePtr is 32 bits (X32) we can use the full-size
// 64-bit register as the source operand; the semantics are the same and the
// destination is 32 bits. This saves one byte per lea since the 0x67 prefix
// is avoided.
// Don't change BasePtr since it is used later for stack adjustment.
unsigned MachineBasePtr = BasePtr;
if (Opc == X86::LEA64_32r && X86::GR32RegClass.contains(BasePtr))
MachineBasePtr = getX86SubSuperRegister(BasePtr, 64);
// This must be part of a four operand memory reference. Replace the
// FrameIndex with base register. Add an offset to the offset.
MI.getOperand(FIOperandNum).ChangeToRegister(MachineBasePtr, false);
if (BasePtr == StackPtr)
FIOffset += SPAdj;
// The frame index format for stackmaps and patchpoints is different from the
// X86 format. It only has a FI and an offset.
if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
assert(BasePtr == FramePtr && "Expected the FP as base register");
int64_t Offset = MI.getOperand(FIOperandNum + 1).getImm() + FIOffset;
MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
return;
}
if (MI.getOperand(FIOperandNum+3).isImm()) {
// Offset is a 32-bit integer.
int Imm = (int)(MI.getOperand(FIOperandNum + 3).getImm());
int Offset = FIOffset + Imm;
assert((!Is64Bit || isInt<32>((long long)FIOffset + Imm)) &&
"Requesting 64-bit offset in 32-bit immediate!");
if (Offset != 0 || !tryOptimizeLEAtoMOV(II))
MI.getOperand(FIOperandNum + 3).ChangeToImmediate(Offset);
} else {
// Offset is symbolic. This is extremely rare.
uint64_t Offset = FIOffset +
(uint64_t)MI.getOperand(FIOperandNum+3).getOffset();
MI.getOperand(FIOperandNum + 3).setOffset(Offset);
}
}
Register X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
const X86FrameLowering *TFI = getFrameLowering(MF);
return TFI->hasFP(MF) ? FramePtr : StackPtr;
}
unsigned
X86RegisterInfo::getPtrSizedFrameRegister(const MachineFunction &MF) const {
const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
unsigned FrameReg = getFrameRegister(MF);
if (Subtarget.isTarget64BitILP32())
FrameReg = getX86SubSuperRegister(FrameReg, 32);
return FrameReg;
}
unsigned
X86RegisterInfo::getPtrSizedStackRegister(const MachineFunction &MF) const {
const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
unsigned StackReg = getStackRegister();
if (Subtarget.isTarget64BitILP32())
StackReg = getX86SubSuperRegister(StackReg, 32);
return StackReg;
}
Index: stable/12/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
===================================================================
--- stable/12/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp (revision 356465)
@@ -1,3315 +1,3315 @@
//===- AddressSanitizer.cpp - memory error detector -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
// Details of the algorithm:
// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Instrumentation/AddressSanitizer.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Comdat.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Utils/ASanStackFrameLayout.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iomanip>
#include <limits>
#include <memory>
#include <sstream>
#include <string>
#include <tuple>
using namespace llvm;
#define DEBUG_TYPE "asan"
static const uint64_t kDefaultShadowScale = 3;
static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
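// (For reference, the ASan mapping is Shadow = (Mem >> kDefaultShadowScale) +
// Offset; with the default scale of 3, one shadow byte describes 8 bytes of
// application memory.)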
static const uint64_t kDynamicShadowSentinel =
std::numeric_limits<uint64_t>::max();
static const uint64_t kSmallX86_64ShadowOffsetBase = 0x7FFFFFFF; // < 2G.
static const uint64_t kSmallX86_64ShadowOffsetAlignMask = ~0xFFFULL;
static const uint64_t kLinuxKasan_ShadowOffset64 = 0xdffffc0000000000;
static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 44;
static const uint64_t kSystemZ_ShadowOffset64 = 1ULL << 52;
static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 37;
static const uint64_t kAArch64_ShadowOffset64 = 1ULL << 36;
static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
static const uint64_t kNetBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kNetBSD_ShadowOffset64 = 1ULL << 46;
static const uint64_t kNetBSDKasan_ShadowOffset64 = 0xdfff900000000000;
static const uint64_t kPS4CPU_ShadowOffset64 = 1ULL << 40;
static const uint64_t kWindowsShadowOffset32 = 3ULL << 28;
static const uint64_t kEmscriptenShadowOffset = 0;
static const uint64_t kMyriadShadowScale = 5;
static const uint64_t kMyriadMemoryOffset32 = 0x80000000ULL;
static const uint64_t kMyriadMemorySize32 = 0x20000000ULL;
static const uint64_t kMyriadTagShift = 29;
static const uint64_t kMyriadDDRTag = 4;
static const uint64_t kMyriadCacheBitMask32 = 0x40000000ULL;
// The shadow memory space is dynamically allocated.
static const uint64_t kWindowsShadowOffset64 = kDynamicShadowSentinel;
static const size_t kMinStackMallocSize = 1 << 6; // 64B
static const size_t kMaxStackMallocSize = 1 << 16; // 64K
static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3;
static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E;
static const char *const kAsanModuleCtorName = "asan.module_ctor";
static const char *const kAsanModuleDtorName = "asan.module_dtor";
static const uint64_t kAsanCtorAndDtorPriority = 1;
static const char *const kAsanReportErrorTemplate = "__asan_report_";
static const char *const kAsanRegisterGlobalsName = "__asan_register_globals";
static const char *const kAsanUnregisterGlobalsName =
"__asan_unregister_globals";
static const char *const kAsanRegisterImageGlobalsName =
"__asan_register_image_globals";
static const char *const kAsanUnregisterImageGlobalsName =
"__asan_unregister_image_globals";
static const char *const kAsanRegisterElfGlobalsName =
"__asan_register_elf_globals";
static const char *const kAsanUnregisterElfGlobalsName =
"__asan_unregister_elf_globals";
static const char *const kAsanPoisonGlobalsName = "__asan_before_dynamic_init";
static const char *const kAsanUnpoisonGlobalsName = "__asan_after_dynamic_init";
static const char *const kAsanInitName = "__asan_init";
static const char *const kAsanVersionCheckNamePrefix =
"__asan_version_mismatch_check_v";
static const char *const kAsanPtrCmp = "__sanitizer_ptr_cmp";
static const char *const kAsanPtrSub = "__sanitizer_ptr_sub";
static const char *const kAsanHandleNoReturnName = "__asan_handle_no_return";
static const int kMaxAsanStackMallocSizeClass = 10;
static const char *const kAsanStackMallocNameTemplate = "__asan_stack_malloc_";
static const char *const kAsanStackFreeNameTemplate = "__asan_stack_free_";
static const char *const kAsanGenPrefix = "___asan_gen_";
static const char *const kODRGenPrefix = "__odr_asan_gen_";
static const char *const kSanCovGenPrefix = "__sancov_gen_";
static const char *const kAsanSetShadowPrefix = "__asan_set_shadow_";
static const char *const kAsanPoisonStackMemoryName =
"__asan_poison_stack_memory";
static const char *const kAsanUnpoisonStackMemoryName =
"__asan_unpoison_stack_memory";
// ASan version script has __asan_* wildcard. Triple underscore prevents a
// linker (gold) warning about attempting to export a local symbol.
static const char *const kAsanGlobalsRegisteredFlagName =
"___asan_globals_registered";
static const char *const kAsanOptionDetectUseAfterReturn =
"__asan_option_detect_stack_use_after_return";
static const char *const kAsanShadowMemoryDynamicAddress =
"__asan_shadow_memory_dynamic_address";
static const char *const kAsanAllocaPoison = "__asan_alloca_poison";
static const char *const kAsanAllocasUnpoison = "__asan_allocas_unpoison";
// Access sizes are powers of two: 1, 2, 4, 8, 16.
static const size_t kNumberOfAccessSizes = 5;
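// For reference, these five sizes select among the per-size runtime
// callbacks, e.g. __asan_load1, __asan_load2, __asan_load4, __asan_load8
// and __asan_load16 (with a matching __asan_storeN family for writes).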
static const unsigned kAllocaRzSize = 32;
// Command-line flags.
static cl::opt<bool> ClEnableKasan(
"asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"),
cl::Hidden, cl::init(false));
static cl::opt<bool> ClRecover(
"asan-recover",
cl::desc("Enable recovery mode (continue-after-error)."),
cl::Hidden, cl::init(false));
// This flag may need to be replaced with -f[no-]asan-reads.
static cl::opt<bool> ClInstrumentReads("asan-instrument-reads",
cl::desc("instrument read instructions"),
cl::Hidden, cl::init(true));
static cl::opt<bool> ClInstrumentWrites(
"asan-instrument-writes", cl::desc("instrument write instructions"),
cl::Hidden, cl::init(true));
static cl::opt<bool> ClInstrumentAtomics(
"asan-instrument-atomics",
cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
cl::init(true));
static cl::opt<bool> ClAlwaysSlowPath(
"asan-always-slow-path",
cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden,
cl::init(false));
static cl::opt<bool> ClForceDynamicShadow(
"asan-force-dynamic-shadow",
cl::desc("Load shadow address into a local variable for each function"),
cl::Hidden, cl::init(false));
static cl::opt<bool>
ClWithIfunc("asan-with-ifunc",
cl::desc("Access dynamic shadow through an ifunc global on "
"platforms that support this"),
cl::Hidden, cl::init(true));
static cl::opt<bool> ClWithIfuncSuppressRemat(
"asan-with-ifunc-suppress-remat",
cl::desc("Suppress rematerialization of dynamic shadow address by passing "
"it through inline asm in prologue."),
cl::Hidden, cl::init(true));
// This flag limits the number of instructions to be instrumented
// in any given BB. Normally, this should be set to unlimited (INT_MAX),
// but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporarily
// set it to 10000.
static cl::opt<int> ClMaxInsnsToInstrumentPerBB(
"asan-max-ins-per-bb", cl::init(10000),
cl::desc("maximal number of instructions to instrument in any given BB"),
cl::Hidden);
// This flag may need to be replaced with -f[no]asan-stack.
static cl::opt<bool> ClStack("asan-stack", cl::desc("Handle stack memory"),
cl::Hidden, cl::init(true));
static cl::opt<uint32_t> ClMaxInlinePoisoningSize(
"asan-max-inline-poisoning-size",
cl::desc(
"Inline shadow poisoning for blocks up to the given size in bytes."),
cl::Hidden, cl::init(64));
static cl::opt<bool> ClUseAfterReturn("asan-use-after-return",
cl::desc("Check stack-use-after-return"),
cl::Hidden, cl::init(true));
static cl::opt<bool> ClRedzoneByvalArgs("asan-redzone-byval-args",
cl::desc("Create redzones for byval "
"arguments (extra copy "
"required)"), cl::Hidden,
cl::init(true));
static cl::opt<bool> ClUseAfterScope("asan-use-after-scope",
cl::desc("Check stack-use-after-scope"),
cl::Hidden, cl::init(false));
// This flag may need to be replaced with -f[no]asan-globals.
static cl::opt<bool> ClGlobals("asan-globals",
cl::desc("Handle global objects"), cl::Hidden,
cl::init(true));
static cl::opt<bool> ClInitializers("asan-initialization-order",
cl::desc("Handle C++ initializer order"),
cl::Hidden, cl::init(true));
static cl::opt<bool> ClInvalidPointerPairs(
"asan-detect-invalid-pointer-pair",
cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden,
cl::init(false));
static cl::opt<bool> ClInvalidPointerCmp(
"asan-detect-invalid-pointer-cmp",
cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden,
cl::init(false));
static cl::opt<bool> ClInvalidPointerSub(
"asan-detect-invalid-pointer-sub",
cl::desc("Instrument - operations with pointer operands"), cl::Hidden,
cl::init(false));
static cl::opt<unsigned> ClRealignStack(
"asan-realign-stack",
cl::desc("Realign stack to the value of this flag (power of two)"),
cl::Hidden, cl::init(32));
static cl::opt<int> ClInstrumentationWithCallsThreshold(
"asan-instrumentation-with-call-threshold",
cl::desc(
"If the function being instrumented contains more than "
"this number of memory accesses, use callbacks instead of "
"inline checks (-1 means never use callbacks)."),
cl::Hidden, cl::init(7000));
static cl::opt<std::string> ClMemoryAccessCallbackPrefix(
"asan-memory-access-callback-prefix",
cl::desc("Prefix for memory access callbacks"), cl::Hidden,
cl::init("__asan_"));
static cl::opt<bool>
ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas",
cl::desc("instrument dynamic allocas"),
cl::Hidden, cl::init(true));
static cl::opt<bool> ClSkipPromotableAllocas(
"asan-skip-promotable-allocas",
cl::desc("Do not instrument promotable allocas"), cl::Hidden,
cl::init(true));
// These flags allow changing the shadow mapping.
// The shadow mapping looks like
// Shadow = (Mem >> scale) + offset
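// Illustrative example, assuming the common scale of 3 and the 0x7fff8000
// offset used by the small x86-64 layout: the 8-byte application granule
// at 0x10000 maps to the single shadow byte at
// (0x10000 >> 3) + 0x7fff8000 = 0x7fffa000.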
static cl::opt<int> ClMappingScale("asan-mapping-scale",
cl::desc("scale of asan shadow mapping"),
cl::Hidden, cl::init(0));
static cl::opt<uint64_t>
ClMappingOffset("asan-mapping-offset",
cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"),
cl::Hidden, cl::init(0));
// Optimization flags. Not user visible, used mostly for testing
// and benchmarking the tool.
static cl::opt<bool> ClOpt("asan-opt", cl::desc("Optimize instrumentation"),
cl::Hidden, cl::init(true));
static cl::opt<bool> ClOptSameTemp(
"asan-opt-same-temp", cl::desc("Instrument the same temp just once"),
cl::Hidden, cl::init(true));
static cl::opt<bool> ClOptGlobals("asan-opt-globals",
cl::desc("Don't instrument scalar globals"),
cl::Hidden, cl::init(true));
static cl::opt<bool> ClOptStack(
"asan-opt-stack", cl::desc("Don't instrument scalar stack variables"),
cl::Hidden, cl::init(false));
static cl::opt<bool> ClDynamicAllocaStack(
"asan-stack-dynamic-alloca",
cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden,
cl::init(true));
static cl::opt<uint32_t> ClForceExperiment(
"asan-force-experiment",
cl::desc("Force optimization experiment (for testing)"), cl::Hidden,
cl::init(0));
static cl::opt<bool>
ClUsePrivateAlias("asan-use-private-alias",
cl::desc("Use private aliases for global variables"),
cl::Hidden, cl::init(false));
static cl::opt<bool>
ClUseOdrIndicator("asan-use-odr-indicator",
cl::desc("Use odr indicators to improve ODR reporting"),
cl::Hidden, cl::init(false));
static cl::opt<bool>
ClUseGlobalsGC("asan-globals-live-support",
cl::desc("Use linker features to support dead "
"code stripping of globals"),
cl::Hidden, cl::init(true));
// This is on by default even though there is a bug in gold:
// https://sourceware.org/bugzilla/show_bug.cgi?id=19002
static cl::opt<bool>
ClWithComdat("asan-with-comdat",
cl::desc("Place ASan constructors in comdat sections"),
cl::Hidden, cl::init(true));
// Debug flags.
static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden,
cl::init(0));
static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"),
cl::Hidden, cl::init(0));
static cl::opt<std::string> ClDebugFunc("asan-debug-func", cl::Hidden,
cl::desc("Debug func"));
static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"),
cl::Hidden, cl::init(-1));
static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug max inst"),
cl::Hidden, cl::init(-1));
STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumOptimizedAccessesToGlobalVar,
"Number of optimized accesses to global vars");
STATISTIC(NumOptimizedAccessesToStackVar,
"Number of optimized accesses to stack vars");
namespace {
/// This struct defines the shadow mapping using the rule:
/// shadow = (mem >> Scale) ADD-or-OR Offset.
/// If InGlobal is true, then
/// extern char __asan_shadow[];
/// shadow = (mem >> Scale) + &__asan_shadow
struct ShadowMapping {
int Scale;
uint64_t Offset;
bool OrShadowOffset;
bool InGlobal;
};
} // end anonymous namespace
static ShadowMapping getShadowMapping(Triple &TargetTriple, int LongSize,
bool IsKasan) {
bool IsAndroid = TargetTriple.isAndroid();
bool IsIOS = TargetTriple.isiOS() || TargetTriple.isWatchOS();
bool IsFreeBSD = TargetTriple.isOSFreeBSD();
bool IsNetBSD = TargetTriple.isOSNetBSD();
bool IsPS4CPU = TargetTriple.isPS4CPU();
bool IsLinux = TargetTriple.isOSLinux();
bool IsPPC64 = TargetTriple.getArch() == Triple::ppc64 ||
TargetTriple.getArch() == Triple::ppc64le;
bool IsSystemZ = TargetTriple.getArch() == Triple::systemz;
bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
bool IsMIPS32 = TargetTriple.isMIPS32();
bool IsMIPS64 = TargetTriple.isMIPS64();
bool IsArmOrThumb = TargetTriple.isARM() || TargetTriple.isThumb();
bool IsAArch64 = TargetTriple.getArch() == Triple::aarch64;
bool IsWindows = TargetTriple.isOSWindows();
bool IsFuchsia = TargetTriple.isOSFuchsia();
bool IsMyriad = TargetTriple.getVendor() == llvm::Triple::Myriad;
bool IsEmscripten = TargetTriple.isOSEmscripten();
ShadowMapping Mapping;
Mapping.Scale = IsMyriad ? kMyriadShadowScale : kDefaultShadowScale;
if (ClMappingScale.getNumOccurrences() > 0) {
Mapping.Scale = ClMappingScale;
}
if (LongSize == 32) {
if (IsAndroid)
Mapping.Offset = kDynamicShadowSentinel;
else if (IsMIPS32)
Mapping.Offset = kMIPS32_ShadowOffset32;
else if (IsFreeBSD)
Mapping.Offset = kFreeBSD_ShadowOffset32;
else if (IsNetBSD)
Mapping.Offset = kNetBSD_ShadowOffset32;
else if (IsIOS)
Mapping.Offset = kDynamicShadowSentinel;
else if (IsWindows)
Mapping.Offset = kWindowsShadowOffset32;
else if (IsEmscripten)
Mapping.Offset = kEmscriptenShadowOffset;
else if (IsMyriad) {
uint64_t ShadowOffset = (kMyriadMemoryOffset32 + kMyriadMemorySize32 -
(kMyriadMemorySize32 >> Mapping.Scale));
Mapping.Offset = ShadowOffset - (kMyriadMemoryOffset32 >> Mapping.Scale);
}
else
Mapping.Offset = kDefaultShadowOffset32;
} else { // LongSize == 64
// Fuchsia is always PIE, which means that the beginning of the address
// space is always available.
if (IsFuchsia)
Mapping.Offset = 0;
else if (IsPPC64)
Mapping.Offset = kPPC64_ShadowOffset64;
else if (IsSystemZ)
Mapping.Offset = kSystemZ_ShadowOffset64;
else if (IsFreeBSD && !IsMIPS64)
Mapping.Offset = kFreeBSD_ShadowOffset64;
else if (IsNetBSD) {
if (IsKasan)
Mapping.Offset = kNetBSDKasan_ShadowOffset64;
else
Mapping.Offset = kNetBSD_ShadowOffset64;
} else if (IsPS4CPU)
Mapping.Offset = kPS4CPU_ShadowOffset64;
else if (IsLinux && IsX86_64) {
if (IsKasan)
Mapping.Offset = kLinuxKasan_ShadowOffset64;
else
Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
(kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
} else if (IsWindows && IsX86_64) {
Mapping.Offset = kWindowsShadowOffset64;
} else if (IsMIPS64)
Mapping.Offset = kMIPS64_ShadowOffset64;
else if (IsIOS)
Mapping.Offset = kDynamicShadowSentinel;
else if (IsAArch64)
Mapping.Offset = kAArch64_ShadowOffset64;
else
Mapping.Offset = kDefaultShadowOffset64;
}
if (ClForceDynamicShadow) {
Mapping.Offset = kDynamicShadowSentinel;
}
if (ClMappingOffset.getNumOccurrences() > 0) {
Mapping.Offset = ClMappingOffset;
}
// OR-ing the shadow offset is more efficient (at least on x86) if the offset
// is a power of two, but on ppc64 we have to use add since the shadow
// offset is not necessarily 1/8-th of the address space. On SystemZ,
// we could OR the constant in a single instruction, but it's more
// efficient to load it once and use indexed addressing.
Mapping.OrShadowOffset = !IsAArch64 && !IsPPC64 && !IsSystemZ && !IsPS4CPU &&
!(Mapping.Offset & (Mapping.Offset - 1)) &&
Mapping.Offset != kDynamicShadowSentinel;
bool IsAndroidWithIfuncSupport =
IsAndroid && !TargetTriple.isAndroidVersionLT(21);
Mapping.InGlobal = ClWithIfunc && IsAndroidWithIfuncSupport && IsArmOrThumb;
return Mapping;
}
static size_t RedzoneSizeForScale(int MappingScale) {
// Redzone used for stack and globals is at least 32 bytes.
// For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively.
return std::max(32U, 1U << MappingScale);
}
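// Worked values for the rule above: scale 3 (the default) gives
// max(32, 1 << 3) = 32 bytes, scale 6 gives 64 bytes and scale 7 gives
// 128 bytes, matching the comment inside RedzoneSizeForScale.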
namespace {
/// Module analysis for getting various metadata about the module.
class ASanGlobalsMetadataWrapperPass : public ModulePass {
public:
static char ID;
ASanGlobalsMetadataWrapperPass() : ModulePass(ID) {
initializeASanGlobalsMetadataWrapperPassPass(
*PassRegistry::getPassRegistry());
}
bool runOnModule(Module &M) override {
GlobalsMD = GlobalsMetadata(M);
return false;
}
StringRef getPassName() const override {
return "ASanGlobalsMetadataWrapperPass";
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
}
GlobalsMetadata &getGlobalsMD() { return GlobalsMD; }
private:
GlobalsMetadata GlobalsMD;
};
char ASanGlobalsMetadataWrapperPass::ID = 0;
/// AddressSanitizer: instrument the code in module to find memory bugs.
struct AddressSanitizer {
AddressSanitizer(Module &M, GlobalsMetadata &GlobalsMD,
bool CompileKernel = false, bool Recover = false,
bool UseAfterScope = false)
: UseAfterScope(UseAfterScope || ClUseAfterScope), GlobalsMD(GlobalsMD) {
this->Recover = ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover;
this->CompileKernel =
ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan : CompileKernel;
C = &(M.getContext());
LongSize = M.getDataLayout().getPointerSizeInBits();
IntptrTy = Type::getIntNTy(*C, LongSize);
TargetTriple = Triple(M.getTargetTriple());
Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);
}
uint64_t getAllocaSizeInBytes(const AllocaInst &AI) const {
uint64_t ArraySize = 1;
if (AI.isArrayAllocation()) {
const ConstantInt *CI = dyn_cast<ConstantInt>(AI.getArraySize());
assert(CI && "non-constant array size");
ArraySize = CI->getZExtValue();
}
Type *Ty = AI.getAllocatedType();
uint64_t SizeInBytes =
AI.getModule()->getDataLayout().getTypeAllocSize(Ty);
return SizeInBytes * ArraySize;
}
/// Check if we want (and can) handle this alloca.
bool isInterestingAlloca(const AllocaInst &AI);
/// If it is an interesting memory access, return the PointerOperand
/// and set IsWrite/Alignment. Otherwise return nullptr.
/// MaybeMask is an output parameter for the mask Value, if we're looking at a
/// masked load/store.
Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite,
uint64_t *TypeSize, unsigned *Alignment,
Value **MaybeMask = nullptr);
void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis, Instruction *I,
bool UseCalls, const DataLayout &DL);
void instrumentPointerComparisonOrSubtraction(Instruction *I);
void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
Value *Addr, uint32_t TypeSize, bool IsWrite,
Value *SizeArgument, bool UseCalls, uint32_t Exp);
void instrumentUnusualSizeOrAlignment(Instruction *I,
Instruction *InsertBefore, Value *Addr,
uint32_t TypeSize, bool IsWrite,
Value *SizeArgument, bool UseCalls,
uint32_t Exp);
Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
Value *ShadowValue, uint32_t TypeSize);
Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
bool IsWrite, size_t AccessSizeIndex,
Value *SizeArgument, uint32_t Exp);
void instrumentMemIntrinsic(MemIntrinsic *MI);
Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI);
bool maybeInsertAsanInitAtFunctionEntry(Function &F);
void maybeInsertDynamicShadowAtFunctionEntry(Function &F);
void markEscapedLocalAllocas(Function &F);
private:
friend struct FunctionStackPoisoner;
void initializeCallbacks(Module &M);
bool LooksLikeCodeInBug11395(Instruction *I);
bool GlobalIsLinkerInitialized(GlobalVariable *G);
bool isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, Value *Addr,
uint64_t TypeSize) const;
/// Helper to cleanup per-function state.
struct FunctionStateRAII {
AddressSanitizer *Pass;
FunctionStateRAII(AddressSanitizer *Pass) : Pass(Pass) {
assert(Pass->ProcessedAllocas.empty() &&
"last pass forgot to clear cache");
assert(!Pass->LocalDynamicShadow);
}
~FunctionStateRAII() {
Pass->LocalDynamicShadow = nullptr;
Pass->ProcessedAllocas.clear();
}
};
LLVMContext *C;
Triple TargetTriple;
int LongSize;
bool CompileKernel;
bool Recover;
bool UseAfterScope;
Type *IntptrTy;
ShadowMapping Mapping;
FunctionCallee AsanHandleNoReturnFunc;
FunctionCallee AsanPtrCmpFunction, AsanPtrSubFunction;
Constant *AsanShadowGlobal;
// These arrays are indexed by AccessIsWrite, Experiment and log2(AccessSize).
FunctionCallee AsanErrorCallback[2][2][kNumberOfAccessSizes];
FunctionCallee AsanMemoryAccessCallback[2][2][kNumberOfAccessSizes];
// These arrays are indexed by AccessIsWrite and Experiment.
FunctionCallee AsanErrorCallbackSized[2][2];
FunctionCallee AsanMemoryAccessCallbackSized[2][2];
FunctionCallee AsanMemmove, AsanMemcpy, AsanMemset;
InlineAsm *EmptyAsm;
Value *LocalDynamicShadow = nullptr;
- GlobalsMetadata GlobalsMD;
+ const GlobalsMetadata &GlobalsMD;
DenseMap<const AllocaInst *, bool> ProcessedAllocas;
};
class AddressSanitizerLegacyPass : public FunctionPass {
public:
static char ID;
explicit AddressSanitizerLegacyPass(bool CompileKernel = false,
bool Recover = false,
bool UseAfterScope = false)
: FunctionPass(ID), CompileKernel(CompileKernel), Recover(Recover),
UseAfterScope(UseAfterScope) {
initializeAddressSanitizerLegacyPassPass(*PassRegistry::getPassRegistry());
}
StringRef getPassName() const override {
return "AddressSanitizerFunctionPass";
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<ASanGlobalsMetadataWrapperPass>();
AU.addRequired<TargetLibraryInfoWrapperPass>();
}
bool runOnFunction(Function &F) override {
GlobalsMetadata &GlobalsMD =
getAnalysis<ASanGlobalsMetadataWrapperPass>().getGlobalsMD();
const TargetLibraryInfo *TLI =
&getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
AddressSanitizer ASan(*F.getParent(), GlobalsMD, CompileKernel, Recover,
UseAfterScope);
return ASan.instrumentFunction(F, TLI);
}
private:
bool CompileKernel;
bool Recover;
bool UseAfterScope;
};
class ModuleAddressSanitizer {
public:
ModuleAddressSanitizer(Module &M, GlobalsMetadata &GlobalsMD,
bool CompileKernel = false, bool Recover = false,
bool UseGlobalsGC = true, bool UseOdrIndicator = false)
: GlobalsMD(GlobalsMD), UseGlobalsGC(UseGlobalsGC && ClUseGlobalsGC),
// Enable aliases as they should have no downside with ODR indicators.
UsePrivateAlias(UseOdrIndicator || ClUsePrivateAlias),
UseOdrIndicator(UseOdrIndicator || ClUseOdrIndicator),
// Not a typo: ClWithComdat is almost completely pointless without
// ClUseGlobalsGC (because then it only works on modules without
// globals, which are rare); it is a prerequisite for ClUseGlobalsGC;
// and both suffer from gold PR19002 for which UseGlobalsGC constructor
// argument is designed as a workaround. Therefore, disable both
// ClWithComdat and ClUseGlobalsGC unless the frontend says it's ok to
// do globals-gc.
UseCtorComdat(UseGlobalsGC && ClWithComdat) {
this->Recover = ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover;
this->CompileKernel =
ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan : CompileKernel;
C = &(M.getContext());
int LongSize = M.getDataLayout().getPointerSizeInBits();
IntptrTy = Type::getIntNTy(*C, LongSize);
TargetTriple = Triple(M.getTargetTriple());
Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);
}
bool instrumentModule(Module &);
private:
void initializeCallbacks(Module &M);
bool InstrumentGlobals(IRBuilder<> &IRB, Module &M, bool *CtorComdat);
void InstrumentGlobalsCOFF(IRBuilder<> &IRB, Module &M,
ArrayRef<GlobalVariable *> ExtendedGlobals,
ArrayRef<Constant *> MetadataInitializers);
void InstrumentGlobalsELF(IRBuilder<> &IRB, Module &M,
ArrayRef<GlobalVariable *> ExtendedGlobals,
ArrayRef<Constant *> MetadataInitializers,
const std::string &UniqueModuleId);
void InstrumentGlobalsMachO(IRBuilder<> &IRB, Module &M,
ArrayRef<GlobalVariable *> ExtendedGlobals,
ArrayRef<Constant *> MetadataInitializers);
void
InstrumentGlobalsWithMetadataArray(IRBuilder<> &IRB, Module &M,
ArrayRef<GlobalVariable *> ExtendedGlobals,
ArrayRef<Constant *> MetadataInitializers);
GlobalVariable *CreateMetadataGlobal(Module &M, Constant *Initializer,
StringRef OriginalName);
void SetComdatForGlobalMetadata(GlobalVariable *G, GlobalVariable *Metadata,
StringRef InternalSuffix);
IRBuilder<> CreateAsanModuleDtor(Module &M);
bool ShouldInstrumentGlobal(GlobalVariable *G);
bool ShouldUseMachOGlobalsSection() const;
StringRef getGlobalMetadataSection() const;
void poisonOneInitializer(Function &GlobalInit, GlobalValue *ModuleName);
void createInitializerPoisonCalls(Module &M, GlobalValue *ModuleName);
size_t MinRedzoneSizeForGlobal() const {
return RedzoneSizeForScale(Mapping.Scale);
}
int GetAsanVersion(const Module &M) const;
- GlobalsMetadata GlobalsMD;
+ const GlobalsMetadata &GlobalsMD;
bool CompileKernel;
bool Recover;
bool UseGlobalsGC;
bool UsePrivateAlias;
bool UseOdrIndicator;
bool UseCtorComdat;
Type *IntptrTy;
LLVMContext *C;
Triple TargetTriple;
ShadowMapping Mapping;
FunctionCallee AsanPoisonGlobals;
FunctionCallee AsanUnpoisonGlobals;
FunctionCallee AsanRegisterGlobals;
FunctionCallee AsanUnregisterGlobals;
FunctionCallee AsanRegisterImageGlobals;
FunctionCallee AsanUnregisterImageGlobals;
FunctionCallee AsanRegisterElfGlobals;
FunctionCallee AsanUnregisterElfGlobals;
Function *AsanCtorFunction = nullptr;
Function *AsanDtorFunction = nullptr;
};
class ModuleAddressSanitizerLegacyPass : public ModulePass {
public:
static char ID;
explicit ModuleAddressSanitizerLegacyPass(bool CompileKernel = false,
bool Recover = false,
bool UseGlobalGC = true,
bool UseOdrIndicator = false)
: ModulePass(ID), CompileKernel(CompileKernel), Recover(Recover),
UseGlobalGC(UseGlobalGC), UseOdrIndicator(UseOdrIndicator) {
initializeModuleAddressSanitizerLegacyPassPass(
*PassRegistry::getPassRegistry());
}
StringRef getPassName() const override { return "ModuleAddressSanitizer"; }
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<ASanGlobalsMetadataWrapperPass>();
}
bool runOnModule(Module &M) override {
GlobalsMetadata &GlobalsMD =
getAnalysis<ASanGlobalsMetadataWrapperPass>().getGlobalsMD();
ModuleAddressSanitizer ASanModule(M, GlobalsMD, CompileKernel, Recover,
UseGlobalGC, UseOdrIndicator);
return ASanModule.instrumentModule(M);
}
private:
bool CompileKernel;
bool Recover;
bool UseGlobalGC;
bool UseOdrIndicator;
};
// Stack poisoning does not play well with exception handling.
// When an exception is thrown, we essentially bypass the code
// that unpoisons the stack. This is why the run-time library has
// to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire
// stack in the interceptor. This, however, does not work inside the
// function that actually catches the exception, most likely because the
// compiler hoists the load of the shadow value somewhere too high.
// This causes ASan to report a non-existent bug on 453.povray.
// It sounds like an LLVM bug.
struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
Function &F;
AddressSanitizer &ASan;
DIBuilder DIB;
LLVMContext *C;
Type *IntptrTy;
Type *IntptrPtrTy;
ShadowMapping Mapping;
SmallVector<AllocaInst *, 16> AllocaVec;
SmallVector<AllocaInst *, 16> StaticAllocasToMoveUp;
SmallVector<Instruction *, 8> RetVec;
unsigned StackAlignment;
FunctionCallee AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1],
AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1];
FunctionCallee AsanSetShadowFunc[0x100] = {};
FunctionCallee AsanPoisonStackMemoryFunc, AsanUnpoisonStackMemoryFunc;
FunctionCallee AsanAllocaPoisonFunc, AsanAllocasUnpoisonFunc;
// Stores a place and arguments of poisoning/unpoisoning call for alloca.
struct AllocaPoisonCall {
IntrinsicInst *InsBefore;
AllocaInst *AI;
uint64_t Size;
bool DoPoison;
};
SmallVector<AllocaPoisonCall, 8> DynamicAllocaPoisonCallVec;
SmallVector<AllocaPoisonCall, 8> StaticAllocaPoisonCallVec;
bool HasUntracedLifetimeIntrinsic = false;
SmallVector<AllocaInst *, 1> DynamicAllocaVec;
SmallVector<IntrinsicInst *, 1> StackRestoreVec;
AllocaInst *DynamicAllocaLayout = nullptr;
IntrinsicInst *LocalEscapeCall = nullptr;
// Maps a Value to the AllocaInst from which the Value originated.
using AllocaForValueMapTy = DenseMap<Value *, AllocaInst *>;
AllocaForValueMapTy AllocaForValue;
bool HasNonEmptyInlineAsm = false;
bool HasReturnsTwiceCall = false;
std::unique_ptr<CallInst> EmptyInlineAsm;
FunctionStackPoisoner(Function &F, AddressSanitizer &ASan)
: F(F), ASan(ASan), DIB(*F.getParent(), /*AllowUnresolved*/ false),
C(ASan.C), IntptrTy(ASan.IntptrTy),
IntptrPtrTy(PointerType::get(IntptrTy, 0)), Mapping(ASan.Mapping),
StackAlignment(1 << Mapping.Scale),
EmptyInlineAsm(CallInst::Create(ASan.EmptyAsm)) {}
bool runOnFunction() {
if (!ClStack) return false;
if (ClRedzoneByvalArgs)
copyArgsPassedByValToAllocas();
// Collect alloca, ret, lifetime instructions etc.
for (BasicBlock *BB : depth_first(&F.getEntryBlock())) visit(*BB);
if (AllocaVec.empty() && DynamicAllocaVec.empty()) return false;
initializeCallbacks(*F.getParent());
if (HasUntracedLifetimeIntrinsic) {
// If there are lifetime intrinsics which couldn't be traced back to an
// alloca, we may not know exactly when a variable enters scope, and
// therefore should "fail safe" by not poisoning them.
StaticAllocaPoisonCallVec.clear();
DynamicAllocaPoisonCallVec.clear();
}
processDynamicAllocas();
processStaticAllocas();
if (ClDebugStack) {
LLVM_DEBUG(dbgs() << F);
}
return true;
}
// Arguments marked with the "byval" attribute are implicitly copied without
// using an alloca instruction. To produce redzones for those arguments, we
// copy them a second time into memory allocated with an alloca instruction.
void copyArgsPassedByValToAllocas();
// Finds all Alloca instructions and puts
// poisoned red zones around all of them.
// Then unpoisons everything before the function returns.
void processStaticAllocas();
void processDynamicAllocas();
void createDynamicAllocasInitStorage();
// ----------------------- Visitors.
/// Collect all Ret instructions.
void visitReturnInst(ReturnInst &RI) { RetVec.push_back(&RI); }
/// Collect all Resume instructions.
void visitResumeInst(ResumeInst &RI) { RetVec.push_back(&RI); }
/// Collect all CleanupReturnInst instructions.
void visitCleanupReturnInst(CleanupReturnInst &CRI) { RetVec.push_back(&CRI); }
void unpoisonDynamicAllocasBeforeInst(Instruction *InstBefore,
Value *SavedStack) {
IRBuilder<> IRB(InstBefore);
Value *DynamicAreaPtr = IRB.CreatePtrToInt(SavedStack, IntptrTy);
// When we insert __asan_allocas_unpoison before @llvm.stackrestore, we
// need to adjust extracted SP to compute the address of the most recent
// alloca. We have a special @llvm.get.dynamic.area.offset intrinsic for
// this purpose.
if (!isa<ReturnInst>(InstBefore)) {
Function *DynamicAreaOffsetFunc = Intrinsic::getDeclaration(
InstBefore->getModule(), Intrinsic::get_dynamic_area_offset,
{IntptrTy});
Value *DynamicAreaOffset = IRB.CreateCall(DynamicAreaOffsetFunc, {});
DynamicAreaPtr = IRB.CreateAdd(IRB.CreatePtrToInt(SavedStack, IntptrTy),
DynamicAreaOffset);
}
IRB.CreateCall(
AsanAllocasUnpoisonFunc,
{IRB.CreateLoad(IntptrTy, DynamicAllocaLayout), DynamicAreaPtr});
}
// Unpoison dynamic allocas redzones.
void unpoisonDynamicAllocas() {
for (auto &Ret : RetVec)
unpoisonDynamicAllocasBeforeInst(Ret, DynamicAllocaLayout);
for (auto &StackRestoreInst : StackRestoreVec)
unpoisonDynamicAllocasBeforeInst(StackRestoreInst,
StackRestoreInst->getOperand(0));
}
// Deploy redzones around a dynamic alloca call and poison them. To do this,
// we replace the call with another one with changed parameters and replace
// all of its uses with the new address, so
// addr = alloca type, old_size, align
// is replaced by
// new_size = (old_size + additional_size) * sizeof(type)
// tmp = alloca i8, new_size, max(align, 32)
// addr = tmp + 32 (first 32 bytes are for the left redzone).
// additional_size is added so that the new allocation contains not only the
// requested memory but also the left, partial and right redzones.
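// Illustrative instance of the rewrite above (sizes assumed): for
// %addr = alloca i32, i64 %n, align 4
// the pass conceptually emits
// %new_size = <%n * 4 plus left/partial/right redzone bytes>
// %tmp = alloca i8, i64 %new_size, align 32
// %addr = %tmp + 32 ; skip the 32-byte left redzone
// and rewrites every use of the old address to point into the
// redzone-protected allocation.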
void handleDynamicAllocaCall(AllocaInst *AI);
/// Collect Alloca instructions we want (and can) handle.
void visitAllocaInst(AllocaInst &AI) {
if (!ASan.isInterestingAlloca(AI)) {
if (AI.isStaticAlloca()) {
// Skip over allocas that are present *before* the first instrumented
// alloca, we don't want to move those around.
if (AllocaVec.empty())
return;
StaticAllocasToMoveUp.push_back(&AI);
}
return;
}
StackAlignment = std::max(StackAlignment, AI.getAlignment());
if (!AI.isStaticAlloca())
DynamicAllocaVec.push_back(&AI);
else
AllocaVec.push_back(&AI);
}
/// Collect lifetime intrinsic calls to check for use-after-scope
/// errors.
void visitIntrinsicInst(IntrinsicInst &II) {
Intrinsic::ID ID = II.getIntrinsicID();
if (ID == Intrinsic::stackrestore) StackRestoreVec.push_back(&II);
if (ID == Intrinsic::localescape) LocalEscapeCall = &II;
if (!ASan.UseAfterScope)
return;
if (!II.isLifetimeStartOrEnd())
return;
// Found lifetime intrinsic, add ASan instrumentation if necessary.
ConstantInt *Size = dyn_cast<ConstantInt>(II.getArgOperand(0));
// If the size argument is undefined (-1), don't do anything.
if (Size->isMinusOne()) return;
// Check that size doesn't saturate uint64_t and can
// be stored in IntptrTy.
const uint64_t SizeValue = Size->getValue().getLimitedValue();
if (SizeValue == ~0ULL ||
!ConstantInt::isValueValidForType(IntptrTy, SizeValue))
return;
// Find alloca instruction that corresponds to llvm.lifetime argument.
AllocaInst *AI =
llvm::findAllocaForValue(II.getArgOperand(1), AllocaForValue);
if (!AI) {
HasUntracedLifetimeIntrinsic = true;
return;
}
// We're interested only in allocas we can handle.
if (!ASan.isInterestingAlloca(*AI))
return;
bool DoPoison = (ID == Intrinsic::lifetime_end);
AllocaPoisonCall APC = {&II, AI, SizeValue, DoPoison};
if (AI->isStaticAlloca())
StaticAllocaPoisonCallVec.push_back(APC);
else if (ClInstrumentDynamicAllocas)
DynamicAllocaPoisonCallVec.push_back(APC);
}
void visitCallSite(CallSite CS) {
Instruction *I = CS.getInstruction();
if (CallInst *CI = dyn_cast<CallInst>(I)) {
HasNonEmptyInlineAsm |= CI->isInlineAsm() &&
!CI->isIdenticalTo(EmptyInlineAsm.get()) &&
I != ASan.LocalDynamicShadow;
HasReturnsTwiceCall |= CI->canReturnTwice();
}
}
// ---------------------- Helpers.
void initializeCallbacks(Module &M);
// Copies bytes from ShadowBytes into shadow memory for indexes where
// ShadowMask is not zero. If ShadowMask[i] is zero, we assume that
// ShadowBytes[i] is constantly zero and doesn't need to be overwritten.
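// Illustrative example (values assumed): with ShadowMask = {1, 0, 1} and
// ShadowBytes = {0xf1, 0x00, 0xf2}, bytes 0xf1 and 0xf2 are stored at
// shadow offsets 0 and 2, while offset 1 is skipped because its mask entry
// is zero and its shadow byte is assumed to already be zero.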
void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
IRBuilder<> &IRB, Value *ShadowBase);
void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
size_t Begin, size_t End, IRBuilder<> &IRB,
Value *ShadowBase);
void copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
ArrayRef<uint8_t> ShadowBytes, size_t Begin,
size_t End, IRBuilder<> &IRB, Value *ShadowBase);
void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> &IRB, bool DoPoison);
Value *createAllocaForLayout(IRBuilder<> &IRB, const ASanStackFrameLayout &L,
bool Dynamic);
PHINode *createPHI(IRBuilder<> &IRB, Value *Cond, Value *ValueIfTrue,
Instruction *ThenTerm, Value *ValueIfFalse);
};
} // end anonymous namespace
void LocationMetadata::parse(MDNode *MDN) {
assert(MDN->getNumOperands() == 3);
MDString *DIFilename = cast<MDString>(MDN->getOperand(0));
Filename = DIFilename->getString();
LineNo = mdconst::extract<ConstantInt>(MDN->getOperand(1))->getLimitedValue();
ColumnNo =
mdconst::extract<ConstantInt>(MDN->getOperand(2))->getLimitedValue();
}
// FIXME: It would be cleaner to attach the relevant metadata directly to the
// globals we want to sanitize and read it on each pass over a function,
// instead of reading module-level metadata up front.
GlobalsMetadata::GlobalsMetadata(Module &M) {
NamedMDNode *Globals = M.getNamedMetadata("llvm.asan.globals");
if (!Globals)
return;
for (auto MDN : Globals->operands()) {
// Metadata node contains the global and the fields of "Entry".
assert(MDN->getNumOperands() == 5);
auto *V = mdconst::extract_or_null<Constant>(MDN->getOperand(0));
// The optimizer may optimize away a global entirely.
if (!V)
continue;
auto *StrippedV = V->stripPointerCasts();
auto *GV = dyn_cast<GlobalVariable>(StrippedV);
if (!GV)
continue;
// We can already have an entry for GV if it was merged with another
// global.
Entry &E = Entries[GV];
if (auto *Loc = cast_or_null<MDNode>(MDN->getOperand(1)))
E.SourceLoc.parse(Loc);
if (auto *Name = cast_or_null<MDString>(MDN->getOperand(2)))
E.Name = Name->getString();
ConstantInt *IsDynInit = mdconst::extract<ConstantInt>(MDN->getOperand(3));
E.IsDynInit |= IsDynInit->isOne();
ConstantInt *IsBlacklisted =
mdconst::extract<ConstantInt>(MDN->getOperand(4));
E.IsBlacklisted |= IsBlacklisted->isOne();
}
}
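// For reference, an entry of !llvm.asan.globals as consumed above has the
// following shape (illustrative):
// !0 = !{i32* @g, !1, !"g", i1 false, i1 false}
// !1 = !{!"source.cpp", i32 12, i32 3}
// i.e. the global, its source location (parsed by LocationMetadata::parse),
// its name, IsDynInit and IsBlacklisted, matching the five operands asserted
// on above.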
AnalysisKey ASanGlobalsMetadataAnalysis::Key;
GlobalsMetadata ASanGlobalsMetadataAnalysis::run(Module &M,
ModuleAnalysisManager &AM) {
return GlobalsMetadata(M);
}
AddressSanitizerPass::AddressSanitizerPass(bool CompileKernel, bool Recover,
bool UseAfterScope)
: CompileKernel(CompileKernel), Recover(Recover),
UseAfterScope(UseAfterScope) {}
PreservedAnalyses AddressSanitizerPass::run(Function &F,
AnalysisManager<Function> &AM) {
auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
auto &MAM = MAMProxy.getManager();
Module &M = *F.getParent();
if (auto *R = MAM.getCachedResult<ASanGlobalsMetadataAnalysis>(M)) {
const TargetLibraryInfo *TLI = &AM.getResult<TargetLibraryAnalysis>(F);
AddressSanitizer Sanitizer(M, *R, CompileKernel, Recover, UseAfterScope);
if (Sanitizer.instrumentFunction(F, TLI))
return PreservedAnalyses::none();
return PreservedAnalyses::all();
}
report_fatal_error(
"The ASanGlobalsMetadataAnalysis is required to run before "
"AddressSanitizer can run");
return PreservedAnalyses::all();
}
ModuleAddressSanitizerPass::ModuleAddressSanitizerPass(bool CompileKernel,
bool Recover,
bool UseGlobalGC,
bool UseOdrIndicator)
: CompileKernel(CompileKernel), Recover(Recover), UseGlobalGC(UseGlobalGC),
UseOdrIndicator(UseOdrIndicator) {}
PreservedAnalyses ModuleAddressSanitizerPass::run(Module &M,
AnalysisManager<Module> &AM) {
GlobalsMetadata &GlobalsMD = AM.getResult<ASanGlobalsMetadataAnalysis>(M);
ModuleAddressSanitizer Sanitizer(M, GlobalsMD, CompileKernel, Recover,
UseGlobalGC, UseOdrIndicator);
if (Sanitizer.instrumentModule(M))
return PreservedAnalyses::none();
return PreservedAnalyses::all();
}
INITIALIZE_PASS(ASanGlobalsMetadataWrapperPass, "asan-globals-md",
"Read metadata to mark which globals should be instrumented "
"when running ASan.",
false, true)
char AddressSanitizerLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(
AddressSanitizerLegacyPass, "asan",
"AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false,
false)
INITIALIZE_PASS_DEPENDENCY(ASanGlobalsMetadataWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(
AddressSanitizerLegacyPass, "asan",
"AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false,
false)
FunctionPass *llvm::createAddressSanitizerFunctionPass(bool CompileKernel,
bool Recover,
bool UseAfterScope) {
assert(!CompileKernel || Recover);
return new AddressSanitizerLegacyPass(CompileKernel, Recover, UseAfterScope);
}
char ModuleAddressSanitizerLegacyPass::ID = 0;
INITIALIZE_PASS(
ModuleAddressSanitizerLegacyPass, "asan-module",
"AddressSanitizer: detects use-after-free and out-of-bounds bugs."
"ModulePass",
false, false)
ModulePass *llvm::createModuleAddressSanitizerLegacyPassPass(
bool CompileKernel, bool Recover, bool UseGlobalsGC, bool UseOdrIndicator) {
assert(!CompileKernel || Recover);
return new ModuleAddressSanitizerLegacyPass(CompileKernel, Recover,
UseGlobalsGC, UseOdrIndicator);
}
static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
size_t Res = countTrailingZeros(TypeSize / 8);
assert(Res < kNumberOfAccessSizes);
return Res;
}
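// Worked example: a 64-bit access has TypeSize == 64, so
// countTrailingZeros(64 / 8) == countTrailingZeros(8) == 3, selecting
// index 3, i.e. the 8-byte callbacks such as __asan_load8.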
/// Create a global describing a source location.
static GlobalVariable *createPrivateGlobalForSourceLoc(Module &M,
LocationMetadata MD) {
Constant *LocData[] = {
createPrivateGlobalForString(M, MD.Filename, true, kAsanGenPrefix),
ConstantInt::get(Type::getInt32Ty(M.getContext()), MD.LineNo),
ConstantInt::get(Type::getInt32Ty(M.getContext()), MD.ColumnNo),
};
auto LocStruct = ConstantStruct::getAnon(LocData);
auto GV = new GlobalVariable(M, LocStruct->getType(), true,
GlobalValue::PrivateLinkage, LocStruct,
kAsanGenPrefix);
GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
return GV;
}
/// Check if \p G has been created by a trusted compiler pass.
static bool GlobalWasGeneratedByCompiler(GlobalVariable *G) {
// Do not instrument @llvm.global_ctors, @llvm.used, etc.
if (G->getName().startswith("llvm."))
return true;
// Do not instrument asan globals.
if (G->getName().startswith(kAsanGenPrefix) ||
G->getName().startswith(kSanCovGenPrefix) ||
G->getName().startswith(kODRGenPrefix))
return true;
// Do not instrument gcov counter arrays.
if (G->getName() == "__llvm_gcov_ctr")
return true;
return false;
}
Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
// Shadow >> scale
Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
if (Mapping.Offset == 0) return Shadow;
// (Shadow >> scale) | offset
Value *ShadowBase;
if (LocalDynamicShadow)
ShadowBase = LocalDynamicShadow;
else
ShadowBase = ConstantInt::get(IntptrTy, Mapping.Offset);
if (Mapping.OrShadowOffset)
return IRB.CreateOr(Shadow, ShadowBase);
else
return IRB.CreateAdd(Shadow, ShadowBase);
}
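// Illustrative lowering (assuming the small x86-64 mapping with scale 3 and
// offset 0x7fff8000): for an address in %0, memToShadow emits roughly
// %1 = lshr i64 %0, 3
// %2 = add i64 %1, 2147450880 ; 0x7fff8000
// with `or` instead of `add` when OrShadowOffset is set, e.g. for the
// power-of-two FreeBSD offset of 1 << 46.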
// Instrument memset/memmove/memcpy
void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
IRBuilder<> IRB(MI);
if (isa<MemTransferInst>(MI)) {
IRB.CreateCall(
isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
{IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
} else if (isa<MemSetInst>(MI)) {
IRB.CreateCall(
AsanMemset,
{IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
}
MI->eraseFromParent();
}
/// Check if we want (and can) handle this alloca.
bool AddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
auto PreviouslySeenAllocaInfo = ProcessedAllocas.find(&AI);
if (PreviouslySeenAllocaInfo != ProcessedAllocas.end())
return PreviouslySeenAllocaInfo->getSecond();
bool IsInteresting =
(AI.getAllocatedType()->isSized() &&
// alloca() may be called with 0 size, ignore it.
((!AI.isStaticAlloca()) || getAllocaSizeInBytes(AI) > 0) &&
// We are only interested in allocas not promotable to registers.
// Promotable allocas are common under -O0.
(!ClSkipPromotableAllocas || !isAllocaPromotable(&AI)) &&
// inalloca allocas are not treated as static, and we don't want
// dynamic alloca instrumentation for them as well.
!AI.isUsedWithInAlloca() &&
// swifterror allocas are register promoted by ISel
!AI.isSwiftError());
ProcessedAllocas[&AI] = IsInteresting;
return IsInteresting;
}
Value *AddressSanitizer::isInterestingMemoryAccess(Instruction *I,
bool *IsWrite,
uint64_t *TypeSize,
unsigned *Alignment,
Value **MaybeMask) {
// Skip memory accesses inserted by another instrumentation.
if (I->getMetadata("nosanitize")) return nullptr;
// Do not instrument the load fetching the dynamic shadow address.
if (LocalDynamicShadow == I)
return nullptr;
Value *PtrOperand = nullptr;
const DataLayout &DL = I->getModule()->getDataLayout();
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
if (!ClInstrumentReads) return nullptr;
*IsWrite = false;
*TypeSize = DL.getTypeStoreSizeInBits(LI->getType());
*Alignment = LI->getAlignment();
PtrOperand = LI->getPointerOperand();
} else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
if (!ClInstrumentWrites) return nullptr;
*IsWrite = true;
*TypeSize = DL.getTypeStoreSizeInBits(SI->getValueOperand()->getType());
*Alignment = SI->getAlignment();
PtrOperand = SI->getPointerOperand();
} else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
if (!ClInstrumentAtomics) return nullptr;
*IsWrite = true;
*TypeSize = DL.getTypeStoreSizeInBits(RMW->getValOperand()->getType());
*Alignment = 0;
PtrOperand = RMW->getPointerOperand();
} else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
if (!ClInstrumentAtomics) return nullptr;
*IsWrite = true;
*TypeSize = DL.getTypeStoreSizeInBits(XCHG->getCompareOperand()->getType());
*Alignment = 0;
PtrOperand = XCHG->getPointerOperand();
} else if (auto CI = dyn_cast<CallInst>(I)) {
auto *F = dyn_cast<Function>(CI->getCalledValue());
if (F && (F->getName().startswith("llvm.masked.load.") ||
F->getName().startswith("llvm.masked.store."))) {
unsigned OpOffset = 0;
if (F->getName().startswith("llvm.masked.store.")) {
if (!ClInstrumentWrites)
return nullptr;
// Masked store has an initial operand for the value.
OpOffset = 1;
*IsWrite = true;
} else {
if (!ClInstrumentReads)
return nullptr;
*IsWrite = false;
}
auto BasePtr = CI->getOperand(0 + OpOffset);
auto Ty = cast<PointerType>(BasePtr->getType())->getElementType();
*TypeSize = DL.getTypeStoreSizeInBits(Ty);
if (auto AlignmentConstant =
dyn_cast<ConstantInt>(CI->getOperand(1 + OpOffset)))
*Alignment = (unsigned)AlignmentConstant->getZExtValue();
else
*Alignment = 1; // No alignment guarantees. We probably got Undef
if (MaybeMask)
*MaybeMask = CI->getOperand(2 + OpOffset);
PtrOperand = BasePtr;
}
}
if (PtrOperand) {
// Do not instrument accesses from different address spaces; we cannot deal
// with them.
Type *PtrTy = cast<PointerType>(PtrOperand->getType()->getScalarType());
if (PtrTy->getPointerAddressSpace() != 0)
return nullptr;
// Ignore swifterror addresses.
// swifterror memory addresses are mem2reg promoted by instruction
// selection. As such they cannot have regular uses like an instrumentation
// function and it makes no sense to track them as memory.
if (PtrOperand->isSwiftError())
return nullptr;
}
// Treat memory accesses to promotable allocas as non-interesting since they
// will not cause memory violations. This greatly speeds up the instrumented
// executable at -O0.
if (ClSkipPromotableAllocas)
if (auto AI = dyn_cast_or_null<AllocaInst>(PtrOperand))
return isInterestingAlloca(*AI) ? AI : nullptr;
return PtrOperand;
}
static bool isPointerOperand(Value *V) {
return V->getType()->isPointerTy() || isa<PtrToIntInst>(V);
}
// This is a rough heuristic; it may cause both false positives and
// false negatives. The proper implementation requires cooperation with
// the frontend.
static bool isInterestingPointerComparison(Instruction *I) {
if (ICmpInst *Cmp = dyn_cast<ICmpInst>(I)) {
if (!Cmp->isRelational())
return false;
} else {
return false;
}
return isPointerOperand(I->getOperand(0)) &&
isPointerOperand(I->getOperand(1));
}
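// Illustrative C-level example (hypothetical source): with
// -asan-detect-invalid-pointer-pair, a relational comparison such as
// char *p = ..., *q = ...;
// if (p < q) ...
// is instrumented with a call to __sanitizer_ptr_cmp(p, q), letting the
// runtime flag comparisons of pointers into unrelated objects.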
// This is a rough heuristic; it may cause both false positives and
// false negatives. The proper implementation requires cooperation with
// the frontend.
static bool isInterestingPointerSubtraction(Instruction *I) {
if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
if (BO->getOpcode() != Instruction::Sub)
return false;
} else {
return false;
}
return isPointerOperand(I->getOperand(0)) &&
isPointerOperand(I->getOperand(1));
}
bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) {
// If a global variable does not have dynamic initialization we don't
// have to instrument it. However, if a global does not have an initializer
// at all, we assume it has a dynamic initializer (in another TU).
//
// FIXME: Metadata should be attached directly to the global instead of
// being added to llvm.asan.globals.
return G->hasInitializer() && !GlobalsMD.get(G).IsDynInit;
}
void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
Instruction *I) {
IRBuilder<> IRB(I);
FunctionCallee F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction;
Value *Param[2] = {I->getOperand(0), I->getOperand(1)};
for (Value *&i : Param) {
if (i->getType()->isPointerTy())
i = IRB.CreatePointerCast(i, IntptrTy);
}
IRB.CreateCall(F, Param);
}
static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I,
Instruction *InsertBefore, Value *Addr,
unsigned Alignment, unsigned Granularity,
uint32_t TypeSize, bool IsWrite,
Value *SizeArgument, bool UseCalls,
uint32_t Exp) {
// Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check
// if the data is properly aligned.
if ((TypeSize == 8 || TypeSize == 16 || TypeSize == 32 || TypeSize == 64 ||
TypeSize == 128) &&
(Alignment >= Granularity || Alignment == 0 || Alignment >= TypeSize / 8))
return Pass->instrumentAddress(I, InsertBefore, Addr, TypeSize, IsWrite,
nullptr, UseCalls, Exp);
Pass->instrumentUnusualSizeOrAlignment(I, InsertBefore, Addr, TypeSize,
IsWrite, nullptr, UseCalls, Exp);
}
static void instrumentMaskedLoadOrStore(AddressSanitizer *Pass,
const DataLayout &DL, Type *IntptrTy,
Value *Mask, Instruction *I,
Value *Addr, unsigned Alignment,
unsigned Granularity, uint32_t TypeSize,
bool IsWrite, Value *SizeArgument,
bool UseCalls, uint32_t Exp) {
auto *VTy = cast<PointerType>(Addr->getType())->getElementType();
uint64_t ElemTypeSize = DL.getTypeStoreSizeInBits(VTy->getScalarType());
unsigned Num = VTy->getVectorNumElements();
auto Zero = ConstantInt::get(IntptrTy, 0);
for (unsigned Idx = 0; Idx < Num; ++Idx) {
Value *InstrumentedAddress = nullptr;
Instruction *InsertBefore = I;
if (auto *Vector = dyn_cast<ConstantVector>(Mask)) {
// dyn_cast as we might get UndefValue
if (auto *Masked = dyn_cast<ConstantInt>(Vector->getOperand(Idx))) {
if (Masked->isZero())
// Mask is constant false, so no instrumentation needed.
continue;
// If we have a true or undef value, fall through to doInstrumentAddress
// with InsertBefore == I
}
} else {
IRBuilder<> IRB(I);
Value *MaskElem = IRB.CreateExtractElement(Mask, Idx);
Instruction *ThenTerm = SplitBlockAndInsertIfThen(MaskElem, I, false);
InsertBefore = ThenTerm;
}
IRBuilder<> IRB(InsertBefore);
InstrumentedAddress =
IRB.CreateGEP(VTy, Addr, {Zero, ConstantInt::get(IntptrTy, Idx)});
doInstrumentAddress(Pass, I, InsertBefore, InstrumentedAddress, Alignment,
Granularity, ElemTypeSize, IsWrite, SizeArgument,
UseCalls, Exp);
}
}
void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
Instruction *I, bool UseCalls,
const DataLayout &DL) {
bool IsWrite = false;
unsigned Alignment = 0;
uint64_t TypeSize = 0;
Value *MaybeMask = nullptr;
Value *Addr =
isInterestingMemoryAccess(I, &IsWrite, &TypeSize, &Alignment, &MaybeMask);
assert(Addr);
// Optimization experiments.
// The experiments can be used to evaluate potential optimizations that remove
// instrumentation (assess false negatives). Instead of completely removing
// some instrumentation, you set Exp to a non-zero value (mask of optimization
// experiments that want to remove instrumentation of this instruction).
// If Exp is non-zero, this pass will emit special calls into the runtime
// (e.g. __asan_report_exp_load1 instead of __asan_report_load1). These calls
// make the runtime terminate the program in a special way (with a different
// exit status). Then you run the new compiler on a buggy corpus, collect
// the special terminations (ideally, you don't see them at all -- no false
// negatives) and make the decision on the optimization.
uint32_t Exp = ClForceExperiment;
if (ClOpt && ClOptGlobals) {
// If initialization order checking is disabled, a simple access to a
// dynamically initialized global is always valid.
GlobalVariable *G = dyn_cast<GlobalVariable>(GetUnderlyingObject(Addr, DL));
if (G && (!ClInitializers || GlobalIsLinkerInitialized(G)) &&
isSafeAccess(ObjSizeVis, Addr, TypeSize)) {
NumOptimizedAccessesToGlobalVar++;
return;
}
}
if (ClOpt && ClOptStack) {
// A direct inbounds access to a stack variable is always valid.
if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) &&
isSafeAccess(ObjSizeVis, Addr, TypeSize)) {
NumOptimizedAccessesToStackVar++;
return;
}
}
if (IsWrite)
NumInstrumentedWrites++;
else
NumInstrumentedReads++;
unsigned Granularity = 1 << Mapping.Scale;
if (MaybeMask) {
instrumentMaskedLoadOrStore(this, DL, IntptrTy, MaybeMask, I, Addr,
Alignment, Granularity, TypeSize, IsWrite,
nullptr, UseCalls, Exp);
} else {
doInstrumentAddress(this, I, I, Addr, Alignment, Granularity, TypeSize,
IsWrite, nullptr, UseCalls, Exp);
}
}
Instruction *AddressSanitizer::generateCrashCode(Instruction *InsertBefore,
Value *Addr, bool IsWrite,
size_t AccessSizeIndex,
Value *SizeArgument,
uint32_t Exp) {
IRBuilder<> IRB(InsertBefore);
Value *ExpVal = Exp == 0 ? nullptr : ConstantInt::get(IRB.getInt32Ty(), Exp);
CallInst *Call = nullptr;
if (SizeArgument) {
if (Exp == 0)
Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][0],
{Addr, SizeArgument});
else
Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][1],
{Addr, SizeArgument, ExpVal});
} else {
if (Exp == 0)
Call =
IRB.CreateCall(AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr);
else
Call = IRB.CreateCall(AsanErrorCallback[IsWrite][1][AccessSizeIndex],
{Addr, ExpVal});
}
// We don't do Call->setDoesNotReturn() because the BB already has
// UnreachableInst at the end.
// This EmptyAsm is required to avoid callback merge.
IRB.CreateCall(EmptyAsm, {});
return Call;
}
Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
Value *ShadowValue,
uint32_t TypeSize) {
size_t Granularity = static_cast<size_t>(1) << Mapping.Scale;
// Addr & (Granularity - 1)
Value *LastAccessedByte =
IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
// (Addr & (Granularity - 1)) + size - 1
if (TypeSize / 8 > 1)
LastAccessedByte = IRB.CreateAdd(
LastAccessedByte, ConstantInt::get(IntptrTy, TypeSize / 8 - 1));
// (uint8_t) ((Addr & (Granularity-1)) + size - 1)
LastAccessedByte =
IRB.CreateIntCast(LastAccessedByte, ShadowValue->getType(), false);
// ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue
return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue);
}
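// Worked example (granularity 8): a shadow value k in (0, 8) means only the
// first k bytes of the 8-byte granule are addressable. For a 1-byte access
// with (Addr & 7) == 5 and shadow value 5, the comparison computes
// 5 + 1 - 1 = 5 >= 5, so the access is (correctly) reported.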
void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
Instruction *InsertBefore, Value *Addr,
uint32_t TypeSize, bool IsWrite,
Value *SizeArgument, bool UseCalls,
uint32_t Exp) {
bool IsMyriad = TargetTriple.getVendor() == llvm::Triple::Myriad;
IRBuilder<> IRB(InsertBefore);
Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);
if (UseCalls) {
if (Exp == 0)
IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex],
AddrLong);
else
IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
{AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)});
return;
}
if (IsMyriad) {
// Strip the cache bit and do range check.
// AddrLong &= ~kMyriadCacheBitMask32
AddrLong = IRB.CreateAnd(AddrLong, ~kMyriadCacheBitMask32);
// Tag = AddrLong >> kMyriadTagShift
Value *Tag = IRB.CreateLShr(AddrLong, kMyriadTagShift);
// Tag == kMyriadDDRTag
Value *TagCheck =
IRB.CreateICmpEQ(Tag, ConstantInt::get(IntptrTy, kMyriadDDRTag));
Instruction *TagCheckTerm =
SplitBlockAndInsertIfThen(TagCheck, InsertBefore, false,
MDBuilder(*C).createBranchWeights(1, 100000));
assert(cast<BranchInst>(TagCheckTerm)->isUnconditional());
IRB.SetInsertPoint(TagCheckTerm);
InsertBefore = TagCheckTerm;
}
Type *ShadowTy =
IntegerType::get(*C, std::max(8U, TypeSize >> Mapping.Scale));
Type *ShadowPtrTy = PointerType::get(ShadowTy, 0);
Value *ShadowPtr = memToShadow(AddrLong, IRB);
Value *CmpVal = Constant::getNullValue(ShadowTy);
Value *ShadowValue =
IRB.CreateLoad(ShadowTy, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy));
Value *Cmp = IRB.CreateICmpNE(ShadowValue, CmpVal);
size_t Granularity = 1ULL << Mapping.Scale;
Instruction *CrashTerm = nullptr;
if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) {
// We use branch weights for the slow path check, to indicate that the slow
// path is rarely taken. This seems to be the case for SPEC benchmarks.
Instruction *CheckTerm = SplitBlockAndInsertIfThen(
Cmp, InsertBefore, false, MDBuilder(*C).createBranchWeights(1, 100000));
assert(cast<BranchInst>(CheckTerm)->isUnconditional());
BasicBlock *NextBB = CheckTerm->getSuccessor(0);
IRB.SetInsertPoint(CheckTerm);
Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeSize);
if (Recover) {
CrashTerm = SplitBlockAndInsertIfThen(Cmp2, CheckTerm, false);
} else {
BasicBlock *CrashBlock =
BasicBlock::Create(*C, "", NextBB->getParent(), NextBB);
CrashTerm = new UnreachableInst(*C, CrashBlock);
BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2);
ReplaceInstWithInst(CheckTerm, NewTerm);
}
} else {
CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, !Recover);
}
Instruction *Crash = generateCrashCode(CrashTerm, AddrLong, IsWrite,
AccessSizeIndex, SizeArgument, Exp);
Crash->setDebugLoc(OrigIns->getDebugLoc());
}
// Instrument unusual size or unusual alignment.
// We cannot do it with a single check, so we do a 1-byte check for the first
// and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
// to report the actual access size.
void AddressSanitizer::instrumentUnusualSizeOrAlignment(
Instruction *I, Instruction *InsertBefore, Value *Addr, uint32_t TypeSize,
bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp) {
IRBuilder<> IRB(InsertBefore);
Value *Size = ConstantInt::get(IntptrTy, TypeSize / 8);
Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
if (UseCalls) {
if (Exp == 0)
IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][0],
{AddrLong, Size});
else
IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][1],
{AddrLong, Size, ConstantInt::get(IRB.getInt32Ty(), Exp)});
} else {
Value *LastByte = IRB.CreateIntToPtr(
IRB.CreateAdd(AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)),
Addr->getType());
instrumentAddress(I, InsertBefore, Addr, 8, IsWrite, Size, false, Exp);
instrumentAddress(I, InsertBefore, LastByte, 8, IsWrite, Size, false, Exp);
}
}
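// Illustrative instance: a 10-byte access at %p is checked as two 1-byte
// accesses, one at %p and one at %p + 9, each carrying Size == 10 so that
// __asan_report_*_n(addr, 10) can report the real access size.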
void ModuleAddressSanitizer::poisonOneInitializer(Function &GlobalInit,
GlobalValue *ModuleName) {
// Set up the arguments to our poison/unpoison functions.
IRBuilder<> IRB(&GlobalInit.front(),
GlobalInit.front().getFirstInsertionPt());
// Add a call to poison all external globals before the given function starts.
Value *ModuleNameAddr = ConstantExpr::getPointerCast(ModuleName, IntptrTy);
IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr);
// Add calls to unpoison all globals before each return instruction.
for (auto &BB : GlobalInit.getBasicBlockList())
if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator()))
CallInst::Create(AsanUnpoisonGlobals, "", RI);
}
void ModuleAddressSanitizer::createInitializerPoisonCalls(
Module &M, GlobalValue *ModuleName) {
GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
if (!GV)
return;
ConstantArray *CA = dyn_cast<ConstantArray>(GV->getInitializer());
if (!CA)
return;
for (Use &OP : CA->operands()) {
if (isa<ConstantAggregateZero>(OP)) continue;
ConstantStruct *CS = cast<ConstantStruct>(OP);
// Must have a function or null ptr.
if (Function *F = dyn_cast<Function>(CS->getOperand(1))) {
if (F->getName() == kAsanModuleCtorName) continue;
ConstantInt *Priority = dyn_cast<ConstantInt>(CS->getOperand(0));
// Don't instrument CTORs that will run before asan.module_ctor.
if (Priority->getLimitedValue() <= kAsanCtorAndDtorPriority) continue;
poisonOneInitializer(*F, ModuleName);
}
}
}
bool ModuleAddressSanitizer::ShouldInstrumentGlobal(GlobalVariable *G) {
Type *Ty = G->getValueType();
LLVM_DEBUG(dbgs() << "GLOBAL: " << *G << "\n");
// FIXME: Metadata should be attached directly to the global instead of
// being added to llvm.asan.globals.
if (GlobalsMD.get(G).IsBlacklisted) return false;
if (!Ty->isSized()) return false;
if (!G->hasInitializer()) return false;
if (GlobalWasGeneratedByCompiler(G)) return false; // Our own globals.
// Two problems with thread-locals:
// - The address of the main thread's copy can't be computed at link-time.
// - Need to poison all copies, not just the main thread's one.
if (G->isThreadLocal()) return false;
// For now, just ignore this Global if the alignment is large.
if (G->getAlignment() > MinRedzoneSizeForGlobal()) return false;
// For non-COFF targets, only instrument globals known to be defined by this
// TU.
// FIXME: We can instrument comdat globals on ELF if we are using the
// GC-friendly metadata scheme.
if (!TargetTriple.isOSBinFormatCOFF()) {
if (!G->hasExactDefinition() || G->hasComdat())
return false;
} else {
// On COFF, don't instrument non-ODR linkages.
if (G->isInterposable())
return false;
}
// If a comdat is present, it must have a selection kind that implies ODR
// semantics: no duplicates, any, or exact match.
if (Comdat *C = G->getComdat()) {
switch (C->getSelectionKind()) {
case Comdat::Any:
case Comdat::ExactMatch:
case Comdat::NoDuplicates:
break;
case Comdat::Largest:
case Comdat::SameSize:
return false;
}
}
if (G->hasSection()) {
StringRef Section = G->getSection();
// Globals from llvm.metadata aren't emitted, do not instrument them.
if (Section == "llvm.metadata") return false;
// Do not instrument globals from special LLVM sections.
if (Section.find("__llvm") != StringRef::npos || Section.find("__LLVM") != StringRef::npos) return false;
// Do not instrument function pointers to initialization and termination
// routines: the dynamic linker will not properly handle redzones.
if (Section.startswith(".preinit_array") ||
Section.startswith(".init_array") ||
Section.startswith(".fini_array")) {
return false;
}
// On COFF, if the section name contains '$', it is highly likely that the
// user is using section sorting to create an array of globals similar to
// the way initialization callbacks are registered in .init_array and
// .CRT$XCU. The ATL also registers things in .ATL$__[azm]. Adding redzones
// to such globals is counterproductive, because the intent is that they
// will form an array, and out-of-bounds accesses are expected.
// See https://github.com/google/sanitizers/issues/305
// and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx
if (TargetTriple.isOSBinFormatCOFF() && Section.contains('$')) {
LLVM_DEBUG(dbgs() << "Ignoring global in sorted section (contains '$'): "
<< *G << "\n");
return false;
}
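// For instance, the MSVC CRT collects C++ initializers by emitting pointers
// into .CRT$XCA .. .CRT$XCZ fragments; the linker sorts the fragments by the
// suffix after '$' into one contiguous array that the startup code walks.
// Redzones inserted between such fragments would corrupt that array.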
if (TargetTriple.isOSBinFormatMachO()) {
StringRef ParsedSegment, ParsedSection;
unsigned TAA = 0, StubSize = 0;
bool TAAParsed;
std::string ErrorCode = MCSectionMachO::ParseSectionSpecifier(
Section, ParsedSegment, ParsedSection, TAA, TAAParsed, StubSize);
assert(ErrorCode.empty() && "Invalid section specifier.");
// Ignore the globals from the __OBJC section. The ObjC runtime assumes
// those conform to /usr/lib/objc/runtime.h, so we can't add redzones to
// them.
if (ParsedSegment == "__OBJC" ||
(ParsedSegment == "__DATA" && ParsedSection.startswith("__objc_"))) {
LLVM_DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n");
return false;
}
// See https://github.com/google/sanitizers/issues/32
// Constant CFString instances are compiled in the following way:
// -- the string buffer is emitted into
// __TEXT,__cstring,cstring_literals
// -- the constant NSConstantString structure referencing that buffer
// is placed into __DATA,__cfstring
// Therefore there's no point in placing redzones into __DATA,__cfstring.
// Moreover, it causes the linker to crash on OS X 10.7
if (ParsedSegment == "__DATA" && ParsedSection == "__cfstring") {
LLVM_DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n");
return false;
}
// The linker merges the contents of cstring_literals and removes the
// trailing zeroes.
if (ParsedSegment == "__TEXT" && (TAA & MachO::S_CSTRING_LITERALS)) {
LLVM_DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n");
return false;
}
}
}
return true;
}
// On Mach-O platforms, we emit global metadata in a separate section of the
// binary in order to allow the linker to properly dead strip. This is only
// supported on recent versions of ld64.
bool ModuleAddressSanitizer::ShouldUseMachOGlobalsSection() const {
if (!TargetTriple.isOSBinFormatMachO())
return false;
if (TargetTriple.isMacOSX() && !TargetTriple.isMacOSXVersionLT(10, 11))
return true;
if (TargetTriple.isiOS() /* or tvOS */ && !TargetTriple.isOSVersionLT(9))
return true;
if (TargetTriple.isWatchOS() && !TargetTriple.isOSVersionLT(2))
return true;
return false;
}
StringRef ModuleAddressSanitizer::getGlobalMetadataSection() const {
switch (TargetTriple.getObjectFormat()) {
case Triple::COFF: return ".ASAN$GL";
case Triple::ELF: return "asan_globals";
case Triple::MachO: return "__DATA,__asan_globals,regular";
default: break;
}
llvm_unreachable("unsupported object format");
}
void ModuleAddressSanitizer::initializeCallbacks(Module &M) {
IRBuilder<> IRB(*C);
// Declare our poisoning and unpoisoning functions.
AsanPoisonGlobals =
M.getOrInsertFunction(kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy);
AsanUnpoisonGlobals =
M.getOrInsertFunction(kAsanUnpoisonGlobalsName, IRB.getVoidTy());
// Declare functions that register/unregister globals.
AsanRegisterGlobals = M.getOrInsertFunction(
kAsanRegisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy);
AsanUnregisterGlobals = M.getOrInsertFunction(
kAsanUnregisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy);
// Declare the functions that find globals in a shared object and then invoke
// the (un)register function on them.
AsanRegisterImageGlobals = M.getOrInsertFunction(
kAsanRegisterImageGlobalsName, IRB.getVoidTy(), IntptrTy);
AsanUnregisterImageGlobals = M.getOrInsertFunction(
kAsanUnregisterImageGlobalsName, IRB.getVoidTy(), IntptrTy);
AsanRegisterElfGlobals =
M.getOrInsertFunction(kAsanRegisterElfGlobalsName, IRB.getVoidTy(),
IntptrTy, IntptrTy, IntptrTy);
AsanUnregisterElfGlobals =
M.getOrInsertFunction(kAsanUnregisterElfGlobalsName, IRB.getVoidTy(),
IntptrTy, IntptrTy, IntptrTy);
}
// Put the metadata and the instrumented global in the same group. This ensures
// that the metadata is discarded if the instrumented global is discarded.
void ModuleAddressSanitizer::SetComdatForGlobalMetadata(
GlobalVariable *G, GlobalVariable *Metadata, StringRef InternalSuffix) {
Module &M = *G->getParent();
Comdat *C = G->getComdat();
if (!C) {
if (!G->hasName()) {
// If G is unnamed, it must be internal. Give it an artificial name
// so we can put it in a comdat.
assert(G->hasLocalLinkage());
G->setName(Twine(kAsanGenPrefix) + "_anon_global");
}
if (!InternalSuffix.empty() && G->hasLocalLinkage()) {
std::string Name = G->getName();
Name += InternalSuffix;
C = M.getOrInsertComdat(Name);
} else {
C = M.getOrInsertComdat(G->getName());
}
// Make this IMAGE_COMDAT_SELECT_NODUPLICATES on COFF. Also upgrade private
// linkage to internal linkage so that a symbol table entry is emitted. This
// is necessary in order to create the comdat group.
if (TargetTriple.isOSBinFormatCOFF()) {
C->setSelectionKind(Comdat::NoDuplicates);
if (G->hasPrivateLinkage())
G->setLinkage(GlobalValue::InternalLinkage);
}
G->setComdat(C);
}
assert(G->hasComdat());
Metadata->setComdat(G->getComdat());
}
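// E.g. an instrumented global @g and its metadata end up in a comdat group
// keyed on "g" (or "g<InternalSuffix>" for local symbols), so a linker that
// discards @g as unreferenced also discards its registration entry instead
// of leaving it dangling.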
// Create a separate metadata global and put it in the appropriate ASan
// global registration section.
GlobalVariable *
ModuleAddressSanitizer::CreateMetadataGlobal(Module &M, Constant *Initializer,
StringRef OriginalName) {
auto Linkage = TargetTriple.isOSBinFormatMachO()
? GlobalVariable::InternalLinkage
: GlobalVariable::PrivateLinkage;
GlobalVariable *Metadata = new GlobalVariable(
M, Initializer->getType(), false, Linkage, Initializer,
Twine("__asan_global_") + GlobalValue::dropLLVMManglingEscape(OriginalName));
Metadata->setSection(getGlobalMetadataSection());
return Metadata;
}
IRBuilder<> ModuleAddressSanitizer::CreateAsanModuleDtor(Module &M) {
AsanDtorFunction =
Function::Create(FunctionType::get(Type::getVoidTy(*C), false),
GlobalValue::InternalLinkage, kAsanModuleDtorName, &M);
BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction);
return IRBuilder<>(ReturnInst::Create(*C, AsanDtorBB));
}
void ModuleAddressSanitizer::InstrumentGlobalsCOFF(
IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
ArrayRef<Constant *> MetadataInitializers) {
assert(ExtendedGlobals.size() == MetadataInitializers.size());
auto &DL = M.getDataLayout();
for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
Constant *Initializer = MetadataInitializers[i];
GlobalVariable *G = ExtendedGlobals[i];
GlobalVariable *Metadata =
CreateMetadataGlobal(M, Initializer, G->getName());
// The MSVC linker always inserts padding when linking incrementally. We
// cope with that by aligning each struct to its size, which must be a power
// of two.
unsigned SizeOfGlobalStruct = DL.getTypeAllocSize(Initializer->getType());
assert(isPowerOf2_32(SizeOfGlobalStruct) &&
"global metadata will not be padded appropriately");
Metadata->setAlignment(SizeOfGlobalStruct);
SetComdatForGlobalMetadata(G, Metadata, "");
}
}
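// Worked example: with 64-bit pointers the metadata struct consists of eight
// pointer-sized fields, i.e. 64 bytes, a power of two; aligning each struct
// to 64 bytes lets the runtime recognize and skip any zero padding the
// incremental linker inserts between entries.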
void ModuleAddressSanitizer::InstrumentGlobalsELF(
IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
ArrayRef<Constant *> MetadataInitializers,
const std::string &UniqueModuleId) {
assert(ExtendedGlobals.size() == MetadataInitializers.size());
SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size());
for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
GlobalVariable *G = ExtendedGlobals[i];
GlobalVariable *Metadata =
CreateMetadataGlobal(M, MetadataInitializers[i], G->getName());
MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G));
Metadata->setMetadata(LLVMContext::MD_associated, MD);
MetadataGlobals[i] = Metadata;
SetComdatForGlobalMetadata(G, Metadata, UniqueModuleId);
}
// Update llvm.compiler.used, adding the new metadata globals. This is
// needed so that during LTO these variables stay alive.
if (!MetadataGlobals.empty())
appendToCompilerUsed(M, MetadataGlobals);
// RegisteredFlag serves two purposes. First, we can pass it to dladdr()
// to look up the loaded image that contains it. Second, we can store in it
// whether registration has already occurred, to prevent duplicate
// registration.
//
// Common linkage ensures that there is only one global per shared library.
GlobalVariable *RegisteredFlag = new GlobalVariable(
M, IntptrTy, false, GlobalVariable::CommonLinkage,
ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);
RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility);
// Create start and stop symbols.
GlobalVariable *StartELFMetadata = new GlobalVariable(
M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
"__start_" + getGlobalMetadataSection());
StartELFMetadata->setVisibility(GlobalVariable::HiddenVisibility);
GlobalVariable *StopELFMetadata = new GlobalVariable(
M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
"__stop_" + getGlobalMetadataSection());
StopELFMetadata->setVisibility(GlobalVariable::HiddenVisibility);
// Create a call to register the globals with the runtime.
IRB.CreateCall(AsanRegisterElfGlobals,
{IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});
// We also need to unregister globals at the end, e.g., when a shared library
// gets closed.
IRBuilder<> IRB_Dtor = CreateAsanModuleDtor(M);
IRB_Dtor.CreateCall(AsanUnregisterElfGlobals,
{IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});
}
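// This relies on the usual ELF linker behavior of synthesizing
// __start_<section> and __stop_<section> symbols for a section named
// "asan_globals"; the runtime iterates the metadata structs in
// [__start_asan_globals, __stop_asan_globals) and uses RegisteredFlag to keep
// the registration idempotent per loaded image.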
void ModuleAddressSanitizer::InstrumentGlobalsMachO(
IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
ArrayRef<Constant *> MetadataInitializers) {
assert(ExtendedGlobals.size() == MetadataInitializers.size());
// On recent Mach-O platforms, use a structure which binds the liveness of
// the global variable to the metadata struct. Keep the list of "liveness"
// globals we create so they can be added to llvm.compiler.used.
StructType *LivenessTy = StructType::get(IntptrTy, IntptrTy);
SmallVector<GlobalValue *, 16> LivenessGlobals(ExtendedGlobals.size());
for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
Constant *Initializer = MetadataInitializers[i];
GlobalVariable *G = ExtendedGlobals[i];
GlobalVariable *Metadata =
CreateMetadataGlobal(M, Initializer, G->getName());
// On recent Mach-O platforms, we emit the global metadata in a way that
// allows the linker to properly strip dead globals.
auto LivenessBinder =
ConstantStruct::get(LivenessTy, Initializer->getAggregateElement(0u),
ConstantExpr::getPointerCast(Metadata, IntptrTy));
GlobalVariable *Liveness = new GlobalVariable(
M, LivenessTy, false, GlobalVariable::InternalLinkage, LivenessBinder,
Twine("__asan_binder_") + G->getName());
Liveness->setSection("__DATA,__asan_liveness,regular,live_support");
LivenessGlobals[i] = Liveness;
}
// Update llvm.compiler.used, adding the new liveness globals. This is
// needed so that during LTO these variables stay alive. The alternative
// would be to have the linker handle the LTO symbols, but libLTO's current
// API does not expose access to the section for each symbol.
if (!LivenessGlobals.empty())
appendToCompilerUsed(M, LivenessGlobals);
// RegisteredFlag serves two purposes. First, we can pass it to dladdr()
// to look up the loaded image that contains it. Second, we can store in it
// whether registration has already occurred, to prevent duplicate
// registration.
//
// Common linkage ensures that there is only one global per shared library.
GlobalVariable *RegisteredFlag = new GlobalVariable(
M, IntptrTy, false, GlobalVariable::CommonLinkage,
ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);
RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility);
IRB.CreateCall(AsanRegisterImageGlobals,
{IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});
// We also need to unregister globals at the end, e.g., when a shared library
// gets closed.
IRBuilder<> IRB_Dtor = CreateAsanModuleDtor(M);
IRB_Dtor.CreateCall(AsanUnregisterImageGlobals,
{IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});
}
void ModuleAddressSanitizer::InstrumentGlobalsWithMetadataArray(
IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
ArrayRef<Constant *> MetadataInitializers) {
assert(ExtendedGlobals.size() == MetadataInitializers.size());
unsigned N = ExtendedGlobals.size();
assert(N > 0);
// On platforms that don't have a custom metadata section, we emit an array
// of global metadata structures.
ArrayType *ArrayOfGlobalStructTy =
ArrayType::get(MetadataInitializers[0]->getType(), N);
auto AllGlobals = new GlobalVariable(
M, ArrayOfGlobalStructTy, false, GlobalVariable::InternalLinkage,
ConstantArray::get(ArrayOfGlobalStructTy, MetadataInitializers), "");
if (Mapping.Scale > 3)
AllGlobals->setAlignment(1ULL << Mapping.Scale);
IRB.CreateCall(AsanRegisterGlobals,
{IRB.CreatePointerCast(AllGlobals, IntptrTy),
ConstantInt::get(IntptrTy, N)});
// We also need to unregister globals at the end, e.g., when a shared library
// gets closed.
IRBuilder<> IRB_Dtor = CreateAsanModuleDtor(M);
IRB_Dtor.CreateCall(AsanUnregisterGlobals,
{IRB.CreatePointerCast(AllGlobals, IntptrTy),
ConstantInt::get(IntptrTy, N)});
}
// This function replaces all global variables with new variables that have
// trailing redzones. It also creates a function that poisons
// redzones and inserts this function into llvm.global_ctors.
// Sets *CtorComdat to true if the global registration code emitted into the
// asan constructor is comdat-compatible.
bool ModuleAddressSanitizer::InstrumentGlobals(IRBuilder<> &IRB, Module &M,
bool *CtorComdat) {
*CtorComdat = false;
SmallVector<GlobalVariable *, 16> GlobalsToChange;
for (auto &G : M.globals()) {
if (ShouldInstrumentGlobal(&G)) GlobalsToChange.push_back(&G);
}
size_t n = GlobalsToChange.size();
if (n == 0) {
*CtorComdat = true;
return false;
}
auto &DL = M.getDataLayout();
// A global is described by a structure
// size_t beg;
// size_t size;
// size_t size_with_redzone;
// const char *name;
// const char *module_name;
// size_t has_dynamic_init;
// void *source_location;
// size_t odr_indicator;
// We initialize an array of such structures and pass it to a run-time call.
StructType *GlobalStructTy =
StructType::get(IntptrTy, IntptrTy, IntptrTy, IntptrTy, IntptrTy,
IntptrTy, IntptrTy, IntptrTy);
SmallVector<GlobalVariable *, 16> NewGlobals(n);
SmallVector<Constant *, 16> Initializers(n);
bool HasDynamicallyInitializedGlobals = false;
// We shouldn't merge identical module names, as this string serves as a
// unique module ID at runtime.
GlobalVariable *ModuleName = createPrivateGlobalForString(
M, M.getModuleIdentifier(), /*AllowMerging*/ false, kAsanGenPrefix);
for (size_t i = 0; i < n; i++) {
static const uint64_t kMaxGlobalRedzone = 1 << 18;
GlobalVariable *G = GlobalsToChange[i];
// FIXME: Metadata should be attached directly to the global instead of being
// added to llvm.asan.globals.
auto MD = GlobalsMD.get(G);
StringRef NameForGlobal = G->getName();
// Create a string holding the global name (use the name from metadata if
// it's available, otherwise just the name of the global variable).
GlobalVariable *Name = createPrivateGlobalForString(
M, MD.Name.empty() ? NameForGlobal : MD.Name,
/*AllowMerging*/ true, kAsanGenPrefix);
Type *Ty = G->getValueType();
uint64_t SizeInBytes = DL.getTypeAllocSize(Ty);
uint64_t MinRZ = MinRedzoneSizeForGlobal();
// MinRZ <= RZ <= kMaxGlobalRedzone,
// and we try to make RZ roughly 1/4 of SizeInBytes.
uint64_t RZ = std::max(
MinRZ, std::min(kMaxGlobalRedzone, (SizeInBytes / MinRZ / 4) * MinRZ));
uint64_t RightRedzoneSize = RZ;
// Round up to MinRZ
if (SizeInBytes % MinRZ) RightRedzoneSize += MinRZ - (SizeInBytes % MinRZ);
assert(((RightRedzoneSize + SizeInBytes) % MinRZ) == 0);
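// Worked example (hypothetical sizes): with SizeInBytes = 40 and MinRZ = 32,
//   RZ = max(32, min(1 << 18, (40 / 32 / 4) * 32)) = max(32, 0) = 32,
// and since 40 % 32 == 8, RightRedzoneSize = 32 + (32 - 8) = 56, giving a
// total of 40 + 56 = 96 bytes, a multiple of MinRZ.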
Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize);
StructType *NewTy = StructType::get(Ty, RightRedZoneTy);
Constant *NewInitializer = ConstantStruct::get(
NewTy, G->getInitializer(), Constant::getNullValue(RightRedZoneTy));
// Create a new global variable with enough space for a redzone.
GlobalValue::LinkageTypes Linkage = G->getLinkage();
if (G->isConstant() && Linkage == GlobalValue::PrivateLinkage)
Linkage = GlobalValue::InternalLinkage;
GlobalVariable *NewGlobal =
new GlobalVariable(M, NewTy, G->isConstant(), Linkage, NewInitializer,
"", G, G->getThreadLocalMode());
NewGlobal->copyAttributesFrom(G);
NewGlobal->setComdat(G->getComdat());
NewGlobal->setAlignment(MinRZ);
// Don't fold globals with redzones. ODR violation detector and redzone
// poisoning implicitly creates a dependence on the global's address, so it
// is no longer valid for it to be marked unnamed_addr.
NewGlobal->setUnnamedAddr(GlobalValue::UnnamedAddr::None);
// Move null-terminated C strings to "__asan_cstring" section on Darwin.
if (TargetTriple.isOSBinFormatMachO() && !G->hasSection() &&
G->isConstant()) {
auto Seq = dyn_cast<ConstantDataSequential>(G->getInitializer());
if (Seq && Seq->isCString())
NewGlobal->setSection("__TEXT,__asan_cstring,regular");
}
// Transfer the debug info. The payload starts at offset zero so we can
// copy the debug info over as is.
SmallVector<DIGlobalVariableExpression *, 1> GVs;
G->getDebugInfo(GVs);
for (auto *GV : GVs)
NewGlobal->addDebugInfo(GV);
Value *Indices2[2];
Indices2[0] = IRB.getInt32(0);
Indices2[1] = IRB.getInt32(0);
G->replaceAllUsesWith(
ConstantExpr::getGetElementPtr(NewTy, NewGlobal, Indices2, true));
NewGlobal->takeName(G);
G->eraseFromParent();
NewGlobals[i] = NewGlobal;
Constant *SourceLoc;
if (!MD.SourceLoc.empty()) {
auto SourceLocGlobal = createPrivateGlobalForSourceLoc(M, MD.SourceLoc);
SourceLoc = ConstantExpr::getPointerCast(SourceLocGlobal, IntptrTy);
} else {
SourceLoc = ConstantInt::get(IntptrTy, 0);
}
Constant *ODRIndicator = ConstantExpr::getNullValue(IRB.getInt8PtrTy());
GlobalValue *InstrumentedGlobal = NewGlobal;
bool CanUsePrivateAliases =
TargetTriple.isOSBinFormatELF() || TargetTriple.isOSBinFormatMachO() ||
TargetTriple.isOSBinFormatWasm();
if (CanUsePrivateAliases && UsePrivateAlias) {
// Create a local alias for NewGlobal to avoid crashes on ODR mismatches
// between instrumented and non-instrumented libraries.
InstrumentedGlobal =
GlobalAlias::create(GlobalValue::PrivateLinkage, "", NewGlobal);
}
// ODR should not happen for local linkage.
if (NewGlobal->hasLocalLinkage()) {
ODRIndicator = ConstantExpr::getIntToPtr(ConstantInt::get(IntptrTy, -1),
IRB.getInt8PtrTy());
} else if (UseOdrIndicator) {
// With local aliases, we need to provide another externally visible
// symbol __odr_asan_XXX to detect ODR violation.
auto *ODRIndicatorSym =
new GlobalVariable(M, IRB.getInt8Ty(), false, Linkage,
Constant::getNullValue(IRB.getInt8Ty()),
kODRGenPrefix + NameForGlobal, nullptr,
NewGlobal->getThreadLocalMode());
// Set meaningful attributes for indicator symbol.
ODRIndicatorSym->setVisibility(NewGlobal->getVisibility());
ODRIndicatorSym->setDLLStorageClass(NewGlobal->getDLLStorageClass());
ODRIndicatorSym->setAlignment(1);
ODRIndicator = ODRIndicatorSym;
}
Constant *Initializer = ConstantStruct::get(
GlobalStructTy,
ConstantExpr::getPointerCast(InstrumentedGlobal, IntptrTy),
ConstantInt::get(IntptrTy, SizeInBytes),
ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
ConstantExpr::getPointerCast(Name, IntptrTy),
ConstantExpr::getPointerCast(ModuleName, IntptrTy),
ConstantInt::get(IntptrTy, MD.IsDynInit), SourceLoc,
ConstantExpr::getPointerCast(ODRIndicator, IntptrTy));
if (ClInitializers && MD.IsDynInit) HasDynamicallyInitializedGlobals = true;
LLVM_DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n");
Initializers[i] = Initializer;
}
// Add instrumented globals to the llvm.compiler.used list to prevent LTO
// from ConstantMerge'ing them.
SmallVector<GlobalValue *, 16> GlobalsToAddToUsedList;
for (size_t i = 0; i < n; i++) {
GlobalVariable *G = NewGlobals[i];
if (G->getName().empty()) continue;
GlobalsToAddToUsedList.push_back(G);
}
appendToCompilerUsed(M, ArrayRef<GlobalValue *>(GlobalsToAddToUsedList));
std::string ELFUniqueModuleId =
(UseGlobalsGC && TargetTriple.isOSBinFormatELF()) ? getUniqueModuleId(&M)
: "";
if (!ELFUniqueModuleId.empty()) {
InstrumentGlobalsELF(IRB, M, NewGlobals, Initializers, ELFUniqueModuleId);
*CtorComdat = true;
} else if (UseGlobalsGC && TargetTriple.isOSBinFormatCOFF()) {
InstrumentGlobalsCOFF(IRB, M, NewGlobals, Initializers);
} else if (UseGlobalsGC && ShouldUseMachOGlobalsSection()) {
InstrumentGlobalsMachO(IRB, M, NewGlobals, Initializers);
} else {
InstrumentGlobalsWithMetadataArray(IRB, M, NewGlobals, Initializers);
}
// Create calls for poisoning before initializers run and unpoisoning after.
if (HasDynamicallyInitializedGlobals)
createInitializerPoisonCalls(M, ModuleName);
LLVM_DEBUG(dbgs() << M);
return true;
}
int ModuleAddressSanitizer::GetAsanVersion(const Module &M) const {
int LongSize = M.getDataLayout().getPointerSizeInBits();
bool isAndroid = Triple(M.getTargetTriple()).isAndroid();
int Version = 8;
// 32-bit Android is one version ahead because of the switch to dynamic
// shadow.
Version += (LongSize == 32 && isAndroid);
return Version;
}
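// E.g. a 64-bit target gets version 8 and 32-bit Android gets version 9; the
// resulting __asan_version_mismatch_check_v<N> reference makes linking fail
// when the runtime does not provide the matching ABI version.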
bool ModuleAddressSanitizer::instrumentModule(Module &M) {
initializeCallbacks(M);
if (CompileKernel)
return false;
// Create a module constructor. A destructor is created lazily because not
// all platforms and not all modules need it.
std::string VersionCheckName =
kAsanVersionCheckNamePrefix + std::to_string(GetAsanVersion(M));
std::tie(AsanCtorFunction, std::ignore) = createSanitizerCtorAndInitFunctions(
M, kAsanModuleCtorName, kAsanInitName, /*InitArgTypes=*/{},
/*InitArgs=*/{}, VersionCheckName);
bool CtorComdat = true;
bool Changed = false;
// TODO(glider): temporarily disabled globals instrumentation for KASan.
if (ClGlobals) {
IRBuilder<> IRB(AsanCtorFunction->getEntryBlock().getTerminator());
Changed |= InstrumentGlobals(IRB, M, &CtorComdat);
}
// Put the constructor and destructor in comdat if both
// (1) global instrumentation is not TU-specific
// (2) target is ELF.
if (UseCtorComdat && TargetTriple.isOSBinFormatELF() && CtorComdat) {
AsanCtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleCtorName));
appendToGlobalCtors(M, AsanCtorFunction, kAsanCtorAndDtorPriority,
AsanCtorFunction);
if (AsanDtorFunction) {
AsanDtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleDtorName));
appendToGlobalDtors(M, AsanDtorFunction, kAsanCtorAndDtorPriority,
AsanDtorFunction);
}
} else {
appendToGlobalCtors(M, AsanCtorFunction, kAsanCtorAndDtorPriority);
if (AsanDtorFunction)
appendToGlobalDtors(M, AsanDtorFunction, kAsanCtorAndDtorPriority);
}
return Changed;
}
void AddressSanitizer::initializeCallbacks(Module &M) {
IRBuilder<> IRB(*C);
// Create __asan_report* callbacks.
// IsWrite, TypeSize and Exp are encoded in the function name.
for (int Exp = 0; Exp < 2; Exp++) {
for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
const std::string TypeStr = AccessIsWrite ? "store" : "load";
const std::string ExpStr = Exp ? "exp_" : "";
const std::string EndingStr = Recover ? "_noabort" : "";
SmallVector<Type *, 3> Args2 = {IntptrTy, IntptrTy};
SmallVector<Type *, 2> Args1{1, IntptrTy};
if (Exp) {
Type *ExpType = Type::getInt32Ty(*C);
Args2.push_back(ExpType);
Args1.push_back(ExpType);
}
AsanErrorCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction(
kAsanReportErrorTemplate + ExpStr + TypeStr + "_n" + EndingStr,
FunctionType::get(IRB.getVoidTy(), Args2, false));
AsanMemoryAccessCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction(
ClMemoryAccessCallbackPrefix + ExpStr + TypeStr + "N" + EndingStr,
FunctionType::get(IRB.getVoidTy(), Args2, false));
for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
AccessSizeIndex++) {
const std::string Suffix = TypeStr + itostr(1ULL << AccessSizeIndex);
AsanErrorCallback[AccessIsWrite][Exp][AccessSizeIndex] =
M.getOrInsertFunction(
kAsanReportErrorTemplate + ExpStr + Suffix + EndingStr,
FunctionType::get(IRB.getVoidTy(), Args1, false));
AsanMemoryAccessCallback[AccessIsWrite][Exp][AccessSizeIndex] =
M.getOrInsertFunction(
ClMemoryAccessCallbackPrefix + ExpStr + Suffix + EndingStr,
FunctionType::get(IRB.getVoidTy(), Args1, false));
}
}
}
const std::string MemIntrinCallbackPrefix =
CompileKernel ? std::string("") : ClMemoryAccessCallbackPrefix;
AsanMemmove = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memmove",
IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
IRB.getInt8PtrTy(), IntptrTy);
AsanMemcpy = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memcpy",
IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
IRB.getInt8PtrTy(), IntptrTy);
AsanMemset = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memset",
IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
IRB.getInt32Ty(), IntptrTy);
AsanHandleNoReturnFunc =
M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy());
AsanPtrCmpFunction =
M.getOrInsertFunction(kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy);
AsanPtrSubFunction =
M.getOrInsertFunction(kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy);
// We insert an empty inline asm after __asan_report* to avoid callback merge.
EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
StringRef(""), StringRef(""),
/*hasSideEffects=*/true);
if (Mapping.InGlobal)
AsanShadowGlobal = M.getOrInsertGlobal("__asan_shadow",
ArrayType::get(IRB.getInt8Ty(), 0));
}
bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
// For each NSObject descendant having a +load method, this method is invoked
// by the ObjC runtime before any of the static constructors is called.
// Therefore we need to instrument such methods with a call to __asan_init
// at the beginning in order to initialize our runtime before any access to
// the shadow memory.
// We cannot just ignore these methods, because they may call other
// instrumented functions.
if (F.getName().find(" load]") != std::string::npos) {
FunctionCallee AsanInitFunction =
declareSanitizerInitFunction(*F.getParent(), kAsanInitName, {});
IRBuilder<> IRB(&F.front(), F.front().begin());
IRB.CreateCall(AsanInitFunction, {});
return true;
}
return false;
}
void AddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(Function &F) {
// Generate code only when dynamic addressing is needed.
if (Mapping.Offset != kDynamicShadowSentinel)
return;
IRBuilder<> IRB(&F.front().front());
if (Mapping.InGlobal) {
if (ClWithIfuncSuppressRemat) {
// An empty inline asm with input reg == output reg.
// An opaque pointer-to-int cast, basically.
InlineAsm *Asm = InlineAsm::get(
FunctionType::get(IntptrTy, {AsanShadowGlobal->getType()}, false),
StringRef(""), StringRef("=r,0"),
/*hasSideEffects=*/false);
LocalDynamicShadow =
IRB.CreateCall(Asm, {AsanShadowGlobal}, ".asan.shadow");
} else {
LocalDynamicShadow =
IRB.CreatePointerCast(AsanShadowGlobal, IntptrTy, ".asan.shadow");
}
} else {
Value *GlobalDynamicAddress = F.getParent()->getOrInsertGlobal(
kAsanShadowMemoryDynamicAddress, IntptrTy);
LocalDynamicShadow = IRB.CreateLoad(IntptrTy, GlobalDynamicAddress);
}
}
void AddressSanitizer::markEscapedLocalAllocas(Function &F) {
// Find the one possible call to llvm.localescape and pre-mark allocas passed
// to it as uninteresting. This assumes we haven't started processing allocas
// yet. This check is done up front because iterating the use list in
// isInterestingAlloca would be algorithmically slower.
assert(ProcessedAllocas.empty() && "must process localescape before allocas");
// Try to get the declaration of llvm.localescape. If it's not in the module,
// we can exit early.
if (!F.getParent()->getFunction("llvm.localescape")) return;
// Look for a call to llvm.localescape in the entry block. It can't be in
// any other block.
for (Instruction &I : F.getEntryBlock()) {
IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
if (II && II->getIntrinsicID() == Intrinsic::localescape) {
// We found a call. Mark all the allocas passed in as uninteresting.
for (Value *Arg : II->arg_operands()) {
AllocaInst *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
assert(AI && AI->isStaticAlloca() &&
"non-static alloca arg to localescape");
ProcessedAllocas[AI] = false;
}
break;
}
}
}
bool AddressSanitizer::instrumentFunction(Function &F,
const TargetLibraryInfo *TLI) {
if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false;
if (!ClDebugFunc.empty() && ClDebugFunc == F.getName()) return false;
if (F.getName().startswith("__asan_")) return false;
bool FunctionModified = false;
// If needed, insert __asan_init before checking for SanitizeAddress attr.
// This function needs to be called even if the function body is not
// instrumented.
if (maybeInsertAsanInitAtFunctionEntry(F))
FunctionModified = true;
// Leave if the function doesn't need instrumentation.
if (!F.hasFnAttribute(Attribute::SanitizeAddress)) return FunctionModified;
LLVM_DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n");
initializeCallbacks(*F.getParent());
FunctionStateRAII CleanupObj(this);
maybeInsertDynamicShadowAtFunctionEntry(F);
// We can't instrument allocas used with llvm.localescape. Only static allocas
// can be passed to that intrinsic.
markEscapedLocalAllocas(F);
// We want to instrument every address only once per basic block (unless there
// are calls between uses).
SmallPtrSet<Value *, 16> TempsToInstrument;
SmallVector<Instruction *, 16> ToInstrument;
SmallVector<Instruction *, 8> NoReturnCalls;
SmallVector<BasicBlock *, 16> AllBlocks;
SmallVector<Instruction *, 16> PointerComparisonsOrSubtracts;
int NumAllocas = 0;
bool IsWrite;
unsigned Alignment;
uint64_t TypeSize;
// Fill the set of memory operations to instrument.
for (auto &BB : F) {
AllBlocks.push_back(&BB);
TempsToInstrument.clear();
int NumInsnsPerBB = 0;
for (auto &Inst : BB) {
if (LooksLikeCodeInBug11395(&Inst)) return false;
Value *MaybeMask = nullptr;
if (Value *Addr = isInterestingMemoryAccess(&Inst, &IsWrite, &TypeSize,
&Alignment, &MaybeMask)) {
if (ClOpt && ClOptSameTemp) {
// If we have a mask, skip instrumentation if we've already
// instrumented the full object. But don't add to TempsToInstrument
// because we might get another load/store with a different mask.
if (MaybeMask) {
if (TempsToInstrument.count(Addr))
continue; // We've seen this (whole) temp in the current BB.
} else {
if (!TempsToInstrument.insert(Addr).second)
continue; // We've seen this temp in the current BB.
}
}
} else if (((ClInvalidPointerPairs || ClInvalidPointerCmp) &&
isInterestingPointerComparison(&Inst)) ||
((ClInvalidPointerPairs || ClInvalidPointerSub) &&
isInterestingPointerSubtraction(&Inst))) {
PointerComparisonsOrSubtracts.push_back(&Inst);
continue;
} else if (isa<MemIntrinsic>(Inst)) {
// ok, take it.
} else {
if (isa<AllocaInst>(Inst)) NumAllocas++;
CallSite CS(&Inst);
if (CS) {
// A call inside BB.
TempsToInstrument.clear();
if (CS.doesNotReturn() && !CS->getMetadata("nosanitize"))
NoReturnCalls.push_back(CS.getInstruction());
}
if (CallInst *CI = dyn_cast<CallInst>(&Inst))
maybeMarkSanitizerLibraryCallNoBuiltin(CI, TLI);
continue;
}
ToInstrument.push_back(&Inst);
NumInsnsPerBB++;
if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB) break;
}
}
bool UseCalls =
(ClInstrumentationWithCallsThreshold >= 0 &&
ToInstrument.size() > (unsigned)ClInstrumentationWithCallsThreshold);
const DataLayout &DL = F.getParent()->getDataLayout();
ObjectSizeOpts ObjSizeOpts;
ObjSizeOpts.RoundToAlign = true;
ObjectSizeOffsetVisitor ObjSizeVis(DL, TLI, F.getContext(), ObjSizeOpts);
// Instrument.
int NumInstrumented = 0;
for (auto Inst : ToInstrument) {
if (ClDebugMin < 0 || ClDebugMax < 0 ||
(NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) {
if (isInterestingMemoryAccess(Inst, &IsWrite, &TypeSize, &Alignment))
instrumentMop(ObjSizeVis, Inst, UseCalls,
F.getParent()->getDataLayout());
else
instrumentMemIntrinsic(cast<MemIntrinsic>(Inst));
}
NumInstrumented++;
}
FunctionStackPoisoner FSP(F, *this);
bool ChangedStack = FSP.runOnFunction();
// We must unpoison the stack before NoReturn calls (throw, _exit, etc).
// See e.g. https://github.com/google/sanitizers/issues/37
for (auto CI : NoReturnCalls) {
IRBuilder<> IRB(CI);
IRB.CreateCall(AsanHandleNoReturnFunc, {});
}
for (auto Inst : PointerComparisonsOrSubtracts) {
instrumentPointerComparisonOrSubtraction(Inst);
NumInstrumented++;
}
if (NumInstrumented > 0 || ChangedStack || !NoReturnCalls.empty())
FunctionModified = true;
LLVM_DEBUG(dbgs() << "ASAN done instrumenting: " << FunctionModified << " "
<< F << "\n");
return FunctionModified;
}
// Workaround for bug 11395: we don't want to instrument stack in functions
// with large assembly blobs (32-bit only), otherwise reg alloc may crash.
// FIXME: remove once bug 11395 is fixed.
bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) {
if (LongSize != 32) return false;
CallInst *CI = dyn_cast<CallInst>(I);
if (!CI || !CI->isInlineAsm()) return false;
if (CI->getNumArgOperands() <= 5) return false;
// We have inline assembly with quite a few arguments.
return true;
}
void FunctionStackPoisoner::initializeCallbacks(Module &M) {
IRBuilder<> IRB(*C);
for (int i = 0; i <= kMaxAsanStackMallocSizeClass; i++) {
std::string Suffix = itostr(i);
AsanStackMallocFunc[i] = M.getOrInsertFunction(
kAsanStackMallocNameTemplate + Suffix, IntptrTy, IntptrTy);
AsanStackFreeFunc[i] =
M.getOrInsertFunction(kAsanStackFreeNameTemplate + Suffix,
IRB.getVoidTy(), IntptrTy, IntptrTy);
}
if (ASan.UseAfterScope) {
AsanPoisonStackMemoryFunc = M.getOrInsertFunction(
kAsanPoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
AsanUnpoisonStackMemoryFunc = M.getOrInsertFunction(
kAsanUnpoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
}
for (size_t Val : {0x00, 0xf1, 0xf2, 0xf3, 0xf5, 0xf8}) {
std::ostringstream Name;
Name << kAsanSetShadowPrefix;
Name << std::setw(2) << std::setfill('0') << std::hex << Val;
AsanSetShadowFunc[Val] =
M.getOrInsertFunction(Name.str(), IRB.getVoidTy(), IntptrTy, IntptrTy);
}
AsanAllocaPoisonFunc = M.getOrInsertFunction(
kAsanAllocaPoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
AsanAllocasUnpoisonFunc = M.getOrInsertFunction(
kAsanAllocasUnpoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
}
void FunctionStackPoisoner::copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
ArrayRef<uint8_t> ShadowBytes,
size_t Begin, size_t End,
IRBuilder<> &IRB,
Value *ShadowBase) {
if (Begin >= End)
return;
const size_t LargestStoreSizeInBytes =
std::min<size_t>(sizeof(uint64_t), ASan.LongSize / 8);
const bool IsLittleEndian = F.getParent()->getDataLayout().isLittleEndian();
// Poison the given range in shadow using the largest store size, without
// leading and trailing zeros in ShadowMask. Zeros never change, so they need
// neither poisoning nor unpoisoning. Still, we don't mind if some of them
// end up in the middle of a store.
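// For example, given ShadowBytes = {0xf1, 0xf1, 0xf1, 0xf1} with all mask
// bytes set, a little-endian 64-bit target gets a single 4-byte store of
// 0xf1f1f1f1 here instead of four 1-byte stores.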
for (size_t i = Begin; i < End;) {
if (!ShadowMask[i]) {
assert(!ShadowBytes[i]);
++i;
continue;
}
size_t StoreSizeInBytes = LargestStoreSizeInBytes;
// Fit store size into the range.
while (StoreSizeInBytes > End - i)
StoreSizeInBytes /= 2;
// Minimize store size by trimming trailing zeros.
for (size_t j = StoreSizeInBytes - 1; j && !ShadowMask[i + j]; --j) {
while (j <= StoreSizeInBytes / 2)
StoreSizeInBytes /= 2;
}
uint64_t Val = 0;
for (size_t j = 0; j < StoreSizeInBytes; j++) {
if (IsLittleEndian)
Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
else
Val = (Val << 8) | ShadowBytes[i + j];
}
Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
Value *Poison = IRB.getIntN(StoreSizeInBytes * 8, Val);
IRB.CreateAlignedStore(
Poison, IRB.CreateIntToPtr(Ptr, Poison->getType()->getPointerTo()), 1);
i += StoreSizeInBytes;
}
}
void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
ArrayRef<uint8_t> ShadowBytes,
IRBuilder<> &IRB, Value *ShadowBase) {
copyToShadow(ShadowMask, ShadowBytes, 0, ShadowMask.size(), IRB, ShadowBase);
}
void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
ArrayRef<uint8_t> ShadowBytes,
size_t Begin, size_t End,
IRBuilder<> &IRB, Value *ShadowBase) {
assert(ShadowMask.size() == ShadowBytes.size());
size_t Done = Begin;
for (size_t i = Begin, j = Begin + 1; i < End; i = j++) {
if (!ShadowMask[i]) {
assert(!ShadowBytes[i]);
continue;
}
uint8_t Val = ShadowBytes[i];
if (!AsanSetShadowFunc[Val])
continue;
// Skip same values.
for (; j < End && ShadowMask[j] && Val == ShadowBytes[j]; ++j) {
}
if (j - i >= ClMaxInlinePoisoningSize) {
copyToShadowInline(ShadowMask, ShadowBytes, Done, i, IRB, ShadowBase);
IRB.CreateCall(AsanSetShadowFunc[Val],
{IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)),
ConstantInt::get(IntptrTy, j - i)});
Done = j;
}
}
copyToShadowInline(ShadowMask, ShadowBytes, Done, End, IRB, ShadowBase);
}
// Fake stack allocator (asan_fake_stack.h) has 11 size classes
// for every power of 2 from kMinStackMallocSize to kMaxAsanStackMallocSizeClass
static int StackMallocSizeClass(uint64_t LocalStackSize) {
assert(LocalStackSize <= kMaxStackMallocSize);
uint64_t MaxSize = kMinStackMallocSize;
for (int i = 0;; i++, MaxSize *= 2)
if (LocalStackSize <= MaxSize) return i;
llvm_unreachable("impossible LocalStackSize");
}
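// E.g. assuming kMinStackMallocSize is 64 bytes, a 200-byte frame maps to
// class 2 (64 -> 0, 128 -> 1, 256 -> 2), so __asan_stack_malloc_2 and
// __asan_stack_free_2 service that frame.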
void FunctionStackPoisoner::copyArgsPassedByValToAllocas() {
Instruction *CopyInsertPoint = &F.front().front();
if (CopyInsertPoint == ASan.LocalDynamicShadow) {
// Insert after the dynamic shadow location is determined
CopyInsertPoint = CopyInsertPoint->getNextNode();
assert(CopyInsertPoint);
}
IRBuilder<> IRB(CopyInsertPoint);
const DataLayout &DL = F.getParent()->getDataLayout();
for (Argument &Arg : F.args()) {
if (Arg.hasByValAttr()) {
Type *Ty = Arg.getType()->getPointerElementType();
unsigned Align = Arg.getParamAlignment();
if (Align == 0) Align = DL.getABITypeAlignment(Ty);
AllocaInst *AI = IRB.CreateAlloca(
Ty, nullptr,
(Arg.hasName() ? Arg.getName() : "Arg" + Twine(Arg.getArgNo())) +
".byval");
AI->setAlignment(Align);
Arg.replaceAllUsesWith(AI);
uint64_t AllocSize = DL.getTypeAllocSize(Ty);
IRB.CreateMemCpy(AI, Align, &Arg, Align, AllocSize);
}
}
}
PHINode *FunctionStackPoisoner::createPHI(IRBuilder<> &IRB, Value *Cond,
Value *ValueIfTrue,
Instruction *ThenTerm,
Value *ValueIfFalse) {
PHINode *PHI = IRB.CreatePHI(IntptrTy, 2);
BasicBlock *CondBlock = cast<Instruction>(Cond)->getParent();
PHI->addIncoming(ValueIfFalse, CondBlock);
BasicBlock *ThenBlock = ThenTerm->getParent();
PHI->addIncoming(ValueIfTrue, ThenBlock);
return PHI;
}
Value *FunctionStackPoisoner::createAllocaForLayout(
IRBuilder<> &IRB, const ASanStackFrameLayout &L, bool Dynamic) {
AllocaInst *Alloca;
if (Dynamic) {
Alloca = IRB.CreateAlloca(IRB.getInt8Ty(),
ConstantInt::get(IRB.getInt64Ty(), L.FrameSize),
"MyAlloca");
} else {
Alloca = IRB.CreateAlloca(ArrayType::get(IRB.getInt8Ty(), L.FrameSize),
nullptr, "MyAlloca");
assert(Alloca->isStaticAlloca());
}
assert((ClRealignStack & (ClRealignStack - 1)) == 0);
size_t FrameAlignment = std::max(L.FrameAlignment, (size_t)ClRealignStack);
Alloca->setAlignment(FrameAlignment);
return IRB.CreatePointerCast(Alloca, IntptrTy);
}
void FunctionStackPoisoner::createDynamicAllocasInitStorage() {
BasicBlock &FirstBB = *F.begin();
IRBuilder<> IRB(dyn_cast<Instruction>(FirstBB.begin()));
DynamicAllocaLayout = IRB.CreateAlloca(IntptrTy, nullptr);
IRB.CreateStore(Constant::getNullValue(IntptrTy), DynamicAllocaLayout);
DynamicAllocaLayout->setAlignment(32);
}
void FunctionStackPoisoner::processDynamicAllocas() {
if (!ClInstrumentDynamicAllocas || DynamicAllocaVec.empty()) {
assert(DynamicAllocaPoisonCallVec.empty());
return;
}
// Insert poison calls for lifetime intrinsics for dynamic allocas.
for (const auto &APC : DynamicAllocaPoisonCallVec) {
assert(APC.InsBefore);
assert(APC.AI);
assert(ASan.isInterestingAlloca(*APC.AI));
assert(!APC.AI->isStaticAlloca());
IRBuilder<> IRB(APC.InsBefore);
poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison);
// Dynamic allocas will be unpoisoned unconditionally below in
// unpoisonDynamicAllocas, so they don't need to be unpoisoned here.
}
// Handle dynamic allocas.
createDynamicAllocasInitStorage();
for (auto &AI : DynamicAllocaVec)
handleDynamicAllocaCall(AI);
unpoisonDynamicAllocas();
}
void FunctionStackPoisoner::processStaticAllocas() {
if (AllocaVec.empty()) {
assert(StaticAllocaPoisonCallVec.empty());
return;
}
int StackMallocIdx = -1;
DebugLoc EntryDebugLocation;
if (auto SP = F.getSubprogram())
EntryDebugLocation = DebugLoc::get(SP->getScopeLine(), 0, SP);
Instruction *InsBefore = AllocaVec[0];
IRBuilder<> IRB(InsBefore);
IRB.SetCurrentDebugLocation(EntryDebugLocation);
// Make sure non-instrumented allocas stay in the entry block. Otherwise,
// debug info is broken, because only entry-block allocas are treated as
// regular stack slots.
auto InsBeforeB = InsBefore->getParent();
assert(InsBeforeB == &F.getEntryBlock());
for (auto *AI : StaticAllocasToMoveUp)
if (AI->getParent() == InsBeforeB)
AI->moveBefore(InsBefore);
// If we have a call to llvm.localescape, keep it in the entry block.
if (LocalEscapeCall) LocalEscapeCall->moveBefore(InsBefore);
SmallVector<ASanStackVariableDescription, 16> SVD;
SVD.reserve(AllocaVec.size());
for (AllocaInst *AI : AllocaVec) {
ASanStackVariableDescription D = {AI->getName().data(),
ASan.getAllocaSizeInBytes(*AI),
0,
AI->getAlignment(),
AI,
0,
0};
SVD.push_back(D);
}
// Minimal header size (left redzone) is 4 pointers,
// i.e. 32 bytes on 64-bit platforms and 16 bytes on 32-bit platforms.
size_t Granularity = 1ULL << Mapping.Scale;
size_t MinHeaderSize = std::max((size_t)ASan.LongSize / 2, Granularity);
const ASanStackFrameLayout &L =
ComputeASanStackFrameLayout(SVD, Granularity, MinHeaderSize);
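// E.g. with the default Mapping.Scale of 3, Granularity is 8 bytes, and on a
// 64-bit target MinHeaderSize = max(64 / 2, 8) = 32, matching the 4-pointer
// frame header (magic, description, PC) written below.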
// Build AllocaToSVDMap for ASanStackVariableDescription lookup.
DenseMap<const AllocaInst *, ASanStackVariableDescription *> AllocaToSVDMap;
for (auto &Desc : SVD)
AllocaToSVDMap[Desc.AI] = &Desc;
// Update SVD with information from lifetime intrinsics.
for (const auto &APC : StaticAllocaPoisonCallVec) {
assert(APC.InsBefore);
assert(APC.AI);
assert(ASan.isInterestingAlloca(*APC.AI));
assert(APC.AI->isStaticAlloca());
ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
Desc.LifetimeSize = Desc.Size;
if (const DILocation *FnLoc = EntryDebugLocation.get()) {
if (const DILocation *LifetimeLoc = APC.InsBefore->getDebugLoc().get()) {
if (LifetimeLoc->getFile() == FnLoc->getFile())
if (unsigned Line = LifetimeLoc->getLine())
Desc.Line = std::min(Desc.Line ? Desc.Line : Line, Line);
}
}
}
auto DescriptionString = ComputeASanStackFrameDescription(SVD);
LLVM_DEBUG(dbgs() << DescriptionString << " --- " << L.FrameSize << "\n");
uint64_t LocalStackSize = L.FrameSize;
bool DoStackMalloc = ClUseAfterReturn && !ASan.CompileKernel &&
LocalStackSize <= kMaxStackMallocSize;
bool DoDynamicAlloca = ClDynamicAllocaStack;
// Don't do dynamic alloca or stack malloc if:
// 1) There is inline asm: too often it makes assumptions on which registers
// are available.
// 2) There is a returns_twice call (typically setjmp), which is
// optimization-hostile, and doesn't play well with introduced indirect
// register-relative calculation of local variable addresses.
DoDynamicAlloca &= !HasNonEmptyInlineAsm && !HasReturnsTwiceCall;
DoStackMalloc &= !HasNonEmptyInlineAsm && !HasReturnsTwiceCall;
Value *StaticAlloca =
DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false);
Value *FakeStack;
Value *LocalStackBase;
Value *LocalStackBaseAlloca;
uint8_t DIExprFlags = DIExpression::ApplyOffset;
if (DoStackMalloc) {
LocalStackBaseAlloca =
IRB.CreateAlloca(IntptrTy, nullptr, "asan_local_stack_base");
// void *FakeStack = __asan_option_detect_stack_use_after_return
// ? __asan_stack_malloc_N(LocalStackSize)
// : nullptr;
// void *LocalStackBase = (FakeStack) ? FakeStack : alloca(LocalStackSize);
Constant *OptionDetectUseAfterReturn = F.getParent()->getOrInsertGlobal(
kAsanOptionDetectUseAfterReturn, IRB.getInt32Ty());
Value *UseAfterReturnIsEnabled = IRB.CreateICmpNE(
IRB.CreateLoad(IRB.getInt32Ty(), OptionDetectUseAfterReturn),
Constant::getNullValue(IRB.getInt32Ty()));
Instruction *Term =
SplitBlockAndInsertIfThen(UseAfterReturnIsEnabled, InsBefore, false);
IRBuilder<> IRBIf(Term);
IRBIf.SetCurrentDebugLocation(EntryDebugLocation);
StackMallocIdx = StackMallocSizeClass(LocalStackSize);
assert(StackMallocIdx <= kMaxAsanStackMallocSizeClass);
Value *FakeStackValue =
IRBIf.CreateCall(AsanStackMallocFunc[StackMallocIdx],
ConstantInt::get(IntptrTy, LocalStackSize));
IRB.SetInsertPoint(InsBefore);
IRB.SetCurrentDebugLocation(EntryDebugLocation);
FakeStack = createPHI(IRB, UseAfterReturnIsEnabled, FakeStackValue, Term,
ConstantInt::get(IntptrTy, 0));
Value *NoFakeStack =
IRB.CreateICmpEQ(FakeStack, Constant::getNullValue(IntptrTy));
Term = SplitBlockAndInsertIfThen(NoFakeStack, InsBefore, false);
IRBIf.SetInsertPoint(Term);
IRBIf.SetCurrentDebugLocation(EntryDebugLocation);
Value *AllocaValue =
DoDynamicAlloca ? createAllocaForLayout(IRBIf, L, true) : StaticAlloca;
IRB.SetInsertPoint(InsBefore);
IRB.SetCurrentDebugLocation(EntryDebugLocation);
LocalStackBase = createPHI(IRB, NoFakeStack, AllocaValue, Term, FakeStack);
IRB.SetCurrentDebugLocation(EntryDebugLocation);
IRB.CreateStore(LocalStackBase, LocalStackBaseAlloca);
DIExprFlags |= DIExpression::DerefBefore;
} else {
// void *FakeStack = nullptr;
// void *LocalStackBase = alloca(LocalStackSize);
FakeStack = ConstantInt::get(IntptrTy, 0);
LocalStackBase =
DoDynamicAlloca ? createAllocaForLayout(IRB, L, true) : StaticAlloca;
LocalStackBaseAlloca = LocalStackBase;
}
// Replace Alloca instructions with base+offset.
for (const auto &Desc : SVD) {
AllocaInst *AI = Desc.AI;
replaceDbgDeclareForAlloca(AI, LocalStackBaseAlloca, DIB, DIExprFlags,
Desc.Offset);
Value *NewAllocaPtr = IRB.CreateIntToPtr(
IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset)),
AI->getType());
AI->replaceAllUsesWith(NewAllocaPtr);
}
// The left-most redzone has enough space for at least 4 pointers.
// Write the Magic value to redzone[0].
Value *BasePlus0 = IRB.CreateIntToPtr(LocalStackBase, IntptrPtrTy);
IRB.CreateStore(ConstantInt::get(IntptrTy, kCurrentStackFrameMagic),
BasePlus0);
// Write the frame description constant to redzone[1].
Value *BasePlus1 = IRB.CreateIntToPtr(
IRB.CreateAdd(LocalStackBase,
ConstantInt::get(IntptrTy, ASan.LongSize / 8)),
IntptrPtrTy);
GlobalVariable *StackDescriptionGlobal =
createPrivateGlobalForString(*F.getParent(), DescriptionString,
/*AllowMerging*/ true, kAsanGenPrefix);
Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal, IntptrTy);
IRB.CreateStore(Description, BasePlus1);
// Write the PC to redzone[2].
Value *BasePlus2 = IRB.CreateIntToPtr(
IRB.CreateAdd(LocalStackBase,
ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8)),
IntptrPtrTy);
IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2);
const auto &ShadowAfterScope = GetShadowBytesAfterScope(SVD, L);
// Poison the stack red zones at the entry.
Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB);
// As the mask we must use the most poisoned case: red zones and after-scope
// poison. As the bytes we can use either the same or just the red zones.
copyToShadow(ShadowAfterScope, ShadowAfterScope, IRB, ShadowBase);
if (!StaticAllocaPoisonCallVec.empty()) {
const auto &ShadowInScope = GetShadowBytes(SVD, L);
// Poison static allocas near lifetime intrinsics.
for (const auto &APC : StaticAllocaPoisonCallVec) {
const ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
assert(Desc.Offset % L.Granularity == 0);
size_t Begin = Desc.Offset / L.Granularity;
size_t End = Begin + (APC.Size + L.Granularity - 1) / L.Granularity;
IRBuilder<> IRB(APC.InsBefore);
copyToShadow(ShadowAfterScope,
APC.DoPoison ? ShadowAfterScope : ShadowInScope, Begin, End,
IRB, ShadowBase);
}
}
SmallVector<uint8_t, 64> ShadowClean(ShadowAfterScope.size(), 0);
SmallVector<uint8_t, 64> ShadowAfterReturn;
// (Un)poison the stack before all ret instructions.
for (auto Ret : RetVec) {
IRBuilder<> IRBRet(Ret);
// Mark the current frame as retired.
IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic),
BasePlus0);
if (DoStackMalloc) {
assert(StackMallocIdx >= 0);
// if FakeStack != 0 // LocalStackBase == FakeStack
// // In use-after-return mode, poison the whole stack frame.
// if StackMallocIdx <= 4
// // For small sizes inline the whole thing:
// memset(ShadowBase, kAsanStackAfterReturnMagic, ShadowSize);
// **SavedFlagPtr(FakeStack) = 0
// else
// __asan_stack_free_N(FakeStack, LocalStackSize)
// else
// <This is not a fake stack; unpoison the redzones>
Value *Cmp =
IRBRet.CreateICmpNE(FakeStack, Constant::getNullValue(IntptrTy));
Instruction *ThenTerm, *ElseTerm;
SplitBlockAndInsertIfThenElse(Cmp, Ret, &ThenTerm, &ElseTerm);
IRBuilder<> IRBPoison(ThenTerm);
if (StackMallocIdx <= 4) {
int ClassSize = kMinStackMallocSize << StackMallocIdx;
ShadowAfterReturn.resize(ClassSize / L.Granularity,
kAsanStackUseAfterReturnMagic);
copyToShadow(ShadowAfterReturn, ShadowAfterReturn, IRBPoison,
ShadowBase);
Value *SavedFlagPtrPtr = IRBPoison.CreateAdd(
FakeStack,
ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8));
Value *SavedFlagPtr = IRBPoison.CreateLoad(
IntptrTy, IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy));
IRBPoison.CreateStore(
Constant::getNullValue(IRBPoison.getInt8Ty()),
IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getInt8PtrTy()));
} else {
// For larger frames call __asan_stack_free_*.
IRBPoison.CreateCall(
AsanStackFreeFunc[StackMallocIdx],
{FakeStack, ConstantInt::get(IntptrTy, LocalStackSize)});
}
IRBuilder<> IRBElse(ElseTerm);
copyToShadow(ShadowAfterScope, ShadowClean, IRBElse, ShadowBase);
} else {
copyToShadow(ShadowAfterScope, ShadowClean, IRBRet, ShadowBase);
}
}
// We are done. Remove the old unused alloca instructions.
for (auto AI : AllocaVec) AI->eraseFromParent();
}
void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
IRBuilder<> &IRB, bool DoPoison) {
// For now just insert the call to ASan runtime.
Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy);
Value *SizeArg = ConstantInt::get(IntptrTy, Size);
IRB.CreateCall(
DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc,
{AddrArg, SizeArg});
}
// Handling llvm.lifetime intrinsics for a given %alloca:
// (1) collect all llvm.lifetime.xxx(%size, %value) describing the alloca.
// (2) if %size is constant, poison memory for llvm.lifetime.end (to detect
// invalid accesses) and unpoison it for llvm.lifetime.start (the memory
// could be poisoned by previous llvm.lifetime.end instruction, as the
// variable may go in and out of scope several times, e.g. in loops).
// (3) if we poisoned at least one %alloca in a function,
// unpoison the whole stack frame at function exit.
void FunctionStackPoisoner::handleDynamicAllocaCall(AllocaInst *AI) {
IRBuilder<> IRB(AI);
const unsigned Align = std::max(kAllocaRzSize, AI->getAlignment());
const uint64_t AllocaRedzoneMask = kAllocaRzSize - 1;
Value *Zero = Constant::getNullValue(IntptrTy);
Value *AllocaRzSize = ConstantInt::get(IntptrTy, kAllocaRzSize);
Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask);
// Since we need to extend the alloca with additional memory to place the
// redzones, and OldSize is the number of allocated elements of ElementSize
// bytes each, the allocated memory size in bytes is
// OldSize * ElementSize.
const unsigned ElementSize =
F.getParent()->getDataLayout().getTypeAllocSize(AI->getAllocatedType());
Value *OldSize =
IRB.CreateMul(IRB.CreateIntCast(AI->getArraySize(), IntptrTy, false),
ConstantInt::get(IntptrTy, ElementSize));
// PartialSize = OldSize % 32
Value *PartialSize = IRB.CreateAnd(OldSize, AllocaRzMask);
// Misalign = kAllocaRzSize - PartialSize;
Value *Misalign = IRB.CreateSub(AllocaRzSize, PartialSize);
// PartialPadding = Misalign != kAllocaRzSize ? Misalign : 0;
Value *Cond = IRB.CreateICmpNE(Misalign, AllocaRzSize);
Value *PartialPadding = IRB.CreateSelect(Cond, Misalign, Zero);
// AdditionalChunkSize = Align + PartialPadding + kAllocaRzSize
// Align is added to locate left redzone, PartialPadding for possible
// partial redzone and kAllocaRzSize for right redzone respectively.
Value *AdditionalChunkSize = IRB.CreateAdd(
ConstantInt::get(IntptrTy, Align + kAllocaRzSize), PartialPadding);
Value *NewSize = IRB.CreateAdd(OldSize, AdditionalChunkSize);
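// Worked example (hypothetical values, with kAllocaRzSize = 32 and a 32-byte
// Align): for a 70-byte dynamic alloca, PartialSize = 70 & 31 = 6,
// Misalign = 26, PartialPadding = 26, AdditionalChunkSize = 32 + 32 + 26 = 90,
// so NewSize = 160 and the user data starts at NewAddress = base + 32.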
// Insert new alloca with new NewSize and Align params.
AllocaInst *NewAlloca = IRB.CreateAlloca(IRB.getInt8Ty(), NewSize);
NewAlloca->setAlignment(Align);
// NewAddress = Address + Align
Value *NewAddress = IRB.CreateAdd(IRB.CreatePtrToInt(NewAlloca, IntptrTy),
ConstantInt::get(IntptrTy, Align));
// Insert an __asan_alloca_poison call for the newly created alloca.
IRB.CreateCall(AsanAllocaPoisonFunc, {NewAddress, OldSize});
// Store the last alloca's address to DynamicAllocaLayout. We'll need it
// later to unpoison the dynamic allocas.
IRB.CreateStore(IRB.CreatePtrToInt(NewAlloca, IntptrTy), DynamicAllocaLayout);
Value *NewAddressPtr = IRB.CreateIntToPtr(NewAddress, AI->getType());
// Replace all uses of the address returned by the old alloca with NewAddressPtr.
AI->replaceAllUsesWith(NewAddressPtr);
// We are done. Erase old alloca from parent.
AI->eraseFromParent();
}
// isSafeAccess returns true if Addr is always inbounds with respect to its
// base object. For example, it is a field access or an array access with
// constant inbounds index.
bool AddressSanitizer::isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis,
Value *Addr, uint64_t TypeSize) const {
SizeOffsetType SizeOffset = ObjSizeVis.compute(Addr);
if (!ObjSizeVis.bothKnown(SizeOffset)) return false;
uint64_t Size = SizeOffset.first.getZExtValue();
int64_t Offset = SizeOffset.second.getSExtValue();
// Three checks are required to ensure safety:
// . Offset >= 0 (since the offset is given from the base ptr)
// . Size >= Offset (unsigned)
// . Size - Offset >= NeededSize (unsigned)
return Offset >= 0 && Size >= uint64_t(Offset) &&
Size - uint64_t(Offset) >= TypeSize / 8;
}
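// Worked example: an 8-byte load at constant offset 16 into a 24-byte object
// satisfies all three checks (16 >= 0, 24 >= 16, 24 - 16 >= 8), so the access
// is statically in bounds and no shadow check is required for it.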
Index: stable/12/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
===================================================================
--- stable/12/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp (revision 356465)
@@ -1,1037 +1,1043 @@
//===-- InstrProfiling.cpp - Frontend instrumentation based profiling -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass lowers instrprof_* intrinsics emitted by a frontend for profiling.
// It also builds the data structures and initialization code needed for
// updating execution counts and emitting the profile at runtime.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Instrumentation/InstrProfiling.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Pass.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <string>
using namespace llvm;
#define DEBUG_TYPE "instrprof"
// The start and end values of the precise value profile range for memory
// intrinsic sizes.
cl::opt<std::string> MemOPSizeRange(
"memop-size-range",
cl::desc("Set the range of size in memory intrinsic calls to be profiled "
"precisely, in a format of <start_val>:<end_val>"),
cl::init(""));
// The value that is considered to be a large value in memory intrinsic size
// profiling.
cl::opt<unsigned> MemOPSizeLarge(
"memop-size-large",
cl::desc("Set large value thresthold in memory intrinsic size profiling. "
"Value of 0 disables the large value profiling."),
cl::init(8192));
namespace {
cl::opt<bool> DoNameCompression("enable-name-compression",
cl::desc("Enable name string compression"),
cl::init(true));
cl::opt<bool> DoHashBasedCounterSplit(
"hash-based-counter-split",
cl::desc("Rename counter variable of a comdat function based on cfg hash"),
cl::init(true));
cl::opt<bool> ValueProfileStaticAlloc(
"vp-static-alloc",
cl::desc("Do static counter allocation for value profiler"),
cl::init(true));
cl::opt<double> NumCountersPerValueSite(
"vp-counters-per-site",
cl::desc("The average number of profile counters allocated "
"per value profiling site."),
// This is set to a very small value because in real programs, only
// a very small percentage of value sites have non-zero targets, e.g., 1/30.
// For those sites with non-zero profile, the average number of targets
// is usually smaller than 2.
cl::init(1.0));
cl::opt<bool> AtomicCounterUpdateAll(
"instrprof-atomic-counter-update-all", cl::ZeroOrMore,
cl::desc("Make all profile counter updates atomic (for testing only)"),
cl::init(false));
cl::opt<bool> AtomicCounterUpdatePromoted(
"atomic-counter-update-promoted", cl::ZeroOrMore,
cl::desc("Do counter update using atomic fetch add "
" for promoted counters only"),
cl::init(false));
// If the option is not specified, the default behavior about whether
// counter promotion is done depends on how the instrumentation lowering
// pipeline is set up, i.e., the option's default value does not by itself
// mean the promotion will be done by default. Explicitly setting this
// option can override the default behavior.
cl::opt<bool> DoCounterPromotion("do-counter-promotion", cl::ZeroOrMore,
cl::desc("Do counter register promotion"),
cl::init(false));
cl::opt<unsigned> MaxNumOfPromotionsPerLoop(
cl::ZeroOrMore, "max-counter-promotions-per-loop", cl::init(20),
cl::desc("Max number counter promotions per loop to avoid"
" increasing register pressure too much"));
// A debug option
cl::opt<int>
MaxNumOfPromotions(cl::ZeroOrMore, "max-counter-promotions", cl::init(-1),
cl::desc("Max number of allowed counter promotions"));
cl::opt<unsigned> SpeculativeCounterPromotionMaxExiting(
cl::ZeroOrMore, "speculative-counter-promotion-max-exiting", cl::init(3),
cl::desc("The max number of exiting blocks of a loop to allow "
" speculative counter promotion"));
cl::opt<bool> SpeculativeCounterPromotionToLoop(
cl::ZeroOrMore, "speculative-counter-promotion-to-loop", cl::init(false),
cl::desc("When the option is false, if the target block is in a loop, "
"the promotion will be disallowed unless the promoted counter "
" update can be further/iteratively promoted into an acyclic "
" region."));
cl::opt<bool> IterativeCounterPromotion(
cl::ZeroOrMore, "iterative-counter-promotion", cl::init(true),
cl::desc("Allow counter promotion across the whole loop nest."));
class InstrProfilingLegacyPass : public ModulePass {
InstrProfiling InstrProf;
public:
static char ID;
InstrProfilingLegacyPass() : ModulePass(ID) {}
InstrProfilingLegacyPass(const InstrProfOptions &Options, bool IsCS = false)
: ModulePass(ID), InstrProf(Options, IsCS) {}
StringRef getPassName() const override {
return "Frontend instrumentation-based coverage lowering";
}
bool runOnModule(Module &M) override {
return InstrProf.run(M, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI());
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
AU.addRequired<TargetLibraryInfoWrapperPass>();
}
};
///
/// A helper class to promote one counter RMW operation in the loop
/// into a register update.
///
/// The RMW update of the counter will be sunk out of the loop after
/// the transformation.
///
class PGOCounterPromoterHelper : public LoadAndStorePromoter {
public:
PGOCounterPromoterHelper(
Instruction *L, Instruction *S, SSAUpdater &SSA, Value *Init,
BasicBlock *PH, ArrayRef<BasicBlock *> ExitBlocks,
ArrayRef<Instruction *> InsertPts,
DenseMap<Loop *, SmallVector<LoadStorePair, 8>> &LoopToCands,
LoopInfo &LI)
: LoadAndStorePromoter({L, S}, SSA), Store(S), ExitBlocks(ExitBlocks),
InsertPts(InsertPts), LoopToCandidates(LoopToCands), LI(LI) {
assert(isa<LoadInst>(L));
assert(isa<StoreInst>(S));
SSA.AddAvailableValue(PH, Init);
}
void doExtraRewritesBeforeFinalDeletion() override {
for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) {
BasicBlock *ExitBlock = ExitBlocks[i];
Instruction *InsertPos = InsertPts[i];
// Get LiveIn value into the ExitBlock. If there are multiple
// predecessors, the value is defined by a PHI node in this
// block.
Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
Value *Addr = cast<StoreInst>(Store)->getPointerOperand();
Type *Ty = LiveInValue->getType();
IRBuilder<> Builder(InsertPos);
if (AtomicCounterUpdatePromoted)
// Atomic update currently can only be promoted across the current
// loop, not the whole loop nest.
Builder.CreateAtomicRMW(AtomicRMWInst::Add, Addr, LiveInValue,
AtomicOrdering::SequentiallyConsistent);
else {
LoadInst *OldVal = Builder.CreateLoad(Ty, Addr, "pgocount.promoted");
auto *NewVal = Builder.CreateAdd(OldVal, LiveInValue);
auto *NewStore = Builder.CreateStore(NewVal, Addr);
// Now update the parent loop's candidate list:
if (IterativeCounterPromotion) {
auto *TargetLoop = LI.getLoopFor(ExitBlock);
if (TargetLoop)
LoopToCandidates[TargetLoop].emplace_back(OldVal, NewStore);
}
}
}
}
private:
Instruction *Store;
ArrayRef<BasicBlock *> ExitBlocks;
ArrayRef<Instruction *> InsertPts;
DenseMap<Loop *, SmallVector<LoadStorePair, 8>> &LoopToCandidates;
LoopInfo &LI;
};
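// Illustrative sketch of the promotion (hand-written IR, hypothetical
// names), assuming a single counter update inside a simple loop:
//
//   loop:                                      ; before
//     %c = load i64, i64* @__profc_foo
//     %c.next = add i64 %c, 1
//     store i64 %c.next, i64* @__profc_foo
//     br i1 %cond, label %loop, label %exit
//
// After promotion the running count is carried in an SSA register (a PHI),
// and each exit block gets a single load/add/store flushing the value:
//
//   exit:                                      ; after
//     %pgocount.promoted = load i64, i64* @__profc_foo
//     %new = add i64 %pgocount.promoted, %count
//     store i64 %new, i64* @__profc_foo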
/// A helper class to do register promotion for all profile counter
/// updates in a loop.
///
class PGOCounterPromoter {
public:
PGOCounterPromoter(
DenseMap<Loop *, SmallVector<LoadStorePair, 8>> &LoopToCands,
Loop &CurLoop, LoopInfo &LI, BlockFrequencyInfo *BFI)
: LoopToCandidates(LoopToCands), ExitBlocks(), InsertPts(), L(CurLoop),
LI(LI), BFI(BFI) {
SmallVector<BasicBlock *, 8> LoopExitBlocks;
SmallPtrSet<BasicBlock *, 8> BlockSet;
L.getExitBlocks(LoopExitBlocks);
for (BasicBlock *ExitBlock : LoopExitBlocks) {
if (BlockSet.insert(ExitBlock).second) {
ExitBlocks.push_back(ExitBlock);
InsertPts.push_back(&*ExitBlock->getFirstInsertionPt());
}
}
}
bool run(int64_t *NumPromoted) {
// Skip 'infinite' loops:
if (ExitBlocks.size() == 0)
return false;
unsigned MaxProm = getMaxNumOfPromotionsInLoop(&L);
if (MaxProm == 0)
return false;
unsigned Promoted = 0;
for (auto &Cand : LoopToCandidates[&L]) {
SmallVector<PHINode *, 4> NewPHIs;
SSAUpdater SSA(&NewPHIs);
Value *InitVal = ConstantInt::get(Cand.first->getType(), 0);
// If BFI is set, we will use it to guide the promotions.
if (BFI) {
auto *BB = Cand.first->getParent();
auto InstrCount = BFI->getBlockProfileCount(BB);
if (!InstrCount)
continue;
auto PreheaderCount = BFI->getBlockProfileCount(L.getLoopPreheader());
// If the average loop trip count is not greater than 1.5, we skip
// promotion.
if (PreheaderCount &&
(PreheaderCount.getValue() * 3) >= (InstrCount.getValue() * 2))
continue;
}
PGOCounterPromoterHelper Promoter(Cand.first, Cand.second, SSA, InitVal,
L.getLoopPreheader(), ExitBlocks,
InsertPts, LoopToCandidates, LI);
Promoter.run(SmallVector<Instruction *, 2>({Cand.first, Cand.second}));
Promoted++;
if (Promoted >= MaxProm)
break;
(*NumPromoted)++;
if (MaxNumOfPromotions != -1 && *NumPromoted >= MaxNumOfPromotions)
break;
}
LLVM_DEBUG(dbgs() << Promoted << " counters promoted for loop (depth="
<< L.getLoopDepth() << ")\n");
return Promoted != 0;
}
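// Worked example for the BFI heuristic in run() (illustrative counts): with
// PreheaderCount = 100 and InstrCount = 140 the average trip count is 1.4;
// 100 * 3 >= 140 * 2 (300 >= 280) holds, so the candidate is skipped. With
// InstrCount = 160 (trip count 1.6) the test fails (300 < 320) and the
// counter is promoted.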
private:
bool allowSpeculativeCounterPromotion(Loop *LP) {
SmallVector<BasicBlock *, 8> ExitingBlocks;
L.getExitingBlocks(ExitingBlocks);
// Not considered speculative.
if (ExitingBlocks.size() == 1)
return true;
if (ExitingBlocks.size() > SpeculativeCounterPromotionMaxExiting)
return false;
return true;
}
// Returns the max number of counter promotions for LP.
unsigned getMaxNumOfPromotionsInLoop(Loop *LP) {
// We can't insert into a catchswitch.
SmallVector<BasicBlock *, 8> LoopExitBlocks;
LP->getExitBlocks(LoopExitBlocks);
if (llvm::any_of(LoopExitBlocks, [](BasicBlock *Exit) {
return isa<CatchSwitchInst>(Exit->getTerminator());
}))
return 0;
if (!LP->hasDedicatedExits())
return 0;
BasicBlock *PH = LP->getLoopPreheader();
if (!PH)
return 0;
SmallVector<BasicBlock *, 8> ExitingBlocks;
LP->getExitingBlocks(ExitingBlocks);
// If BFI is set, we do more aggressive promotions based on BFI.
if (BFI)
return (unsigned)-1;
// Not considered speculative.
if (ExitingBlocks.size() == 1)
return MaxNumOfPromotionsPerLoop;
if (ExitingBlocks.size() > SpeculativeCounterPromotionMaxExiting)
return 0;
// Whether the target block is in a loop does not matter:
if (SpeculativeCounterPromotionToLoop)
return MaxNumOfPromotionsPerLoop;
// Now check the target block:
unsigned MaxProm = MaxNumOfPromotionsPerLoop;
for (auto *TargetBlock : LoopExitBlocks) {
auto *TargetLoop = LI.getLoopFor(TargetBlock);
if (!TargetLoop)
continue;
unsigned MaxPromForTarget = getMaxNumOfPromotionsInLoop(TargetLoop);
unsigned PendingCandsInTarget = LoopToCandidates[TargetLoop].size();
MaxProm =
std::min(MaxProm, std::max(MaxPromForTarget, PendingCandsInTarget) -
PendingCandsInTarget);
}
return MaxProm;
}
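// Worked example for the budget computation above (illustrative numbers):
// with MaxProm = 20, MaxPromForTarget = 20 and PendingCandsInTarget = 15,
// the target loop has max(20, 15) - 15 = 5 promotions of budget left, so
// MaxProm becomes min(20, 5) = 5. The max() guards against unsigned
// underflow when more candidates are pending than the target loop allows.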
DenseMap<Loop *, SmallVector<LoadStorePair, 8>> &LoopToCandidates;
SmallVector<BasicBlock *, 8> ExitBlocks;
SmallVector<Instruction *, 8> InsertPts;
Loop &L;
LoopInfo &LI;
BlockFrequencyInfo *BFI;
};
} // end anonymous namespace
PreservedAnalyses InstrProfiling::run(Module &M, ModuleAnalysisManager &AM) {
auto &TLI = AM.getResult<TargetLibraryAnalysis>(M);
if (!run(M, TLI))
return PreservedAnalyses::all();
return PreservedAnalyses::none();
}
char InstrProfilingLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(
InstrProfilingLegacyPass, "instrprof",
"Frontend instrumentation-based coverage lowering.", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(
InstrProfilingLegacyPass, "instrprof",
"Frontend instrumentation-based coverage lowering.", false, false)
ModulePass *
llvm::createInstrProfilingLegacyPass(const InstrProfOptions &Options,
bool IsCS) {
return new InstrProfilingLegacyPass(Options, IsCS);
}
static InstrProfIncrementInst *castToIncrementInst(Instruction *Instr) {
InstrProfIncrementInst *Inc = dyn_cast<InstrProfIncrementInstStep>(Instr);
if (Inc)
return Inc;
return dyn_cast<InstrProfIncrementInst>(Instr);
}
bool InstrProfiling::lowerIntrinsics(Function *F) {
bool MadeChange = false;
PromotionCandidates.clear();
for (BasicBlock &BB : *F) {
for (auto I = BB.begin(), E = BB.end(); I != E;) {
auto Instr = I++;
InstrProfIncrementInst *Inc = castToIncrementInst(&*Instr);
if (Inc) {
lowerIncrement(Inc);
MadeChange = true;
} else if (auto *Ind = dyn_cast<InstrProfValueProfileInst>(Instr)) {
lowerValueProfileInst(Ind);
MadeChange = true;
}
}
}
if (!MadeChange)
return false;
promoteCounterLoadStores(F);
return true;
}
bool InstrProfiling::isCounterPromotionEnabled() const {
if (DoCounterPromotion.getNumOccurrences() > 0)
return DoCounterPromotion;
return Options.DoCounterPromotion;
}
void InstrProfiling::promoteCounterLoadStores(Function *F) {
if (!isCounterPromotionEnabled())
return;
DominatorTree DT(*F);
LoopInfo LI(DT);
DenseMap<Loop *, SmallVector<LoadStorePair, 8>> LoopPromotionCandidates;
std::unique_ptr<BlockFrequencyInfo> BFI;
if (Options.UseBFIInPromotion) {
std::unique_ptr<BranchProbabilityInfo> BPI;
BPI.reset(new BranchProbabilityInfo(*F, LI, TLI));
BFI.reset(new BlockFrequencyInfo(*F, *BPI, LI));
}
for (const auto &LoadStore : PromotionCandidates) {
auto *CounterLoad = LoadStore.first;
auto *CounterStore = LoadStore.second;
BasicBlock *BB = CounterLoad->getParent();
Loop *ParentLoop = LI.getLoopFor(BB);
if (!ParentLoop)
continue;
LoopPromotionCandidates[ParentLoop].emplace_back(CounterLoad, CounterStore);
}
SmallVector<Loop *, 4> Loops = LI.getLoopsInPreorder();
// Do a post-order traversal of the loops so that counter updates can be
// iteratively hoisted outside the loop nest.
for (auto *Loop : llvm::reverse(Loops)) {
PGOCounterPromoter Promoter(LoopPromotionCandidates, *Loop, LI, BFI.get());
Promoter.run(&TotalCountersPromoted);
}
}
/// Check if the module contains uses of any profiling intrinsics.
static bool containsProfilingIntrinsics(Module &M) {
if (auto *F = M.getFunction(
Intrinsic::getName(llvm::Intrinsic::instrprof_increment)))
if (!F->use_empty())
return true;
if (auto *F = M.getFunction(
Intrinsic::getName(llvm::Intrinsic::instrprof_increment_step)))
if (!F->use_empty())
return true;
if (auto *F = M.getFunction(
Intrinsic::getName(llvm::Intrinsic::instrprof_value_profile)))
if (!F->use_empty())
return true;
return false;
}
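// For reference, the increment intrinsic this pass lowers has the following
// shape in IR (operand values here are made up for illustration):
//
//   call void @llvm.instrprof.increment(i8* <name>, i64 <hash>,
//                                       i32 <num-counters>, i32 <index>)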
bool InstrProfiling::run(Module &M, const TargetLibraryInfo &TLI) {
this->M = &M;
this->TLI = &TLI;
NamesVar = nullptr;
NamesSize = 0;
ProfileDataMap.clear();
UsedVars.clear();
getMemOPSizeRangeFromOption(MemOPSizeRange, MemOPSizeRangeStart,
MemOPSizeRangeLast);
TT = Triple(M.getTargetTriple());
// Emit the runtime hook even if no counters are present.
bool MadeChange = emitRuntimeHook();
// Improve compile time by avoiding linear scans when there is no work.
GlobalVariable *CoverageNamesVar =
M.getNamedGlobal(getCoverageUnusedNamesVarName());
if (!containsProfilingIntrinsics(M) && !CoverageNamesVar)
return MadeChange;
// We did not know how many value sites there would be inside
// the instrumented function when instrumenting it. This counts the number
// of instrumented target value sites so it can be entered as a field in
// the profile data variable.
for (Function &F : M) {
InstrProfIncrementInst *FirstProfIncInst = nullptr;
for (BasicBlock &BB : F)
for (auto I = BB.begin(), E = BB.end(); I != E; I++)
if (auto *Ind = dyn_cast<InstrProfValueProfileInst>(I))
computeNumValueSiteCounts(Ind);
else if (FirstProfIncInst == nullptr)
FirstProfIncInst = dyn_cast<InstrProfIncrementInst>(I);
// Value profiling intrinsic lowering requires per-function profile data
// variable to be created first.
if (FirstProfIncInst != nullptr)
static_cast<void>(getOrCreateRegionCounters(FirstProfIncInst));
}
for (Function &F : M)
MadeChange |= lowerIntrinsics(&F);
if (CoverageNamesVar) {
lowerCoverageData(CoverageNamesVar);
MadeChange = true;
}
if (!MadeChange)
return false;
emitVNodes();
emitNameData();
emitRegistration();
emitUses();
emitInitialization();
return true;
}
static FunctionCallee
getOrInsertValueProfilingCall(Module &M, const TargetLibraryInfo &TLI,
bool IsRange = false) {
LLVMContext &Ctx = M.getContext();
auto *ReturnTy = Type::getVoidTy(M.getContext());
AttributeList AL;
if (auto AK = TLI.getExtAttrForI32Param(false))
AL = AL.addParamAttribute(M.getContext(), 2, AK);
if (!IsRange) {
Type *ParamTypes[] = {
#define VALUE_PROF_FUNC_PARAM(ParamType, ParamName, ParamLLVMType) ParamLLVMType
#include "llvm/ProfileData/InstrProfData.inc"
};
auto *ValueProfilingCallTy =
FunctionType::get(ReturnTy, makeArrayRef(ParamTypes), false);
return M.getOrInsertFunction(getInstrProfValueProfFuncName(),
ValueProfilingCallTy, AL);
} else {
Type *RangeParamTypes[] = {
#define VALUE_RANGE_PROF 1
#define VALUE_PROF_FUNC_PARAM(ParamType, ParamName, ParamLLVMType) ParamLLVMType
#include "llvm/ProfileData/InstrProfData.inc"
#undef VALUE_RANGE_PROF
};
auto *ValueRangeProfilingCallTy =
FunctionType::get(ReturnTy, makeArrayRef(RangeParamTypes), false);
return M.getOrInsertFunction(getInstrProfValueRangeProfFuncName(),
ValueRangeProfilingCallTy, AL);
}
}
void InstrProfiling::computeNumValueSiteCounts(InstrProfValueProfileInst *Ind) {
GlobalVariable *Name = Ind->getName();
uint64_t ValueKind = Ind->getValueKind()->getZExtValue();
uint64_t Index = Ind->getIndex()->getZExtValue();
auto It = ProfileDataMap.find(Name);
if (It == ProfileDataMap.end()) {
PerFunctionProfileData PD;
PD.NumValueSites[ValueKind] = Index + 1;
ProfileDataMap[Name] = PD;
} else if (It->second.NumValueSites[ValueKind] <= Index)
It->second.NumValueSites[ValueKind] = Index + 1;
}
void InstrProfiling::lowerValueProfileInst(InstrProfValueProfileInst *Ind) {
GlobalVariable *Name = Ind->getName();
auto It = ProfileDataMap.find(Name);
assert(It != ProfileDataMap.end() && It->second.DataVar &&
"value profiling detected in function with no counter incerement");
GlobalVariable *DataVar = It->second.DataVar;
uint64_t ValueKind = Ind->getValueKind()->getZExtValue();
uint64_t Index = Ind->getIndex()->getZExtValue();
for (uint32_t Kind = IPVK_First; Kind < ValueKind; ++Kind)
Index += It->second.NumValueSites[Kind];
IRBuilder<> Builder(Ind);
bool IsRange = (Ind->getValueKind()->getZExtValue() ==
llvm::InstrProfValueKind::IPVK_MemOPSize);
CallInst *Call = nullptr;
if (!IsRange) {
Value *Args[3] = {Ind->getTargetValue(),
Builder.CreateBitCast(DataVar, Builder.getInt8PtrTy()),
Builder.getInt32(Index)};
Call = Builder.CreateCall(getOrInsertValueProfilingCall(*M, *TLI), Args);
} else {
Value *Args[6] = {
Ind->getTargetValue(),
Builder.CreateBitCast(DataVar, Builder.getInt8PtrTy()),
Builder.getInt32(Index),
Builder.getInt64(MemOPSizeRangeStart),
Builder.getInt64(MemOPSizeRangeLast),
Builder.getInt64(MemOPSizeLarge == 0 ? INT64_MIN : MemOPSizeLarge)};
Call =
Builder.CreateCall(getOrInsertValueProfilingCall(*M, *TLI, true), Args);
}
if (auto AK = TLI->getExtAttrForI32Param(false))
Call->addParamAttr(2, AK);
Ind->replaceAllUsesWith(Call);
Ind->eraseFromParent();
}
void InstrProfiling::lowerIncrement(InstrProfIncrementInst *Inc) {
GlobalVariable *Counters = getOrCreateRegionCounters(Inc);
IRBuilder<> Builder(Inc);
uint64_t Index = Inc->getIndex()->getZExtValue();
Value *Addr = Builder.CreateConstInBoundsGEP2_64(Counters->getValueType(),
Counters, 0, Index);
if (Options.Atomic || AtomicCounterUpdateAll) {
Builder.CreateAtomicRMW(AtomicRMWInst::Add, Addr, Inc->getStep(),
AtomicOrdering::Monotonic);
} else {
Value *IncStep = Inc->getStep();
Value *Load = Builder.CreateLoad(IncStep->getType(), Addr, "pgocount");
auto *Count = Builder.CreateAdd(Load, Inc->getStep());
auto *Store = Builder.CreateStore(Count, Addr);
if (isCounterPromotionEnabled())
PromotionCandidates.emplace_back(cast<Instruction>(Load), Store);
}
Inc->eraseFromParent();
}
void InstrProfiling::lowerCoverageData(GlobalVariable *CoverageNamesVar) {
ConstantArray *Names =
cast<ConstantArray>(CoverageNamesVar->getInitializer());
for (unsigned I = 0, E = Names->getNumOperands(); I < E; ++I) {
Constant *NC = Names->getOperand(I);
Value *V = NC->stripPointerCasts();
assert(isa<GlobalVariable>(V) && "Missing reference to function name");
GlobalVariable *Name = cast<GlobalVariable>(V);
Name->setLinkage(GlobalValue::PrivateLinkage);
ReferencedNames.push_back(Name);
NC->dropAllReferences();
}
CoverageNamesVar->eraseFromParent();
}
/// Get the name of a profiling variable for a particular function.
static std::string getVarName(InstrProfIncrementInst *Inc, StringRef Prefix) {
StringRef NamePrefix = getInstrProfNameVarPrefix();
StringRef Name = Inc->getName()->getName().substr(NamePrefix.size());
Function *F = Inc->getParent()->getParent();
Module *M = F->getParent();
if (!DoHashBasedCounterSplit || !isIRPGOFlagSet(M) ||
!canRenameComdatFunc(*F))
return (Prefix + Name).str();
uint64_t FuncHash = Inc->getHash()->getZExtValue();
SmallVector<char, 24> HashPostfix;
if (Name.endswith((Twine(".") + Twine(FuncHash)).toStringRef(HashPostfix)))
return (Prefix + Name).str();
return (Prefix + Name + "." + Twine(FuncHash)).str();
}
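// Example (hypothetical function): with DoHashBasedCounterSplit in effect, a
// comdat function foo with CFG hash 0x1234 gets variables named like
// "__profc_foo.4660" rather than "__profc_foo", so that two versions of foo
// with different CFGs do not end up sharing one counter array.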
static inline bool shouldRecordFunctionAddr(Function *F) {
// Check the linkage
bool HasAvailableExternallyLinkage = F->hasAvailableExternallyLinkage();
if (!F->hasLinkOnceLinkage() && !F->hasLocalLinkage() &&
!HasAvailableExternallyLinkage)
return true;
// A function marked 'alwaysinline' with available_externally linkage can't
// have its address taken. Doing so would create an undefined external ref to
// the function, which would fail to link.
if (HasAvailableExternallyLinkage &&
F->hasFnAttribute(Attribute::AlwaysInline))
return false;
// Prohibit function address recording if the function is both internal and
// COMDAT. This avoids the profile data variable referencing internal symbols
// in COMDAT.
if (F->hasLocalLinkage() && F->hasComdat())
return false;
// Check uses of this function for other than direct calls or invokes to it.
// Inline virtual functions have linkonce_odr linkage. When a key method
// exists, the vtable will only be emitted in the TU where the key method
// is defined. In a TU where the vtable is not available, the function won't
// be 'addresstaken'. If its address is not recorded here, the profile data
// with the missing address may be picked by the linker, leading to missing
// indirect call target info.
return F->hasAddressTaken() || F->hasLinkOnceLinkage();
}
static bool needsRuntimeRegistrationOfSectionRange(const Triple &TT) {
// Don't do this for Darwin; compiler-rt uses linker magic.
if (TT.isOSDarwin())
return false;
// Use linker script magic to get data/cnts/name start/end.
if (TT.isOSLinux() || TT.isOSFreeBSD() || TT.isOSNetBSD() ||
TT.isOSSolaris() || TT.isOSFuchsia() || TT.isPS4CPU() ||
TT.isOSWindows())
return false;
return true;
}
GlobalVariable *
InstrProfiling::getOrCreateRegionCounters(InstrProfIncrementInst *Inc) {
GlobalVariable *NamePtr = Inc->getName();
auto It = ProfileDataMap.find(NamePtr);
PerFunctionProfileData PD;
if (It != ProfileDataMap.end()) {
if (It->second.RegionCounters)
return It->second.RegionCounters;
PD = It->second;
}
- // Match the linkage and visibility of the name global, except on COFF, where
- // the linkage must be local and consequentially the visibility must be
- // default.
+ // Match the linkage and visibility of the name global. COFF supports using
+ // comdats with internal symbols, so do that if we can.
Function *Fn = Inc->getParent()->getParent();
GlobalValue::LinkageTypes Linkage = NamePtr->getLinkage();
GlobalValue::VisibilityTypes Visibility = NamePtr->getVisibility();
if (TT.isOSBinFormatCOFF()) {
Linkage = GlobalValue::InternalLinkage;
Visibility = GlobalValue::DefaultVisibility;
}
// Move the name variable to the right section. Place them in a COMDAT group
// if the associated function is a COMDAT. This makes sure that only one
// copy of the COMDAT function's counters will be emitted after linking. Keep
// in mind that this pass may run before the inliner, so we need to create a
// new comdat group for the counters and profiling data. If we use the comdat
// of the parent function, that will result in relocations against discarded
// sections.
- Comdat *Cmdt = nullptr;
- GlobalValue::LinkageTypes CounterLinkage = Linkage;
- if (needsComdatForCounter(*Fn, *M)) {
- StringRef CmdtPrefix = getInstrProfComdatPrefix();
+ bool NeedComdat = needsComdatForCounter(*Fn, *M);
+ Comdat *Cmdt = nullptr; // Comdat group.
+ if (NeedComdat) {
if (TT.isOSBinFormatCOFF()) {
- // For COFF, the comdat group name must be the name of a symbol in the
- // group. Use the counter variable name, and upgrade its linkage to
- // something externally visible, like linkonce_odr.
- CmdtPrefix = getInstrProfCountersVarPrefix();
- CounterLinkage = GlobalValue::LinkOnceODRLinkage;
+ // For COFF, put the counters, data, and values each into their own
+ // comdats. We can't use a group because the Visual C++ linker will
+ // report duplicate symbol errors if there are multiple external symbols
+ // with the same name marked IMAGE_COMDAT_SELECT_ASSOCIATIVE.
+ Linkage = GlobalValue::LinkOnceODRLinkage;
+ Visibility = GlobalValue::HiddenVisibility;
+ } else {
+ // Otherwise, create one comdat group for everything.
+ Cmdt = M->getOrInsertComdat(getVarName(Inc, getInstrProfComdatPrefix()));
}
- Cmdt = M->getOrInsertComdat(getVarName(Inc, CmdtPrefix));
}
+ auto MaybeSetComdat = [=](GlobalVariable *GV) {
+ if (NeedComdat)
+ GV->setComdat(Cmdt ? Cmdt : M->getOrInsertComdat(GV->getName()));
+ };
uint64_t NumCounters = Inc->getNumCounters()->getZExtValue();
LLVMContext &Ctx = M->getContext();
ArrayType *CounterTy = ArrayType::get(Type::getInt64Ty(Ctx), NumCounters);
// Create the counters variable.
auto *CounterPtr =
new GlobalVariable(*M, CounterTy, false, Linkage,
Constant::getNullValue(CounterTy),
getVarName(Inc, getInstrProfCountersVarPrefix()));
CounterPtr->setVisibility(Visibility);
CounterPtr->setSection(
getInstrProfSectionName(IPSK_cnts, TT.getObjectFormat()));
CounterPtr->setAlignment(8);
- CounterPtr->setComdat(Cmdt);
- CounterPtr->setLinkage(CounterLinkage);
+ MaybeSetComdat(CounterPtr);
+ CounterPtr->setLinkage(Linkage);
auto *Int8PtrTy = Type::getInt8PtrTy(Ctx);
// Allocate statically the array of pointers to value profile nodes for
// the current function.
Constant *ValuesPtrExpr = ConstantPointerNull::get(Int8PtrTy);
if (ValueProfileStaticAlloc && !needsRuntimeRegistrationOfSectionRange(TT)) {
uint64_t NS = 0;
for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
NS += PD.NumValueSites[Kind];
if (NS) {
ArrayType *ValuesTy = ArrayType::get(Type::getInt64Ty(Ctx), NS);
auto *ValuesVar =
new GlobalVariable(*M, ValuesTy, false, Linkage,
Constant::getNullValue(ValuesTy),
getVarName(Inc, getInstrProfValuesVarPrefix()));
ValuesVar->setVisibility(Visibility);
ValuesVar->setSection(
getInstrProfSectionName(IPSK_vals, TT.getObjectFormat()));
ValuesVar->setAlignment(8);
- ValuesVar->setComdat(Cmdt);
+ MaybeSetComdat(ValuesVar);
ValuesPtrExpr =
ConstantExpr::getBitCast(ValuesVar, Type::getInt8PtrTy(Ctx));
}
}
// Create data variable.
auto *Int16Ty = Type::getInt16Ty(Ctx);
auto *Int16ArrayTy = ArrayType::get(Int16Ty, IPVK_Last + 1);
Type *DataTypes[] = {
#define INSTR_PROF_DATA(Type, LLVMType, Name, Init) LLVMType,
#include "llvm/ProfileData/InstrProfData.inc"
};
auto *DataTy = StructType::get(Ctx, makeArrayRef(DataTypes));
Constant *FunctionAddr = shouldRecordFunctionAddr(Fn)
? ConstantExpr::getBitCast(Fn, Int8PtrTy)
: ConstantPointerNull::get(Int8PtrTy);
Constant *Int16ArrayVals[IPVK_Last + 1];
for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
Int16ArrayVals[Kind] = ConstantInt::get(Int16Ty, PD.NumValueSites[Kind]);
Constant *DataVals[] = {
#define INSTR_PROF_DATA(Type, LLVMType, Name, Init) Init,
#include "llvm/ProfileData/InstrProfData.inc"
};
auto *Data = new GlobalVariable(*M, DataTy, false, Linkage,
ConstantStruct::get(DataTy, DataVals),
getVarName(Inc, getInstrProfDataVarPrefix()));
Data->setVisibility(Visibility);
Data->setSection(getInstrProfSectionName(IPSK_data, TT.getObjectFormat()));
Data->setAlignment(INSTR_PROF_DATA_ALIGNMENT);
- Data->setComdat(Cmdt);
+ MaybeSetComdat(Data);
+ Data->setLinkage(Linkage);
PD.RegionCounters = CounterPtr;
PD.DataVar = Data;
ProfileDataMap[NamePtr] = PD;
// Mark the data variable as used so that it isn't stripped out.
UsedVars.push_back(Data);
// Now that the linkage set by the FE has been passed to the data and counter
// variables, reset Name variable's linkage and visibility to private so that
// it can be removed later by the compiler.
NamePtr->setLinkage(GlobalValue::PrivateLinkage);
// Collect the referenced names to be used by emitNameData.
ReferencedNames.push_back(NamePtr);
return CounterPtr;
}
void InstrProfiling::emitVNodes() {
if (!ValueProfileStaticAlloc)
return;
// For now only support this on platforms that do
// not require runtime registration to discover
// named section start/end.
if (needsRuntimeRegistrationOfSectionRange(TT))
return;
size_t TotalNS = 0;
for (auto &PD : ProfileDataMap) {
for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
TotalNS += PD.second.NumValueSites[Kind];
}
if (!TotalNS)
return;
uint64_t NumCounters = TotalNS * NumCountersPerValueSite;
// Heuristic for small programs with very few total value sites.
// The default value of vp-counters-per-site is chosen based on
// the observation that large apps usually have a low percentage
// of value sites that actually have any profile data, and thus
// the average number of counters per site is low. For small
// apps with very few sites, this may not be true. Bump up the
// number of counters in this case.
#define INSTR_PROF_MIN_VAL_COUNTS 10
if (NumCounters < INSTR_PROF_MIN_VAL_COUNTS)
NumCounters = std::max(INSTR_PROF_MIN_VAL_COUNTS, (int)NumCounters * 2);
auto &Ctx = M->getContext();
Type *VNodeTypes[] = {
#define INSTR_PROF_VALUE_NODE(Type, LLVMType, Name, Init) LLVMType,
#include "llvm/ProfileData/InstrProfData.inc"
};
auto *VNodeTy = StructType::get(Ctx, makeArrayRef(VNodeTypes));
ArrayType *VNodesTy = ArrayType::get(VNodeTy, NumCounters);
auto *VNodesVar = new GlobalVariable(
*M, VNodesTy, false, GlobalValue::PrivateLinkage,
Constant::getNullValue(VNodesTy), getInstrProfVNodesVarName());
VNodesVar->setSection(
getInstrProfSectionName(IPSK_vnodes, TT.getObjectFormat()));
UsedVars.push_back(VNodesVar);
}
void InstrProfiling::emitNameData() {
std::string UncompressedData;
if (ReferencedNames.empty())
return;
std::string CompressedNameStr;
if (Error E = collectPGOFuncNameStrings(ReferencedNames, CompressedNameStr,
DoNameCompression)) {
report_fatal_error(toString(std::move(E)), false);
}
auto &Ctx = M->getContext();
auto *NamesVal = ConstantDataArray::getString(
Ctx, StringRef(CompressedNameStr), false);
NamesVar = new GlobalVariable(*M, NamesVal->getType(), true,
GlobalValue::PrivateLinkage, NamesVal,
getInstrProfNamesVarName());
NamesSize = CompressedNameStr.size();
NamesVar->setSection(
getInstrProfSectionName(IPSK_name, TT.getObjectFormat()));
// On COFF, it's important to reduce the alignment down to 1 to prevent the
// linker from inserting padding before the start of the names section or
// between names entries.
NamesVar->setAlignment(1);
UsedVars.push_back(NamesVar);
for (auto *NamePtr : ReferencedNames)
NamePtr->eraseFromParent();
}
void InstrProfiling::emitRegistration() {
if (!needsRuntimeRegistrationOfSectionRange(TT))
return;
// Construct the function.
auto *VoidTy = Type::getVoidTy(M->getContext());
auto *VoidPtrTy = Type::getInt8PtrTy(M->getContext());
auto *Int64Ty = Type::getInt64Ty(M->getContext());
auto *RegisterFTy = FunctionType::get(VoidTy, false);
auto *RegisterF = Function::Create(RegisterFTy, GlobalValue::InternalLinkage,
getInstrProfRegFuncsName(), M);
RegisterF->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
if (Options.NoRedZone)
RegisterF->addFnAttr(Attribute::NoRedZone);
auto *RuntimeRegisterTy = FunctionType::get(VoidTy, VoidPtrTy, false);
auto *RuntimeRegisterF =
Function::Create(RuntimeRegisterTy, GlobalVariable::ExternalLinkage,
getInstrProfRegFuncName(), M);
IRBuilder<> IRB(BasicBlock::Create(M->getContext(), "", RegisterF));
for (Value *Data : UsedVars)
if (Data != NamesVar && !isa<Function>(Data))
IRB.CreateCall(RuntimeRegisterF, IRB.CreateBitCast(Data, VoidPtrTy));
if (NamesVar) {
Type *ParamTypes[] = {VoidPtrTy, Int64Ty};
auto *NamesRegisterTy =
FunctionType::get(VoidTy, makeArrayRef(ParamTypes), false);
auto *NamesRegisterF =
Function::Create(NamesRegisterTy, GlobalVariable::ExternalLinkage,
getInstrProfNamesRegFuncName(), M);
IRB.CreateCall(NamesRegisterF, {IRB.CreateBitCast(NamesVar, VoidPtrTy),
IRB.getInt64(NamesSize)});
}
IRB.CreateRetVoid();
}
bool InstrProfiling::emitRuntimeHook() {
// We expect the linker to be invoked with the -u<hook_var> flag on Linux,
// in which case there is no need to emit the user function.
if (TT.isOSLinux())
return false;
// If the module has provided its own runtime, we don't need to do anything.
if (M->getGlobalVariable(getInstrProfRuntimeHookVarName()))
return false;
// Declare an external variable that will pull in the runtime initialization.
auto *Int32Ty = Type::getInt32Ty(M->getContext());
auto *Var =
new GlobalVariable(*M, Int32Ty, false, GlobalValue::ExternalLinkage,
nullptr, getInstrProfRuntimeHookVarName());
// Make a function that uses it.
auto *User = Function::Create(FunctionType::get(Int32Ty, false),
GlobalValue::LinkOnceODRLinkage,
getInstrProfRuntimeHookVarUseFuncName(), M);
User->addFnAttr(Attribute::NoInline);
if (Options.NoRedZone)
User->addFnAttr(Attribute::NoRedZone);
User->setVisibility(GlobalValue::HiddenVisibility);
if (TT.supportsCOMDAT())
User->setComdat(M->getOrInsertComdat(User->getName()));
IRBuilder<> IRB(BasicBlock::Create(M->getContext(), "", User));
auto *Load = IRB.CreateLoad(Int32Ty, Var);
IRB.CreateRet(Load);
// Mark the user variable as used so that it isn't stripped out.
UsedVars.push_back(User);
return true;
}
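// Sketch of what emitRuntimeHook() produces (the IR below is illustrative,
// not emitted verbatim):
//
//   @__llvm_profile_runtime = external global i32
//   define linkonce_odr hidden i32 @__llvm_profile_runtime_user() comdat {
//     %1 = load i32, i32* @__llvm_profile_runtime
//     ret i32 %1
//   }
//
// Keeping a live use of the external variable forces the linker to pull in
// the profiling runtime object that defines it.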
void InstrProfiling::emitUses() {
if (!UsedVars.empty())
appendToUsed(*M, UsedVars);
}
void InstrProfiling::emitInitialization() {
// Create the ProfileFileName variable. Don't do this for the
// context-sensitive instrumentation lowering: that lowering runs after
// LTO/ThinLTO linking, and the PGOInstrumentationGenCreateVar pass should
// have already created the variable before LTO/ThinLTO linking.
if (!IsCS)
createProfileFileNameVar(*M, Options.InstrProfileOutput);
Function *RegisterF = M->getFunction(getInstrProfRegFuncsName());
if (!RegisterF)
return;
// Create the initialization function.
auto *VoidTy = Type::getVoidTy(M->getContext());
auto *F = Function::Create(FunctionType::get(VoidTy, false),
GlobalValue::InternalLinkage,
getInstrProfInitFuncName(), M);
F->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
F->addFnAttr(Attribute::NoInline);
if (Options.NoRedZone)
F->addFnAttr(Attribute::NoRedZone);
// Add the basic block and the necessary calls.
IRBuilder<> IRB(BasicBlock::Create(M->getContext(), "", F));
IRB.CreateCall(RegisterF, {});
IRB.CreateRetVoid();
appendToGlobalCtors(*M, F, 0);
}
Index: stable/12/contrib/llvm-project/llvm/lib/Transforms/Scalar/GVN.cpp
===================================================================
--- stable/12/contrib/llvm-project/llvm/lib/Transforms/Scalar/GVN.cpp (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/lib/Transforms/Scalar/GVN.cpp (revision 356465)
@@ -1,2589 +1,2627 @@
//===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions. It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include "llvm/Transforms/Utils/VNCoercion.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>
using namespace llvm;
using namespace llvm::gvn;
using namespace llvm::VNCoercion;
using namespace PatternMatch;
#define DEBUG_TYPE "gvn"
STATISTIC(NumGVNInstr, "Number of instructions deleted");
STATISTIC(NumGVNLoad, "Number of loads deleted");
STATISTIC(NumGVNPRE, "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumGVNSimpl, "Number of instructions simplified");
STATISTIC(NumGVNEqProp, "Number of equalities propagated");
STATISTIC(NumPRELoad, "Number of loads PRE'd");
static cl::opt<bool> EnablePRE("enable-pre",
cl::init(true), cl::Hidden);
static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));
static cl::opt<bool> EnableMemDep("enable-gvn-memdep", cl::init(true));
// Maximum allowed recursion depth.
static cl::opt<uint32_t>
MaxRecurseDepth("gvn-max-recurse-depth", cl::Hidden, cl::init(1000), cl::ZeroOrMore,
cl::desc("Max recurse depth in GVN (default = 1000)"));
static cl::opt<uint32_t> MaxNumDeps(
"gvn-max-num-deps", cl::Hidden, cl::init(100), cl::ZeroOrMore,
cl::desc("Max number of dependences to attempt Load PRE (default = 100)"));
struct llvm::GVN::Expression {
uint32_t opcode;
Type *type;
bool commutative = false;
SmallVector<uint32_t, 4> varargs;
Expression(uint32_t o = ~2U) : opcode(o) {}
bool operator==(const Expression &other) const {
if (opcode != other.opcode)
return false;
if (opcode == ~0U || opcode == ~1U)
return true;
if (type != other.type)
return false;
if (varargs != other.varargs)
return false;
return true;
}
friend hash_code hash_value(const Expression &Value) {
return hash_combine(
Value.opcode, Value.type,
hash_combine_range(Value.varargs.begin(), Value.varargs.end()));
}
};
namespace llvm {
template <> struct DenseMapInfo<GVN::Expression> {
static inline GVN::Expression getEmptyKey() { return ~0U; }
static inline GVN::Expression getTombstoneKey() { return ~1U; }
static unsigned getHashValue(const GVN::Expression &e) {
using llvm::hash_value;
return static_cast<unsigned>(hash_value(e));
}
static bool isEqual(const GVN::Expression &LHS, const GVN::Expression &RHS) {
return LHS == RHS;
}
};
} // end namespace llvm
/// Represents a particular available value that we know how to materialize.
/// Materialization of an AvailableValue never fails. An AvailableValue is
/// implicitly associated with a rematerialization point which is the
/// location of the instruction from which it was formed.
struct llvm::gvn::AvailableValue {
enum ValType {
SimpleVal, // A simple offsetted value that is accessed.
LoadVal, // A value produced by a load.
MemIntrin, // A memory intrinsic which is loaded from.
UndefVal // An UndefValue representing a value from a dead block (which
// is not yet physically removed from the CFG).
};
/// V - The value that is live out of the block.
PointerIntPair<Value *, 2, ValType> Val;
/// Offset - The byte offset in Val that is interesting for the load query.
unsigned Offset;
static AvailableValue get(Value *V, unsigned Offset = 0) {
AvailableValue Res;
Res.Val.setPointer(V);
Res.Val.setInt(SimpleVal);
Res.Offset = Offset;
return Res;
}
static AvailableValue getMI(MemIntrinsic *MI, unsigned Offset = 0) {
AvailableValue Res;
Res.Val.setPointer(MI);
Res.Val.setInt(MemIntrin);
Res.Offset = Offset;
return Res;
}
static AvailableValue getLoad(LoadInst *LI, unsigned Offset = 0) {
AvailableValue Res;
Res.Val.setPointer(LI);
Res.Val.setInt(LoadVal);
Res.Offset = Offset;
return Res;
}
static AvailableValue getUndef() {
AvailableValue Res;
Res.Val.setPointer(nullptr);
Res.Val.setInt(UndefVal);
Res.Offset = 0;
return Res;
}
bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
bool isCoercedLoadValue() const { return Val.getInt() == LoadVal; }
bool isMemIntrinValue() const { return Val.getInt() == MemIntrin; }
bool isUndefValue() const { return Val.getInt() == UndefVal; }
Value *getSimpleValue() const {
assert(isSimpleValue() && "Wrong accessor");
return Val.getPointer();
}
LoadInst *getCoercedLoadValue() const {
assert(isCoercedLoadValue() && "Wrong accessor");
return cast<LoadInst>(Val.getPointer());
}
MemIntrinsic *getMemIntrinValue() const {
assert(isMemIntrinValue() && "Wrong accessor");
return cast<MemIntrinsic>(Val.getPointer());
}
/// Emit code at the specified insertion point to adjust the value defined
/// here to the specified type. This handles various coercion cases.
Value *MaterializeAdjustedValue(LoadInst *LI, Instruction *InsertPt,
GVN &gvn) const;
};
/// Represents an AvailableValue which can be rematerialized at the end of
/// the associated BasicBlock.
struct llvm::gvn::AvailableValueInBlock {
/// BB - The basic block in question.
BasicBlock *BB;
/// AV - The actual available value
AvailableValue AV;
static AvailableValueInBlock get(BasicBlock *BB, AvailableValue &&AV) {
AvailableValueInBlock Res;
Res.BB = BB;
Res.AV = std::move(AV);
return Res;
}
static AvailableValueInBlock get(BasicBlock *BB, Value *V,
unsigned Offset = 0) {
return get(BB, AvailableValue::get(V, Offset));
}
static AvailableValueInBlock getUndef(BasicBlock *BB) {
return get(BB, AvailableValue::getUndef());
}
/// Emit code at the end of this block to adjust the value defined here to
/// the specified type. This handles various coercion cases.
Value *MaterializeAdjustedValue(LoadInst *LI, GVN &gvn) const {
return AV.MaterializeAdjustedValue(LI, BB->getTerminator(), gvn);
}
};
//===----------------------------------------------------------------------===//
// ValueTable Internal Functions
//===----------------------------------------------------------------------===//
GVN::Expression GVN::ValueTable::createExpr(Instruction *I) {
Expression e;
e.type = I->getType();
e.opcode = I->getOpcode();
for (Instruction::op_iterator OI = I->op_begin(), OE = I->op_end();
OI != OE; ++OI)
e.varargs.push_back(lookupOrAdd(*OI));
if (I->isCommutative()) {
// Ensure that commutative instructions that only differ by a permutation
// of their operands get the same value number by sorting the operand value
// numbers. Since all commutative instructions have two operands it is more
// efficient to sort by hand rather than using, say, std::sort.
assert(I->getNumOperands() == 2 && "Unsupported commutative instruction!");
if (e.varargs[0] > e.varargs[1])
std::swap(e.varargs[0], e.varargs[1]);
e.commutative = true;
}
if (CmpInst *C = dyn_cast<CmpInst>(I)) {
// Sort the operand value numbers so x<y and y>x get the same value number.
CmpInst::Predicate Predicate = C->getPredicate();
if (e.varargs[0] > e.varargs[1]) {
std::swap(e.varargs[0], e.varargs[1]);
Predicate = CmpInst::getSwappedPredicate(Predicate);
}
e.opcode = (C->getOpcode() << 8) | Predicate;
e.commutative = true;
} else if (InsertValueInst *E = dyn_cast<InsertValueInst>(I)) {
for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
II != IE; ++II)
e.varargs.push_back(*II);
}
return e;
}
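// Example of the commutative canonicalization above: with value numbers
// vn(%a) = 5 and vn(%b) = 9, both "add %a, %b" and "add %b, %a" end up with
// varargs {5, 9} after the swap, so they receive the same value number.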
GVN::Expression GVN::ValueTable::createCmpExpr(unsigned Opcode,
CmpInst::Predicate Predicate,
Value *LHS, Value *RHS) {
assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
"Not a comparison!");
Expression e;
e.type = CmpInst::makeCmpResultType(LHS->getType());
e.varargs.push_back(lookupOrAdd(LHS));
e.varargs.push_back(lookupOrAdd(RHS));
// Sort the operand value numbers so x<y and y>x get the same value number.
if (e.varargs[0] > e.varargs[1]) {
std::swap(e.varargs[0], e.varargs[1]);
Predicate = CmpInst::getSwappedPredicate(Predicate);
}
e.opcode = (Opcode << 8) | Predicate;
e.commutative = true;
return e;
}
GVN::Expression GVN::ValueTable::createExtractvalueExpr(ExtractValueInst *EI) {
assert(EI && "Not an ExtractValueInst?");
Expression e;
e.type = EI->getType();
e.opcode = 0;
WithOverflowInst *WO = dyn_cast<WithOverflowInst>(EI->getAggregateOperand());
if (WO != nullptr && EI->getNumIndices() == 1 && *EI->idx_begin() == 0) {
// EI is an extract from one of our with.overflow intrinsics. Synthesize
// a semantically equivalent expression instead of an extract value
// expression.
e.opcode = WO->getBinaryOp();
e.varargs.push_back(lookupOrAdd(WO->getLHS()));
e.varargs.push_back(lookupOrAdd(WO->getRHS()));
return e;
}
// Not a recognised intrinsic. Fall back to producing an extract value
// expression.
e.opcode = EI->getOpcode();
for (Instruction::op_iterator OI = EI->op_begin(), OE = EI->op_end();
OI != OE; ++OI)
e.varargs.push_back(lookupOrAdd(*OI));
for (ExtractValueInst::idx_iterator II = EI->idx_begin(), IE = EI->idx_end();
II != IE; ++II)
e.varargs.push_back(*II);
return e;
}
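// Example of the with.overflow special case above (illustrative IR): given
//
//   %s = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %v = extractvalue { i32, i1 } %s, 0
//
// %v is numbered as if it were "add i32 %a, %b", so a plain add of the same
// operands elsewhere becomes redundant with it.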
//===----------------------------------------------------------------------===//
// ValueTable External Functions
//===----------------------------------------------------------------------===//
GVN::ValueTable::ValueTable() = default;
GVN::ValueTable::ValueTable(const ValueTable &) = default;
GVN::ValueTable::ValueTable(ValueTable &&) = default;
GVN::ValueTable::~ValueTable() = default;
/// add - Insert a value into the table with a specified value number.
void GVN::ValueTable::add(Value *V, uint32_t num) {
valueNumbering.insert(std::make_pair(V, num));
if (PHINode *PN = dyn_cast<PHINode>(V))
NumberingPhi[num] = PN;
}
uint32_t GVN::ValueTable::lookupOrAddCall(CallInst *C) {
if (AA->doesNotAccessMemory(C)) {
Expression exp = createExpr(C);
uint32_t e = assignExpNewValueNum(exp).first;
valueNumbering[C] = e;
return e;
} else if (MD && AA->onlyReadsMemory(C)) {
Expression exp = createExpr(C);
auto ValNum = assignExpNewValueNum(exp);
if (ValNum.second) {
valueNumbering[C] = ValNum.first;
return ValNum.first;
}
MemDepResult local_dep = MD->getDependency(C);
if (!local_dep.isDef() && !local_dep.isNonLocal()) {
valueNumbering[C] = nextValueNumber;
return nextValueNumber++;
}
if (local_dep.isDef()) {
CallInst* local_cdep = cast<CallInst>(local_dep.getInst());
if (local_cdep->getNumArgOperands() != C->getNumArgOperands()) {
valueNumbering[C] = nextValueNumber;
return nextValueNumber++;
}
for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
uint32_t cd_vn = lookupOrAdd(local_cdep->getArgOperand(i));
if (c_vn != cd_vn) {
valueNumbering[C] = nextValueNumber;
return nextValueNumber++;
}
}
uint32_t v = lookupOrAdd(local_cdep);
valueNumbering[C] = v;
return v;
}
// Non-local case.
const MemoryDependenceResults::NonLocalDepInfo &deps =
MD->getNonLocalCallDependency(C);
// FIXME: Move the checking logic to MemDep!
CallInst* cdep = nullptr;
// Check to see if we have a single dominating call instruction that is
// identical to C.
for (unsigned i = 0, e = deps.size(); i != e; ++i) {
const NonLocalDepEntry *I = &deps[i];
if (I->getResult().isNonLocal())
continue;
// We don't handle non-definitions. If we already have a call, reject
// instruction dependencies.
if (!I->getResult().isDef() || cdep != nullptr) {
cdep = nullptr;
break;
}
CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
// FIXME: All duplicated with non-local case.
if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())){
cdep = NonLocalDepCall;
continue;
}
cdep = nullptr;
break;
}
if (!cdep) {
valueNumbering[C] = nextValueNumber;
return nextValueNumber++;
}
if (cdep->getNumArgOperands() != C->getNumArgOperands()) {
valueNumbering[C] = nextValueNumber;
return nextValueNumber++;
}
for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
uint32_t cd_vn = lookupOrAdd(cdep->getArgOperand(i));
if (c_vn != cd_vn) {
valueNumbering[C] = nextValueNumber;
return nextValueNumber++;
}
}
uint32_t v = lookupOrAdd(cdep);
valueNumbering[C] = v;
return v;
} else {
valueNumbering[C] = nextValueNumber;
return nextValueNumber++;
}
}
/// Returns true if a value number exists for the specified value.
bool GVN::ValueTable::exists(Value *V) const { return valueNumbering.count(V) != 0; }
/// lookup_or_add - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t GVN::ValueTable::lookupOrAdd(Value *V) {
DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
if (VI != valueNumbering.end())
return VI->second;
if (!isa<Instruction>(V)) {
valueNumbering[V] = nextValueNumber;
return nextValueNumber++;
}
Instruction* I = cast<Instruction>(V);
Expression exp;
switch (I->getOpcode()) {
case Instruction::Call:
return lookupOrAddCall(cast<CallInst>(I));
case Instruction::FNeg:
case Instruction::Add:
case Instruction::FAdd:
case Instruction::Sub:
case Instruction::FSub:
case Instruction::Mul:
case Instruction::FMul:
case Instruction::UDiv:
case Instruction::SDiv:
case Instruction::FDiv:
case Instruction::URem:
case Instruction::SRem:
case Instruction::FRem:
case Instruction::Shl:
case Instruction::LShr:
case Instruction::AShr:
case Instruction::And:
case Instruction::Or:
case Instruction::Xor:
case Instruction::ICmp:
case Instruction::FCmp:
case Instruction::Trunc:
case Instruction::ZExt:
case Instruction::SExt:
case Instruction::FPToUI:
case Instruction::FPToSI:
case Instruction::UIToFP:
case Instruction::SIToFP:
case Instruction::FPTrunc:
case Instruction::FPExt:
case Instruction::PtrToInt:
case Instruction::IntToPtr:
case Instruction::AddrSpaceCast:
case Instruction::BitCast:
case Instruction::Select:
case Instruction::ExtractElement:
case Instruction::InsertElement:
case Instruction::ShuffleVector:
case Instruction::InsertValue:
case Instruction::GetElementPtr:
exp = createExpr(I);
break;
case Instruction::ExtractValue:
exp = createExtractvalueExpr(cast<ExtractValueInst>(I));
break;
case Instruction::PHI:
valueNumbering[V] = nextValueNumber;
NumberingPhi[nextValueNumber] = cast<PHINode>(V);
return nextValueNumber++;
default:
valueNumbering[V] = nextValueNumber;
return nextValueNumber++;
}
uint32_t e = assignExpNewValueNum(exp).first;
valueNumbering[V] = e;
return e;
}
/// Returns the value number of the specified value. Fails if
/// the value has not yet been numbered.
uint32_t GVN::ValueTable::lookup(Value *V, bool Verify) const {
DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
if (Verify) {
assert(VI != valueNumbering.end() && "Value not numbered?");
return VI->second;
}
return (VI != valueNumbering.end()) ? VI->second : 0;
}
/// Returns the value number of the given comparison,
/// assigning it a new number if it did not have one before. Useful when
/// we deduced the result of a comparison, but don't immediately have an
/// instruction realizing that comparison to hand.
uint32_t GVN::ValueTable::lookupOrAddCmp(unsigned Opcode,
CmpInst::Predicate Predicate,
Value *LHS, Value *RHS) {
Expression exp = createCmpExpr(Opcode, Predicate, LHS, RHS);
return assignExpNewValueNum(exp).first;
}
/// Remove all entries from the ValueTable.
void GVN::ValueTable::clear() {
valueNumbering.clear();
expressionNumbering.clear();
NumberingPhi.clear();
PhiTranslateTable.clear();
nextValueNumber = 1;
Expressions.clear();
ExprIdx.clear();
nextExprNumber = 0;
}
/// Remove a value from the value numbering.
void GVN::ValueTable::erase(Value *V) {
uint32_t Num = valueNumbering.lookup(V);
valueNumbering.erase(V);
// If V is a PHINode, V <--> value number is a one-to-one mapping.
if (isa<PHINode>(V))
NumberingPhi.erase(Num);
}
/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void GVN::ValueTable::verifyRemoved(const Value *V) const {
for (DenseMap<Value*, uint32_t>::const_iterator
I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
assert(I->first != V && "Inst still occurs in value numbering map!");
}
}
//===----------------------------------------------------------------------===//
// GVN Pass
//===----------------------------------------------------------------------===//
PreservedAnalyses GVN::run(Function &F, FunctionAnalysisManager &AM) {
// FIXME: The order of evaluation of these 'getResult' calls is very
// significant! Re-ordering these variables will cause GVN when run alone to
// be less effective! We should fix memdep and basic-aa to not exhibit this
// behavior, but until then don't change the order here.
auto &AC = AM.getResult<AssumptionAnalysis>(F);
auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
auto &AA = AM.getResult<AAManager>(F);
auto &MemDep = AM.getResult<MemoryDependenceAnalysis>(F);
auto *LI = AM.getCachedResult<LoopAnalysis>(F);
auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
bool Changed = runImpl(F, AC, DT, TLI, AA, &MemDep, LI, &ORE);
if (!Changed)
return PreservedAnalyses::all();
PreservedAnalyses PA;
PA.preserve<DominatorTreeAnalysis>();
PA.preserve<GlobalsAA>();
PA.preserve<TargetLibraryAnalysis>();
return PA;
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void GVN::dump(DenseMap<uint32_t, Value*>& d) const {
errs() << "{\n";
for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
E = d.end(); I != E; ++I) {
errs() << I->first << "\n";
I->second->dump();
}
errs() << "}\n";
}
#endif
/// Return true if we can prove that the value
/// we're analyzing is fully available in the specified block. As we go, keep
/// track of which blocks we know are fully alive in FullyAvailableBlocks. This
/// map is actually a four-state map with the following values:
/// 0) we know the block *is not* fully available.
/// 1) we know the block *is* fully available.
/// 2) we do not know whether the block is fully available or not, but we are
/// currently speculating that it will be.
/// 3) we are speculating for this block and have used that to speculate for
/// other blocks.
static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
DenseMap<BasicBlock*, char> &FullyAvailableBlocks,
uint32_t RecurseDepth) {
if (RecurseDepth > MaxRecurseDepth)
return false;
// Optimistically assume that the block is fully available and check to see
// if we already know about this block in one lookup.
std::pair<DenseMap<BasicBlock*, char>::iterator, bool> IV =
FullyAvailableBlocks.insert(std::make_pair(BB, 2));
// If the entry already existed for this block, return the precomputed value.
if (!IV.second) {
// If this is a speculative "available" value, mark it as being used for
// speculation of other blocks.
if (IV.first->second == 2)
IV.first->second = 3;
return IV.first->second != 0;
}
// Otherwise, see if it is fully available in all predecessors.
pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
// If this block has no predecessors, it isn't live-in here.
if (PI == PE)
goto SpeculationFailure;
for (; PI != PE; ++PI)
// If the value isn't fully available in one of our predecessors, then it
// isn't fully available in this block either. Undo our previous
// optimistic assumption and bail out.
if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks,RecurseDepth+1))
goto SpeculationFailure;
return true;
// If we get here, we found out that this is not, after
// all, a fully-available block. We have a problem if we speculated on this and
// used the speculation to mark other blocks as available.
SpeculationFailure:
char &BBVal = FullyAvailableBlocks[BB];
// If we didn't speculate on this, just return with it set to false.
if (BBVal == 2) {
BBVal = 0;
return false;
}
// If we did speculate on this value, we could have blocks set to 1 that are
// incorrect. Walk the (transitive) successors of this block and mark them as
// 0 if set to one.
SmallVector<BasicBlock*, 32> BBWorklist;
BBWorklist.push_back(BB);
do {
BasicBlock *Entry = BBWorklist.pop_back_val();
// Note that this sets blocks to 0 (unavailable) if they happen to not
// already be in FullyAvailableBlocks. This is safe.
char &EntryVal = FullyAvailableBlocks[Entry];
if (EntryVal == 0) continue; // Already unavailable.
// Mark as unavailable.
EntryVal = 0;
BBWorklist.append(succ_begin(Entry), succ_end(Entry));
} while (!BBWorklist.empty());
return false;
}
/// Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate LI. This returns the value
/// that should be used at LI's definition site.
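/// For example (illustrative names): if the load is available as %v1 in
/// predecessor %bb1 and as %v2 in predecessor %bb2, SSAUpdater materializes
///   %phi = phi i32 [ %v1, %bb1 ], [ %v2, %bb2 ]
/// in the join block, and that phi is the value used at LI's definition site.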
static Value *ConstructSSAForLoadSet(LoadInst *LI,
SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
GVN &gvn) {
// Check for the fully redundant, dominating load case. In this case, we can
// just use the dominating value directly.
if (ValuesPerBlock.size() == 1 &&
gvn.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB,
LI->getParent())) {
assert(!ValuesPerBlock[0].AV.isUndefValue() &&
"Dead BB dominate this block");
return ValuesPerBlock[0].MaterializeAdjustedValue(LI, gvn);
}
// Otherwise, we have to construct SSA form.
SmallVector<PHINode*, 8> NewPHIs;
SSAUpdater SSAUpdate(&NewPHIs);
SSAUpdate.Initialize(LI->getType(), LI->getName());
for (const AvailableValueInBlock &AV : ValuesPerBlock) {
BasicBlock *BB = AV.BB;
if (SSAUpdate.HasValueForBlock(BB))
continue;
// If the value is the load that we will be eliminating, and the block it's
// available in is the block that the load is in, then don't add it, as
// SSAUpdater will resolve the value to the relevant phi, which may let it
// avoid phi construction entirely if there's actually only one value.
if (BB == LI->getParent() &&
((AV.AV.isSimpleValue() && AV.AV.getSimpleValue() == LI) ||
(AV.AV.isCoercedLoadValue() && AV.AV.getCoercedLoadValue() == LI)))
continue;
SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LI, gvn));
}
// Perform PHI construction.
return SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());
}
Value *AvailableValue::MaterializeAdjustedValue(LoadInst *LI,
Instruction *InsertPt,
GVN &gvn) const {
Value *Res;
Type *LoadTy = LI->getType();
const DataLayout &DL = LI->getModule()->getDataLayout();
if (isSimpleValue()) {
Res = getSimpleValue();
if (Res->getType() != LoadTy) {
Res = getStoreValueForLoad(Res, Offset, LoadTy, InsertPt, DL);
LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset
<< " " << *getSimpleValue() << '\n'
<< *Res << '\n'
<< "\n\n\n");
}
} else if (isCoercedLoadValue()) {
LoadInst *Load = getCoercedLoadValue();
if (Load->getType() == LoadTy && Offset == 0) {
Res = Load;
} else {
Res = getLoadValueForLoad(Load, Offset, LoadTy, InsertPt, DL);
// We would like to use gvn.markInstructionForDeletion here, but we can't
// because the load is already memoized into the leader map table that GVN
// tracks. It is potentially possible to remove the load from the table,
// but then all of the operations based on it would need to be rehashed.
// Just leave the dead load around.
gvn.getMemDep().removeInstruction(Load);
LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset
<< " " << *getCoercedLoadValue() << '\n'
<< *Res << '\n'
<< "\n\n\n");
}
} else if (isMemIntrinValue()) {
Res = getMemInstValueForLoad(getMemIntrinValue(), Offset, LoadTy,
InsertPt, DL);
LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
<< " " << *getMemIntrinValue() << '\n'
<< *Res << '\n'
<< "\n\n\n");
} else {
assert(isUndefValue() && "Should be UndefVal");
LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL Undef:\n";);
return UndefValue::get(LoadTy);
}
assert(Res && "failed to materialize?");
return Res;
}
static bool isLifetimeStart(const Instruction *Inst) {
if (const IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
return II->getIntrinsicID() == Intrinsic::lifetime_start;
return false;
}
/// Try to locate the three instructions involved in a missed
/// load-elimination case that is due to an intervening store.
static void reportMayClobberedLoad(LoadInst *LI, MemDepResult DepInfo,
DominatorTree *DT,
OptimizationRemarkEmitter *ORE) {
using namespace ore;
User *OtherAccess = nullptr;
OptimizationRemarkMissed R(DEBUG_TYPE, "LoadClobbered", LI);
R << "load of type " << NV("Type", LI->getType()) << " not eliminated"
<< setExtraArgs();
for (auto *U : LI->getPointerOperand()->users())
if (U != LI && (isa<LoadInst>(U) || isa<StoreInst>(U)) &&
DT->dominates(cast<Instruction>(U), LI)) {
// FIXME: for now give up if there are multiple memory accesses that
// dominate the load. We need further analysis to decide which one we
// would be forwarding from.
if (OtherAccess)
OtherAccess = nullptr;
else
OtherAccess = U;
}
if (OtherAccess)
R << " in favor of " << NV("OtherAccess", OtherAccess);
R << " because it is clobbered by " << NV("ClobberedBy", DepInfo.getInst());
ORE->emit(R);
}
bool GVN::AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo,
Value *Address, AvailableValue &Res) {
assert((DepInfo.isDef() || DepInfo.isClobber()) &&
"expected a local dependence");
assert(LI->isUnordered() && "rules below are incorrect for ordered access");
const DataLayout &DL = LI->getModule()->getDataLayout();
Instruction *DepInst = DepInfo.getInst();
if (DepInfo.isClobber()) {
// If the dependence is to a store that writes to a superset of the bits
// read by the load, we can extract the bits we need for the load from the
// stored value.
if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
// Can't forward from non-atomic to atomic without violating memory model.
if (Address && LI->isAtomic() <= DepSI->isAtomic()) {
int Offset =
analyzeLoadFromClobberingStore(LI->getType(), Address, DepSI, DL);
if (Offset != -1) {
Res = AvailableValue::get(DepSI->getValueOperand(), Offset);
return true;
}
}
}
// Check to see if we have something like this:
// load i32* P
// load i8* (P+1)
// if we have this, replace the latter with an extraction from the former.
if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
// If this is a clobber and L is the first instruction in its block, then
// we have the first instruction in the entry block.
// Can't forward from non-atomic to atomic without violating memory model.
if (DepLI != LI && Address && LI->isAtomic() <= DepLI->isAtomic()) {
int Offset =
analyzeLoadFromClobberingLoad(LI->getType(), Address, DepLI, DL);
if (Offset != -1) {
Res = AvailableValue::getLoad(DepLI, Offset);
return true;
}
}
}
// If the clobbering value is a memset/memcpy/memmove, see if we can
// forward a value on from it.
if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInst)) {
if (Address && !LI->isAtomic()) {
int Offset = analyzeLoadFromClobberingMemInst(LI->getType(), Address,
DepMI, DL);
if (Offset != -1) {
Res = AvailableValue::getMI(DepMI, Offset);
return true;
}
}
}
// Nothing is known about this clobber; we have to be conservative.
LLVM_DEBUG(
// fast print dep, using operator<< on instruction is too slow.
dbgs() << "GVN: load "; LI->printAsOperand(dbgs());
dbgs() << " is clobbered by " << *DepInst << '\n';);
if (ORE->allowExtraAnalysis(DEBUG_TYPE))
reportMayClobberedLoad(LI, DepInfo, DT, ORE);
return false;
}
assert(DepInfo.isDef() && "follows from above");
// Loading the allocation -> undef.
if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI) ||
// Loading immediately after lifetime begin -> undef.
isLifetimeStart(DepInst)) {
Res = AvailableValue::get(UndefValue::get(LI->getType()));
return true;
}
// Loading from calloc (which zero initializes memory) -> zero
if (isCallocLikeFn(DepInst, TLI)) {
Res = AvailableValue::get(Constant::getNullValue(LI->getType()));
return true;
}
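// e.g. (illustrative IR):
//   %p = call i8* @calloc(i64 1, i64 4)
//   %q = bitcast i8* %p to i32*
//   %v = load i32, i32* %q      ; GVN forwards 0 for %v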
if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
// Reject loads and stores that are to the same address but are of
// different types if we have to. If the stored value is larger or equal to
// the loaded value, we can reuse it.
if (!canCoerceMustAliasedValueToLoad(S->getValueOperand(), LI->getType(),
DL))
return false;
// Can't forward from non-atomic to atomic without violating memory model.
if (S->isAtomic() < LI->isAtomic())
return false;
Res = AvailableValue::get(S->getValueOperand());
return true;
}
if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
// If the types mismatch and we can't handle it, reject reuse of the load.
// If the stored value is larger or equal to the loaded value, we can reuse
// it.
if (!canCoerceMustAliasedValueToLoad(LD, LI->getType(), DL))
return false;
// Can't forward from non-atomic to atomic without violating memory model.
if (LD->isAtomic() < LI->isAtomic())
return false;
Res = AvailableValue::getLoad(LD);
return true;
}
// Unknown def - must be conservative
LLVM_DEBUG(
// fast print dep, using operator<< on instruction is too slow.
dbgs() << "GVN: load "; LI->printAsOperand(dbgs());
dbgs() << " has unknown def " << *DepInst << '\n';);
return false;
}
void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
AvailValInBlkVect &ValuesPerBlock,
UnavailBlkVect &UnavailableBlocks) {
// Filter out useless results (non-locals, etc). Keep track of the blocks
// where we have a value available, and also of whether we see dependencies
// that produce an unknown value for the load (such as a call that could
// potentially clobber the load).
unsigned NumDeps = Deps.size();
for (unsigned i = 0, e = NumDeps; i != e; ++i) {
BasicBlock *DepBB = Deps[i].getBB();
MemDepResult DepInfo = Deps[i].getResult();
if (DeadBlocks.count(DepBB)) {
// A dead dependent mem-op is disguised as a load that evaluates the same
// value as the load in question.
ValuesPerBlock.push_back(AvailableValueInBlock::getUndef(DepBB));
continue;
}
if (!DepInfo.isDef() && !DepInfo.isClobber()) {
UnavailableBlocks.push_back(DepBB);
continue;
}
// The address being loaded in this non-local block may not be the same as
// the pointer operand of the load if PHI translation occurs. Make sure
// to consider the right address.
Value *Address = Deps[i].getAddress();
AvailableValue AV;
if (AnalyzeLoadAvailability(LI, DepInfo, Address, AV)) {
// Subtlety: because we know this was a non-local dependency, we know
// it's safe to materialize anywhere between the instruction within
// DepInfo and the end of its block.
ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
std::move(AV)));
} else {
UnavailableBlocks.push_back(DepBB);
}
}
assert(NumDeps == ValuesPerBlock.size() + UnavailableBlocks.size() &&
"post condition violation");
}
bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
UnavailBlkVect &UnavailableBlocks) {
// Okay, we have *some* definitions of the value. This means that the value
// is available in some of our (transitive) predecessors. Let's think about
// doing PRE of this load. This will involve inserting a new load into the
// predecessor when it's not available. We could do this in general, but
// prefer to not increase code size. As such, we only do this when we know
// that we only have to insert *one* load (which means we're basically moving
// the load, not inserting a new one).
SmallPtrSet<BasicBlock *, 4> Blockers(UnavailableBlocks.begin(),
UnavailableBlocks.end());
// Let's find the first basic block with more than one predecessor. Walk
// backwards through predecessors if needed.
BasicBlock *LoadBB = LI->getParent();
BasicBlock *TmpBB = LoadBB;
bool IsSafeToSpeculativelyExecute = isSafeToSpeculativelyExecute(LI);
// Check that there are no implicit control flow instructions above our load
// in its block. If there is an instruction that doesn't always pass the
// execution to the following instruction, then moving through it may become
// invalid. For example:
//
// int arr[LEN];
// int index = ???;
// ...
// guard(0 <= index && index < LEN);
// use(arr[index]);
//
// It is illegal to move the array access to any point above the guard,
// because if the index is out of bounds we should deoptimize rather than
// access the array.
// Check that there is no guard in this block above our instruction.
if (!IsSafeToSpeculativelyExecute && ICF->isDominatedByICFIFromSameBlock(LI))
return false;
while (TmpBB->getSinglePredecessor()) {
TmpBB = TmpBB->getSinglePredecessor();
if (TmpBB == LoadBB) // Infinite (unreachable) loop.
return false;
if (Blockers.count(TmpBB))
return false;
// If any of these blocks has more than one successor (i.e. if the edge we
// just traversed was critical), then there are other paths through this
// block along which the load may not be anticipated. Hoisting the load
// above this block would be adding the load to execution paths along
// which it was not previously executed.
if (TmpBB->getTerminator()->getNumSuccessors() != 1)
return false;
// Check that there is no implicit control flow in a block above.
if (!IsSafeToSpeculativelyExecute && ICF->hasICF(TmpBB))
return false;
}
assert(TmpBB);
LoadBB = TmpBB;
// Check to see how many predecessors have the loaded value fully
// available.
MapVector<BasicBlock *, Value *> PredLoads;
DenseMap<BasicBlock*, char> FullyAvailableBlocks;
for (const AvailableValueInBlock &AV : ValuesPerBlock)
FullyAvailableBlocks[AV.BB] = true;
for (BasicBlock *UnavailableBB : UnavailableBlocks)
FullyAvailableBlocks[UnavailableBB] = false;
SmallVector<BasicBlock *, 4> CriticalEdgePred;
for (BasicBlock *Pred : predecessors(LoadBB)) {
// If any predecessor block is an EH pad that does not allow non-PHI
// instructions before the terminator, we can't PRE the load.
if (Pred->getTerminator()->isEHPad()) {
LLVM_DEBUG(
dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD PREDECESSOR '"
<< Pred->getName() << "': " << *LI << '\n');
return false;
}
if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks, 0)) {
continue;
}
if (Pred->getTerminator()->getNumSuccessors() != 1) {
if (isa<IndirectBrInst>(Pred->getTerminator())) {
LLVM_DEBUG(
dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
<< Pred->getName() << "': " << *LI << '\n');
return false;
}
// FIXME: Can we support the fallthrough edge?
if (isa<CallBrInst>(Pred->getTerminator())) {
LLVM_DEBUG(
dbgs() << "COULD NOT PRE LOAD BECAUSE OF CALLBR CRITICAL EDGE '"
<< Pred->getName() << "': " << *LI << '\n');
return false;
}
if (LoadBB->isEHPad()) {
LLVM_DEBUG(
dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD CRITICAL EDGE '"
<< Pred->getName() << "': " << *LI << '\n');
return false;
}
CriticalEdgePred.push_back(Pred);
} else {
// Only add the predecessors that will not be split for now.
PredLoads[Pred] = nullptr;
}
}
// Decide whether PRE is profitable for this load.
unsigned NumUnavailablePreds = PredLoads.size() + CriticalEdgePred.size();
assert(NumUnavailablePreds != 0 &&
"Fully available value should already be eliminated!");
// If this load is unavailable in multiple predecessors, reject it.
// FIXME: If we could restructure the CFG, we could make a common pred with
// all the preds that don't have an available LI and insert a new load into
// that one block.
if (NumUnavailablePreds != 1)
return false;
// Split critical edges, and update the unavailable predecessors accordingly.
for (BasicBlock *OrigPred : CriticalEdgePred) {
BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB);
assert(!PredLoads.count(OrigPred) && "Split edges shouldn't be in map!");
PredLoads[NewPred] = nullptr;
LLVM_DEBUG(dbgs() << "Split critical edge " << OrigPred->getName() << "->"
<< LoadBB->getName() << '\n');
}
// Check if the load can safely be moved to all the unavailable predecessors.
bool CanDoPRE = true;
const DataLayout &DL = LI->getModule()->getDataLayout();
SmallVector<Instruction*, 8> NewInsts;
for (auto &PredLoad : PredLoads) {
BasicBlock *UnavailablePred = PredLoad.first;
// Do PHI translation to get its value in the predecessor if necessary. The
// returned pointer (if non-null) is guaranteed to dominate UnavailablePred.
// If all preds have a single successor, then we know it is safe to insert
// the load on the pred (?!?), so we can insert code to materialize the
// pointer if it is not available.
PHITransAddr Address(LI->getPointerOperand(), DL, AC);
Value *LoadPtr = nullptr;
LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
*DT, NewInsts);
// If we couldn't find or insert a computation of this phi translated value,
// we fail PRE.
if (!LoadPtr) {
LLVM_DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
<< *LI->getPointerOperand() << "\n");
CanDoPRE = false;
break;
}
PredLoad.second = LoadPtr;
}
if (!CanDoPRE) {
while (!NewInsts.empty()) {
Instruction *I = NewInsts.pop_back_val();
markInstructionForDeletion(I);
}
// HINT: Don't revert the edge-splitting, as the following transformation may
// also need to split these critical edges.
return !CriticalEdgePred.empty();
}
// Okay, we can eliminate this load by inserting a reload in the predecessor
// and using PHI construction to get the value in the other predecessors, do
// it.
LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
LLVM_DEBUG(if (!NewInsts.empty()) dbgs()
<< "INSERTED " << NewInsts.size() << " INSTS: " << *NewInsts.back()
<< '\n');
// Assign value numbers to the new instructions.
for (Instruction *I : NewInsts) {
// Instructions that have been inserted in predecessor(s) to materialize
// the load address do not retain their original debug locations. Doing
// so could lead to confusing (but correct) source attributions.
if (const DebugLoc &DL = I->getDebugLoc())
I->setDebugLoc(DebugLoc::get(0, 0, DL.getScope(), DL.getInlinedAt()));
// FIXME: We really _ought_ to insert these value numbers into their
// parent's availability map. However, in doing so, we risk getting into
// ordering issues. If a block hasn't been processed yet, we would be
// marking a value as AVAIL-IN, which isn't what we intend.
VN.lookupOrAdd(I);
}
for (const auto &PredLoad : PredLoads) {
BasicBlock *UnavailablePred = PredLoad.first;
Value *LoadPtr = PredLoad.second;
auto *NewLoad =
new LoadInst(LI->getType(), LoadPtr, LI->getName() + ".pre",
LI->isVolatile(), LI->getAlignment(), LI->getOrdering(),
LI->getSyncScopeID(), UnavailablePred->getTerminator());
NewLoad->setDebugLoc(LI->getDebugLoc());
// Transfer the old load's AA tags to the new load.
AAMDNodes Tags;
LI->getAAMetadata(Tags);
if (Tags)
NewLoad->setAAMetadata(Tags);
if (auto *MD = LI->getMetadata(LLVMContext::MD_invariant_load))
NewLoad->setMetadata(LLVMContext::MD_invariant_load, MD);
if (auto *InvGroupMD = LI->getMetadata(LLVMContext::MD_invariant_group))
NewLoad->setMetadata(LLVMContext::MD_invariant_group, InvGroupMD);
if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range))
NewLoad->setMetadata(LLVMContext::MD_range, RangeMD);
// We do not propagate the old load's debug location, because the new
// load now lives in a different BB, and we want to avoid a jumpy line
// table.
// FIXME: How do we retain source locations without causing poor debugging
// behavior?
// Add the newly created load.
ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
NewLoad));
MD->invalidateCachedPointerInfo(LoadPtr);
LLVM_DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
}
// Perform PHI construction.
Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, *this);
LI->replaceAllUsesWith(V);
if (isa<PHINode>(V))
V->takeName(LI);
if (Instruction *I = dyn_cast<Instruction>(V))
I->setDebugLoc(LI->getDebugLoc());
if (V->getType()->isPtrOrPtrVectorTy())
MD->invalidateCachedPointerInfo(V);
markInstructionForDeletion(LI);
ORE->emit([&]() {
return OptimizationRemark(DEBUG_TYPE, "LoadPRE", LI)
<< "load eliminated by PRE";
});
++NumPRELoad;
return true;
}
static void reportLoadElim(LoadInst *LI, Value *AvailableValue,
OptimizationRemarkEmitter *ORE) {
using namespace ore;
ORE->emit([&]() {
return OptimizationRemark(DEBUG_TYPE, "LoadElim", LI)
<< "load of type " << NV("Type", LI->getType()) << " eliminated"
<< setExtraArgs() << " in favor of "
<< NV("InfavorOfValue", AvailableValue);
});
}
/// Attempt to eliminate a load whose dependencies are
/// non-local by performing PHI construction.
bool GVN::processNonLocalLoad(LoadInst *LI) {
// Non-local speculations are not allowed under ASan or HWASan.
if (LI->getParent()->getParent()->hasFnAttribute(
Attribute::SanitizeAddress) ||
LI->getParent()->getParent()->hasFnAttribute(
Attribute::SanitizeHWAddress))
return false;
// Step 1: Find the non-local dependencies of the load.
LoadDepVect Deps;
MD->getNonLocalPointerDependency(LI, Deps);
// If we had to process more than one hundred blocks to find the
// dependencies, this load isn't worth worrying about. Optimizing
// it will be too expensive.
unsigned NumDeps = Deps.size();
if (NumDeps > MaxNumDeps)
return false;
// If we had a phi translation failure, we'll have a single entry which is a
// clobber in the current block. Reject this early.
if (NumDeps == 1 &&
!Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) {
LLVM_DEBUG(dbgs() << "GVN: non-local load "; LI->printAsOperand(dbgs());
dbgs() << " has unknown dependencies\n";);
return false;
}
// If this load follows a GEP, see if we can PRE the indices before analyzing.
if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0))) {
for (GetElementPtrInst::op_iterator OI = GEP->idx_begin(),
OE = GEP->idx_end();
OI != OE; ++OI)
if (Instruction *I = dyn_cast<Instruction>(OI->get()))
performScalarPRE(I);
}
// Step 2: Analyze the availability of the load
AvailValInBlkVect ValuesPerBlock;
UnavailBlkVect UnavailableBlocks;
AnalyzeLoadAvailability(LI, Deps, ValuesPerBlock, UnavailableBlocks);
// If we have no predecessors that produce a known value for this load, exit
// early.
if (ValuesPerBlock.empty())
return false;
// Step 3: Eliminate full redundancy.
//
// If all of the instructions we depend on produce a known value for this
// load, then it is fully redundant and we can use PHI insertion to compute
// its value. Insert PHIs and remove the fully redundant value now.
if (UnavailableBlocks.empty()) {
LLVM_DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');
// Perform PHI construction.
Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, *this);
LI->replaceAllUsesWith(V);
if (isa<PHINode>(V))
V->takeName(LI);
if (Instruction *I = dyn_cast<Instruction>(V))
// If instruction I has debug info, then we should not update it.
// Also, if I has a null DebugLoc, then it is still potentially incorrect
// to propagate LI's DebugLoc because LI may not post-dominate I.
if (LI->getDebugLoc() && LI->getParent() == I->getParent())
I->setDebugLoc(LI->getDebugLoc());
if (V->getType()->isPtrOrPtrVectorTy())
MD->invalidateCachedPointerInfo(V);
markInstructionForDeletion(LI);
++NumGVNLoad;
reportLoadElim(LI, V, ORE);
return true;
}
// Step 4: Eliminate partial redundancy.
if (!EnablePRE || !EnableLoadPRE)
return false;
return PerformLoadPRE(LI, ValuesPerBlock, UnavailableBlocks);
}
bool GVN::processAssumeIntrinsic(IntrinsicInst *IntrinsicI) {
assert(IntrinsicI->getIntrinsicID() == Intrinsic::assume &&
"This function can only be called with llvm.assume intrinsic");
Value *V = IntrinsicI->getArgOperand(0);
if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) {
if (Cond->isZero()) {
Type *Int8Ty = Type::getInt8Ty(V->getContext());
// Insert a new store-to-null instruction before the assume to indicate that
// this code is not reachable. FIXME: We could insert an unreachable
// instruction directly because we can modify the CFG.
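// (Illustrative effect: assume(false) plants "store i8 undef, i8* null"
// right before the intrinsic, which later passes can fold to unreachable.)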
new StoreInst(UndefValue::get(Int8Ty),
Constant::getNullValue(Int8Ty->getPointerTo()),
IntrinsicI);
}
markInstructionForDeletion(IntrinsicI);
return false;
} else if (isa<Constant>(V)) {
// If it's a constant and not false, it must evaluate to true. This means
// our assume is assume(true), and thus, pointless, and we don't want to do
// anything more here.
return false;
}
Constant *True = ConstantInt::getTrue(V->getContext());
bool Changed = false;
for (BasicBlock *Successor : successors(IntrinsicI->getParent())) {
BasicBlockEdge Edge(IntrinsicI->getParent(), Successor);
// This property is only true in dominated successors; propagateEquality
// will check dominance for us.
Changed |= propagateEquality(V, True, Edge, false);
}
// We can replace assume value with true, which covers cases like this:
// call void @llvm.assume(i1 %cmp)
// br i1 %cmp, label %bb1, label %bb2 ; will change %cmp to true
ReplaceWithConstMap[V] = True;
// If one operand of a *cmp *eq is a constant, adding it to the map covers:
// %cmp = fcmp oeq float 3.000000e+00, %0 ; const on lhs could happen
// call void @llvm.assume(i1 %cmp)
// ret float %0 ; will change it to ret float 3.000000e+00
if (auto *CmpI = dyn_cast<CmpInst>(V)) {
if (CmpI->getPredicate() == CmpInst::Predicate::ICMP_EQ ||
CmpI->getPredicate() == CmpInst::Predicate::FCMP_OEQ ||
(CmpI->getPredicate() == CmpInst::Predicate::FCMP_UEQ &&
CmpI->getFastMathFlags().noNaNs())) {
Value *CmpLHS = CmpI->getOperand(0);
Value *CmpRHS = CmpI->getOperand(1);
if (isa<Constant>(CmpLHS))
std::swap(CmpLHS, CmpRHS);
auto *RHSConst = dyn_cast<Constant>(CmpRHS);
// If only one operand is constant.
if (RHSConst != nullptr && !isa<Constant>(CmpLHS))
ReplaceWithConstMap[CmpLHS] = RHSConst;
}
}
return Changed;
}
static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl) {
patchReplacementInstruction(I, Repl);
I->replaceAllUsesWith(Repl);
}
/// Attempt to eliminate a load, first by eliminating it
/// locally, and then attempting non-local elimination if that fails.
bool GVN::processLoad(LoadInst *L) {
if (!MD)
return false;
// This code hasn't been audited for ordered or volatile memory access
if (!L->isUnordered())
return false;
if (L->use_empty()) {
markInstructionForDeletion(L);
return true;
}
// ... to a pointer that has been loaded from before...
MemDepResult Dep = MD->getDependency(L);
// If it is defined in another block, try harder.
if (Dep.isNonLocal())
return processNonLocalLoad(L);
// Only handle the local case below
if (!Dep.isDef() && !Dep.isClobber()) {
// This might be a NonFuncLocal or an Unknown
LLVM_DEBUG(
// fast print dep, using operator<< on instruction is too slow.
dbgs() << "GVN: load "; L->printAsOperand(dbgs());
dbgs() << " has unknown dependence\n";);
return false;
}
AvailableValue AV;
if (AnalyzeLoadAvailability(L, Dep, L->getPointerOperand(), AV)) {
Value *AvailableValue = AV.MaterializeAdjustedValue(L, L, *this);
// Replace the load!
patchAndReplaceAllUsesWith(L, AvailableValue);
markInstructionForDeletion(L);
++NumGVNLoad;
reportLoadElim(L, AvailableValue, ORE);
// Tell MDA to reexamine the reused pointer since we might have more
// information after forwarding it.
if (MD && AvailableValue->getType()->isPtrOrPtrVectorTy())
MD->invalidateCachedPointerInfo(AvailableValue);
return true;
}
return false;
}
/// Return a pair: the first field is the value number of \p Exp and the
/// second field indicates whether that value number is newly created.
std::pair<uint32_t, bool>
GVN::ValueTable::assignExpNewValueNum(Expression &Exp) {
uint32_t &e = expressionNumbering[Exp];
bool CreateNewValNum = !e;
if (CreateNewValNum) {
Expressions.push_back(Exp);
if (ExprIdx.size() < nextValueNumber + 1)
ExprIdx.resize(nextValueNumber * 2);
e = nextValueNumber;
ExprIdx[nextValueNumber++] = nextExprNumber++;
}
return {e, CreateNewValNum};
}
/// Return whether all the values related to the same \p Num are
/// defined in \p BB.
bool GVN::ValueTable::areAllValsInBB(uint32_t Num, const BasicBlock *BB,
GVN &Gvn) {
LeaderTableEntry *Vals = &Gvn.LeaderTable[Num];
while (Vals && Vals->BB == BB)
Vals = Vals->Next;
return !Vals;
}
/// Wrap phiTranslateImpl to provide caching functionality.
uint32_t GVN::ValueTable::phiTranslate(const BasicBlock *Pred,
const BasicBlock *PhiBlock, uint32_t Num,
GVN &Gvn) {
auto FindRes = PhiTranslateTable.find({Num, Pred});
if (FindRes != PhiTranslateTable.end())
return FindRes->second;
uint32_t NewNum = phiTranslateImpl(Pred, PhiBlock, Num, Gvn);
PhiTranslateTable.insert({{Num, Pred}, NewNum});
return NewNum;
}
+// Return true if the value numbers \p Num and \p NewNum represent equal
+// values. Return false if the result is unknown.
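+// (Illustrative: two calls to the same readnone function always compare
+// equal; calls that only read memory compare equal only when MemDep finds
+// no function-local clobber for the call.)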
+bool GVN::ValueTable::areCallValsEqual(uint32_t Num, uint32_t NewNum,
+ const BasicBlock *Pred,
+ const BasicBlock *PhiBlock, GVN &Gvn) {
+ CallInst *Call = nullptr;
+ LeaderTableEntry *Vals = &Gvn.LeaderTable[Num];
+ while (Vals) {
+ Call = dyn_cast<CallInst>(Vals->Val);
+ if (Call && Call->getParent() == PhiBlock)
+ break;
+ Vals = Vals->Next;
+ }
+
+ if (AA->doesNotAccessMemory(Call))
+ return true;
+
+ if (!MD || !AA->onlyReadsMemory(Call))
+ return false;
+
+ MemDepResult local_dep = MD->getDependency(Call);
+ if (!local_dep.isNonLocal())
+ return false;
+
+ const MemoryDependenceResults::NonLocalDepInfo &deps =
+ MD->getNonLocalCallDependency(Call);
+
+ // Check to see if the Call has no function local clobber.
+ for (unsigned i = 0; i < deps.size(); i++) {
+ if (deps[i].getResult().isNonFuncLocal())
+ return true;
+ }
+ return false;
+}
+
/// Translate value number \p Num using phis, so that it has the values of
/// the phis in BB.
uint32_t GVN::ValueTable::phiTranslateImpl(const BasicBlock *Pred,
const BasicBlock *PhiBlock,
uint32_t Num, GVN &Gvn) {
if (PHINode *PN = NumberingPhi[Num]) {
for (unsigned i = 0; i != PN->getNumIncomingValues(); ++i) {
if (PN->getParent() == PhiBlock && PN->getIncomingBlock(i) == Pred)
if (uint32_t TransVal = lookup(PN->getIncomingValue(i), false))
return TransVal;
}
return Num;
}
// If any value related to Num is defined in a BB other than
// PhiBlock, it cannot depend on a phi in PhiBlock without going through
// a backedge. We can do an early exit in that case to save compile time.
if (!areAllValsInBB(Num, PhiBlock, Gvn))
return Num;
if (Num >= ExprIdx.size() || ExprIdx[Num] == 0)
return Num;
Expression Exp = Expressions[ExprIdx[Num]];
for (unsigned i = 0; i < Exp.varargs.size(); i++) {
// For InsertValue and ExtractValue, some varargs are index numbers
// instead of value numbers. Those index numbers should not be
// translated.
if ((i > 1 && Exp.opcode == Instruction::InsertValue) ||
(i > 0 && Exp.opcode == Instruction::ExtractValue))
continue;
Exp.varargs[i] = phiTranslate(Pred, PhiBlock, Exp.varargs[i], Gvn);
}
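// Canonicalize commutative expressions so that a phi-translated operand
// order still hashes to the same value number, e.g. (add %a, %b) vs
// (add %b, %a); for compares the predicate is swapped along with the
// operands, e.g. (icmp slt %a, %b) becomes (icmp sgt %b, %a) (illustrative).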
if (Exp.commutative) {
assert(Exp.varargs.size() == 2 && "Unsupported commutative expression!");
if (Exp.varargs[0] > Exp.varargs[1]) {
std::swap(Exp.varargs[0], Exp.varargs[1]);
uint32_t Opcode = Exp.opcode >> 8;
if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)
Exp.opcode = (Opcode << 8) |
CmpInst::getSwappedPredicate(
static_cast<CmpInst::Predicate>(Exp.opcode & 255));
}
}
- if (uint32_t NewNum = expressionNumbering[Exp])
+ if (uint32_t NewNum = expressionNumbering[Exp]) {
+ if (Exp.opcode == Instruction::Call && NewNum != Num)
+ return areCallValsEqual(Num, NewNum, Pred, PhiBlock, Gvn) ? NewNum : Num;
return NewNum;
+ }
return Num;
}
/// Erase stale entry from phiTranslate cache so phiTranslate can be computed
/// again.
void GVN::ValueTable::eraseTranslateCacheEntry(uint32_t Num,
const BasicBlock &CurrBlock) {
for (const BasicBlock *Pred : predecessors(&CurrBlock)) {
auto FindRes = PhiTranslateTable.find({Num, Pred});
if (FindRes != PhiTranslateTable.end())
PhiTranslateTable.erase(FindRes);
}
}
// In order to find a leader for a given value number at a
// specific basic block, we first obtain the list of all Values for that number,
// and then scan the list to find one whose block dominates the block in
// question. This is fast because dominator tree queries consist of only
// a few comparisons of DFS numbers.
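// (Illustrative: with leader entries {BB1 -> %x, BB2 -> %y} for the same
// number, a query from a block dominated by BB2 yields %y; a constant
// entry is returned as soon as its block is seen to dominate the query.)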
Value *GVN::findLeader(const BasicBlock *BB, uint32_t num) {
LeaderTableEntry Vals = LeaderTable[num];
if (!Vals.Val) return nullptr;
Value *Val = nullptr;
if (DT->dominates(Vals.BB, BB)) {
Val = Vals.Val;
if (isa<Constant>(Val)) return Val;
}
LeaderTableEntry* Next = Vals.Next;
while (Next) {
if (DT->dominates(Next->BB, BB)) {
if (isa<Constant>(Next->Val)) return Next->Val;
if (!Val) Val = Next->Val;
}
Next = Next->Next;
}
return Val;
}
/// There is an edge from 'Src' to 'Dst'. Return
/// true if every path from the entry block to 'Dst' passes via this edge. In
/// particular 'Dst' must not be reachable via another edge from 'Src'.
static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E,
DominatorTree *DT) {
// While in theory it is interesting to consider the case in which Dst has
// more than one predecessor, because Dst might be part of a loop which is
// only reachable from Src, in practice it is pointless since at the time
// GVN runs all such loops have preheaders, which means that Dst will have
// been changed to have only one predecessor, namely Src.
const BasicBlock *Pred = E.getEnd()->getSinglePredecessor();
assert((!Pred || Pred == E.getStart()) &&
"No edge between these basic blocks!");
return Pred != nullptr;
}
void GVN::assignBlockRPONumber(Function &F) {
BlockRPONumber.clear();
uint32_t NextBlockNumber = 1;
ReversePostOrderTraversal<Function *> RPOT(&F);
for (BasicBlock *BB : RPOT)
BlockRPONumber[BB] = NextBlockNumber++;
InvalidBlockRPONumbers = false;
}
// Tries to replace an instruction's operands with constants, using
// information from ReplaceWithConstMap.
bool GVN::replaceOperandsWithConsts(Instruction *Instr) const {
bool Changed = false;
for (unsigned OpNum = 0; OpNum < Instr->getNumOperands(); ++OpNum) {
Value *Operand = Instr->getOperand(OpNum);
auto it = ReplaceWithConstMap.find(Operand);
if (it != ReplaceWithConstMap.end()) {
assert(!isa<Constant>(Operand) &&
"Replacing constants with constants is invalid");
LLVM_DEBUG(dbgs() << "GVN replacing: " << *Operand << " with "
<< *it->second << " in instruction " << *Instr << '\n');
Instr->setOperand(OpNum, it->second);
Changed = true;
}
}
return Changed;
}
/// The given values are known to be equal in every block
/// dominated by 'Root'. Exploit this, for example by replacing 'LHS' with
/// 'RHS' everywhere in the scope. Returns whether a change was made.
/// If DominatesByEdge is false, then it means that we will propagate the RHS
/// value starting from the end of Root.Start.
bool GVN::propagateEquality(Value *LHS, Value *RHS, const BasicBlockEdge &Root,
bool DominatesByEdge) {
SmallVector<std::pair<Value*, Value*>, 4> Worklist;
Worklist.push_back(std::make_pair(LHS, RHS));
bool Changed = false;
// For speed, compute a conservative fast approximation to
// DT->dominates(Root, Root.getEnd());
const bool RootDominatesEnd = isOnlyReachableViaThisEdge(Root, DT);
while (!Worklist.empty()) {
std::pair<Value*, Value*> Item = Worklist.pop_back_val();
LHS = Item.first; RHS = Item.second;
if (LHS == RHS)
continue;
assert(LHS->getType() == RHS->getType() && "Equality but unequal types!");
// Don't try to propagate equalities between constants.
if (isa<Constant>(LHS) && isa<Constant>(RHS))
continue;
// Prefer a constant on the right-hand side, or an Argument if no constants.
if (isa<Constant>(LHS) || (isa<Argument>(LHS) && !isa<Constant>(RHS)))
std::swap(LHS, RHS);
assert((isa<Argument>(LHS) || isa<Instruction>(LHS)) && "Unexpected value!");
// If there is no obvious reason to prefer the left-hand side over the
// right-hand side, ensure the longest lived term is on the right-hand side,
// so the shortest lived term will be replaced by the longest lived.
// This tends to expose more simplifications.
uint32_t LVN = VN.lookupOrAdd(LHS);
if ((isa<Argument>(LHS) && isa<Argument>(RHS)) ||
(isa<Instruction>(LHS) && isa<Instruction>(RHS))) {
// Move the 'oldest' value to the right-hand side, using the value number
// as a proxy for age.
uint32_t RVN = VN.lookupOrAdd(RHS);
if (LVN < RVN) {
std::swap(LHS, RHS);
LVN = RVN;
}
}
// If value numbering later sees that an instruction in the scope is equal
// to 'LHS' then ensure it will be turned into 'RHS'. In order to preserve
// the invariant that instructions only occur in the leader table for their
// own value number (this is used by removeFromLeaderTable), do not do this
// if RHS is an instruction (if an instruction in the scope is morphed into
// LHS then it will be turned into RHS by the next GVN iteration anyway, so
// using the leader table is about compiling faster, not optimizing better).
// The leader table only tracks basic blocks, not edges. Only add to it if
// we have the simple case where the edge dominates the end.
if (RootDominatesEnd && !isa<Instruction>(RHS))
addToLeaderTable(LVN, RHS, Root.getEnd());
// Replace all occurrences of 'LHS' with 'RHS' everywhere in the scope. As
// LHS always has at least one use that is not dominated by Root, this will
// never do anything if LHS has only one use.
if (!LHS->hasOneUse()) {
unsigned NumReplacements =
DominatesByEdge
? replaceDominatedUsesWith(LHS, RHS, *DT, Root)
: replaceDominatedUsesWith(LHS, RHS, *DT, Root.getStart());
Changed |= NumReplacements > 0;
NumGVNEqProp += NumReplacements;
// Cached information for anything that uses LHS will be invalid.
if (MD)
MD->invalidateCachedPointerInfo(LHS);
}
// Now try to deduce additional equalities from this one. For example, if
// the known equality was "(A != B)" == "false" then it follows that A and B
// are equal in the scope. Only boolean equalities with an explicit true or
// false RHS are currently supported.
if (!RHS->getType()->isIntegerTy(1))
// Not a boolean equality - bail out.
continue;
ConstantInt *CI = dyn_cast<ConstantInt>(RHS);
if (!CI)
// RHS neither 'true' nor 'false' - bail out.
continue;
// Whether RHS equals 'true'. Otherwise it equals 'false'.
bool isKnownTrue = CI->isMinusOne();
bool isKnownFalse = !isKnownTrue;
// If "A && B" is known true then both A and B are known true. If "A || B"
// is known false then both A and B are known false.
Value *A, *B;
if ((isKnownTrue && match(LHS, m_And(m_Value(A), m_Value(B)))) ||
(isKnownFalse && match(LHS, m_Or(m_Value(A), m_Value(B))))) {
Worklist.push_back(std::make_pair(A, RHS));
Worklist.push_back(std::make_pair(B, RHS));
continue;
}
// If we are propagating an equality like "(A == B)" == "true" then also
// propagate the equality A == B. When propagating a comparison such as
// "(A >= B)" == "true", replace all instances of "A < B" with "false".
if (CmpInst *Cmp = dyn_cast<CmpInst>(LHS)) {
Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1);
// If "A == B" is known true, or "A != B" is known false, then replace
// A with B everywhere in the scope.
if ((isKnownTrue && Cmp->getPredicate() == CmpInst::ICMP_EQ) ||
(isKnownFalse && Cmp->getPredicate() == CmpInst::ICMP_NE))
Worklist.push_back(std::make_pair(Op0, Op1));
// Handle the floating point versions of equality comparisons too.
if ((isKnownTrue && Cmp->getPredicate() == CmpInst::FCMP_OEQ) ||
(isKnownFalse && Cmp->getPredicate() == CmpInst::FCMP_UNE)) {
// Floating point -0.0 and 0.0 compare equal, so we can only
// propagate values if we know that we have a constant and that
// its value is non-zero.
// FIXME: We should do this optimization if 'no signed zeros' is
// applicable via an instruction-level fast-math-flag or some other
// indicator that relaxed FP semantics are being used.
if (isa<ConstantFP>(Op1) && !cast<ConstantFP>(Op1)->isZero())
Worklist.push_back(std::make_pair(Op0, Op1));
}
// If "A >= B" is known true, replace "A < B" with false everywhere.
CmpInst::Predicate NotPred = Cmp->getInversePredicate();
Constant *NotVal = ConstantInt::get(Cmp->getType(), isKnownFalse);
// Since we don't have the instruction "A < B" immediately to hand, work
// out the value number that it would have and use that to find an
// appropriate instruction (if any).
uint32_t NextNum = VN.getNextUnusedValueNumber();
uint32_t Num = VN.lookupOrAddCmp(Cmp->getOpcode(), NotPred, Op0, Op1);
// If the number we were assigned was brand new then there is no point in
// looking for an instruction realizing it: there cannot be one!
if (Num < NextNum) {
Value *NotCmp = findLeader(Root.getEnd(), Num);
if (NotCmp && isa<Instruction>(NotCmp)) {
unsigned NumReplacements =
DominatesByEdge
? replaceDominatedUsesWith(NotCmp, NotVal, *DT, Root)
: replaceDominatedUsesWith(NotCmp, NotVal, *DT,
Root.getStart());
Changed |= NumReplacements > 0;
NumGVNEqProp += NumReplacements;
// Cached information for anything that uses NotCmp will be invalid.
if (MD)
MD->invalidateCachedPointerInfo(NotCmp);
}
}
// Ensure that any instruction in scope that gets the "A < B" value number
// is replaced with false.
// The leader table only tracks basic blocks, not edges. Only add to it if
// we have the simple case where the edge dominates the end.
if (RootDominatesEnd)
addToLeaderTable(Num, NotVal, Root.getEnd());
continue;
}
}
return Changed;
}
/// When calculating availability, handle an instruction
/// by inserting it into the appropriate sets
bool GVN::processInstruction(Instruction *I) {
// Ignore dbg info intrinsics.
if (isa<DbgInfoIntrinsic>(I))
return false;
// If the instruction can be easily simplified then do so now in preference
// to value numbering it. Value numbering often exposes redundancies, for
// example if it determines that %y is equal to %x then the instruction
// "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify.
const DataLayout &DL = I->getModule()->getDataLayout();
if (Value *V = SimplifyInstruction(I, {DL, TLI, DT, AC})) {
bool Changed = false;
if (!I->use_empty()) {
I->replaceAllUsesWith(V);
Changed = true;
}
if (isInstructionTriviallyDead(I, TLI)) {
markInstructionForDeletion(I);
Changed = true;
}
if (Changed) {
if (MD && V->getType()->isPtrOrPtrVectorTy())
MD->invalidateCachedPointerInfo(V);
++NumGVNSimpl;
return true;
}
}
if (IntrinsicInst *IntrinsicI = dyn_cast<IntrinsicInst>(I))
if (IntrinsicI->getIntrinsicID() == Intrinsic::assume)
return processAssumeIntrinsic(IntrinsicI);
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
if (processLoad(LI))
return true;
unsigned Num = VN.lookupOrAdd(LI);
addToLeaderTable(Num, LI, LI->getParent());
return false;
}
// For conditional branches, we can perform simple conditional propagation on
// the condition value itself.
if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
if (!BI->isConditional())
return false;
if (isa<Constant>(BI->getCondition()))
return processFoldableCondBr(BI);
Value *BranchCond = BI->getCondition();
BasicBlock *TrueSucc = BI->getSuccessor(0);
BasicBlock *FalseSucc = BI->getSuccessor(1);
// Avoid multiple edges early.
if (TrueSucc == FalseSucc)
return false;
BasicBlock *Parent = BI->getParent();
bool Changed = false;
Value *TrueVal = ConstantInt::getTrue(TrueSucc->getContext());
BasicBlockEdge TrueE(Parent, TrueSucc);
Changed |= propagateEquality(BranchCond, TrueVal, TrueE, true);
Value *FalseVal = ConstantInt::getFalse(FalseSucc->getContext());
BasicBlockEdge FalseE(Parent, FalseSucc);
Changed |= propagateEquality(BranchCond, FalseVal, FalseE, true);
return Changed;
}
// For switches, propagate the case values into the case destinations.
if (SwitchInst *SI = dyn_cast<SwitchInst>(I)) {
Value *SwitchCond = SI->getCondition();
BasicBlock *Parent = SI->getParent();
bool Changed = false;
// Remember how many outgoing edges there are to every successor.
SmallDenseMap<BasicBlock *, unsigned, 16> SwitchEdges;
for (unsigned i = 0, n = SI->getNumSuccessors(); i != n; ++i)
++SwitchEdges[SI->getSuccessor(i)];
for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
i != e; ++i) {
BasicBlock *Dst = i->getCaseSuccessor();
// If there is only a single edge, propagate the case value into it.
if (SwitchEdges.lookup(Dst) == 1) {
BasicBlockEdge E(Parent, Dst);
Changed |= propagateEquality(SwitchCond, i->getCaseValue(), E, true);
}
}
return Changed;
}
// Instructions with void type don't return a value, so there's
// no point in trying to find redundancies in them.
if (I->getType()->isVoidTy())
return false;
uint32_t NextNum = VN.getNextUnusedValueNumber();
unsigned Num = VN.lookupOrAdd(I);
// Allocations are always uniquely numbered, so we can save time and memory
// by failing them fast.
if (isa<AllocaInst>(I) || I->isTerminator() || isa<PHINode>(I)) {
addToLeaderTable(Num, I, I->getParent());
return false;
}
// If the number we were assigned was a brand new VN, then we don't
// need to do a lookup to see if the number already exists
// somewhere in the domtree: it can't!
if (Num >= NextNum) {
addToLeaderTable(Num, I, I->getParent());
return false;
}
// Perform fast-path value-number based elimination of values inherited from
// dominators.
Value *Repl = findLeader(I->getParent(), Num);
if (!Repl) {
// Failure, just remember this instance for future use.
addToLeaderTable(Num, I, I->getParent());
return false;
} else if (Repl == I) {
// If I was the result of a shortcut PRE, it might already be in the table
// and the best replacement for itself. Nothing to do.
return false;
}
// Remove it!
patchAndReplaceAllUsesWith(I, Repl);
if (MD && Repl->getType()->isPtrOrPtrVectorTy())
MD->invalidateCachedPointerInfo(Repl);
markInstructionForDeletion(I);
return true;
}
/// runImpl - This is the main transformation entry point for a function.
bool GVN::runImpl(Function &F, AssumptionCache &RunAC, DominatorTree &RunDT,
const TargetLibraryInfo &RunTLI, AAResults &RunAA,
MemoryDependenceResults *RunMD, LoopInfo *LI,
OptimizationRemarkEmitter *RunORE) {
AC = &RunAC;
DT = &RunDT;
VN.setDomTree(DT);
TLI = &RunTLI;
VN.setAliasAnalysis(&RunAA);
MD = RunMD;
ImplicitControlFlowTracking ImplicitCFT(DT);
ICF = &ImplicitCFT;
VN.setMemDep(MD);
ORE = RunORE;
InvalidBlockRPONumbers = true;
bool Changed = false;
bool ShouldContinue = true;
DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
// Merge unconditional branches, allowing PRE to catch more
// optimization opportunities.
for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
BasicBlock *BB = &*FI++;
bool removedBlock = MergeBlockIntoPredecessor(BB, &DTU, LI, nullptr, MD);
if (removedBlock)
++NumGVNBlocks;
Changed |= removedBlock;
}
unsigned Iteration = 0;
while (ShouldContinue) {
LLVM_DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
ShouldContinue = iterateOnFunction(F);
Changed |= ShouldContinue;
++Iteration;
}
if (EnablePRE) {
// Fabricate val-num for dead-code in order to suppress assertion in
// performPRE().
assignValNumForDeadCode();
bool PREChanged = true;
while (PREChanged) {
PREChanged = performPRE(F);
Changed |= PREChanged;
}
}
// FIXME: Should perform GVN again after PRE does something. PRE can move
// computations into blocks where they become fully redundant. Note that
// we can't do this until PRE's critical edge splitting updates memdep.
// Actually, when this happens, we should just fully integrate PRE into GVN.
cleanupGlobalSets();
// Do not cleanup DeadBlocks in cleanupGlobalSets() as it's called for each
// iteration.
DeadBlocks.clear();
return Changed;
}
bool GVN::processBlock(BasicBlock *BB) {
// FIXME: Kill off InstrsToErase by erasing eagerly in a helper function
// (and incrementing BI before processing an instruction).
assert(InstrsToErase.empty() &&
"We expect InstrsToErase to be empty across iterations");
if (DeadBlocks.count(BB))
return false;
// Clear the map before every BB because it is valid only within a single BB.
ReplaceWithConstMap.clear();
bool ChangedFunction = false;
for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
BI != BE;) {
if (!ReplaceWithConstMap.empty())
ChangedFunction |= replaceOperandsWithConsts(&*BI);
ChangedFunction |= processInstruction(&*BI);
if (InstrsToErase.empty()) {
++BI;
continue;
}
// If we need some instructions deleted, do it now.
NumGVNInstr += InstrsToErase.size();
// Avoid iterator invalidation.
bool AtStart = BI == BB->begin();
if (!AtStart)
--BI;
for (auto *I : InstrsToErase) {
assert(I->getParent() == BB && "Removing instruction from wrong block?");
LLVM_DEBUG(dbgs() << "GVN removed: " << *I << '\n');
salvageDebugInfo(*I);
if (MD) MD->removeInstruction(I);
LLVM_DEBUG(verifyRemoved(I));
ICF->removeInstruction(I);
I->eraseFromParent();
}
InstrsToErase.clear();
if (AtStart)
BI = BB->begin();
else
++BI;
}
return ChangedFunction;
}
// Instantiate an expression in a predecessor that lacked it.
bool GVN::performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
BasicBlock *Curr, unsigned int ValNo) {
// Because we are going top-down through the block, all value numbers
// will be available in the predecessor by the time we need them. Any
// that weren't originally present will have been instantiated earlier
// in this loop.
bool success = true;
for (unsigned i = 0, e = Instr->getNumOperands(); i != e; ++i) {
Value *Op = Instr->getOperand(i);
if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
continue;
// This could be a newly inserted instruction, in which case we won't
// find a value number, and should give up before we hurt ourselves.
// FIXME: Rewrite the infrastructure to make it easier to value number
// and process newly inserted instructions.
if (!VN.exists(Op)) {
success = false;
break;
}
uint32_t TValNo =
VN.phiTranslate(Pred, Curr, VN.lookup(Op), *this);
if (Value *V = findLeader(Pred, TValNo)) {
Instr->setOperand(i, V);
} else {
success = false;
break;
}
}
// Fail out if we encounter an operand that is not available in
// the PRE predecessor. This is typically because of loads which
// are not value numbered precisely.
if (!success)
return false;
Instr->insertBefore(Pred->getTerminator());
Instr->setName(Instr->getName() + ".pre");
Instr->setDebugLoc(Instr->getDebugLoc());
unsigned Num = VN.lookupOrAdd(Instr);
VN.add(Instr, Num);
// Update the availability map to include the new instruction.
addToLeaderTable(Num, Instr, Pred);
return true;
}
bool GVN::performScalarPRE(Instruction *CurInst) {
if (isa<AllocaInst>(CurInst) || CurInst->isTerminator() ||
isa<PHINode>(CurInst) || CurInst->getType()->isVoidTy() ||
CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
isa<DbgInfoIntrinsic>(CurInst))
return false;
// Don't do PRE on compares. The PHI would prevent CodeGenPrepare from
// sinking the compare again, and it would force the code generator to
// move the i1 from processor flags or predicate registers into a general
// purpose register.
if (isa<CmpInst>(CurInst))
return false;
// Don't do PRE on GEPs. The inserted PHI would prevent CodeGenPrepare from
// sinking the addressing mode computation back to its uses. Extending the
// GEP's live range increases the register pressure, and therefore it can
// introduce unnecessary spills.
//
// This doesn't prevent Load PRE. PHI translation will make the GEP available
// to the load by moving it to the predecessor block if necessary.
if (isa<GetElementPtrInst>(CurInst))
return false;
// We don't currently value number ANY inline asm calls.
if (auto *CallB = dyn_cast<CallBase>(CurInst))
if (CallB->isInlineAsm())
return false;
uint32_t ValNo = VN.lookup(CurInst);
// Look for the predecessors for PRE opportunities. We're
// only trying to solve the basic diamond case, where
// a value is computed in the successor and one predecessor,
// but not the other. We also explicitly disallow cases
// where the successor is its own predecessor, because they're
// more complicated to get right.
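// Illustrative shape (the only one handled here):
//
//      Pred1         Pred2
//   (has a+b)     (lacks a+b)
//         \          /
//        CurrentBlock   <- a+b recomputed here; PRE inserts the expression
//                          in Pred2 and joins both copies with a phi.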
unsigned NumWith = 0;
unsigned NumWithout = 0;
BasicBlock *PREPred = nullptr;
BasicBlock *CurrentBlock = CurInst->getParent();
// Update the RPO numbers for this function.
if (InvalidBlockRPONumbers)
assignBlockRPONumber(*CurrentBlock->getParent());
SmallVector<std::pair<Value *, BasicBlock *>, 8> predMap;
for (BasicBlock *P : predecessors(CurrentBlock)) {
// We're not interested in PRE where a predecessor is not reachable.
if (!DT->isReachableFromEntry(P)) {
NumWithout = 2;
break;
}
// It is not safe to do PRE when P->CurrentBlock is a loop backedge and
// CurInst has an operand defined in CurrentBlock (so it may be defined
// by a phi in the loop header).
assert(BlockRPONumber.count(P) && BlockRPONumber.count(CurrentBlock) &&
"Invalid BlockRPONumber map.");
if (BlockRPONumber[P] >= BlockRPONumber[CurrentBlock] &&
llvm::any_of(CurInst->operands(), [&](const Use &U) {
if (auto *Inst = dyn_cast<Instruction>(U.get()))
return Inst->getParent() == CurrentBlock;
return false;
})) {
NumWithout = 2;
break;
}
uint32_t TValNo = VN.phiTranslate(P, CurrentBlock, ValNo, *this);
Value *predV = findLeader(P, TValNo);
if (!predV) {
predMap.push_back(std::make_pair(static_cast<Value *>(nullptr), P));
PREPred = P;
++NumWithout;
} else if (predV == CurInst) {
/* CurInst dominates this predecessor. */
NumWithout = 2;
break;
} else {
predMap.push_back(std::make_pair(predV, P));
++NumWith;
}
}
// Don't do PRE when it might increase code size, i.e. when
// we would need to insert instructions in more than one pred.
if (NumWithout > 1 || NumWith == 0)
return false;
// We may have a case where all predecessors have the instruction,
// and we just need to insert a phi node. Otherwise, perform
// insertion.
Instruction *PREInstr = nullptr;
if (NumWithout != 0) {
if (!isSafeToSpeculativelyExecute(CurInst)) {
// It is only valid to insert a new instruction if the current instruction
// is always executed. An instruction with implicit control flow could
// prevent us from doing it. If we cannot speculate the execution, then
// PRE should be prohibited.
if (ICF->isDominatedByICFIFromSameBlock(CurInst))
return false;
}
// Don't do PRE across indirect branch.
if (isa<IndirectBrInst>(PREPred->getTerminator()))
return false;
// Don't do PRE across callbr.
// FIXME: Can we do this across the fallthrough edge?
if (isa<CallBrInst>(PREPred->getTerminator()))
return false;
// We can't do PRE safely on a critical edge, so instead we schedule
// the edge to be split and perform the PRE the next time we iterate
// on the function.
unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
return false;
}
// We need to insert somewhere, so let's give it a shot
PREInstr = CurInst->clone();
if (!performScalarPREInsertion(PREInstr, PREPred, CurrentBlock, ValNo)) {
// If we failed insertion, make sure we remove the instruction.
LLVM_DEBUG(verifyRemoved(PREInstr));
PREInstr->deleteValue();
return false;
}
}
// Either we should have filled in the PRE instruction, or we should
// not have needed insertions.
assert(PREInstr != nullptr || NumWithout == 0);
++NumGVNPRE;
// Create a PHI to make the value available in this block.
PHINode *Phi =
PHINode::Create(CurInst->getType(), predMap.size(),
CurInst->getName() + ".pre-phi", &CurrentBlock->front());
for (unsigned i = 0, e = predMap.size(); i != e; ++i) {
if (Value *V = predMap[i].first) {
// If we use an existing value in this phi, we have to patch the original
// value because the phi will be used to replace a later value.
patchReplacementInstruction(CurInst, V);
Phi->addIncoming(V, predMap[i].second);
} else
Phi->addIncoming(PREInstr, PREPred);
}
VN.add(Phi, ValNo);
// After creating a new PHI for ValNo, the phi translate result for ValNo will
// be changed, so erase the related stale entries in the phi translate cache.
VN.eraseTranslateCacheEntry(ValNo, *CurrentBlock);
addToLeaderTable(ValNo, Phi, CurrentBlock);
Phi->setDebugLoc(CurInst->getDebugLoc());
CurInst->replaceAllUsesWith(Phi);
if (MD && Phi->getType()->isPtrOrPtrVectorTy())
MD->invalidateCachedPointerInfo(Phi);
VN.erase(CurInst);
removeFromLeaderTable(ValNo, CurInst, CurrentBlock);
LLVM_DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
if (MD)
MD->removeInstruction(CurInst);
LLVM_DEBUG(verifyRemoved(CurInst));
// FIXME: Intended to be markInstructionForDeletion(CurInst), but it causes
// some assertion failures.
ICF->removeInstruction(CurInst);
CurInst->eraseFromParent();
++NumGVNInstr;
return true;
}
/// Perform a purely local form of PRE that looks for diamond
/// control flow patterns and attempts to perform simple PRE at the join point.
bool GVN::performPRE(Function &F) {
bool Changed = false;
for (BasicBlock *CurrentBlock : depth_first(&F.getEntryBlock())) {
// Nothing to PRE in the entry block.
if (CurrentBlock == &F.getEntryBlock())
continue;
// Don't perform PRE on an EH pad.
if (CurrentBlock->isEHPad())
continue;
for (BasicBlock::iterator BI = CurrentBlock->begin(),
BE = CurrentBlock->end();
BI != BE;) {
Instruction *CurInst = &*BI++;
Changed |= performScalarPRE(CurInst);
}
}
if (splitCriticalEdges())
Changed = true;
return Changed;
}
/// Split the critical edge connecting the given two blocks, and return
/// the block inserted to the critical edge.
BasicBlock *GVN::splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ) {
BasicBlock *BB =
SplitCriticalEdge(Pred, Succ, CriticalEdgeSplittingOptions(DT));
if (MD)
MD->invalidateCachedPredecessors();
InvalidBlockRPONumbers = true;
return BB;
}
/// Split critical edges found during the previous
/// iteration that may enable further optimization.
bool GVN::splitCriticalEdges() {
if (toSplit.empty())
return false;
do {
std::pair<Instruction *, unsigned> Edge = toSplit.pop_back_val();
SplitCriticalEdge(Edge.first, Edge.second,
CriticalEdgeSplittingOptions(DT));
} while (!toSplit.empty());
if (MD) MD->invalidateCachedPredecessors();
InvalidBlockRPONumbers = true;
return true;
}
/// Executes one iteration of GVN
bool GVN::iterateOnFunction(Function &F) {
cleanupGlobalSets();
// Top-down walk of the dominator tree
bool Changed = false;
// Needed for value numbering with phi construction to work.
// RPOT walks the graph in its constructor and will not be invalidated during
// processBlock.
ReversePostOrderTraversal<Function *> RPOT(&F);
for (BasicBlock *BB : RPOT)
Changed |= processBlock(BB);
return Changed;
}
void GVN::cleanupGlobalSets() {
VN.clear();
LeaderTable.clear();
BlockRPONumber.clear();
TableAllocator.Reset();
ICF->clear();
InvalidBlockRPONumbers = true;
}
/// Verify that the specified instruction does not occur in our
/// internal data structures.
void GVN::verifyRemoved(const Instruction *Inst) const {
VN.verifyRemoved(Inst);
// Walk through the value number scope to make sure the instruction isn't
// ferreted away in it.
for (DenseMap<uint32_t, LeaderTableEntry>::const_iterator
I = LeaderTable.begin(), E = LeaderTable.end(); I != E; ++I) {
const LeaderTableEntry *Node = &I->second;
assert(Node->Val != Inst && "Inst still in value numbering scope!");
while (Node->Next) {
Node = Node->Next;
assert(Node->Val != Inst && "Inst still in value numbering scope!");
}
}
}
/// BB is declared dead, which implies that other blocks become dead as well.
/// This function adds all such blocks to "DeadBlocks". For the dead blocks'
/// live successors, it updates their phi nodes by replacing the operands
/// corresponding to dead blocks with UndefVal.
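/// Illustrative sketch (hypothetical CFG): if BB dominates D1 and D2, all
/// three are marked dead. A successor S with both dead and live predecessors
/// lands in the dominance frontier, and only its phi operands coming from
/// BB, D1, or D2 are replaced with undef.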
void GVN::addDeadBlock(BasicBlock *BB) {
SmallVector<BasicBlock *, 4> NewDead;
SmallSetVector<BasicBlock *, 4> DF;
NewDead.push_back(BB);
while (!NewDead.empty()) {
BasicBlock *D = NewDead.pop_back_val();
if (DeadBlocks.count(D))
continue;
// All blocks dominated by D are dead.
SmallVector<BasicBlock *, 8> Dom;
DT->getDescendants(D, Dom);
DeadBlocks.insert(Dom.begin(), Dom.end());
// Figure out the dominance-frontier(D).
for (BasicBlock *B : Dom) {
for (BasicBlock *S : successors(B)) {
if (DeadBlocks.count(S))
continue;
bool AllPredDead = true;
for (BasicBlock *P : predecessors(S))
if (!DeadBlocks.count(P)) {
AllPredDead = false;
break;
}
if (!AllPredDead) {
// S could be proved dead later on. That is why we don't update phi
// operands at this moment.
DF.insert(S);
} else {
// Although S is not dominated by D, it is dead by now. This can happen
// if S already has a dead predecessor before D is declared
// dead.
NewDead.push_back(S);
}
}
}
}
// For the dead blocks' live successors, update their phi nodes by replacing
// the operands corresponding to dead blocks with UndefVal.
for(SmallSetVector<BasicBlock *, 4>::iterator I = DF.begin(), E = DF.end();
I != E; I++) {
BasicBlock *B = *I;
if (DeadBlocks.count(B))
continue;
SmallVector<BasicBlock *, 4> Preds(pred_begin(B), pred_end(B));
for (BasicBlock *P : Preds) {
if (!DeadBlocks.count(P))
continue;
if (isCriticalEdge(P->getTerminator(), GetSuccessorNumber(P, B))) {
if (BasicBlock *S = splitCriticalEdges(P, B))
DeadBlocks.insert(P = S);
}
for (BasicBlock::iterator II = B->begin(); isa<PHINode>(II); ++II) {
PHINode &Phi = cast<PHINode>(*II);
Phi.setIncomingValueForBlock(P, UndefValue::get(Phi.getType()));
if (MD)
MD->invalidateCachedPointerInfo(&Phi);
}
}
}
}
// If the given branch is recognized as a foldable branch (i.e. a conditional
// branch with a constant condition), perform the following analyses and
// transformations:
// 1) If the dead outgoing edge is a critical edge, split it. Let R be the
//    target of the dead outgoing edge.
// 2) Identify the set of dead blocks implied by the branch's dead outgoing
//    edge. The result of this step is {X | X is dominated by R}.
// 3) Identify the blocks that have at least one dead predecessor. The
//    result of this step is dominance-frontier(R).
// 4) Update the PHIs in DF(R) by replacing the operands corresponding to
//    dead blocks with "UndefVal", in the hope that these PHIs will be
//    optimized away.
//
// Return true iff *NEW* dead code is found.
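// Illustrative example (hypothetical IR, not part of this change): given
//   br i1 true, label %live, label %dead
// the edge to %dead is the dead outgoing edge, so R = %dead, every block
// dominated by %dead is marked dead, and PHIs in dominance-frontier(R) get
// their incoming values from dead blocks replaced with undef.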
bool GVN::processFoldableCondBr(BranchInst *BI) {
if (!BI || BI->isUnconditional())
return false;
// If a branch has two identical successors, we cannot declare either dead.
if (BI->getSuccessor(0) == BI->getSuccessor(1))
return false;
ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
if (!Cond)
return false;
BasicBlock *DeadRoot =
Cond->getZExtValue() ? BI->getSuccessor(1) : BI->getSuccessor(0);
if (DeadBlocks.count(DeadRoot))
return false;
if (!DeadRoot->getSinglePredecessor())
DeadRoot = splitCriticalEdges(BI->getParent(), DeadRoot);
addDeadBlock(DeadRoot);
return true;
}
// performPRE() will trigger an assert if it comes across an instruction
// without an associated value number. As a function normally has far more
// live instructions than dead ones, it makes more sense to simply "fabricate"
// a value number for the dead code than to check whether each instruction
// involved is dead.
void GVN::assignValNumForDeadCode() {
for (BasicBlock *BB : DeadBlocks) {
for (Instruction &Inst : *BB) {
unsigned ValNum = VN.lookupOrAdd(&Inst);
addToLeaderTable(ValNum, &Inst, BB);
}
}
}
class llvm::gvn::GVNLegacyPass : public FunctionPass {
public:
static char ID; // Pass identification, replacement for typeid
explicit GVNLegacyPass(bool NoMemDepAnalysis = !EnableMemDep)
: FunctionPass(ID), NoMemDepAnalysis(NoMemDepAnalysis) {
initializeGVNLegacyPassPass(*PassRegistry::getPassRegistry());
}
bool runOnFunction(Function &F) override {
if (skipFunction(F))
return false;
auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
return Impl.runImpl(
F, getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
getAnalysis<AAResultsWrapperPass>().getAAResults(),
NoMemDepAnalysis ? nullptr
: &getAnalysis<MemoryDependenceWrapperPass>().getMemDep(),
LIWP ? &LIWP->getLoopInfo() : nullptr,
&getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE());
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<AssumptionCacheTracker>();
AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<TargetLibraryInfoWrapperPass>();
if (!NoMemDepAnalysis)
AU.addRequired<MemoryDependenceWrapperPass>();
AU.addRequired<AAResultsWrapperPass>();
AU.addPreserved<DominatorTreeWrapperPass>();
AU.addPreserved<GlobalsAAWrapperPass>();
AU.addPreserved<TargetLibraryInfoWrapperPass>();
AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
}
private:
bool NoMemDepAnalysis;
GVN Impl;
};
char GVNLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(GVNLegacyPass, "gvn", "Global Value Numbering", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_END(GVNLegacyPass, "gvn", "Global Value Numbering", false, false)
// The public interface to this file...
FunctionPass *llvm::createGVNPass(bool NoMemDepAnalysis) {
return new GVNLegacyPass(NoMemDepAnalysis);
}
Index: stable/12/contrib/llvm-project/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
===================================================================
--- stable/12/contrib/llvm-project/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp (revision 356465)
@@ -1,1533 +1,1537 @@
//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memset's.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar/MemCpyOptimizer.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>
using namespace llvm;
#define DEBUG_TYPE "memcpyopt"
STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy, "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet, "Number of memcpys converted to memset");
static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx,
bool &VariableIdxFound,
const DataLayout &DL) {
// Skip over the first indices.
gep_type_iterator GTI = gep_type_begin(GEP);
for (unsigned i = 1; i != Idx; ++i, ++GTI)
/*skip along*/;
// Compute the offset implied by the rest of the indices.
int64_t Offset = 0;
for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
if (!OpC)
return VariableIdxFound = true;
if (OpC->isZero()) continue; // No offset.
// Handle struct indices, which add their field offset to the pointer.
if (StructType *STy = GTI.getStructTypeOrNull()) {
Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
continue;
}
// Otherwise, we have a sequential type like an array or vector. Multiply
// the index by the ElementSize.
uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
Offset += Size*OpC->getSExtValue();
}
return Offset;
}
/// Return true if Ptr1 is provably equal to Ptr2 plus a constant offset, and
/// return that constant offset. For example, Ptr1 might be &A[42], and Ptr2
/// might be &A[40]. In this case offset would be -8.
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
const DataLayout &DL) {
Ptr1 = Ptr1->stripPointerCasts();
Ptr2 = Ptr2->stripPointerCasts();
// Handle the trivial case first.
if (Ptr1 == Ptr2) {
Offset = 0;
return true;
}
GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);
bool VariableIdxFound = false;
// If one pointer is a GEP and the other isn't, then see if the GEP is a
// constant offset from the base, as in "P" and "gep P, 1".
if (GEP1 && !GEP2 && GEP1->getOperand(0)->stripPointerCasts() == Ptr2) {
Offset = -GetOffsetFromIndex(GEP1, 1, VariableIdxFound, DL);
return !VariableIdxFound;
}
if (GEP2 && !GEP1 && GEP2->getOperand(0)->stripPointerCasts() == Ptr1) {
Offset = GetOffsetFromIndex(GEP2, 1, VariableIdxFound, DL);
return !VariableIdxFound;
}
// Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
// base. After that base, they may have some number of common (and
// potentially variable) indices. Beyond those, each may have some constant
// trailing indices, which determine their offset from each other. We
// handle no other case at this point.
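// For example (hypothetical pointers): with GEP1 = "gep P, 0, %i, 3" and
// GEP2 = "gep P, 0, %i, 1", the base P and the indices 0 and %i are common,
// and the relative offset is determined entirely by the trailing constants
// 3 and 1 (two elements apart).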
if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
return false;
// Skip any common indices and track the GEP types.
unsigned Idx = 1;
for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
break;
int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, DL);
int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, DL);
if (VariableIdxFound) return false;
Offset = Offset2-Offset1;
return true;
}
namespace {
/// Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
/// store 0 -> P+1
/// store 0 -> P+0
/// store 0 -> P+3
/// store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc. When we see
/// the first store, we make a range [1, 2). The second store extends the range
/// to [0, 2). The third makes a new range [2, 3). The fourth store joins the
/// two ranges into [0, 3) which is memset'able.
struct MemsetRange {
// Start/End - A semi range that describes the span that this range covers.
// The range is closed at the start and open at the end: [Start, End).
int64_t Start, End;
/// StartPtr - The getelementptr instruction that points to the start of the
/// range.
Value *StartPtr;
/// Alignment - The known alignment of the first store.
unsigned Alignment;
/// TheStores - The actual stores that make up this range.
SmallVector<Instruction*, 16> TheStores;
bool isProfitableToUseMemset(const DataLayout &DL) const;
};
} // end anonymous namespace
bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
// If we found at least 4 stores to merge, or at least 16 bytes, use memset.
if (TheStores.size() >= 4 || End-Start >= 16) return true;
// If there is nothing to merge, don't do anything.
if (TheStores.size() < 2) return false;
// If any of the stores are a memset, then it is always good to extend the
// memset.
for (Instruction *SI : TheStores)
if (!isa<StoreInst>(SI))
return true;
// Assume that the code generator is capable of merging pairs of stores
// together if it wants to.
if (TheStores.size() == 2) return false;
// If we have fewer than 8 stores, it can still be worthwhile to do this.
// For example, merging 4 i8 stores into an i32 store is useful almost always.
// However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
// memset will be split into 2 32-bit stores anyway) and doing so can
// pessimize the llvm optimizer.
//
// Since we don't have perfect knowledge here, make some assumptions: assume
// the maximum GPR width is the same size as the largest legal integer
// size. If so, check to see whether we will end up actually reducing the
// number of stores used.
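// A worked instance of this heuristic (illustrative numbers): three i32
// stores covering Bytes = 12 on a target whose largest legal integer is
// 32 bits give NumPointerStores = 3 and NumByteStores = 0; since 3 > 3 is
// false, the merge is rejected, which is right because the memset would be
// lowered back into three word-sized stores anyway.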
unsigned Bytes = unsigned(End-Start);
unsigned MaxIntSize = DL.getLargestLegalIntTypeSizeInBits() / 8;
if (MaxIntSize == 0)
MaxIntSize = 1;
unsigned NumPointerStores = Bytes / MaxIntSize;
// Assume the remaining bytes, if any, are done a byte at a time.
unsigned NumByteStores = Bytes % MaxIntSize;
// If we will reduce the # stores (according to this heuristic), do the
// transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
// etc.
return TheStores.size() > NumPointerStores+NumByteStores;
}
namespace {
class MemsetRanges {
using range_iterator = SmallVectorImpl<MemsetRange>::iterator;
/// A sorted list of the memset ranges.
SmallVector<MemsetRange, 8> Ranges;
const DataLayout &DL;
public:
MemsetRanges(const DataLayout &DL) : DL(DL) {}
using const_iterator = SmallVectorImpl<MemsetRange>::const_iterator;
const_iterator begin() const { return Ranges.begin(); }
const_iterator end() const { return Ranges.end(); }
bool empty() const { return Ranges.empty(); }
void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
addStore(OffsetFromFirst, SI);
else
addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
}
void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());
addRange(OffsetFromFirst, StoreSize,
SI->getPointerOperand(), SI->getAlignment(), SI);
}
void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getDestAlignment(), MSI);
}
void addRange(int64_t Start, int64_t Size, Value *Ptr,
unsigned Alignment, Instruction *Inst);
};
} // end anonymous namespace
/// Add a new store to the MemsetRanges data structure. This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
unsigned Alignment, Instruction *Inst) {
int64_t End = Start+Size;
range_iterator I = partition_point(
Ranges, [=](const MemsetRange &O) { return O.End < Start; });
// We now know that I == E, in which case we didn't find anything to merge
// with, or that Start <= I->End. If End < I->Start or I == E, then we need
// to insert a new range. Handle this now.
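// Worked example (illustrative offsets): with existing ranges [0, 4) and
// [8, 12), adding a store covering [2, 10) lands on the first range, grows
// it to [0, 10), and the merge loop below then folds in [8, 12), leaving a
// single range [0, 12).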
if (I == Ranges.end() || End < I->Start) {
MemsetRange &R = *Ranges.insert(I, MemsetRange());
R.Start = Start;
R.End = End;
R.StartPtr = Ptr;
R.Alignment = Alignment;
R.TheStores.push_back(Inst);
return;
}
// This store overlaps with I, add it.
I->TheStores.push_back(Inst);
// At this point, we may have an interval that completely contains our store.
// If so, just add it to the interval and return.
if (I->Start <= Start && I->End >= End)
return;
// Now we know that Start <= I->End and End >= I->Start so the range overlaps
// but is not entirely contained within the range.
// See if this store extends the start of the range. In this case, it couldn't
// possibly cause the range to join the prior range, because otherwise we
// would have stopped on *it*.
if (Start < I->Start) {
I->Start = Start;
I->StartPtr = Ptr;
I->Alignment = Alignment;
}
// Now we know that Start <= I->End and Start >= I->Start (so the startpoint
// is in or right at the end of I), and that End >= I->Start. Extend I out to
// End.
if (End > I->End) {
I->End = End;
range_iterator NextI = I;
while (++NextI != Ranges.end() && End >= NextI->Start) {
// Merge the range in.
I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
if (NextI->End > I->End)
I->End = NextI->End;
Ranges.erase(NextI);
NextI = I;
}
}
}
//===----------------------------------------------------------------------===//
// MemCpyOptLegacyPass Pass
//===----------------------------------------------------------------------===//
namespace {
class MemCpyOptLegacyPass : public FunctionPass {
MemCpyOptPass Impl;
public:
static char ID; // Pass identification, replacement for typeid
MemCpyOptLegacyPass() : FunctionPass(ID) {
initializeMemCpyOptLegacyPassPass(*PassRegistry::getPassRegistry());
}
bool runOnFunction(Function &F) override;
private:
// This transformation requires dominator and post-dominator info
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
AU.addRequired<AssumptionCacheTracker>();
AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<MemoryDependenceWrapperPass>();
AU.addRequired<AAResultsWrapperPass>();
AU.addRequired<TargetLibraryInfoWrapperPass>();
AU.addPreserved<GlobalsAAWrapperPass>();
AU.addPreserved<MemoryDependenceWrapperPass>();
}
};
} // end anonymous namespace
char MemCpyOptLegacyPass::ID = 0;
/// The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOptLegacyPass(); }
INITIALIZE_PASS_BEGIN(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_END(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
false, false)
/// When scanning forward over instructions, we look for some other patterns to
/// fold away. In particular, this looks for stores to neighboring locations of
/// memory. If it sees enough consecutive ones, it attempts to merge them
/// together into a memcpy/memset.
Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst,
Value *StartPtr,
Value *ByteVal) {
const DataLayout &DL = StartInst->getModule()->getDataLayout();
// Okay, so we now have a single store whose value is byte-splattable. Scan to
// find all subsequent stores of the same value at offsets from the same pointer.
// Join these together into ranges, so we can decide whether contiguous blocks
// are stored.
MemsetRanges Ranges(DL);
BasicBlock::iterator BI(StartInst);
for (++BI; !BI->isTerminator(); ++BI) {
if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
// If the instruction is readnone, ignore it, otherwise bail out. We
// don't even allow readonly here because we don't want something like:
// A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
break;
continue;
}
if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
// If this is a store, see if we can merge it in.
if (!NextStore->isSimple()) break;
// Check to see if this stored value is of the same byte-splattable value.
Value *StoredByte = isBytewiseValue(NextStore->getOperand(0), DL);
if (isa<UndefValue>(ByteVal) && StoredByte)
ByteVal = StoredByte;
if (ByteVal != StoredByte)
break;
// Check to see if this store is to a constant offset from the start ptr.
int64_t Offset;
if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset,
DL))
break;
Ranges.addStore(Offset, NextStore);
} else {
MemSetInst *MSI = cast<MemSetInst>(BI);
if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
!isa<ConstantInt>(MSI->getLength()))
break;
// Check to see if this store is to a constant offset from the start ptr.
int64_t Offset;
if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, DL))
break;
Ranges.addMemSet(Offset, MSI);
}
}
// If we have no ranges, then we just had a single store with nothing that
// could be merged in. This is a very common case of course.
if (Ranges.empty())
return nullptr;
// If we had at least one store that could be merged in, add the starting
// store as well. We try to avoid this unless there is at least something
// interesting as a small compile-time optimization.
Ranges.addInst(0, StartInst);
// If we create any memsets, we put them right before the first instruction
// that isn't part of the memset block. This ensures that the memset is
// dominated by any addressing instruction needed by the start of the block.
IRBuilder<> Builder(&*BI);
// Now that we have full information about ranges, loop over the ranges and
// emit memset's for anything big enough to be worthwhile.
Instruction *AMemSet = nullptr;
for (const MemsetRange &Range : Ranges) {
if (Range.TheStores.size() == 1) continue;
// If it is profitable to lower this range to memset, do so now.
if (!Range.isProfitableToUseMemset(DL))
continue;
// Otherwise, we do want to transform this! Create a new memset.
// Get the starting pointer of the block.
StartPtr = Range.StartPtr;
// Determine alignment
unsigned Alignment = Range.Alignment;
if (Alignment == 0) {
Type *EltType =
cast<PointerType>(StartPtr->getType())->getElementType();
Alignment = DL.getABITypeAlignment(EltType);
}
AMemSet =
Builder.CreateMemSet(StartPtr, ByteVal, Range.End-Range.Start, Alignment);
LLVM_DEBUG(dbgs() << "Replace stores:\n"; for (Instruction *SI
: Range.TheStores) dbgs()
<< *SI << '\n';
dbgs() << "With: " << *AMemSet << '\n');
if (!Range.TheStores.empty())
AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());
// Zap all the stores.
for (Instruction *SI : Range.TheStores) {
MD->removeInstruction(SI);
SI->eraseFromParent();
}
++NumMemSetInfer;
}
return AMemSet;
}
static unsigned findStoreAlignment(const DataLayout &DL, const StoreInst *SI) {
unsigned StoreAlign = SI->getAlignment();
if (!StoreAlign)
StoreAlign = DL.getABITypeAlignment(SI->getOperand(0)->getType());
return StoreAlign;
}
static unsigned findLoadAlignment(const DataLayout &DL, const LoadInst *LI) {
unsigned LoadAlign = LI->getAlignment();
if (!LoadAlign)
LoadAlign = DL.getABITypeAlignment(LI->getType());
return LoadAlign;
}
static unsigned findCommonAlignment(const DataLayout &DL, const StoreInst *SI,
const LoadInst *LI) {
unsigned StoreAlign = findStoreAlignment(DL, SI);
unsigned LoadAlign = findLoadAlignment(DL, LI);
return MinAlign(StoreAlign, LoadAlign);
}
// This method tries to lift a store instruction before position P.
// It will lift the store, its operands, and anything else that
// may alias with them.
// The method returns true if it was successful.
static bool moveUp(AliasAnalysis &AA, StoreInst *SI, Instruction *P,
const LoadInst *LI) {
// If the store aliases this position, bail out early.
MemoryLocation StoreLoc = MemoryLocation::get(SI);
if (isModOrRefSet(AA.getModRefInfo(P, StoreLoc)))
return false;
// Keep track of the arguments of all instructions we plan to lift
// so we can make sure to lift them as well if appropriate.
DenseSet<Instruction*> Args;
if (auto *Ptr = dyn_cast<Instruction>(SI->getPointerOperand()))
if (Ptr->getParent() == SI->getParent())
Args.insert(Ptr);
// Instructions to lift before P.
SmallVector<Instruction*, 8> ToLift;
// Memory locations of lifted instructions.
SmallVector<MemoryLocation, 8> MemLocs{StoreLoc};
// Lifted calls.
SmallVector<const CallBase *, 8> Calls;
const MemoryLocation LoadLoc = MemoryLocation::get(LI);
for (auto I = --SI->getIterator(), E = P->getIterator(); I != E; --I) {
auto *C = &*I;
bool MayAlias = isModOrRefSet(AA.getModRefInfo(C, None));
bool NeedLift = false;
if (Args.erase(C))
NeedLift = true;
else if (MayAlias) {
NeedLift = llvm::any_of(MemLocs, [C, &AA](const MemoryLocation &ML) {
return isModOrRefSet(AA.getModRefInfo(C, ML));
});
if (!NeedLift)
NeedLift = llvm::any_of(Calls, [C, &AA](const CallBase *Call) {
return isModOrRefSet(AA.getModRefInfo(C, Call));
});
}
if (!NeedLift)
continue;
if (MayAlias) {
// Since LI is implicitly moved downwards past the lifted instructions,
// none of them may modify its source.
if (isModSet(AA.getModRefInfo(C, LoadLoc)))
return false;
else if (const auto *Call = dyn_cast<CallBase>(C)) {
// If we can't lift this before P, it's game over.
if (isModOrRefSet(AA.getModRefInfo(P, Call)))
return false;
Calls.push_back(Call);
} else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) {
// If we can't lift this before P, it's game over.
auto ML = MemoryLocation::get(C);
if (isModOrRefSet(AA.getModRefInfo(P, ML)))
return false;
MemLocs.push_back(ML);
} else
// We don't know how to lift this instruction.
return false;
}
ToLift.push_back(C);
for (unsigned k = 0, e = C->getNumOperands(); k != e; ++k)
- if (auto *A = dyn_cast<Instruction>(C->getOperand(k)))
- if (A->getParent() == SI->getParent())
+ if (auto *A = dyn_cast<Instruction>(C->getOperand(k))) {
+ if (A->getParent() == SI->getParent()) {
+ // Cannot hoist user of P above P
+ if(A == P) return false;
Args.insert(A);
+ }
+ }
}
// We made it; now perform the lifting.
for (auto *I : llvm::reverse(ToLift)) {
LLVM_DEBUG(dbgs() << "Lifting " << *I << " before " << *P << "\n");
I->moveBefore(P);
}
return true;
}
bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
if (!SI->isSimple()) return false;
// Avoid merging nontemporal stores since the resulting
// memcpy/memset would not be able to preserve the nontemporal hint.
// In theory we could teach how to propagate the !nontemporal metadata to
// memset calls. However, that change would force the backend to
// conservatively expand !nontemporal memset calls back to sequences of
// store instructions (effectively undoing the merging).
if (SI->getMetadata(LLVMContext::MD_nontemporal))
return false;
const DataLayout &DL = SI->getModule()->getDataLayout();
// Load to store forwarding can be interpreted as memcpy.
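// A hypothetical sketch of the pattern handled below: for an aggregate type
// %struct.S,
//   %v = load %struct.S, %struct.S* %src
//   store %struct.S %v, %struct.S* %dst
// can be rewritten as a memcpy of sizeof(%struct.S) bytes from %src to %dst
// (or a memmove if the two locations may alias).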
if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
if (LI->isSimple() && LI->hasOneUse() &&
LI->getParent() == SI->getParent()) {
auto *T = LI->getType();
if (T->isAggregateType()) {
AliasAnalysis &AA = LookupAliasAnalysis();
MemoryLocation LoadLoc = MemoryLocation::get(LI);
// We use alias analysis to check if an instruction may store to
// the memory we load from in between the load and the store. If
// such an instruction is found, we try to promote there instead
// of at the store position.
Instruction *P = SI;
for (auto &I : make_range(++LI->getIterator(), SI->getIterator())) {
if (isModSet(AA.getModRefInfo(&I, LoadLoc))) {
P = &I;
break;
}
}
// We found an instruction that may write to the loaded memory.
// We can try to promote at this position instead of the store
// position if nothing aliases the store memory after this and the store
// destination is not in the range.
if (P && P != SI) {
if (!moveUp(AA, SI, P, LI))
P = nullptr;
}
// If a valid insertion position is found, then we can promote
// the load/store pair to a memcpy.
if (P) {
// If we load from memory that may alias the memory we store to,
// memmove must be used to preserve semantics. If not, memcpy can
// be used.
bool UseMemMove = false;
if (!AA.isNoAlias(MemoryLocation::get(SI), LoadLoc))
UseMemMove = true;
uint64_t Size = DL.getTypeStoreSize(T);
IRBuilder<> Builder(P);
Instruction *M;
if (UseMemMove)
M = Builder.CreateMemMove(
SI->getPointerOperand(), findStoreAlignment(DL, SI),
LI->getPointerOperand(), findLoadAlignment(DL, LI), Size);
else
M = Builder.CreateMemCpy(
SI->getPointerOperand(), findStoreAlignment(DL, SI),
LI->getPointerOperand(), findLoadAlignment(DL, LI), Size);
LLVM_DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI << " => "
<< *M << "\n");
MD->removeInstruction(SI);
SI->eraseFromParent();
MD->removeInstruction(LI);
LI->eraseFromParent();
++NumMemCpyInstr;
// Make sure we do not invalidate the iterator.
BBI = M->getIterator();
return true;
}
}
// Detect cases where we're performing call slot forwarding, but
// happen to be using a load-store pair to implement it, rather than
// a memcpy.
MemDepResult ldep = MD->getDependency(LI);
CallInst *C = nullptr;
if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
C = dyn_cast<CallInst>(ldep.getInst());
if (C) {
// Check that nothing touches the dest of the "copy" between
// the call and the store.
Value *CpyDest = SI->getPointerOperand()->stripPointerCasts();
bool CpyDestIsLocal = isa<AllocaInst>(CpyDest);
AliasAnalysis &AA = LookupAliasAnalysis();
MemoryLocation StoreLoc = MemoryLocation::get(SI);
for (BasicBlock::iterator I = --SI->getIterator(), E = C->getIterator();
I != E; --I) {
if (isModOrRefSet(AA.getModRefInfo(&*I, StoreLoc))) {
C = nullptr;
break;
}
// The store to dest may never happen if an exception can be thrown
// between the load and the store.
if (I->mayThrow() && !CpyDestIsLocal) {
C = nullptr;
break;
}
}
}
if (C) {
bool changed = performCallSlotOptzn(
LI, SI->getPointerOperand()->stripPointerCasts(),
LI->getPointerOperand()->stripPointerCasts(),
DL.getTypeStoreSize(SI->getOperand(0)->getType()),
findCommonAlignment(DL, SI, LI), C);
if (changed) {
MD->removeInstruction(SI);
SI->eraseFromParent();
MD->removeInstruction(LI);
LI->eraseFromParent();
++NumMemCpyInstr;
return true;
}
}
}
}
// There are two cases that are interesting for this code to handle: memcpy
// and memset. Right now we only handle memset.
// Ensure that the value being stored is something that can be memset a byte
// at a time, like "0" or "-1" of any width, as well as things like
// 0xA0A0A0A0 and 0.0.
auto *V = SI->getOperand(0);
if (Value *ByteVal = isBytewiseValue(V, DL)) {
if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
ByteVal)) {
BBI = I->getIterator(); // Don't invalidate iterator.
return true;
}
// If we have an aggregate, we try to promote it to memset regardless
// of opportunity for merging as it can expose optimization opportunities
// in subsequent passes.
auto *T = V->getType();
if (T->isAggregateType()) {
uint64_t Size = DL.getTypeStoreSize(T);
unsigned Align = SI->getAlignment();
if (!Align)
Align = DL.getABITypeAlignment(T);
IRBuilder<> Builder(SI);
auto *M =
Builder.CreateMemSet(SI->getPointerOperand(), ByteVal, Size, Align);
LLVM_DEBUG(dbgs() << "Promoting " << *SI << " to " << *M << "\n");
MD->removeInstruction(SI);
SI->eraseFromParent();
NumMemSetInfer++;
// Make sure we do not invalidate the iterator.
BBI = M->getIterator();
return true;
}
}
return false;
}
bool MemCpyOptPass::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
// See if there is another memset or store neighboring this memset which
// allows us to widen out the memset to do a single larger store.
if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
MSI->getValue())) {
BBI = I->getIterator(); // Don't invalidate iterator.
return true;
}
return false;
}
/// Takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpy, Value *cpyDest,
Value *cpySrc, uint64_t cpyLen,
unsigned cpyAlign, CallInst *C) {
// The general transformation to keep in mind is
//
// call @func(..., src, ...)
// memcpy(dest, src, ...)
//
// ->
//
// memcpy(dest, src, ...)
// call @func(..., dest, ...)
//
// Since moving the memcpy is technically awkward, we additionally check that
// src only holds uninitialized values at the moment of the call, meaning that
// the memcpy can be discarded rather than moved.
// Lifetime marks shouldn't be operated on.
if (Function *F = C->getCalledFunction())
if (F->isIntrinsic() && F->getIntrinsicID() == Intrinsic::lifetime_start)
return false;
// Deliberately get the source and destination with bitcasts stripped away,
// because we'll need to do type comparisons based on the underlying type.
CallSite CS(C);
// Require that src be an alloca. This simplifies the reasoning considerably.
AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
if (!srcAlloca)
return false;
ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
if (!srcArraySize)
return false;
const DataLayout &DL = cpy->getModule()->getDataLayout();
uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) *
srcArraySize->getZExtValue();
if (cpyLen < srcSize)
return false;
// Check that accessing the first srcSize bytes of dest will not cause a
// trap. Otherwise the transform is invalid since it might cause a trap
// to occur earlier than it otherwise would.
if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
// The destination is an alloca. Check it is larger than srcSize.
ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
if (!destArraySize)
return false;
uint64_t destSize = DL.getTypeAllocSize(A->getAllocatedType()) *
destArraySize->getZExtValue();
if (destSize < srcSize)
return false;
} else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
// The store to dest may never happen if the call can throw.
if (C->mayThrow())
return false;
if (A->getDereferenceableBytes() < srcSize) {
// If the destination is an sret parameter then only accesses that are
// outside of the returned struct type can trap.
if (!A->hasStructRetAttr())
return false;
Type *StructTy = cast<PointerType>(A->getType())->getElementType();
if (!StructTy->isSized()) {
// The call may never return and hence the copy-instruction may never
// be executed, so it's not safe to say "the destination has at least
// <cpyLen> bytes, as implied by the copy-instruction".
return false;
}
uint64_t destSize = DL.getTypeAllocSize(StructTy);
if (destSize < srcSize)
return false;
}
} else {
return false;
}
// Check that dest points to memory that is at least as aligned as src.
unsigned srcAlign = srcAlloca->getAlignment();
if (!srcAlign)
srcAlign = DL.getABITypeAlignment(srcAlloca->getAllocatedType());
bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
// If dest is not aligned enough and we can't increase its alignment then
// bail out.
if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
return false;
// Check that src is not accessed except via the call and the memcpy. This
// guarantees that it holds only undefined values when passed in (so the final
// memcpy can be dropped), that it is not read or written between the call and
// the memcpy, and that writing beyond the end of it is undefined.
SmallVector<User*, 8> srcUseList(srcAlloca->user_begin(),
srcAlloca->user_end());
while (!srcUseList.empty()) {
User *U = srcUseList.pop_back_val();
if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) {
for (User *UU : U->users())
srcUseList.push_back(UU);
continue;
}
if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) {
if (!G->hasAllZeroIndices())
return false;
for (User *UU : U->users())
srcUseList.push_back(UU);
continue;
}
if (const IntrinsicInst *IT = dyn_cast<IntrinsicInst>(U))
if (IT->isLifetimeStartOrEnd())
continue;
if (U != C && U != cpy)
return false;
}
// Check that src isn't captured by the called function since the
// transformation can cause aliasing issues in that case.
for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
if (CS.getArgument(i) == cpySrc && !CS.doesNotCapture(i))
return false;
// Since we're changing the parameter to the callsite, we need to make sure
// that what would be the new parameter dominates the callsite.
DominatorTree &DT = LookupDomTree();
if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
if (!DT.dominates(cpyDestInst, C))
return false;
// In addition to knowing that the call does not access src in some
// unexpected manner, for example via a global, which we deduce from
// the use analysis, we also need to know that it does not sneakily
// access dest. We rely on AA to figure this out for us.
AliasAnalysis &AA = LookupAliasAnalysis();
ModRefInfo MR = AA.getModRefInfo(C, cpyDest, LocationSize::precise(srcSize));
// If necessary, perform additional analysis.
if (isModOrRefSet(MR))
MR = AA.callCapturesBefore(C, cpyDest, LocationSize::precise(srcSize), &DT);
if (isModOrRefSet(MR))
return false;
// We can't create address space casts here because we don't know if they're
// safe for the target.
if (cpySrc->getType()->getPointerAddressSpace() !=
cpyDest->getType()->getPointerAddressSpace())
return false;
for (unsigned i = 0; i < CS.arg_size(); ++i)
if (CS.getArgument(i)->stripPointerCasts() == cpySrc &&
cpySrc->getType()->getPointerAddressSpace() !=
CS.getArgument(i)->getType()->getPointerAddressSpace())
return false;
// All the checks have passed, so do the transformation.
bool changedArgument = false;
for (unsigned i = 0; i < CS.arg_size(); ++i)
if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
Value *Dest = cpySrc->getType() == cpyDest->getType() ? cpyDest
: CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
cpyDest->getName(), C);
changedArgument = true;
if (CS.getArgument(i)->getType() == Dest->getType())
CS.setArgument(i, Dest);
else
CS.setArgument(i, CastInst::CreatePointerCast(Dest,
CS.getArgument(i)->getType(), Dest->getName(), C));
}
if (!changedArgument)
return false;
// If the destination wasn't sufficiently aligned then increase its alignment.
if (!isDestSufficientlyAligned) {
assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!");
cast<AllocaInst>(cpyDest)->setAlignment(srcAlign);
}
// Drop any cached information about the call, because we may have changed
// its dependence information by changing its parameter.
MD->removeInstruction(C);
// Update AA metadata
// FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be
// handled here, but combineMetadata doesn't support them yet
unsigned KnownIDs[] = {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
LLVMContext::MD_noalias,
LLVMContext::MD_invariant_group,
LLVMContext::MD_access_group};
combineMetadata(C, cpy, KnownIDs, true);
// Remove the memcpy.
MD->removeInstruction(cpy);
++NumMemCpyInstr;
return true;
}
/// We've found that the (upward scanning) memory dependence of memcpy 'M' is
/// the memcpy 'MDep'. Try to simplify M to copy from MDep's input if we can.
bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
MemCpyInst *MDep) {
// We can only transform memcpys where the dest of one is the source of the
// other.
if (M->getSource() != MDep->getDest() || MDep->isVolatile())
return false;
// If dep instruction is reading from our current input, then it is a noop
// transfer and substituting the input won't change this instruction. Just
// ignore the input and let someone else zap MDep. This handles cases like:
// memcpy(a <- a)
// memcpy(b <- a)
if (M->getSource() == MDep->getSource())
return false;
// Second, the lengths of the memcpys must be the same, or the preceding one
// must be larger than the following one.
ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
return false;
AliasAnalysis &AA = LookupAliasAnalysis();
// Verify that the copied-from memory doesn't change in between the two
// transfers. For example, in:
// memcpy(a <- b)
// *b = 42;
// memcpy(c <- a)
// It would be invalid to transform the second memcpy into memcpy(c <- b).
//
// TODO: If the code between M and MDep is transparent to the destination "c",
// then we could still perform the xform by moving M up to the first memcpy.
//
// NOTE: This is conservative, it will stop on any read from the source loc,
// not just the defining memcpy.
MemDepResult SourceDep =
MD->getPointerDependencyFrom(MemoryLocation::getForSource(MDep), false,
M->getIterator(), M->getParent());
if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
return false;
// If the dest of the second might alias the source of the first, then the
// source and dest might overlap. We still want to eliminate the intermediate
// value, but we have to generate a memmove instead of memcpy.
bool UseMemMove = false;
if (!AA.isNoAlias(MemoryLocation::getForDest(M),
MemoryLocation::getForSource(MDep)))
UseMemMove = true;
// If all checks passed, then we can transform M.
LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy->memcpy src:\n"
<< *MDep << '\n' << *M << '\n');
// TODO: Is this worth it if we're creating a less aligned memcpy? For
// example we could be moving from movaps -> movq on x86.
IRBuilder<> Builder(M);
if (UseMemMove)
Builder.CreateMemMove(M->getRawDest(), M->getDestAlignment(),
MDep->getRawSource(), MDep->getSourceAlignment(),
M->getLength(), M->isVolatile());
else
Builder.CreateMemCpy(M->getRawDest(), M->getDestAlignment(),
MDep->getRawSource(), MDep->getSourceAlignment(),
M->getLength(), M->isVolatile());
// Remove the instruction we're replacing.
MD->removeInstruction(M);
M->eraseFromParent();
++NumMemCpyInstr;
return true;
}
/// We've found that the (upward scanning) memory dependence of \p MemCpy is
/// \p MemSet. Try to simplify \p MemSet to only set the trailing bytes that
/// weren't copied over by \p MemCpy.
///
/// In other words, transform:
/// \code
/// memset(dst, c, dst_size);
/// memcpy(dst, src, src_size);
/// \endcode
/// into:
/// \code
/// memcpy(dst, src, src_size);
/// memset(dst + src_size, c, dst_size <= src_size ? 0 : dst_size - src_size);
/// \endcode
bool MemCpyOptPass::processMemSetMemCpyDependence(MemCpyInst *MemCpy,
MemSetInst *MemSet) {
// We can only transform memset/memcpy with the same destination.
if (MemSet->getDest() != MemCpy->getDest())
return false;
// Check that there are no other dependencies on the memset destination.
MemDepResult DstDepInfo =
MD->getPointerDependencyFrom(MemoryLocation::getForDest(MemSet), false,
MemCpy->getIterator(), MemCpy->getParent());
if (DstDepInfo.getInst() != MemSet)
return false;
// Use the same i8* dest as the memcpy, killing the memset dest if different.
Value *Dest = MemCpy->getRawDest();
Value *DestSize = MemSet->getLength();
Value *SrcSize = MemCpy->getLength();
// By default, create an unaligned memset.
unsigned Align = 1;
// If Dest is aligned and SrcSize is constant, then Dest + SrcSize is aligned
// to MinAlign(SrcSize, DestAlign); use that for the trailing memset.
const unsigned DestAlign =
std::max(MemSet->getDestAlignment(), MemCpy->getDestAlignment());
if (DestAlign > 1)
if (ConstantInt *SrcSizeC = dyn_cast<ConstantInt>(SrcSize))
Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign);
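// Worked example (illustrative numbers): with DestAlign = 8 and a constant
// SrcSize = 4, MinAlign(4, 8) = 4, so the trailing memset at Dest + SrcSize
// can safely claim 4-byte alignment.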
IRBuilder<> Builder(MemCpy);
// If the sizes have different types, zext the smaller one.
if (DestSize->getType() != SrcSize->getType()) {
if (DestSize->getType()->getIntegerBitWidth() >
SrcSize->getType()->getIntegerBitWidth())
SrcSize = Builder.CreateZExt(SrcSize, DestSize->getType());
else
DestSize = Builder.CreateZExt(DestSize, SrcSize->getType());
}
Value *Ule = Builder.CreateICmpULE(DestSize, SrcSize);
Value *SizeDiff = Builder.CreateSub(DestSize, SrcSize);
Value *MemsetLen = Builder.CreateSelect(
Ule, ConstantInt::getNullValue(DestSize->getType()), SizeDiff);
Builder.CreateMemSet(
Builder.CreateGEP(Dest->getType()->getPointerElementType(), Dest,
SrcSize),
MemSet->getOperand(1), MemsetLen, Align);
MD->removeInstruction(MemSet);
MemSet->eraseFromParent();
return true;
}
/// Determine whether the instruction has undefined content for the given Size,
/// either because it was freshly alloca'd or started its lifetime.
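/// For instance (illustrative IR), either of the following would qualify for
/// a 16-byte access:
///   %buf = alloca [16 x i8]
///   call void @llvm.lifetime.start.p0i8(i64 16, i8* %ptr)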
static bool hasUndefContents(Instruction *I, ConstantInt *Size) {
if (isa<AllocaInst>(I))
return true;
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
if (II->getIntrinsicID() == Intrinsic::lifetime_start)
if (ConstantInt *LTSize = dyn_cast<ConstantInt>(II->getArgOperand(0)))
if (LTSize->getZExtValue() >= Size->getZExtValue())
return true;
return false;
}
/// Transform memcpy to memset when its source was just memset.
/// In other words, turn:
/// \code
/// memset(dst1, c, dst1_size);
/// memcpy(dst2, dst1, dst2_size);
/// \endcode
/// into:
/// \code
/// memset(dst1, c, dst1_size);
/// memset(dst2, c, dst2_size);
/// \endcode
/// When dst2_size <= dst1_size.
///
/// The \p MemCpy must have a Constant length.
bool MemCpyOptPass::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy,
MemSetInst *MemSet) {
AliasAnalysis &AA = LookupAliasAnalysis();
// Make sure we have memcpy(..., memset(...), ...); that is, we are memsetting
// and memcpying from the same address. Otherwise this is hard to reason about.
if (!AA.isMustAlias(MemSet->getRawDest(), MemCpy->getRawSource()))
return false;
// A known memset size is required.
ConstantInt *MemSetSize = dyn_cast<ConstantInt>(MemSet->getLength());
if (!MemSetSize)
return false;
// Make sure the memcpy doesn't read any more than what the memset wrote.
// Don't worry about sizes larger than i64.
ConstantInt *CopySize = cast<ConstantInt>(MemCpy->getLength());
if (CopySize->getZExtValue() > MemSetSize->getZExtValue()) {
// If the memcpy is larger than the memset, but the memory was undef prior
// to the memset, we can just ignore the tail. Technically we're only
// interested in the bytes from MemSetSize..CopySize here, but as we can't
// easily represent this location, we use the full 0..CopySize range.
MemoryLocation MemCpyLoc = MemoryLocation::getForSource(MemCpy);
MemDepResult DepInfo = MD->getPointerDependencyFrom(
MemCpyLoc, true, MemSet->getIterator(), MemSet->getParent());
if (DepInfo.isDef() && hasUndefContents(DepInfo.getInst(), CopySize))
CopySize = MemSetSize;
else
return false;
}
IRBuilder<> Builder(MemCpy);
Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1),
CopySize, MemCpy->getDestAlignment());
return true;
}
/// Perform simplification of memcpy's. If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances). This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOptPass::processMemCpy(MemCpyInst *M) {
// We can only optimize non-volatile memcpy's.
if (M->isVolatile()) return false;
// If the source and destination of the memcpy are the same, then zap it.
if (M->getSource() == M->getDest()) {
MD->removeInstruction(M);
M->eraseFromParent();
return false;
}
// If copying from a constant, try to turn the memcpy into a memset.
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
if (GV->isConstant() && GV->hasDefinitiveInitializer())
if (Value *ByteVal = isBytewiseValue(GV->getInitializer(),
M->getModule()->getDataLayout())) {
IRBuilder<> Builder(M);
Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(),
M->getDestAlignment(), false);
MD->removeInstruction(M);
M->eraseFromParent();
++NumCpyToSet;
return true;
}
MemDepResult DepInfo = MD->getDependency(M);
// Try to turn a partially redundant memset + memcpy into
// memcpy + smaller memset. We don't need the memcpy size for this.
if (DepInfo.isClobber())
if (MemSetInst *MDep = dyn_cast<MemSetInst>(DepInfo.getInst()))
if (processMemSetMemCpyDependence(M, MDep))
return true;
// The optimizations after this point require the memcpy size.
ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
if (!CopySize) return false;
// There are four possible optimizations we can do for memcpy:
// a) memcpy-memcpy xform which exposes redundancy for DSE.
// b) call-memcpy xform for return slot optimization.
// c) memcpy from freshly alloca'd space or space that has just started its
// lifetime copies undefined data, and we can therefore eliminate the
// memcpy in favor of the data that was already at the destination.
// d) memcpy from a just-memset'd source can be turned into memset.
if (DepInfo.isClobber()) {
if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
// FIXME: Can we pass in either of dest/src alignment here instead
// of conservatively taking the minimum?
unsigned Align = MinAlign(M->getDestAlignment(), M->getSourceAlignment());
if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
CopySize->getZExtValue(), Align,
C)) {
MD->removeInstruction(M);
M->eraseFromParent();
return true;
}
}
}
MemoryLocation SrcLoc = MemoryLocation::getForSource(M);
MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(
SrcLoc, true, M->getIterator(), M->getParent());
if (SrcDepInfo.isClobber()) {
if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
return processMemCpyMemCpyDependence(M, MDep);
} else if (SrcDepInfo.isDef()) {
if (hasUndefContents(SrcDepInfo.getInst(), CopySize)) {
MD->removeInstruction(M);
M->eraseFromParent();
++NumMemCpyInstr;
return true;
}
}
if (SrcDepInfo.isClobber())
if (MemSetInst *MDep = dyn_cast<MemSetInst>(SrcDepInfo.getInst()))
if (performMemCpyToMemSetOptzn(M, MDep)) {
MD->removeInstruction(M);
M->eraseFromParent();
++NumCpyToSet;
return true;
}
return false;
}
/// Transforms memmove calls to memcpy calls when the src/dst are guaranteed
/// not to alias.
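/// For example (hypothetical operands): a memmove(%d, %s, %n) where alias
/// analysis proves %d and %s are NoAlias is rewritten in place into the
/// equivalent llvm.memcpy intrinsic with the same operand types.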
bool MemCpyOptPass::processMemMove(MemMoveInst *M) {
AliasAnalysis &AA = LookupAliasAnalysis();
if (!TLI->has(LibFunc_memmove))
return false;
// See if the pointers alias.
if (!AA.isNoAlias(MemoryLocation::getForDest(M),
MemoryLocation::getForSource(M)))
return false;
LLVM_DEBUG(dbgs() << "MemCpyOptPass: Optimizing memmove -> memcpy: " << *M
<< "\n");
// If not, then we know we can transform this.
Type *ArgTys[3] = { M->getRawDest()->getType(),
M->getRawSource()->getType(),
M->getLength()->getType() };
M->setCalledFunction(Intrinsic::getDeclaration(M->getModule(),
Intrinsic::memcpy, ArgTys));
// MemDep may have overly conservative information about this instruction;
// just conservatively flush it from the cache.
MD->removeInstruction(M);
++NumMoveToCpy;
return true;
}
/// This is called on every byval argument in call sites.
bool MemCpyOptPass::processByValArgument(CallSite CS, unsigned ArgNo) {
const DataLayout &DL = CS.getCaller()->getParent()->getDataLayout();
// Find out what feeds this byval argument.
Value *ByValArg = CS.getArgument(ArgNo);
Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
uint64_t ByValSize = DL.getTypeAllocSize(ByValTy);
MemDepResult DepInfo = MD->getPointerDependencyFrom(
MemoryLocation(ByValArg, LocationSize::precise(ByValSize)), true,
CS.getInstruction()->getIterator(), CS.getInstruction()->getParent());
if (!DepInfo.isClobber())
return false;
// If the byval argument isn't fed by a memcpy, ignore it. If it is fed by
// a memcpy, see if we can byval from the source of the memcpy instead of the
// result.
MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
if (!MDep || MDep->isVolatile() ||
ByValArg->stripPointerCasts() != MDep->getDest())
return false;
// The length of the memcpy must be larger than or equal to the size of the byval.
ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
if (!C1 || C1->getValue().getZExtValue() < ByValSize)
return false;
// Get the alignment of the byval. If the call doesn't specify the alignment,
// then it is some target specific value that we can't know.
unsigned ByValAlign = CS.getParamAlignment(ArgNo);
if (ByValAlign == 0) return false;
// If the byval's alignment is greater than the memcpy's source alignment, we
// check whether we can force the source of the memcpy to the alignment we
// need. If we fail, we bail out.
AssumptionCache &AC = LookupAssumptionCache();
DominatorTree &DT = LookupDomTree();
if (MDep->getSourceAlignment() < ByValAlign &&
getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL,
CS.getInstruction(), &AC, &DT) < ByValAlign)
return false;
// The address space of the memcpy source must match the byval argument
if (MDep->getSource()->getType()->getPointerAddressSpace() !=
ByValArg->getType()->getPointerAddressSpace())
return false;
// Verify that the copied-from memory doesn't change in between the memcpy and
// the byval call.
// memcpy(a <- b)
// *b = 42;
// foo(*a)
// It would be invalid to transform the second memcpy into foo(*b).
//
// NOTE: This is conservative, it will stop on any read from the source loc,
// not just the defining memcpy.
MemDepResult SourceDep = MD->getPointerDependencyFrom(
MemoryLocation::getForSource(MDep), false,
CS.getInstruction()->getIterator(), MDep->getParent());
if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
return false;
Value *TmpCast = MDep->getSource();
if (MDep->getSource()->getType() != ByValArg->getType())
TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
"tmpcast", CS.getInstruction());
LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy to byval:\n"
<< " " << *MDep << "\n"
<< " " << *CS.getInstruction() << "\n");
// Otherwise we're good! Update the byval argument.
CS.setArgument(ArgNo, TmpCast);
++NumMemCpyInstr;
return true;
}
/// Executes one iteration of MemCpyOptPass.
bool MemCpyOptPass::iterateOnFunction(Function &F) {
bool MadeChange = false;
DominatorTree &DT = LookupDomTree();
// Walk all instructions in the function.
for (BasicBlock &BB : F) {
// Skip unreachable blocks. For example processStore assumes that an
// instruction in a BB can't be dominated by a later instruction in the
// same BB (which is a scenario that can happen for an unreachable BB that
// has itself as a predecessor).
if (!DT.isReachableFromEntry(&BB))
continue;
for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
// Avoid invalidating the iterator.
Instruction *I = &*BI++;
bool RepeatInstruction = false;
if (StoreInst *SI = dyn_cast<StoreInst>(I))
MadeChange |= processStore(SI, BI);
else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
RepeatInstruction = processMemSet(M, BI);
else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
RepeatInstruction = processMemCpy(M);
else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
RepeatInstruction = processMemMove(M);
else if (auto CS = CallSite(I)) {
for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
if (CS.isByValArgument(i))
MadeChange |= processByValArgument(CS, i);
}
// Reprocess the instruction if desired.
if (RepeatInstruction) {
if (BI != BB.begin())
--BI;
MadeChange = true;
}
}
}
return MadeChange;
}
PreservedAnalyses MemCpyOptPass::run(Function &F, FunctionAnalysisManager &AM) {
auto &MD = AM.getResult<MemoryDependenceAnalysis>(F);
auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
auto LookupAliasAnalysis = [&]() -> AliasAnalysis & {
return AM.getResult<AAManager>(F);
};
auto LookupAssumptionCache = [&]() -> AssumptionCache & {
return AM.getResult<AssumptionAnalysis>(F);
};
auto LookupDomTree = [&]() -> DominatorTree & {
return AM.getResult<DominatorTreeAnalysis>(F);
};
bool MadeChange = runImpl(F, &MD, &TLI, LookupAliasAnalysis,
LookupAssumptionCache, LookupDomTree);
if (!MadeChange)
return PreservedAnalyses::all();
PreservedAnalyses PA;
PA.preserveSet<CFGAnalyses>();
PA.preserve<GlobalsAA>();
PA.preserve<MemoryDependenceAnalysis>();
return PA;
}
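// Usage note (an assumption about surrounding tooling, not from this file):
// under the new pass manager the pass can be exercised in isolation with,
// e.g.:
//   opt -passes=memcpyopt -S input.ll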
bool MemCpyOptPass::runImpl(
Function &F, MemoryDependenceResults *MD_, TargetLibraryInfo *TLI_,
std::function<AliasAnalysis &()> LookupAliasAnalysis_,
std::function<AssumptionCache &()> LookupAssumptionCache_,
std::function<DominatorTree &()> LookupDomTree_) {
bool MadeChange = false;
MD = MD_;
TLI = TLI_;
LookupAliasAnalysis = std::move(LookupAliasAnalysis_);
LookupAssumptionCache = std::move(LookupAssumptionCache_);
LookupDomTree = std::move(LookupDomTree_);
// If we don't have at least memset and memcpy, there is little point in doing
// anything here. These are required by a freestanding implementation, so if
// even they are disabled, there is no point in trying hard.
if (!TLI->has(LibFunc_memset) || !TLI->has(LibFunc_memcpy))
return false;
while (true) {
if (!iterateOnFunction(F))
break;
MadeChange = true;
}
MD = nullptr;
return MadeChange;
}
/// This is the main transformation entry point for a function.
bool MemCpyOptLegacyPass::runOnFunction(Function &F) {
if (skipFunction(F))
return false;
auto *MD = &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
auto LookupAliasAnalysis = [this]() -> AliasAnalysis & {
return getAnalysis<AAResultsWrapperPass>().getAAResults();
};
auto LookupAssumptionCache = [this, &F]() -> AssumptionCache & {
return getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
};
auto LookupDomTree = [this]() -> DominatorTree & {
return getAnalysis<DominatorTreeWrapperPass>().getDomTree();
};
return Impl.runImpl(F, MD, TLI, LookupAliasAnalysis, LookupAssumptionCache,
LookupDomTree);
}
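// Usage note (assumption): the legacy-pass-manager wrapper above corresponds
// to the classic command-line spelling:
//   opt -memcpyopt -S input.ll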
Index: stable/12/contrib/llvm-project/llvm/lib/Transforms/Scalar/SROA.cpp
===================================================================
--- stable/12/contrib/llvm-project/llvm/lib/Transforms/Scalar/SROA.cpp (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/lib/Transforms/Scalar/SROA.cpp (revision 356465)
@@ -1,4648 +1,4656 @@
//===- SROA.cpp - Scalar Replacement Of Aggregates ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This transformation implements the well known scalar replacement of
/// aggregates transformation. It tries to identify promotable elements of an
/// aggregate alloca, and promote them to registers. It will also try to
/// convert uses of an element (or set of elements) of an alloca into a vector
/// or bitfield-style integer scalar if appropriate.
///
/// It works to do this with minimal slicing of the alloca so that regions
/// which are merely transferred in and out of external memory remain unchanged
/// and are not decomposed to scalar code.
///
/// Because this also performs alloca promotion, it can be thought of as also
/// serving the purpose of SSA formation. The algorithm iterates on the
/// function until all opportunities for promotion have been realized.
///
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar/SROA.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantFolder.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>
#include <cassert>
#include <chrono>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#ifndef NDEBUG
// We only use this for a debug check.
#include <random>
#endif
using namespace llvm;
using namespace llvm::sroa;
#define DEBUG_TYPE "sroa"
STATISTIC(NumAllocasAnalyzed, "Number of allocas analyzed for replacement");
STATISTIC(NumAllocaPartitions, "Number of alloca partitions formed");
STATISTIC(MaxPartitionsPerAlloca, "Maximum number of partitions per alloca");
STATISTIC(NumAllocaPartitionUses, "Number of alloca partition uses rewritten");
STATISTIC(MaxUsesPerAllocaPartition, "Maximum number of uses of a partition");
STATISTIC(NumNewAllocas, "Number of new, smaller allocas introduced");
STATISTIC(NumPromoted, "Number of allocas promoted to SSA values");
STATISTIC(NumLoadsSpeculated, "Number of loads speculated to allow promotion");
STATISTIC(NumDeleted, "Number of instructions deleted");
STATISTIC(NumVectorized, "Number of vectorized aggregates");
/// Hidden option to enable randomly shuffling the slices to help uncover
/// instability in their order.
static cl::opt<bool> SROARandomShuffleSlices("sroa-random-shuffle-slices",
cl::init(false), cl::Hidden);
/// Hidden option to experiment with completely strict handling of inbounds
/// GEPs.
static cl::opt<bool> SROAStrictInbounds("sroa-strict-inbounds", cl::init(false),
cl::Hidden);
namespace {
/// A custom IRBuilder inserter which prefixes all names, but only in
/// Assert builds.
class IRBuilderPrefixedInserter : public IRBuilderDefaultInserter {
std::string Prefix;
const Twine getNameWithPrefix(const Twine &Name) const {
return Name.isTriviallyEmpty() ? Name : Prefix + Name;
}
public:
void SetNamePrefix(const Twine &P) { Prefix = P.str(); }
protected:
void InsertHelper(Instruction *I, const Twine &Name, BasicBlock *BB,
BasicBlock::iterator InsertPt) const {
IRBuilderDefaultInserter::InsertHelper(I, getNameWithPrefix(Name), BB,
InsertPt);
}
};
/// Provide a type for IRBuilder that drops names in release builds.
using IRBuilderTy = IRBuilder<ConstantFolder, IRBuilderPrefixedInserter>;
/// A used slice of an alloca.
///
/// This structure represents a slice of an alloca used by some instruction. It
/// stores both the begin and end offsets of this use, a pointer to the use
/// itself, and a flag indicating whether we can classify the use as splittable
/// or not when forming partitions of the alloca.
class Slice {
/// The beginning offset of the range.
uint64_t BeginOffset = 0;
/// The ending offset, not included in the range.
uint64_t EndOffset = 0;
/// Storage for both the use of this slice and whether it can be
/// split.
PointerIntPair<Use *, 1, bool> UseAndIsSplittable;
public:
Slice() = default;
Slice(uint64_t BeginOffset, uint64_t EndOffset, Use *U, bool IsSplittable)
: BeginOffset(BeginOffset), EndOffset(EndOffset),
UseAndIsSplittable(U, IsSplittable) {}
uint64_t beginOffset() const { return BeginOffset; }
uint64_t endOffset() const { return EndOffset; }
bool isSplittable() const { return UseAndIsSplittable.getInt(); }
void makeUnsplittable() { UseAndIsSplittable.setInt(false); }
Use *getUse() const { return UseAndIsSplittable.getPointer(); }
bool isDead() const { return getUse() == nullptr; }
void kill() { UseAndIsSplittable.setPointer(nullptr); }
/// Support for ordering ranges.
///
/// This provides an ordering over ranges such that start offsets are
/// always increasing, and within equal start offsets, the end offsets are
/// decreasing. Thus the spanning range comes first in a cluster with the
/// same start position.
bool operator<(const Slice &RHS) const {
if (beginOffset() < RHS.beginOffset())
return true;
if (beginOffset() > RHS.beginOffset())
return false;
if (isSplittable() != RHS.isSplittable())
return !isSplittable();
if (endOffset() > RHS.endOffset())
return true;
return false;
}
/// Support comparison with a single offset to allow binary searches.
friend LLVM_ATTRIBUTE_UNUSED bool operator<(const Slice &LHS,
uint64_t RHSOffset) {
return LHS.beginOffset() < RHSOffset;
}
friend LLVM_ATTRIBUTE_UNUSED bool operator<(uint64_t LHSOffset,
const Slice &RHS) {
return LHSOffset < RHS.beginOffset();
}
bool operator==(const Slice &RHS) const {
return isSplittable() == RHS.isSplittable() &&
beginOffset() == RHS.beginOffset() && endOffset() == RHS.endOffset();
}
bool operator!=(const Slice &RHS) const { return !operator==(RHS); }
};
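// Worked example of the ordering above (hypothetical offsets): the slices
//   [0,16) splittable, [0,8) unsplittable, [0,8) splittable, [4,12)
// sort as
//   [0,8) unsplittable, [0,16) splittable, [0,8) splittable, [4,12)
// i.e. by increasing begin offset; at equal begin offsets unsplittable
// slices come first, then splittable ones by decreasing end offset.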
} // end anonymous namespace
/// Representation of the alloca slices.
///
/// This class represents the slices of an alloca which are formed by its
/// various uses. If a pointer escapes, we can't fully build a representation
/// for the slices used and we reflect that in this structure. The uses are
/// stored, sorted by increasing beginning offset and with unsplittable slices
/// starting at a particular offset before splittable slices.
class llvm::sroa::AllocaSlices {
public:
/// Construct the slices of a particular alloca.
AllocaSlices(const DataLayout &DL, AllocaInst &AI);
/// Test whether a pointer to the allocation escapes our analysis.
///
/// If this is true, the slices are never fully built and should be
/// ignored.
bool isEscaped() const { return PointerEscapingInstr; }
/// Support for iterating over the slices.
/// @{
using iterator = SmallVectorImpl<Slice>::iterator;
using range = iterator_range<iterator>;
iterator begin() { return Slices.begin(); }
iterator end() { return Slices.end(); }
using const_iterator = SmallVectorImpl<Slice>::const_iterator;
using const_range = iterator_range<const_iterator>;
const_iterator begin() const { return Slices.begin(); }
const_iterator end() const { return Slices.end(); }
/// @}
/// Erase a range of slices.
void erase(iterator Start, iterator Stop) { Slices.erase(Start, Stop); }
/// Insert new slices for this alloca.
///
/// This moves the slices into the alloca's slices collection, and re-sorts
/// everything so that the usual ordering properties of the alloca's slices
/// hold.
void insert(ArrayRef<Slice> NewSlices) {
int OldSize = Slices.size();
Slices.append(NewSlices.begin(), NewSlices.end());
auto SliceI = Slices.begin() + OldSize;
llvm::sort(SliceI, Slices.end());
std::inplace_merge(Slices.begin(), SliceI, Slices.end());
}
// Forward declare the iterator and range accessor for walking the
// partitions.
class partition_iterator;
iterator_range<partition_iterator> partitions();
/// Access the dead users for this alloca.
ArrayRef<Instruction *> getDeadUsers() const { return DeadUsers; }
/// Access the dead operands referring to this alloca.
///
/// These are operands which cannot actually be used to refer to the
/// alloca as they are outside its range and the user doesn't correct for
/// that. These mostly consist of PHI node inputs and the like which we just
/// need to replace with undef.
ArrayRef<Use *> getDeadOperands() const { return DeadOperands; }
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void print(raw_ostream &OS, const_iterator I, StringRef Indent = " ") const;
void printSlice(raw_ostream &OS, const_iterator I,
StringRef Indent = " ") const;
void printUse(raw_ostream &OS, const_iterator I,
StringRef Indent = " ") const;
void print(raw_ostream &OS) const;
void dump(const_iterator I) const;
void dump() const;
#endif
private:
template <typename DerivedT, typename RetT = void> class BuilderBase;
class SliceBuilder;
friend class AllocaSlices::SliceBuilder;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Handle to alloca instruction to simplify method interfaces.
AllocaInst &AI;
#endif
/// The instruction responsible for this alloca not having a known set
/// of slices.
///
/// When an instruction (potentially) escapes the pointer to the alloca, we
/// store a pointer to that here and abort trying to form slices of the
/// alloca. This will be null if the alloca slices are analyzed successfully.
Instruction *PointerEscapingInstr;
/// The slices of the alloca.
///
/// We store a vector of the slices formed by uses of the alloca here. This
/// vector is sorted by increasing begin offset, and then the unsplittable
/// slices before the splittable ones. See the Slice inner class for more
/// details.
SmallVector<Slice, 8> Slices;
/// Instructions which will become dead if we rewrite the alloca.
///
/// Note that these are not separated by slice. This is because we expect an
/// alloca to be completely rewritten or not rewritten at all. If rewritten,
/// all these instructions can simply be removed and replaced with undef as
/// they come from outside of the allocated space.
SmallVector<Instruction *, 8> DeadUsers;
/// Operands which will become dead if we rewrite the alloca.
///
/// These are operands that in their particular use can be replaced with
/// undef when we rewrite the alloca. These show up in out-of-bounds inputs
/// to PHI nodes and the like. They aren't entirely dead (there might be
/// a GEP back into the bounds using it elsewhere) and neither is the PHI, but we
/// want to swap this particular input for undef to simplify the use lists of
/// the alloca.
SmallVector<Use *, 8> DeadOperands;
};
/// A partition of the slices.
///
/// An ephemeral representation for a range of slices which can be viewed as
/// a partition of the alloca. This range represents a span of the alloca's
/// memory which cannot be split, and provides access to all of the slices
/// overlapping some part of the partition.
///
/// Objects of this type are produced by traversing the alloca's slices, but
/// are only ephemeral and not persistent.
class llvm::sroa::Partition {
private:
friend class AllocaSlices;
friend class AllocaSlices::partition_iterator;
using iterator = AllocaSlices::iterator;
/// The beginning and ending offsets of the alloca for this
/// partition.
uint64_t BeginOffset, EndOffset;
/// The start and end iterators of this partition.
iterator SI, SJ;
/// A collection of split slice tails overlapping the partition.
SmallVector<Slice *, 4> SplitTails;
/// Raw constructor builds an empty partition starting and ending at
/// the given iterator.
Partition(iterator SI) : SI(SI), SJ(SI) {}
public:
/// The start offset of this partition.
///
/// All of the contained slices start at or after this offset.
uint64_t beginOffset() const { return BeginOffset; }
/// The end offset of this partition.
///
/// All of the contained slices end at or before this offset.
uint64_t endOffset() const { return EndOffset; }
/// The size of the partition.
///
/// Note that this can never be zero.
uint64_t size() const {
assert(BeginOffset < EndOffset && "Partitions must span some bytes!");
return EndOffset - BeginOffset;
}
/// Test whether this partition contains no slices, and merely spans
/// a region occupied by split slices.
bool empty() const { return SI == SJ; }
/// \name Iterate slices that start within the partition.
/// These may be splittable or unsplittable. They have a begin offset >= the
/// partition begin offset.
/// @{
// FIXME: We should probably define a "concat_iterator" helper and use that
// to stitch together pointee_iterators over the split tails and the
// contiguous iterators of the partition. That would give a much nicer
// interface here. We could then additionally expose filtered iterators for
// split, unsplit, and unsplittable slices based on the usage patterns.
iterator begin() const { return SI; }
iterator end() const { return SJ; }
/// @}
/// Get the sequence of split slice tails.
///
/// These tails are of slices which start before this partition but are
/// split and overlap into the partition. We accumulate these while forming
/// partitions.
ArrayRef<Slice *> splitSliceTails() const { return SplitTails; }
};
/// An iterator over partitions of the alloca's slices.
///
/// This iterator implements the core algorithm for partitioning the alloca's
/// slices. It is a forward iterator as we don't support backtracking for
/// efficiency reasons, and re-use a single storage area to maintain the
/// current set of split slices.
///
/// It is templated on the slice iterator type to use so that it can operate
/// with either const or non-const slice iterators.
class AllocaSlices::partition_iterator
: public iterator_facade_base<partition_iterator, std::forward_iterator_tag,
Partition> {
friend class AllocaSlices;
/// Most of the state for walking the partitions is held in a class
/// with a nice interface for examining them.
Partition P;
/// We need to keep the end of the slices to know when to stop.
AllocaSlices::iterator SE;
/// We also need to keep track of the maximum split end offset seen.
/// FIXME: Do we really?
uint64_t MaxSplitSliceEndOffset = 0;
/// Sets the partition to be empty at given iterator, and sets the
/// end iterator.
partition_iterator(AllocaSlices::iterator SI, AllocaSlices::iterator SE)
: P(SI), SE(SE) {
// If not already at the end, advance our state to form the initial
// partition.
if (SI != SE)
advance();
}
/// Advance the iterator to the next partition.
///
/// Requires that the iterator not be at the end of the slices.
void advance() {
assert((P.SI != SE || !P.SplitTails.empty()) &&
"Cannot advance past the end of the slices!");
// Clear out any split uses which have ended.
if (!P.SplitTails.empty()) {
if (P.EndOffset >= MaxSplitSliceEndOffset) {
// If we've finished all splits, this is easy.
P.SplitTails.clear();
MaxSplitSliceEndOffset = 0;
} else {
// Remove the uses which have ended in the prior partition. This
// cannot change the max split slice end because we just checked that
// the prior partition ended prior to that max.
P.SplitTails.erase(llvm::remove_if(P.SplitTails,
[&](Slice *S) {
return S->endOffset() <=
P.EndOffset;
}),
P.SplitTails.end());
assert(llvm::any_of(P.SplitTails,
[&](Slice *S) {
return S->endOffset() == MaxSplitSliceEndOffset;
}) &&
"Could not find the current max split slice offset!");
assert(llvm::all_of(P.SplitTails,
[&](Slice *S) {
return S->endOffset() <= MaxSplitSliceEndOffset;
}) &&
"Max split slice end offset is not actually the max!");
}
}
// If P.SI is already at the end, then we've cleared the split tail and
// now have an end iterator.
if (P.SI == SE) {
assert(P.SplitTails.empty() && "Failed to clear the split slices!");
return;
}
// If we had a non-empty partition previously, set up the state for
// subsequent partitions.
if (P.SI != P.SJ) {
// Accumulate all the splittable slices which started in the old
// partition into the split list.
for (Slice &S : P)
if (S.isSplittable() && S.endOffset() > P.EndOffset) {
P.SplitTails.push_back(&S);
MaxSplitSliceEndOffset =
std::max(S.endOffset(), MaxSplitSliceEndOffset);
}
// Start from the end of the previous partition.
P.SI = P.SJ;
// If P.SI is now at the end, we at most have a tail of split slices.
if (P.SI == SE) {
P.BeginOffset = P.EndOffset;
P.EndOffset = MaxSplitSliceEndOffset;
return;
}
// If we have split slices and the next slice is after a gap and is
// not splittable, immediately form an empty partition for the split
// slices up until the next slice begins.
if (!P.SplitTails.empty() && P.SI->beginOffset() != P.EndOffset &&
!P.SI->isSplittable()) {
P.BeginOffset = P.EndOffset;
P.EndOffset = P.SI->beginOffset();
return;
}
}
// OK, we need to consume new slices. Set the end offset based on the
// current slice, and step SJ past it. The beginning offset of the
// partition is the beginning offset of the next slice unless we have
// pre-existing split slices that are continuing, in which case we begin
// at the prior end offset.
P.BeginOffset = P.SplitTails.empty() ? P.SI->beginOffset() : P.EndOffset;
P.EndOffset = P.SI->endOffset();
++P.SJ;
// There are two strategies to form a partition based on whether the
// partition starts with an unsplittable slice or a splittable slice.
if (!P.SI->isSplittable()) {
// When we're forming an unsplittable region, it must always start at
// the first slice and will extend through its end.
assert(P.BeginOffset == P.SI->beginOffset());
// Form a partition including all of the overlapping slices with this
// unsplittable slice.
while (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset) {
if (!P.SJ->isSplittable())
P.EndOffset = std::max(P.EndOffset, P.SJ->endOffset());
++P.SJ;
}
// We have a partition across a set of overlapping unsplittable
// slices.
return;
}
// If we're starting with a splittable slice, then we need to form
// a synthetic partition spanning it and any other overlapping splittable
// slices.
assert(P.SI->isSplittable() && "Forming a splittable partition!");
// Collect all of the overlapping splittable slices.
while (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset &&
P.SJ->isSplittable()) {
P.EndOffset = std::max(P.EndOffset, P.SJ->endOffset());
++P.SJ;
}
// Back up P.EndOffset if we ended the span early when encountering an
// unsplittable slice. This synthesizes the early end offset of
// a partition spanning only splittable slices.
if (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset) {
assert(!P.SJ->isSplittable());
P.EndOffset = P.SJ->beginOffset();
}
}
public:
bool operator==(const partition_iterator &RHS) const {
assert(SE == RHS.SE &&
"End iterators don't match between compared partition iterators!");
// The observed positions of partitions are marked by the P.SI iterator and
// the emptiness of the split slices. The latter is only relevant when
// P.SI == SE, as the end iterator will additionally have an empty split
// slices list, but the prior may have the same P.SI and a tail of split
// slices.
if (P.SI == RHS.P.SI && P.SplitTails.empty() == RHS.P.SplitTails.empty()) {
assert(P.SJ == RHS.P.SJ &&
"Same set of slices formed two different sized partitions!");
assert(P.SplitTails.size() == RHS.P.SplitTails.size() &&
"Same slice position with differently sized non-empty split "
"slice tails!");
return true;
}
return false;
}
partition_iterator &operator++() {
advance();
return *this;
}
Partition &operator*() { return P; }
};
/// A forward range over the partitions of the alloca's slices.
///
/// This accesses an iterator range over the partitions of the alloca's
/// slices. It computes these partitions on the fly based on the overlapping
/// offsets of the slices and the ability to split them. It will visit "empty"
/// partitions to cover regions of the alloca only accessed via split
/// slices.
iterator_range<AllocaSlices::partition_iterator> AllocaSlices::partitions() {
return make_range(partition_iterator(begin(), end()),
partition_iterator(end(), end()));
}
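// Worked example (hypothetical slices): given
//   [0,8) unsplittable, [4,16) splittable, [16,20) unsplittable
// the iterator yields three partitions:
//   [0,8)   containing both overlapping slices,
//   [8,16)  an "empty" partition covering only the split tail of [4,16),
//   [16,20) containing the final slice.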
static Value *foldSelectInst(SelectInst &SI) {
// If the condition being selected on is a constant or the same value is
// being selected between, fold the select. Yes this does (rarely) happen
// early on.
if (ConstantInt *CI = dyn_cast<ConstantInt>(SI.getCondition()))
return SI.getOperand(1 + CI->isZero());
if (SI.getOperand(1) == SI.getOperand(2))
return SI.getOperand(1);
return nullptr;
}
/// A helper that folds a PHI node or a select.
static Value *foldPHINodeOrSelectInst(Instruction &I) {
if (PHINode *PN = dyn_cast<PHINode>(&I)) {
// If PN merges together the same value, return that value.
return PN->hasConstantValue();
}
return foldSelectInst(cast<SelectInst>(I));
}
/// Builder for the alloca slices.
///
/// This class builds a set of alloca slices by recursively visiting the uses
/// of an alloca and making a slice for each load and store at each offset.
class AllocaSlices::SliceBuilder : public PtrUseVisitor<SliceBuilder> {
friend class PtrUseVisitor<SliceBuilder>;
friend class InstVisitor<SliceBuilder>;
using Base = PtrUseVisitor<SliceBuilder>;
const uint64_t AllocSize;
AllocaSlices &AS;
SmallDenseMap<Instruction *, unsigned> MemTransferSliceMap;
SmallDenseMap<Instruction *, uint64_t> PHIOrSelectSizes;
/// Set to de-duplicate dead instructions found in the use walk.
SmallPtrSet<Instruction *, 4> VisitedDeadInsts;
public:
SliceBuilder(const DataLayout &DL, AllocaInst &AI, AllocaSlices &AS)
: PtrUseVisitor<SliceBuilder>(DL),
AllocSize(DL.getTypeAllocSize(AI.getAllocatedType())), AS(AS) {}
private:
void markAsDead(Instruction &I) {
if (VisitedDeadInsts.insert(&I).second)
AS.DeadUsers.push_back(&I);
}
void insertUse(Instruction &I, const APInt &Offset, uint64_t Size,
bool IsSplittable = false) {
// Completely skip uses which have a zero size or start either before or
// past the end of the allocation.
if (Size == 0 || Offset.uge(AllocSize)) {
LLVM_DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @"
<< Offset
<< " which has zero size or starts outside of the "
<< AllocSize << " byte alloca:\n"
<< " alloca: " << AS.AI << "\n"
<< " use: " << I << "\n");
return markAsDead(I);
}
uint64_t BeginOffset = Offset.getZExtValue();
uint64_t EndOffset = BeginOffset + Size;
// Clamp the end offset to the end of the allocation. Note that this is
// formulated to handle even the case where "BeginOffset + Size" overflows.
// This may appear superficially to be something we could ignore entirely,
// but that is not so! There may be widened loads or PHI-node uses where
// some instructions are dead but not others. We can't completely ignore
// them, and so have to record at least the information here.
assert(AllocSize >= BeginOffset); // Established above.
if (Size > AllocSize - BeginOffset) {
LLVM_DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @"
<< Offset << " to remain within the " << AllocSize
<< " byte alloca:\n"
<< " alloca: " << AS.AI << "\n"
<< " use: " << I << "\n");
EndOffset = AllocSize;
}
AS.Slices.push_back(Slice(BeginOffset, EndOffset, U, IsSplittable));
}
void visitBitCastInst(BitCastInst &BC) {
if (BC.use_empty())
return markAsDead(BC);
return Base::visitBitCastInst(BC);
}
void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
if (ASC.use_empty())
return markAsDead(ASC);
return Base::visitAddrSpaceCastInst(ASC);
}
void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
if (GEPI.use_empty())
return markAsDead(GEPI);
if (SROAStrictInbounds && GEPI.isInBounds()) {
// FIXME: This is a manually un-factored variant of the basic offset
// computation inside of GEPs, with checking of the inbounds invariant
// specified in the langref in a very strict sense. If we ever want to enable
// SROAStrictInbounds, this code should be factored cleanly into
// PtrUseVisitor, but it is easier to experiment with SROAStrictInbounds
// by writing out the code here where we have the underlying allocation
// size readily available.
APInt GEPOffset = Offset;
const DataLayout &DL = GEPI.getModule()->getDataLayout();
for (gep_type_iterator GTI = gep_type_begin(GEPI),
GTE = gep_type_end(GEPI);
GTI != GTE; ++GTI) {
ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
if (!OpC)
break;
// Handle a struct index, which adds its field offset to the pointer.
if (StructType *STy = GTI.getStructTypeOrNull()) {
unsigned ElementIdx = OpC->getZExtValue();
const StructLayout *SL = DL.getStructLayout(STy);
GEPOffset +=
APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx));
} else {
// For array or vector indices, scale the index by the size of the
// type.
APInt Index = OpC->getValue().sextOrTrunc(Offset.getBitWidth());
GEPOffset += Index * APInt(Offset.getBitWidth(),
DL.getTypeAllocSize(GTI.getIndexedType()));
}
// If this index has computed an intermediate pointer which is not
// inbounds, then the result of the GEP is a poison value and we can
// delete it and all uses.
if (GEPOffset.ugt(AllocSize))
return markAsDead(GEPI);
}
}
return Base::visitGetElementPtrInst(GEPI);
}
void handleLoadOrStore(Type *Ty, Instruction &I, const APInt &Offset,
uint64_t Size, bool IsVolatile) {
// We allow splitting of non-volatile loads and stores where the type is an
// integer type. These may be used to implement 'memcpy' or other "transfer
// of bits" patterns.
bool IsSplittable = Ty->isIntegerTy() && !IsVolatile;
insertUse(I, Offset, Size, IsSplittable);
}
void visitLoadInst(LoadInst &LI) {
assert((!LI.isSimple() || LI.getType()->isSingleValueType()) &&
"All simple FCA loads should have been pre-split");
if (!IsOffsetKnown)
return PI.setAborted(&LI);
if (LI.isVolatile() &&
LI.getPointerAddressSpace() != DL.getAllocaAddrSpace())
return PI.setAborted(&LI);
uint64_t Size = DL.getTypeStoreSize(LI.getType());
return handleLoadOrStore(LI.getType(), LI, Offset, Size, LI.isVolatile());
}
void visitStoreInst(StoreInst &SI) {
Value *ValOp = SI.getValueOperand();
if (ValOp == *U)
return PI.setEscapedAndAborted(&SI);
if (!IsOffsetKnown)
return PI.setAborted(&SI);
if (SI.isVolatile() &&
SI.getPointerAddressSpace() != DL.getAllocaAddrSpace())
return PI.setAborted(&SI);
uint64_t Size = DL.getTypeStoreSize(ValOp->getType());
// If this memory access can be shown to *statically* extend outside the
// bounds of the allocation, its behavior is undefined, so simply
// ignore it. Note that this is more strict than the generic clamping
// behavior of insertUse. We also try to handle cases which might run the
// risk of overflow.
// FIXME: We should instead consider the pointer to have escaped if this
// function is being instrumented for addressing bugs or race conditions.
if (Size > AllocSize || Offset.ugt(AllocSize - Size)) {
LLVM_DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte store @"
<< Offset << " which extends past the end of the "
<< AllocSize << " byte alloca:\n"
<< " alloca: " << AS.AI << "\n"
<< " use: " << SI << "\n");
return markAsDead(SI);
}
assert((!SI.isSimple() || ValOp->getType()->isSingleValueType()) &&
"All simple FCA stores should have been pre-split");
handleLoadOrStore(ValOp->getType(), SI, Offset, Size, SI.isVolatile());
}
void visitMemSetInst(MemSetInst &II) {
assert(II.getRawDest() == *U && "Pointer use is not the destination?");
ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
if ((Length && Length->getValue() == 0) ||
(IsOffsetKnown && Offset.uge(AllocSize)))
// Zero-length mem transfer intrinsics can be ignored entirely.
return markAsDead(II);
if (!IsOffsetKnown)
return PI.setAborted(&II);
// Don't replace this with a store with a different address space. TODO:
// Use a store with the casted new alloca?
if (II.isVolatile() && II.getDestAddressSpace() != DL.getAllocaAddrSpace())
return PI.setAborted(&II);
insertUse(II, Offset, Length ? Length->getLimitedValue()
: AllocSize - Offset.getLimitedValue(),
(bool)Length);
}
void visitMemTransferInst(MemTransferInst &II) {
ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
if (Length && Length->getValue() == 0)
// Zero-length mem transfer intrinsics can be ignored entirely.
return markAsDead(II);
// Because we can visit these intrinsics twice, also check to see if the
// first time marked this instruction as dead. If so, skip it.
if (VisitedDeadInsts.count(&II))
return;
if (!IsOffsetKnown)
return PI.setAborted(&II);
// Don't replace this with a load/store with a different address space.
// TODO: Use a store with the casted new alloca?
if (II.isVolatile() &&
(II.getDestAddressSpace() != DL.getAllocaAddrSpace() ||
II.getSourceAddressSpace() != DL.getAllocaAddrSpace()))
return PI.setAborted(&II);
// This side of the transfer is completely out-of-bounds, and so we can
// nuke the entire transfer. However, we also need to nuke the other side
// if already added to our partitions.
// FIXME: Yet another place we really should bypass this when
// instrumenting for ASan.
if (Offset.uge(AllocSize)) {
SmallDenseMap<Instruction *, unsigned>::iterator MTPI =
MemTransferSliceMap.find(&II);
if (MTPI != MemTransferSliceMap.end())
AS.Slices[MTPI->second].kill();
return markAsDead(II);
}
uint64_t RawOffset = Offset.getLimitedValue();
uint64_t Size = Length ? Length->getLimitedValue() : AllocSize - RawOffset;
// Check for the special case where the same exact value is used for both
// source and dest.
if (*U == II.getRawDest() && *U == II.getRawSource()) {
// For non-volatile transfers this is a no-op.
if (!II.isVolatile())
return markAsDead(II);
return insertUse(II, Offset, Size, /*IsSplittable=*/false);
}
// If we have seen both source and destination for a mem transfer, then
// they both point to the same alloca.
bool Inserted;
SmallDenseMap<Instruction *, unsigned>::iterator MTPI;
std::tie(MTPI, Inserted) =
MemTransferSliceMap.insert(std::make_pair(&II, AS.Slices.size()));
unsigned PrevIdx = MTPI->second;
if (!Inserted) {
Slice &PrevP = AS.Slices[PrevIdx];
// Check if the begin offsets match and this is a non-volatile transfer.
// In that case, we can completely elide the transfer.
if (!II.isVolatile() && PrevP.beginOffset() == RawOffset) {
PrevP.kill();
return markAsDead(II);
}
// Otherwise we have an offset transfer within the same alloca. We can't
// split those.
PrevP.makeUnsplittable();
}
// Insert the use now that we've fixed up the splittable nature.
insertUse(II, Offset, Size, /*IsSplittable=*/Inserted && Length);
// Check that we ended up with a valid index in the map.
assert(AS.Slices[PrevIdx].getUse()->getUser() == &II &&
"Map index doesn't point back to a slice with this user.");
}
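// Example (hypothetical): a non-volatile "memcpy(%p, %p, 16)" whose source
// and destination are both this alloca is a self-copy no-op and is marked
// dead above, while the volatile variant is recorded as a single
// unsplittable slice.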
// Disable SRoA for any intrinsics except for lifetime invariants.
// FIXME: What about debug intrinsics? This matches old behavior, but
// doesn't make sense.
void visitIntrinsicInst(IntrinsicInst &II) {
if (!IsOffsetKnown)
return PI.setAborted(&II);
if (II.isLifetimeStartOrEnd()) {
ConstantInt *Length = cast<ConstantInt>(II.getArgOperand(0));
uint64_t Size = std::min(AllocSize - Offset.getLimitedValue(),
Length->getLimitedValue());
insertUse(II, Offset, Size, true);
return;
}
Base::visitIntrinsicInst(II);
}
Instruction *hasUnsafePHIOrSelectUse(Instruction *Root, uint64_t &Size) {
// We consider any PHI or select that results in a direct load or store of
// the same offset to be a viable use for slicing purposes. These uses
// are considered unsplittable and the size is the maximum loaded or stored
// size.
SmallPtrSet<Instruction *, 4> Visited;
SmallVector<std::pair<Instruction *, Instruction *>, 4> Uses;
Visited.insert(Root);
Uses.push_back(std::make_pair(cast<Instruction>(*U), Root));
const DataLayout &DL = Root->getModule()->getDataLayout();
// If there are no loads or stores, the access is dead. We mark that as
// a size zero access.
Size = 0;
do {
Instruction *I, *UsedI;
std::tie(UsedI, I) = Uses.pop_back_val();
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
Size = std::max(Size, DL.getTypeStoreSize(LI->getType()));
continue;
}
if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
Value *Op = SI->getOperand(0);
if (Op == UsedI)
return SI;
Size = std::max(Size, DL.getTypeStoreSize(Op->getType()));
continue;
}
if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
if (!GEP->hasAllZeroIndices())
return GEP;
} else if (!isa<BitCastInst>(I) && !isa<PHINode>(I) &&
!isa<SelectInst>(I) && !isa<AddrSpaceCastInst>(I)) {
return I;
}
for (User *U : I->users())
if (Visited.insert(cast<Instruction>(U)).second)
Uses.push_back(std::make_pair(I, cast<Instruction>(U)));
} while (!Uses.empty());
return nullptr;
}
void visitPHINodeOrSelectInst(Instruction &I) {
assert(isa<PHINode>(I) || isa<SelectInst>(I));
if (I.use_empty())
return markAsDead(I);
// TODO: We could use SimplifyInstruction here to fold PHINodes and
// SelectInsts. However, doing so requires changing the current
// dead-operand-tracking mechanism. For instance, suppose neither loading
// from %U nor %other traps. Then "load (select undef, %U, %other)" does not
// trap either. However, if we simply replace %U with undef using the
// current dead-operand-tracking mechanism, "load (select undef, undef,
// %other)" may trap because the select may return the first operand
// "undef".
if (Value *Result = foldPHINodeOrSelectInst(I)) {
if (Result == *U)
// If the result of the constant fold will be the pointer, recurse
// through the PHI/select as if we had RAUW'ed it.
enqueueUsers(I);
else
// Otherwise the operand to the PHI/select is dead, and we can replace
// it with undef.
AS.DeadOperands.push_back(U);
return;
}
if (!IsOffsetKnown)
return PI.setAborted(&I);
// See if we already have computed info on this node.
uint64_t &Size = PHIOrSelectSizes[&I];
if (!Size) {
// This is a new PHI/Select, check for an unsafe use of it.
if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&I, Size))
return PI.setAborted(UnsafeI);
}
// For PHI and select operands outside the alloca, we can't nuke the entire
// phi or select -- the other side might still be relevant, so we special
// case them here and use a separate structure to track the operands
// themselves which should be replaced with undef.
// FIXME: This should instead be escaped in the event we're instrumenting
// for address sanitization.
if (Offset.uge(AllocSize)) {
AS.DeadOperands.push_back(U);
return;
}
insertUse(I, Offset, Size);
}
void visitPHINode(PHINode &PN) { visitPHINodeOrSelectInst(PN); }
void visitSelectInst(SelectInst &SI) { visitPHINodeOrSelectInst(SI); }
/// Disable SROA entirely if there are unhandled users of the alloca.
void visitInstruction(Instruction &I) { PI.setAborted(&I); }
};
AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI)
:
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
AI(AI),
#endif
PointerEscapingInstr(nullptr) {
SliceBuilder PB(DL, AI, *this);
SliceBuilder::PtrInfo PtrI = PB.visitPtr(AI);
if (PtrI.isEscaped() || PtrI.isAborted()) {
// FIXME: We should sink the escape vs. abort info into the caller nicely,
// possibly by just storing the PtrInfo in the AllocaSlices.
PointerEscapingInstr = PtrI.getEscapingInst() ? PtrI.getEscapingInst()
: PtrI.getAbortingInst();
assert(PointerEscapingInstr && "Did not track a bad instruction");
return;
}
Slices.erase(
llvm::remove_if(Slices, [](const Slice &S) { return S.isDead(); }),
Slices.end());
#ifndef NDEBUG
if (SROARandomShuffleSlices) {
std::mt19937 MT(static_cast<unsigned>(
std::chrono::system_clock::now().time_since_epoch().count()));
std::shuffle(Slices.begin(), Slices.end(), MT);
}
#endif
// Sort the uses. This arranges for the offsets to be in ascending order,
// and the sizes to be in descending order.
llvm::sort(Slices);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void AllocaSlices::print(raw_ostream &OS, const_iterator I,
StringRef Indent) const {
printSlice(OS, I, Indent);
OS << "\n";
printUse(OS, I, Indent);
}
void AllocaSlices::printSlice(raw_ostream &OS, const_iterator I,
StringRef Indent) const {
OS << Indent << "[" << I->beginOffset() << "," << I->endOffset() << ")"
<< " slice #" << (I - begin())
<< (I->isSplittable() ? " (splittable)" : "");
}
void AllocaSlices::printUse(raw_ostream &OS, const_iterator I,
StringRef Indent) const {
OS << Indent << " used by: " << *I->getUse()->getUser() << "\n";
}
void AllocaSlices::print(raw_ostream &OS) const {
if (PointerEscapingInstr) {
OS << "Can't analyze slices for alloca: " << AI << "\n"
<< " A pointer to this alloca escaped by:\n"
<< " " << *PointerEscapingInstr << "\n";
return;
}
OS << "Slices of alloca: " << AI << "\n";
for (const_iterator I = begin(), E = end(); I != E; ++I)
print(OS, I);
}
LLVM_DUMP_METHOD void AllocaSlices::dump(const_iterator I) const {
print(dbgs(), I);
}
LLVM_DUMP_METHOD void AllocaSlices::dump() const { print(dbgs()); }
#endif // !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Walk the range of a partitioning looking for a common type to cover this
/// sequence of slices.
static Type *findCommonType(AllocaSlices::const_iterator B,
AllocaSlices::const_iterator E,
uint64_t EndOffset) {
Type *Ty = nullptr;
bool TyIsCommon = true;
IntegerType *ITy = nullptr;
// Note that we need to look at *every* alloca slice's Use to ensure we
// always get consistent results regardless of the order of slices.
for (AllocaSlices::const_iterator I = B; I != E; ++I) {
Use *U = I->getUse();
if (isa<IntrinsicInst>(*U->getUser()))
continue;
if (I->beginOffset() != B->beginOffset() || I->endOffset() != EndOffset)
continue;
Type *UserTy = nullptr;
if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
UserTy = LI->getType();
} else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
UserTy = SI->getValueOperand()->getType();
}
if (IntegerType *UserITy = dyn_cast_or_null<IntegerType>(UserTy)) {
// If the type is larger than the partition, skip it. We only encounter
// this for split integer operations where we want to use the type of the
// entity causing the split. Also skip if the type is not a byte width
// multiple.
if (UserITy->getBitWidth() % 8 != 0 ||
UserITy->getBitWidth() / 8 > (EndOffset - B->beginOffset()))
continue;
// Track the largest bitwidth integer type used in this way in case there
// is no common type.
if (!ITy || ITy->getBitWidth() < UserITy->getBitWidth())
ITy = UserITy;
}
// To avoid depending on the order of slices, Ty and TyIsCommon must not
// depend on types skipped above.
if (!UserTy || (Ty && Ty != UserTy))
TyIsCommon = false; // Give up on anything but an iN type.
else
Ty = UserTy;
}
return TyIsCommon ? Ty : ITy;
}
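// Example (hypothetical): if one slice loads the whole partition as float
// while another loads it as i32, there is no common type, so the function
// falls back to the widest byte-width integer type observed (here i32).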
/// PHI instructions that use an alloca and are subsequently loaded can be
/// rewritten to load both input pointers in the pred blocks and then PHI the
/// results, allowing the load of the alloca to be promoted.
/// From this:
/// %P2 = phi [i32* %Alloca, i32* %Other]
/// %V = load i32* %P2
/// to:
/// %V1 = load i32* %Alloca -> will be mem2reg'd
/// ...
/// %V2 = load i32* %Other
/// ...
/// %V = phi [i32 %V1, i32 %V2]
///
/// We can do this to a select if its only uses are loads and if the operands
/// to the select can be loaded unconditionally.
///
/// FIXME: This should be hoisted into a generic utility, likely in
/// Transforms/Util/Local.h
static bool isSafePHIToSpeculate(PHINode &PN) {
const DataLayout &DL = PN.getModule()->getDataLayout();
// For now, we can only do this promotion if the load is in the same block
// as the PHI, and if there are no stores between the phi and load.
// TODO: Allow recursive phi users.
// TODO: Allow stores.
BasicBlock *BB = PN.getParent();
unsigned MaxAlign = 0;
uint64_t APWidth = DL.getIndexTypeSizeInBits(PN.getType());
APInt MaxSize(APWidth, 0);
bool HaveLoad = false;
for (User *U : PN.users()) {
LoadInst *LI = dyn_cast<LoadInst>(U);
if (!LI || !LI->isSimple())
return false;
// For now we only allow loads in the same block as the PHI. This is
// a common case that happens when instcombine merges two loads through
// a PHI.
if (LI->getParent() != BB)
return false;
// Ensure that there are no instructions between the PHI and the load that
// could store.
for (BasicBlock::iterator BBI(PN); &*BBI != LI; ++BBI)
if (BBI->mayWriteToMemory())
return false;
uint64_t Size = DL.getTypeStoreSizeInBits(LI->getType());
MaxAlign = std::max(MaxAlign, LI->getAlignment());
MaxSize = MaxSize.ult(Size) ? APInt(APWidth, Size) : MaxSize;
HaveLoad = true;
}
if (!HaveLoad)
return false;
// We can only transform this if it is safe to push the loads into the
// predecessor blocks. The only thing to watch out for is that we can't put
// a possibly trapping load in the predecessor if it is a critical edge.
for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
Instruction *TI = PN.getIncomingBlock(Idx)->getTerminator();
Value *InVal = PN.getIncomingValue(Idx);
// If the value is produced by the terminator of the predecessor (an
// invoke) or it has side-effects, there is no valid place to put a load
// in the predecessor.
if (TI == InVal || TI->mayHaveSideEffects())
return false;
// If the predecessor has a single successor, then the edge isn't
// critical.
if (TI->getNumSuccessors() == 1)
continue;
// If this pointer is always safe to load, or if we can prove that there
// is already a load in the block, then we can move the load to the pred
// block.
if (isSafeToLoadUnconditionally(InVal, MaxAlign, MaxSize, DL, TI))
continue;
return false;
}
return true;
}
static void speculatePHINodeLoads(PHINode &PN) {
LLVM_DEBUG(dbgs() << " original: " << PN << "\n");
LoadInst *SomeLoad = cast<LoadInst>(PN.user_back());
Type *LoadTy = SomeLoad->getType();
IRBuilderTy PHIBuilder(&PN);
PHINode *NewPN = PHIBuilder.CreatePHI(LoadTy, PN.getNumIncomingValues(),
PN.getName() + ".sroa.speculated");
// Get the AA tags and alignment to use from one of the loads. It doesn't
// matter which one we get or whether any of them differ.
AAMDNodes AATags;
SomeLoad->getAAMetadata(AATags);
unsigned Align = SomeLoad->getAlignment();
// Rewrite all loads of the PN to use the new PHI.
while (!PN.use_empty()) {
LoadInst *LI = cast<LoadInst>(PN.user_back());
LI->replaceAllUsesWith(NewPN);
LI->eraseFromParent();
}
// Inject loads into all of the pred blocks.
DenseMap<BasicBlock*, Value*> InjectedLoads;
for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
BasicBlock *Pred = PN.getIncomingBlock(Idx);
Value *InVal = PN.getIncomingValue(Idx);
// A PHI node is allowed to have multiple (duplicated) entries for the same
// basic block, as long as the value is the same. So if we already injected
// a load in the predecessor, then we should reuse the same load for all
// duplicated entries.
if (Value* V = InjectedLoads.lookup(Pred)) {
NewPN->addIncoming(V, Pred);
continue;
}
Instruction *TI = Pred->getTerminator();
IRBuilderTy PredBuilder(TI);
LoadInst *Load = PredBuilder.CreateLoad(
LoadTy, InVal,
(PN.getName() + ".sroa.speculate.load." + Pred->getName()));
++NumLoadsSpeculated;
Load->setAlignment(Align);
if (AATags)
Load->setAAMetadata(AATags);
NewPN->addIncoming(Load, Pred);
InjectedLoads[Pred] = Load;
}
LLVM_DEBUG(dbgs() << " speculated to: " << *NewPN << "\n");
PN.eraseFromParent();
}
/// Select instructions that use an alloca and are subsequently loaded can be
/// rewritten to load both input pointers and then select between the result,
/// allowing the load of the alloca to be promoted.
/// From this:
/// %P2 = select i1 %cond, i32* %Alloca, i32* %Other
/// %V = load i32* %P2
/// to:
/// %V1 = load i32* %Alloca -> will be mem2reg'd
/// %V2 = load i32* %Other
/// %V = select i1 %cond, i32 %V1, i32 %V2
///
/// We can do this to a select if its only uses are loads and if the operands
/// to the select can be loaded unconditionally.
static bool isSafeSelectToSpeculate(SelectInst &SI) {
Value *TValue = SI.getTrueValue();
Value *FValue = SI.getFalseValue();
const DataLayout &DL = SI.getModule()->getDataLayout();
for (User *U : SI.users()) {
LoadInst *LI = dyn_cast<LoadInst>(U);
if (!LI || !LI->isSimple())
return false;
// Both operands to the select need to be dereferenceable, either
// absolutely (e.g. allocas) or at this point because we can see other
// accesses to them.
if (!isSafeToLoadUnconditionally(TValue, LI->getType(), LI->getAlignment(),
DL, LI))
return false;
if (!isSafeToLoadUnconditionally(FValue, LI->getType(), LI->getAlignment(),
DL, LI))
return false;
}
return true;
}
static void speculateSelectInstLoads(SelectInst &SI) {
LLVM_DEBUG(dbgs() << " original: " << SI << "\n");
IRBuilderTy IRB(&SI);
Value *TV = SI.getTrueValue();
Value *FV = SI.getFalseValue();
// Replace the loads of the select with a select of two loads.
while (!SI.use_empty()) {
LoadInst *LI = cast<LoadInst>(SI.user_back());
assert(LI->isSimple() && "We only speculate simple loads");
IRB.SetInsertPoint(LI);
LoadInst *TL = IRB.CreateLoad(LI->getType(), TV,
LI->getName() + ".sroa.speculate.load.true");
LoadInst *FL = IRB.CreateLoad(LI->getType(), FV,
LI->getName() + ".sroa.speculate.load.false");
NumLoadsSpeculated += 2;
// Transfer alignment and AA info if present.
TL->setAlignment(LI->getAlignment());
FL->setAlignment(LI->getAlignment());
AAMDNodes Tags;
LI->getAAMetadata(Tags);
if (Tags) {
TL->setAAMetadata(Tags);
FL->setAAMetadata(Tags);
}
Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL,
LI->getName() + ".sroa.speculated");
LLVM_DEBUG(dbgs() << " speculated to: " << *V << "\n");
LI->replaceAllUsesWith(V);
LI->eraseFromParent();
}
SI.eraseFromParent();
}
/// Build a GEP out of a base pointer and indices.
///
/// This will return the BasePtr if that is valid, or build a new GEP
/// instruction using the IRBuilder if GEP-ing is needed.
static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr,
SmallVectorImpl<Value *> &Indices, Twine NamePrefix) {
if (Indices.empty())
return BasePtr;
// A single zero index is a no-op, so check for this and avoid building a GEP
// in that case.
if (Indices.size() == 1 && cast<ConstantInt>(Indices.back())->isZero())
return BasePtr;
return IRB.CreateInBoundsGEP(BasePtr->getType()->getPointerElementType(),
BasePtr, Indices, NamePrefix + "sroa_idx");
}
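// Example (hypothetical index values): Indices == {0} returns BasePtr
// unchanged, while Indices == {0, 2} builds
//   getelementptr inbounds %ty, %ty* %base, i32 0, i32 2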
/// Get a natural GEP off of the BasePtr walking through Ty toward
/// TargetTy without changing the offset of the pointer.
///
/// This routine assumes we've already established a properly offset GEP with
/// Indices, and arrived at the Ty type. The goal is to continue to GEP with
/// zero-indices down through type layers until we find one the same as
/// TargetTy. If we can't find one with the same type, we at least try to use
/// one with the same size. If none of that works, we just produce the GEP as
/// indicated by Indices to have the correct offset.
static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL,
Value *BasePtr, Type *Ty, Type *TargetTy,
SmallVectorImpl<Value *> &Indices,
Twine NamePrefix) {
if (Ty == TargetTy)
return buildGEP(IRB, BasePtr, Indices, NamePrefix);
// Offset size to use for the indices.
unsigned OffsetSize = DL.getIndexTypeSizeInBits(BasePtr->getType());
// See if we can descend into a struct and locate a field with the correct
// type.
unsigned NumLayers = 0;
Type *ElementTy = Ty;
do {
if (ElementTy->isPointerTy())
break;
if (ArrayType *ArrayTy = dyn_cast<ArrayType>(ElementTy)) {
ElementTy = ArrayTy->getElementType();
Indices.push_back(IRB.getIntN(OffsetSize, 0));
} else if (VectorType *VectorTy = dyn_cast<VectorType>(ElementTy)) {
ElementTy = VectorTy->getElementType();
Indices.push_back(IRB.getInt32(0));
} else if (StructType *STy = dyn_cast<StructType>(ElementTy)) {
if (STy->element_begin() == STy->element_end())
break; // Nothing left to descend into.
ElementTy = *STy->element_begin();
Indices.push_back(IRB.getInt32(0));
} else {
break;
}
++NumLayers;
} while (ElementTy != TargetTy);
if (ElementTy != TargetTy)
Indices.erase(Indices.end() - NumLayers, Indices.end());
return buildGEP(IRB, BasePtr, Indices, NamePrefix);
}
/// Recursively compute indices for a natural GEP.
///
/// This is the recursive step for getNaturalGEPWithOffset that walks down the
/// element types adding appropriate indices for the GEP.
static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL,
Value *Ptr, Type *Ty, APInt &Offset,
Type *TargetTy,
SmallVectorImpl<Value *> &Indices,
Twine NamePrefix) {
if (Offset == 0)
return getNaturalGEPWithType(IRB, DL, Ptr, Ty, TargetTy, Indices,
NamePrefix);
// We can't recurse through pointer types.
if (Ty->isPointerTy())
return nullptr;
// We try to analyze GEPs over vectors here, but note that these GEPs are
// extremely poorly defined currently. The long-term goal is to remove GEPing
// over a vector from the IR completely.
if (VectorType *VecTy = dyn_cast<VectorType>(Ty)) {
unsigned ElementSizeInBits = DL.getTypeSizeInBits(VecTy->getScalarType());
if (ElementSizeInBits % 8 != 0) {
// GEPs over non-multiple of 8 size vector elements are invalid.
return nullptr;
}
APInt ElementSize(Offset.getBitWidth(), ElementSizeInBits / 8);
APInt NumSkippedElements = Offset.sdiv(ElementSize);
if (NumSkippedElements.ugt(VecTy->getNumElements()))
return nullptr;
Offset -= NumSkippedElements * ElementSize;
Indices.push_back(IRB.getInt(NumSkippedElements));
return getNaturalGEPRecursively(IRB, DL, Ptr, VecTy->getElementType(),
Offset, TargetTy, Indices, NamePrefix);
}
if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) {
Type *ElementTy = ArrTy->getElementType();
APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
APInt NumSkippedElements = Offset.sdiv(ElementSize);
if (NumSkippedElements.ugt(ArrTy->getNumElements()))
return nullptr;
Offset -= NumSkippedElements * ElementSize;
Indices.push_back(IRB.getInt(NumSkippedElements));
return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
Indices, NamePrefix);
}
StructType *STy = dyn_cast<StructType>(Ty);
if (!STy)
return nullptr;
const StructLayout *SL = DL.getStructLayout(STy);
uint64_t StructOffset = Offset.getZExtValue();
if (StructOffset >= SL->getSizeInBytes())
return nullptr;
unsigned Index = SL->getElementContainingOffset(StructOffset);
Offset -= APInt(Offset.getBitWidth(), SL->getElementOffset(Index));
Type *ElementTy = STy->getElementType(Index);
if (Offset.uge(DL.getTypeAllocSize(ElementTy)))
return nullptr; // The offset points into alignment padding.
Indices.push_back(IRB.getInt32(Index));
return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
Indices, NamePrefix);
}
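// Example (hypothetical): recursing into %t = type { i32, [4 x i8] } with a
// byte Offset of 5 and TargetTy i8 selects struct field 1 (consuming the 4
// bytes of the i32) and then array element 1, producing the indices (1, 1).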
/// Get a natural GEP from a base pointer to a particular offset and
/// resulting in a particular type.
///
/// The goal is to produce a "natural" looking GEP that works with the existing
/// composite types to arrive at the appropriate offset and element type for
/// a pointer. TargetTy is the element type the returned GEP should point-to if
/// possible. We recurse by decreasing Offset, adding the appropriate index to
/// Indices, and setting Ty to the result subtype.
///
/// If no natural GEP can be constructed, this function returns null.
static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL,
Value *Ptr, APInt Offset, Type *TargetTy,
SmallVectorImpl<Value *> &Indices,
Twine NamePrefix) {
PointerType *Ty = cast<PointerType>(Ptr->getType());
// Don't consider any GEPs through an i8* as natural unless the TargetTy is
// an i8.
if (Ty == IRB.getInt8PtrTy(Ty->getAddressSpace()) && TargetTy->isIntegerTy(8))
return nullptr;
Type *ElementTy = Ty->getElementType();
if (!ElementTy->isSized())
return nullptr; // We can't GEP through an unsized element.
APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
if (ElementSize == 0)
return nullptr; // Zero-length arrays can't help us build a natural GEP.
APInt NumSkippedElements = Offset.sdiv(ElementSize);
Offset -= NumSkippedElements * ElementSize;
Indices.push_back(IRB.getInt(NumSkippedElements));
return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
Indices, NamePrefix);
}
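// Example (hypothetical): for a Ptr of type [10 x i32]* with Offset 8 and
// TargetTy i32, the first index is 0 (no whole-array strides are skipped)
// and the recursion adds index 2, yielding
//   getelementptr inbounds [10 x i32], [10 x i32]* %ptr, i64 0, i64 2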
/// Compute an adjusted pointer from Ptr by Offset bytes where the
/// resulting pointer has PointerTy.
///
/// This tries very hard to compute a "natural" GEP which arrives at the offset
/// and produces the pointer type desired. Where it cannot, it will try to use
/// the natural GEP to arrive at the offset and bitcast to the type. Where that
/// fails, it will try to use an existing i8* and GEP to the byte offset and
/// bitcast to the type.
///
/// The strategy for finding the more natural GEPs is to peel off layers of the
/// pointer, walking back through bit casts and GEPs, searching for a base
/// pointer from which we can compute a natural GEP with the desired
/// properties. The algorithm tries to fold as many constant indices into
/// a single GEP as possible, thus making each GEP more independent of the
/// surrounding code.
static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr,
APInt Offset, Type *PointerTy, Twine NamePrefix) {
// Even though we don't look through PHI nodes, we could be called on an
// instruction in an unreachable block, which may be on a cycle.
SmallPtrSet<Value *, 4> Visited;
Visited.insert(Ptr);
SmallVector<Value *, 4> Indices;
// We may end up computing an offset pointer that has the wrong type. If we
// never are able to compute one directly that has the correct type, we'll
// fall back to it, so keep it and the base it was computed from around here.
Value *OffsetPtr = nullptr;
Value *OffsetBasePtr;
// Remember any i8 pointer we come across to re-use if we need to do a raw
// byte offset.
Value *Int8Ptr = nullptr;
APInt Int8PtrOffset(Offset.getBitWidth(), 0);
PointerType *TargetPtrTy = cast<PointerType>(PointerTy);
Type *TargetTy = TargetPtrTy->getElementType();
// As an `addrspacecast` may be involved, `Ptr` (the storage pointer) may
// have a different address space from the expected `PointerTy` (the pointer
// to be used). Adjust the pointer type based on the original storage
// pointer.
auto AS = cast<PointerType>(Ptr->getType())->getAddressSpace();
PointerTy = TargetTy->getPointerTo(AS);
do {
// First fold any existing GEPs into the offset.
while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
APInt GEPOffset(Offset.getBitWidth(), 0);
if (!GEP->accumulateConstantOffset(DL, GEPOffset))
break;
Offset += GEPOffset;
Ptr = GEP->getPointerOperand();
if (!Visited.insert(Ptr).second)
break;
}
// See if we can perform a natural GEP here.
Indices.clear();
if (Value *P = getNaturalGEPWithOffset(IRB, DL, Ptr, Offset, TargetTy,
Indices, NamePrefix)) {
// If we have a new natural pointer at the offset, clear out any old
// offset pointer we computed. Unless it is the base pointer or
// a non-instruction, we built a GEP we don't need. Zap it.
if (OffsetPtr && OffsetPtr != OffsetBasePtr)
if (Instruction *I = dyn_cast<Instruction>(OffsetPtr)) {
assert(I->use_empty() && "Built a GEP with uses some how!");
I->eraseFromParent();
}
OffsetPtr = P;
OffsetBasePtr = Ptr;
// If we also found a pointer of the right type, we're done.
if (P->getType() == PointerTy)
break;
}
// Stash this pointer if we've found an i8*.
if (Ptr->getType()->isIntegerTy(8)) {
Int8Ptr = Ptr;
Int8PtrOffset = Offset;
}
// Peel off a layer of the pointer and update the offset appropriately.
if (Operator::getOpcode(Ptr) == Instruction::BitCast) {
Ptr = cast<Operator>(Ptr)->getOperand(0);
} else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
if (GA->isInterposable())
break;
Ptr = GA->getAliasee();
} else {
break;
}
assert(Ptr->getType()->isPointerTy() && "Unexpected operand type!");
} while (Visited.insert(Ptr).second);
if (!OffsetPtr) {
if (!Int8Ptr) {
Int8Ptr = IRB.CreateBitCast(
Ptr, IRB.getInt8PtrTy(PointerTy->getPointerAddressSpace()),
NamePrefix + "sroa_raw_cast");
Int8PtrOffset = Offset;
}
OffsetPtr = Int8PtrOffset == 0
? Int8Ptr
: IRB.CreateInBoundsGEP(IRB.getInt8Ty(), Int8Ptr,
IRB.getInt(Int8PtrOffset),
NamePrefix + "sroa_raw_idx");
}
Ptr = OffsetPtr;
// On the off chance we were targeting i8*, guard the bitcast here.
if (cast<PointerType>(Ptr->getType()) != TargetPtrTy) {
Ptr = IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr,
TargetPtrTy,
NamePrefix + "sroa_cast");
}
return Ptr;
}
/// Compute the adjusted alignment for a load or store from an offset.
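///
/// For example (illustrative): a load with alignment 8 re-based at byte
/// offset 4 can only be assumed 4-byte aligned, since MinAlign(8, 4) yields
/// the largest power of two dividing both, i.e. 4.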
static unsigned getAdjustedAlignment(Instruction *I, uint64_t Offset,
const DataLayout &DL) {
unsigned Alignment;
Type *Ty;
if (auto *LI = dyn_cast<LoadInst>(I)) {
Alignment = LI->getAlignment();
Ty = LI->getType();
} else if (auto *SI = dyn_cast<StoreInst>(I)) {
Alignment = SI->getAlignment();
Ty = SI->getValueOperand()->getType();
} else {
llvm_unreachable("Only loads and stores are allowed!");
}
if (!Alignment)
Alignment = DL.getABITypeAlignment(Ty);
return MinAlign(Alignment, Offset);
}
/// Test whether we can convert a value from the old to the new type.
///
/// This predicate should be used to guard calls to convertValue in order to
/// ensure that we only try to convert viable values. The strategy is that we
/// will peel off single element struct and array wrappings to get to an
/// underlying value, and convert that value.
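///
/// Illustrative cases (assuming 64-bit, integral pointers): an i64 and a
/// <2 x i32> of equal store size are convertible (via bitcast), and an i64
/// and an i8* are convertible (via inttoptr/ptrtoint); an i32 and an i64
/// are not, because integer bit-width differences are rejected outright.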
static bool canConvertValue(const DataLayout &DL, Type *OldTy, Type *NewTy) {
if (OldTy == NewTy)
return true;
// For integer types, we can't handle any bit-width differences. This would
// break both vector conversions with extension and introduce endianness
// issues when used in conjunction with loads and stores.
if (isa<IntegerType>(OldTy) && isa<IntegerType>(NewTy)) {
assert(cast<IntegerType>(OldTy)->getBitWidth() !=
cast<IntegerType>(NewTy)->getBitWidth() &&
"We can't have the same bitwidth for different int types");
return false;
}
if (DL.getTypeSizeInBits(NewTy) != DL.getTypeSizeInBits(OldTy))
return false;
if (!NewTy->isSingleValueType() || !OldTy->isSingleValueType())
return false;
// We can convert pointers to integers and vice-versa. Same for vectors
// of pointers and integers.
OldTy = OldTy->getScalarType();
NewTy = NewTy->getScalarType();
if (NewTy->isPointerTy() || OldTy->isPointerTy()) {
if (NewTy->isPointerTy() && OldTy->isPointerTy()) {
return cast<PointerType>(NewTy)->getPointerAddressSpace() ==
cast<PointerType>(OldTy)->getPointerAddressSpace();
}
// We can convert integers to integral pointers, but not to non-integral
// pointers.
if (OldTy->isIntegerTy())
return !DL.isNonIntegralPointerType(NewTy);
// We can convert integral pointers to integers, but non-integral pointers
// need to remain pointers.
if (!DL.isNonIntegralPointerType(OldTy))
return NewTy->isIntegerTy();
return false;
}
return true;
}
/// Generic routine to convert an SSA value to a value of a different
/// type.
///
/// This will try various different casting techniques, such as bitcasts,
/// inttoptr, and ptrtoint casts. Use the \c canConvertValue predicate to test
/// two types for viability with this routine.
static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
Type *NewTy) {
Type *OldTy = V->getType();
assert(canConvertValue(DL, OldTy, NewTy) && "Value not convertable to type");
if (OldTy == NewTy)
return V;
assert(!(isa<IntegerType>(OldTy) && isa<IntegerType>(NewTy)) &&
"Integer types must be the exact same to convert.");
// See if we need inttoptr for this type pair. A cast involving both scalars
// and vectors requires an additional bitcast.
if (OldTy->isIntOrIntVectorTy() && NewTy->isPtrOrPtrVectorTy()) {
// Expand <2 x i32> to i8* --> <2 x i32> to i64 to i8*
if (OldTy->isVectorTy() && !NewTy->isVectorTy())
return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)),
NewTy);
// Expand i128 to <2 x i8*> --> i128 to <2 x i64> to <2 x i8*>
if (!OldTy->isVectorTy() && NewTy->isVectorTy())
return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)),
NewTy);
return IRB.CreateIntToPtr(V, NewTy);
}
// See if we need ptrtoint for this type pair. A cast involving both scalars
// and vectors requires an additional bitcast.
if (OldTy->isPtrOrPtrVectorTy() && NewTy->isIntOrIntVectorTy()) {
// Expand <2 x i8*> to i128 --> <2 x i8*> to <2 x i64> to i128
if (OldTy->isVectorTy() && !NewTy->isVectorTy())
return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
NewTy);
// Expand i8* to <2 x i32> --> i8* to i64 to <2 x i32>
if (!OldTy->isVectorTy() && NewTy->isVectorTy())
return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
NewTy);
return IRB.CreatePtrToInt(V, NewTy);
}
return IRB.CreateBitCast(V, NewTy);
}
/// Test whether the given slice use can be promoted to a vector.
///
/// This function is called to test each entry in a partition which is slated
/// for a single slice.
static bool isVectorPromotionViableForSlice(Partition &P, const Slice &S,
VectorType *Ty,
uint64_t ElementSize,
const DataLayout &DL) {
// First validate the slice offsets.
uint64_t BeginOffset =
std::max(S.beginOffset(), P.beginOffset()) - P.beginOffset();
uint64_t BeginIndex = BeginOffset / ElementSize;
if (BeginIndex * ElementSize != BeginOffset ||
BeginIndex >= Ty->getNumElements())
return false;
uint64_t EndOffset =
std::min(S.endOffset(), P.endOffset()) - P.beginOffset();
uint64_t EndIndex = EndOffset / ElementSize;
if (EndIndex * ElementSize != EndOffset || EndIndex > Ty->getNumElements())
return false;
assert(EndIndex > BeginIndex && "Empty vector!");
uint64_t NumElements = EndIndex - BeginIndex;
Type *SliceTy = (NumElements == 1)
? Ty->getElementType()
: VectorType::get(Ty->getElementType(), NumElements);
Type *SplitIntTy =
Type::getIntNTy(Ty->getContext(), NumElements * ElementSize * 8);
Use *U = S.getUse();
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) {
if (MI->isVolatile())
return false;
if (!S.isSplittable())
return false; // Skip any unsplittable intrinsics.
} else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) {
if (!II->isLifetimeStartOrEnd())
return false;
} else if (U->get()->getType()->getPointerElementType()->isStructTy()) {
// Disable vector promotion when there are loads or stores of an FCA.
return false;
} else if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
if (LI->isVolatile())
return false;
Type *LTy = LI->getType();
if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) {
assert(LTy->isIntegerTy());
LTy = SplitIntTy;
}
if (!canConvertValue(DL, SliceTy, LTy))
return false;
} else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
if (SI->isVolatile())
return false;
Type *STy = SI->getValueOperand()->getType();
if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) {
assert(STy->isIntegerTy());
STy = SplitIntTy;
}
if (!canConvertValue(DL, STy, SliceTy))
return false;
} else {
return false;
}
return true;
}
/// Test whether the given alloca partitioning and range of slices can be
/// promoted to a vector.
///
/// This is a quick test to check whether we can rewrite a particular alloca
/// partition (and its newly formed alloca) into a vector alloca with only
/// whole-vector loads and stores such that it could be promoted to a vector
/// SSA value. We can only ensure this for a limited set of operations, and we
/// don't want to do the rewrites unless we are confident that the result will
/// be promotable, so we have an early test here.
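///
/// Sketch (illustrative): a 16-byte partition accessed by whole <4 x float>
/// loads and stores plus a float store at bytes 8..12 keeps <4 x float> as
/// a viable candidate (that slice maps exactly to element 2), whereas an
/// additional load covering bytes 2..6 would reject it, since that slice
/// does not fall on element boundaries.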
static VectorType *isVectorPromotionViable(Partition &P, const DataLayout &DL) {
// Collect the candidate types for vector-based promotion. Also track whether
// we have different element types.
SmallVector<VectorType *, 4> CandidateTys;
Type *CommonEltTy = nullptr;
bool HaveCommonEltTy = true;
auto CheckCandidateType = [&](Type *Ty) {
if (auto *VTy = dyn_cast<VectorType>(Ty)) {
+ // Bail out, clearing all candidates, if this candidate vector's total
+ // size in bits differs from that of the candidates already collected.
+ if (!CandidateTys.empty()) {
+ VectorType *V = CandidateTys[0];
+ if (DL.getTypeSizeInBits(VTy) != DL.getTypeSizeInBits(V)) {
+ CandidateTys.clear();
+ return;
+ }
+ }
CandidateTys.push_back(VTy);
if (!CommonEltTy)
CommonEltTy = VTy->getElementType();
else if (CommonEltTy != VTy->getElementType())
HaveCommonEltTy = false;
}
};
// Consider any loads or stores that are the exact size of the slice.
for (const Slice &S : P)
if (S.beginOffset() == P.beginOffset() &&
S.endOffset() == P.endOffset()) {
if (auto *LI = dyn_cast<LoadInst>(S.getUse()->getUser()))
CheckCandidateType(LI->getType());
else if (auto *SI = dyn_cast<StoreInst>(S.getUse()->getUser()))
CheckCandidateType(SI->getValueOperand()->getType());
}
// If we didn't find a vector type, nothing to do here.
if (CandidateTys.empty())
return nullptr;
// Remove non-integer vector types if we had multiple common element types.
// FIXME: It'd be nice to replace them with integer vector types, but we can't
// do that until all the backends are known to produce good code for all
// integer vector types.
if (!HaveCommonEltTy) {
CandidateTys.erase(
llvm::remove_if(CandidateTys,
[](VectorType *VTy) {
return !VTy->getElementType()->isIntegerTy();
}),
CandidateTys.end());
// If there were no integer vector types, give up.
if (CandidateTys.empty())
return nullptr;
// Rank the remaining candidate vector types. This is easy because we know
// they're all integer vectors. We sort by ascending number of elements.
auto RankVectorTypes = [&DL](VectorType *RHSTy, VectorType *LHSTy) {
(void)DL;
assert(DL.getTypeSizeInBits(RHSTy) == DL.getTypeSizeInBits(LHSTy) &&
"Cannot have vector types of different sizes!");
assert(RHSTy->getElementType()->isIntegerTy() &&
"All non-integer types eliminated!");
assert(LHSTy->getElementType()->isIntegerTy() &&
"All non-integer types eliminated!");
return RHSTy->getNumElements() < LHSTy->getNumElements();
};
llvm::sort(CandidateTys, RankVectorTypes);
CandidateTys.erase(
std::unique(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes),
CandidateTys.end());
} else {
// The only way to have the same element type in every vector type is to
// have the same vector type. Check that and remove all but one.
#ifndef NDEBUG
for (VectorType *VTy : CandidateTys) {
assert(VTy->getElementType() == CommonEltTy &&
"Unaccounted for element type!");
assert(VTy == CandidateTys[0] &&
"Different vector types with the same element type!");
}
#endif
CandidateTys.resize(1);
}
// Try each vector type, and return the one which works.
auto CheckVectorTypeForPromotion = [&](VectorType *VTy) {
uint64_t ElementSize = DL.getTypeSizeInBits(VTy->getElementType());
// While the definition of LLVM vectors is bitpacked, we don't support sizes
// that aren't byte sized.
if (ElementSize % 8)
return false;
assert((DL.getTypeSizeInBits(VTy) % 8) == 0 &&
"vector size not a multiple of element size?");
ElementSize /= 8;
for (const Slice &S : P)
if (!isVectorPromotionViableForSlice(P, S, VTy, ElementSize, DL))
return false;
for (const Slice *S : P.splitSliceTails())
if (!isVectorPromotionViableForSlice(P, *S, VTy, ElementSize, DL))
return false;
return true;
};
for (VectorType *VTy : CandidateTys)
if (CheckVectorTypeForPromotion(VTy))
return VTy;
return nullptr;
}
/// Test whether a slice of an alloca is valid for integer widening.
///
/// This implements the necessary checking for the \c isIntegerWideningViable
/// test below on a single slice of the alloca.
static bool isIntegerWideningViableForSlice(const Slice &S,
uint64_t AllocBeginOffset,
Type *AllocaTy,
const DataLayout &DL,
bool &WholeAllocaOp) {
uint64_t Size = DL.getTypeStoreSize(AllocaTy);
uint64_t RelBegin = S.beginOffset() - AllocBeginOffset;
uint64_t RelEnd = S.endOffset() - AllocBeginOffset;
// We can't reasonably handle cases where the load or store extends past
// the end of the alloca's type and into its padding.
if (RelEnd > Size)
return false;
Use *U = S.getUse();
if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
if (LI->isVolatile())
return false;
// We can't handle loads that extend past the allocated memory.
if (DL.getTypeStoreSize(LI->getType()) > Size)
return false;
// So far, AllocaSliceRewriter does not support widening split slice tails
// in rewriteIntegerLoad.
if (S.beginOffset() < AllocBeginOffset)
return false;
// Note that we don't count vector loads or stores as whole-alloca
// operations which enable integer widening because we would prefer to use
// vector widening instead.
if (!isa<VectorType>(LI->getType()) && RelBegin == 0 && RelEnd == Size)
WholeAllocaOp = true;
if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType())) {
if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy))
return false;
} else if (RelBegin != 0 || RelEnd != Size ||
!canConvertValue(DL, AllocaTy, LI->getType())) {
// Non-integer loads need to be convertible from the alloca type so that
// they are promotable.
return false;
}
} else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
Type *ValueTy = SI->getValueOperand()->getType();
if (SI->isVolatile())
return false;
// We can't handle stores that extend past the allocated memory.
if (DL.getTypeStoreSize(ValueTy) > Size)
return false;
// So far, AllocaSliceRewriter does not support widening split slice tails
// in rewriteIntegerStore.
if (S.beginOffset() < AllocBeginOffset)
return false;
// Note that we don't count vector loads or stores as whole-alloca
// operations which enable integer widening because we would prefer to use
// vector widening instead.
if (!isa<VectorType>(ValueTy) && RelBegin == 0 && RelEnd == Size)
WholeAllocaOp = true;
if (IntegerType *ITy = dyn_cast<IntegerType>(ValueTy)) {
if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy))
return false;
} else if (RelBegin != 0 || RelEnd != Size ||
!canConvertValue(DL, ValueTy, AllocaTy)) {
// Non-integer stores need to be convertible to the alloca type so that
// they are promotable.
return false;
}
} else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) {
if (MI->isVolatile() || !isa<Constant>(MI->getLength()))
return false;
if (!S.isSplittable())
return false; // Skip any unsplittable intrinsics.
} else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) {
if (!II->isLifetimeStartOrEnd())
return false;
} else {
return false;
}
return true;
}
/// Test whether the given alloca partition's integer operations can be
/// widened to promotable ones.
///
/// This is a quick test to check whether we can rewrite the integer loads and
/// stores to a particular alloca into wider loads and stores and be able to
/// promote the resulting alloca.
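///
/// For example (illustrative): an alloca of type i64 written by a single
/// whole i64 store and read through two i32 loads at offsets 0 and 4 is
/// viable; the i64 store is the covering whole-alloca operation, and each
/// i32 load can be rewritten as an i64 load followed by a shift and
/// truncate.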
static bool isIntegerWideningViable(Partition &P, Type *AllocaTy,
const DataLayout &DL) {
uint64_t SizeInBits = DL.getTypeSizeInBits(AllocaTy);
// Don't create integer types larger than the maximum bitwidth.
if (SizeInBits > IntegerType::MAX_INT_BITS)
return false;
// Don't try to handle allocas with bit-padding.
if (SizeInBits != DL.getTypeStoreSizeInBits(AllocaTy))
return false;
// We need to ensure that an integer type with the appropriate bitwidth can
// be converted to the alloca type, whatever that is. We don't want to force
// the alloca itself to have an integer type if there is a more suitable one.
Type *IntTy = Type::getIntNTy(AllocaTy->getContext(), SizeInBits);
if (!canConvertValue(DL, AllocaTy, IntTy) ||
!canConvertValue(DL, IntTy, AllocaTy))
return false;
// While examining uses, we ensure that the alloca has a covering load or
// store. We don't want to widen the integer operations only to fail to
// promote due to some other unsplittable entry (which we may make splittable
// later). However, if there are only splittable uses, go ahead and assume
// that we cover the alloca.
// FIXME: We shouldn't consider split slices that happen to start in the
// partition here...
bool WholeAllocaOp =
P.begin() != P.end() ? false : DL.isLegalInteger(SizeInBits);
for (const Slice &S : P)
if (!isIntegerWideningViableForSlice(S, P.beginOffset(), AllocaTy, DL,
WholeAllocaOp))
return false;
for (const Slice *S : P.splitSliceTails())
if (!isIntegerWideningViableForSlice(*S, P.beginOffset(), AllocaTy, DL,
WholeAllocaOp))
return false;
return WholeAllocaOp;
}
static Value *extractInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
IntegerType *Ty, uint64_t Offset,
const Twine &Name) {
LLVM_DEBUG(dbgs() << " start: " << *V << "\n");
IntegerType *IntTy = cast<IntegerType>(V->getType());
assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) &&
"Element extends past full value");
uint64_t ShAmt = 8 * Offset;
if (DL.isBigEndian())
ShAmt = 8 * (DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset);
if (ShAmt) {
V = IRB.CreateLShr(V, ShAmt, Name + ".shift");
LLVM_DEBUG(dbgs() << " shifted: " << *V << "\n");
}
assert(Ty->getBitWidth() <= IntTy->getBitWidth() &&
"Cannot extract to a larger integer!");
if (Ty != IntTy) {
V = IRB.CreateTrunc(V, Ty, Name + ".trunc");
LLVM_DEBUG(dbgs() << " trunced: " << *V << "\n");
}
return V;
}
static Value *insertInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *Old,
Value *V, uint64_t Offset, const Twine &Name) {
IntegerType *IntTy = cast<IntegerType>(Old->getType());
IntegerType *Ty = cast<IntegerType>(V->getType());
assert(Ty->getBitWidth() <= IntTy->getBitWidth() &&
"Cannot insert a larger integer!");
LLVM_DEBUG(dbgs() << " start: " << *V << "\n");
if (Ty != IntTy) {
V = IRB.CreateZExt(V, IntTy, Name + ".ext");
LLVM_DEBUG(dbgs() << " extended: " << *V << "\n");
}
assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) &&
"Element store outside of alloca store");
uint64_t ShAmt = 8 * Offset;
if (DL.isBigEndian())
ShAmt = 8 * (DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset);
if (ShAmt) {
V = IRB.CreateShl(V, ShAmt, Name + ".shift");
LLVM_DEBUG(dbgs() << " shifted: " << *V << "\n");
}
if (ShAmt || Ty->getBitWidth() < IntTy->getBitWidth()) {
APInt Mask = ~Ty->getMask().zext(IntTy->getBitWidth()).shl(ShAmt);
Old = IRB.CreateAnd(Old, Mask, Name + ".mask");
LLVM_DEBUG(dbgs() << " masked: " << *Old << "\n");
V = IRB.CreateOr(Old, V, Name + ".insert");
LLVM_DEBUG(dbgs() << " inserted: " << *V << "\n");
}
return V;
}
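/// Extract the sub-vector of elements [\p BeginIndex, \p EndIndex) from
/// \p V.
///
/// For instance (illustrative): extracting elements [1, 3) of a <4 x i32>
/// emits a shufflevector with mask <i32 1, i32 2>, yielding a <2 x i32>;
/// a single-element slice uses extractelement instead.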
static Value *extractVector(IRBuilderTy &IRB, Value *V, unsigned BeginIndex,
unsigned EndIndex, const Twine &Name) {
VectorType *VecTy = cast<VectorType>(V->getType());
unsigned NumElements = EndIndex - BeginIndex;
assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
if (NumElements == VecTy->getNumElements())
return V;
if (NumElements == 1) {
V = IRB.CreateExtractElement(V, IRB.getInt32(BeginIndex),
Name + ".extract");
LLVM_DEBUG(dbgs() << " extract: " << *V << "\n");
return V;
}
SmallVector<Constant *, 8> Mask;
Mask.reserve(NumElements);
for (unsigned i = BeginIndex; i != EndIndex; ++i)
Mask.push_back(IRB.getInt32(i));
V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()),
ConstantVector::get(Mask), Name + ".extract");
LLVM_DEBUG(dbgs() << " shuffle: " << *V << "\n");
return V;
}
static Value *insertVector(IRBuilderTy &IRB, Value *Old, Value *V,
unsigned BeginIndex, const Twine &Name) {
VectorType *VecTy = cast<VectorType>(Old->getType());
assert(VecTy && "Can only insert a vector into a vector");
VectorType *Ty = dyn_cast<VectorType>(V->getType());
if (!Ty) {
// Single element to insert.
V = IRB.CreateInsertElement(Old, V, IRB.getInt32(BeginIndex),
Name + ".insert");
LLVM_DEBUG(dbgs() << " insert: " << *V << "\n");
return V;
}
assert(Ty->getNumElements() <= VecTy->getNumElements() &&
"Too many elements!");
if (Ty->getNumElements() == VecTy->getNumElements()) {
assert(V->getType() == VecTy && "Vector type mismatch");
return V;
}
unsigned EndIndex = BeginIndex + Ty->getNumElements();
// When inserting a smaller vector into the larger to store, we first
// use a shuffle vector to widen it with undef elements, and then
// a second shuffle vector to select between the loaded vector and the
// incoming vector.
SmallVector<Constant *, 8> Mask;
Mask.reserve(VecTy->getNumElements());
for (unsigned i = 0; i != VecTy->getNumElements(); ++i)
if (i >= BeginIndex && i < EndIndex)
Mask.push_back(IRB.getInt32(i - BeginIndex));
else
Mask.push_back(UndefValue::get(IRB.getInt32Ty()));
V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()),
ConstantVector::get(Mask), Name + ".expand");
LLVM_DEBUG(dbgs() << " shuffle: " << *V << "\n");
Mask.clear();
for (unsigned i = 0; i != VecTy->getNumElements(); ++i)
Mask.push_back(IRB.getInt1(i >= BeginIndex && i < EndIndex));
V = IRB.CreateSelect(ConstantVector::get(Mask), V, Old, Name + "blend");
LLVM_DEBUG(dbgs() << " blend: " << *V << "\n");
return V;
}
/// Visitor to rewrite instructions using a particular slice of an alloca
/// to use a new alloca.
///
/// Also implements the rewriting to vector-based accesses when the partition
/// passes the isVectorPromotionViable predicate. Most of the rewriting logic
/// lives here.
class llvm::sroa::AllocaSliceRewriter
: public InstVisitor<AllocaSliceRewriter, bool> {
// Befriend the base class so it can delegate to private visit methods.
friend class InstVisitor<AllocaSliceRewriter, bool>;
using Base = InstVisitor<AllocaSliceRewriter, bool>;
const DataLayout &DL;
AllocaSlices &AS;
SROA &Pass;
AllocaInst &OldAI, &NewAI;
const uint64_t NewAllocaBeginOffset, NewAllocaEndOffset;
Type *NewAllocaTy;
// This is a convenience and flag variable that will be null unless the new
// alloca's integer operations should be widened to this integer type due to
// passing isIntegerWideningViable above. If it is non-null, the desired
// integer type will be stored here for easy access during rewriting.
IntegerType *IntTy;
// If we are rewriting an alloca partition which can be written as pure
// vector operations, we stash extra information here. When VecTy is
// non-null, we have some strict guarantees about the rewritten alloca:
// - The new alloca is exactly the size of the vector type here.
// - The accesses all either map to the entire vector or to a single
// element.
// - The set of accessing instructions is only one of those handled above
// in isVectorPromotionViable. Generally these are the same access kinds
// which are promotable via mem2reg.
VectorType *VecTy;
Type *ElementTy;
uint64_t ElementSize;
// The original offset of the slice currently being rewritten relative to
// the original alloca.
uint64_t BeginOffset = 0;
uint64_t EndOffset = 0;
// The new offsets of the slice currently being rewritten relative to the
// original alloca.
uint64_t NewBeginOffset, NewEndOffset;
uint64_t SliceSize;
bool IsSplittable = false;
bool IsSplit = false;
Use *OldUse = nullptr;
Instruction *OldPtr = nullptr;
// Track post-rewrite users which are PHI nodes and Selects.
SmallSetVector<PHINode *, 8> &PHIUsers;
SmallSetVector<SelectInst *, 8> &SelectUsers;
// Utility IR builder, whose name prefix is set up for each visited use, and
// the insertion point is set to point to the user.
IRBuilderTy IRB;
public:
AllocaSliceRewriter(const DataLayout &DL, AllocaSlices &AS, SROA &Pass,
AllocaInst &OldAI, AllocaInst &NewAI,
uint64_t NewAllocaBeginOffset,
uint64_t NewAllocaEndOffset, bool IsIntegerPromotable,
VectorType *PromotableVecTy,
SmallSetVector<PHINode *, 8> &PHIUsers,
SmallSetVector<SelectInst *, 8> &SelectUsers)
: DL(DL), AS(AS), Pass(Pass), OldAI(OldAI), NewAI(NewAI),
NewAllocaBeginOffset(NewAllocaBeginOffset),
NewAllocaEndOffset(NewAllocaEndOffset),
NewAllocaTy(NewAI.getAllocatedType()),
IntTy(IsIntegerPromotable
? Type::getIntNTy(
NewAI.getContext(),
DL.getTypeSizeInBits(NewAI.getAllocatedType()))
: nullptr),
VecTy(PromotableVecTy),
ElementTy(VecTy ? VecTy->getElementType() : nullptr),
ElementSize(VecTy ? DL.getTypeSizeInBits(ElementTy) / 8 : 0),
PHIUsers(PHIUsers), SelectUsers(SelectUsers),
IRB(NewAI.getContext(), ConstantFolder()) {
if (VecTy) {
assert((DL.getTypeSizeInBits(ElementTy) % 8) == 0 &&
"Only multiple-of-8 sized vector elements are viable");
++NumVectorized;
}
assert((!IntTy && !VecTy) || (IntTy && !VecTy) || (!IntTy && VecTy));
}
bool visit(AllocaSlices::const_iterator I) {
bool CanSROA = true;
BeginOffset = I->beginOffset();
EndOffset = I->endOffset();
IsSplittable = I->isSplittable();
IsSplit =
BeginOffset < NewAllocaBeginOffset || EndOffset > NewAllocaEndOffset;
LLVM_DEBUG(dbgs() << " rewriting " << (IsSplit ? "split " : ""));
LLVM_DEBUG(AS.printSlice(dbgs(), I, ""));
LLVM_DEBUG(dbgs() << "\n");
// Compute the intersecting offset range.
assert(BeginOffset < NewAllocaEndOffset);
assert(EndOffset > NewAllocaBeginOffset);
NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
SliceSize = NewEndOffset - NewBeginOffset;
OldUse = I->getUse();
OldPtr = cast<Instruction>(OldUse->get());
Instruction *OldUserI = cast<Instruction>(OldUse->getUser());
IRB.SetInsertPoint(OldUserI);
IRB.SetCurrentDebugLocation(OldUserI->getDebugLoc());
IRB.SetNamePrefix(Twine(NewAI.getName()) + "." + Twine(BeginOffset) + ".");
CanSROA &= visit(cast<Instruction>(OldUse->getUser()));
if (VecTy || IntTy)
assert(CanSROA);
return CanSROA;
}
private:
// Make sure the other visit overloads are visible.
using Base::visit;
// Every instruction which can end up as a user must have a rewrite rule.
bool visitInstruction(Instruction &I) {
LLVM_DEBUG(dbgs() << " !!!! Cannot rewrite: " << I << "\n");
llvm_unreachable("No rewrite rule for this instruction!");
}
Value *getNewAllocaSlicePtr(IRBuilderTy &IRB, Type *PointerTy) {
// Note that the offset computation can use BeginOffset or NewBeginOffset
// interchangeably for unsplit slices.
assert(IsSplit || BeginOffset == NewBeginOffset);
uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
#ifndef NDEBUG
StringRef OldName = OldPtr->getName();
// Skip through the last '.sroa.' component of the name.
size_t LastSROAPrefix = OldName.rfind(".sroa.");
if (LastSROAPrefix != StringRef::npos) {
OldName = OldName.substr(LastSROAPrefix + strlen(".sroa."));
// Look for an SROA slice index.
size_t IndexEnd = OldName.find_first_not_of("0123456789");
if (IndexEnd != StringRef::npos && OldName[IndexEnd] == '.') {
// Strip the index and look for the offset.
OldName = OldName.substr(IndexEnd + 1);
size_t OffsetEnd = OldName.find_first_not_of("0123456789");
if (OffsetEnd != StringRef::npos && OldName[OffsetEnd] == '.')
// Strip the offset.
OldName = OldName.substr(OffsetEnd + 1);
}
}
// Strip any SROA suffixes as well.
OldName = OldName.substr(0, OldName.find(".sroa_"));
#endif
return getAdjustedPtr(IRB, DL, &NewAI,
APInt(DL.getIndexTypeSizeInBits(PointerTy), Offset),
PointerTy,
#ifndef NDEBUG
Twine(OldName) + "."
#else
Twine()
#endif
);
}
/// Compute suitable alignment to access this slice of the *new*
/// alloca.
///
/// You can optionally pass a type to this routine and if that type's ABI
/// alignment is itself suitable, this will return zero.
unsigned getSliceAlign(Type *Ty = nullptr) {
unsigned NewAIAlign = NewAI.getAlignment();
if (!NewAIAlign)
NewAIAlign = DL.getABITypeAlignment(NewAI.getAllocatedType());
unsigned Align =
MinAlign(NewAIAlign, NewBeginOffset - NewAllocaBeginOffset);
return (Ty && Align == DL.getABITypeAlignment(Ty)) ? 0 : Align;
}
unsigned getIndex(uint64_t Offset) {
assert(VecTy && "Can only call getIndex when rewriting a vector");
uint64_t RelOffset = Offset - NewAllocaBeginOffset;
assert(RelOffset / ElementSize < UINT32_MAX && "Index out of bounds");
uint32_t Index = RelOffset / ElementSize;
assert(Index * ElementSize == RelOffset);
return Index;
}
void deleteIfTriviallyDead(Value *V) {
Instruction *I = cast<Instruction>(V);
if (isInstructionTriviallyDead(I))
Pass.DeadInsts.insert(I);
}
Value *rewriteVectorizedLoadInst() {
unsigned BeginIndex = getIndex(NewBeginOffset);
unsigned EndIndex = getIndex(NewEndOffset);
assert(EndIndex > BeginIndex && "Empty vector!");
Value *V = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
NewAI.getAlignment(), "load");
return extractVector(IRB, V, BeginIndex, EndIndex, "vec");
}
Value *rewriteIntegerLoad(LoadInst &LI) {
assert(IntTy && "We cannot insert an integer to the alloca");
assert(!LI.isVolatile());
Value *V = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
NewAI.getAlignment(), "load");
V = convertValue(DL, IRB, V, IntTy);
assert(NewBeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
if (Offset > 0 || NewEndOffset < NewAllocaEndOffset) {
IntegerType *ExtractTy = Type::getIntNTy(LI.getContext(), SliceSize * 8);
V = extractInteger(DL, IRB, V, ExtractTy, Offset, "extract");
}
// It is possible that the extracted type is not the load type. This
// happens if there is a load past the end of the alloca, and as
// a consequence the slice is narrower but still a candidate for integer
// lowering. To handle this case, we just zero extend the extracted
// integer.
assert(cast<IntegerType>(LI.getType())->getBitWidth() >= SliceSize * 8 &&
"Can only handle an extract for an overly wide load");
if (cast<IntegerType>(LI.getType())->getBitWidth() > SliceSize * 8)
V = IRB.CreateZExt(V, LI.getType());
return V;
}
bool visitLoadInst(LoadInst &LI) {
LLVM_DEBUG(dbgs() << " original: " << LI << "\n");
Value *OldOp = LI.getOperand(0);
assert(OldOp == OldPtr);
AAMDNodes AATags;
LI.getAAMetadata(AATags);
unsigned AS = LI.getPointerAddressSpace();
Type *TargetTy = IsSplit ? Type::getIntNTy(LI.getContext(), SliceSize * 8)
: LI.getType();
const bool IsLoadPastEnd = DL.getTypeStoreSize(TargetTy) > SliceSize;
bool IsPtrAdjusted = false;
Value *V;
if (VecTy) {
V = rewriteVectorizedLoadInst();
} else if (IntTy && LI.getType()->isIntegerTy()) {
V = rewriteIntegerLoad(LI);
} else if (NewBeginOffset == NewAllocaBeginOffset &&
NewEndOffset == NewAllocaEndOffset &&
(canConvertValue(DL, NewAllocaTy, TargetTy) ||
(IsLoadPastEnd && NewAllocaTy->isIntegerTy() &&
TargetTy->isIntegerTy()))) {
LoadInst *NewLI = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
NewAI.getAlignment(),
LI.isVolatile(), LI.getName());
if (AATags)
NewLI->setAAMetadata(AATags);
if (LI.isVolatile())
NewLI->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
// Any !nonnull metadata or !range metadata on the old load is also valid
// on the new load. This is true in some cases even when the loads are
// different types, for example by mapping !nonnull metadata to !range
// metadata by modeling the null pointer constant converted to the
// integer type.
// FIXME: Add support for range metadata here. Currently the utilities
// for this don't propagate range metadata in trivial cases from one
// integer load to another, don't handle non-addrspace-0 null pointers
// correctly, and don't have any support for mapping ranges as the
// integer type becomes wider or narrower.
// Try to preserve any !nonnull metadata from the old load.
if (MDNode *N = LI.getMetadata(LLVMContext::MD_nonnull))
copyNonnullMetadata(LI, N, *NewLI);
V = NewLI;
// If this is an integer load past the end of the slice (which means the
// bytes outside the slice are undef or this load is dead) just forcibly
// fix the integer size with correct handling of endianness.
if (auto *AITy = dyn_cast<IntegerType>(NewAllocaTy))
if (auto *TITy = dyn_cast<IntegerType>(TargetTy))
if (AITy->getBitWidth() < TITy->getBitWidth()) {
V = IRB.CreateZExt(V, TITy, "load.ext");
if (DL.isBigEndian())
V = IRB.CreateShl(V, TITy->getBitWidth() - AITy->getBitWidth(),
"endian_shift");
}
} else {
Type *LTy = TargetTy->getPointerTo(AS);
LoadInst *NewLI = IRB.CreateAlignedLoad(
TargetTy, getNewAllocaSlicePtr(IRB, LTy), getSliceAlign(TargetTy),
LI.isVolatile(), LI.getName());
if (AATags)
NewLI->setAAMetadata(AATags);
if (LI.isVolatile())
NewLI->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
V = NewLI;
IsPtrAdjusted = true;
}
V = convertValue(DL, IRB, V, TargetTy);
if (IsSplit) {
assert(!LI.isVolatile());
assert(LI.getType()->isIntegerTy() &&
"Only integer type loads and stores are split");
assert(SliceSize < DL.getTypeStoreSize(LI.getType()) &&
"Split load isn't smaller than original load");
assert(DL.typeSizeEqualsStoreSize(LI.getType()) &&
"Non-byte-multiple bit width");
// Move the insertion point just past the load so that we can refer to it.
IRB.SetInsertPoint(&*std::next(BasicBlock::iterator(&LI)));
// Create a placeholder value with the same type as LI to use as the
// basis for the new value. This allows us to replace the uses of LI with
// the computed value, and then replace the placeholder with LI, leaving
// LI only used for this computation.
Value *Placeholder = new LoadInst(
LI.getType(), UndefValue::get(LI.getType()->getPointerTo(AS)));
V = insertInteger(DL, IRB, Placeholder, V, NewBeginOffset - BeginOffset,
"insert");
LI.replaceAllUsesWith(V);
Placeholder->replaceAllUsesWith(&LI);
Placeholder->deleteValue();
} else {
LI.replaceAllUsesWith(V);
}
Pass.DeadInsts.insert(&LI);
deleteIfTriviallyDead(OldOp);
LLVM_DEBUG(dbgs() << " to: " << *V << "\n");
return !LI.isVolatile() && !IsPtrAdjusted;
}
bool rewriteVectorizedStoreInst(Value *V, StoreInst &SI, Value *OldOp,
AAMDNodes AATags) {
if (V->getType() != VecTy) {
unsigned BeginIndex = getIndex(NewBeginOffset);
unsigned EndIndex = getIndex(NewEndOffset);
assert(EndIndex > BeginIndex && "Empty vector!");
unsigned NumElements = EndIndex - BeginIndex;
assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
Type *SliceTy = (NumElements == 1)
? ElementTy
: VectorType::get(ElementTy, NumElements);
if (V->getType() != SliceTy)
V = convertValue(DL, IRB, V, SliceTy);
// Mix in the existing elements.
Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
NewAI.getAlignment(), "load");
V = insertVector(IRB, Old, V, BeginIndex, "vec");
}
StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
if (AATags)
Store->setAAMetadata(AATags);
Pass.DeadInsts.insert(&SI);
LLVM_DEBUG(dbgs() << " to: " << *Store << "\n");
return true;
}
bool rewriteIntegerStore(Value *V, StoreInst &SI, AAMDNodes AATags) {
assert(IntTy && "We cannot extract an integer from the alloca");
assert(!SI.isVolatile());
if (DL.getTypeSizeInBits(V->getType()) != IntTy->getBitWidth()) {
Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
NewAI.getAlignment(), "oldload");
Old = convertValue(DL, IRB, Old, IntTy);
assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
uint64_t Offset = BeginOffset - NewAllocaBeginOffset;
V = insertInteger(DL, IRB, Old, SI.getValueOperand(), Offset, "insert");
}
V = convertValue(DL, IRB, V, NewAllocaTy);
StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
Store->copyMetadata(SI, {LLVMContext::MD_mem_parallel_loop_access,
LLVMContext::MD_access_group});
if (AATags)
Store->setAAMetadata(AATags);
Pass.DeadInsts.insert(&SI);
LLVM_DEBUG(dbgs() << " to: " << *Store << "\n");
return true;
}
bool visitStoreInst(StoreInst &SI) {
LLVM_DEBUG(dbgs() << " original: " << SI << "\n");
Value *OldOp = SI.getOperand(1);
assert(OldOp == OldPtr);
AAMDNodes AATags;
SI.getAAMetadata(AATags);
Value *V = SI.getValueOperand();
// Strip all inbounds GEPs and pointer casts to try to dig out any root
// alloca that should be re-examined after promoting this alloca.
if (V->getType()->isPointerTy())
if (AllocaInst *AI = dyn_cast<AllocaInst>(V->stripInBoundsOffsets()))
Pass.PostPromotionWorklist.insert(AI);
if (SliceSize < DL.getTypeStoreSize(V->getType())) {
assert(!SI.isVolatile());
assert(V->getType()->isIntegerTy() &&
"Only integer type loads and stores are split");
assert(DL.typeSizeEqualsStoreSize(V->getType()) &&
"Non-byte-multiple bit width");
IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), SliceSize * 8);
V = extractInteger(DL, IRB, V, NarrowTy, NewBeginOffset - BeginOffset,
"extract");
}
if (VecTy)
return rewriteVectorizedStoreInst(V, SI, OldOp, AATags);
if (IntTy && V->getType()->isIntegerTy())
return rewriteIntegerStore(V, SI, AATags);
const bool IsStorePastEnd = DL.getTypeStoreSize(V->getType()) > SliceSize;
StoreInst *NewSI;
if (NewBeginOffset == NewAllocaBeginOffset &&
NewEndOffset == NewAllocaEndOffset &&
(canConvertValue(DL, V->getType(), NewAllocaTy) ||
(IsStorePastEnd && NewAllocaTy->isIntegerTy() &&
V->getType()->isIntegerTy()))) {
// If this is an integer store past the end of the slice (and thus the bytes
// past that point are irrelevant or this is unreachable), truncate the
// value prior to storing.
if (auto *VITy = dyn_cast<IntegerType>(V->getType()))
if (auto *AITy = dyn_cast<IntegerType>(NewAllocaTy))
if (VITy->getBitWidth() > AITy->getBitWidth()) {
if (DL.isBigEndian())
V = IRB.CreateLShr(V, VITy->getBitWidth() - AITy->getBitWidth(),
"endian_shift");
V = IRB.CreateTrunc(V, AITy, "load.trunc");
}
V = convertValue(DL, IRB, V, NewAllocaTy);
NewSI = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
SI.isVolatile());
} else {
unsigned AS = SI.getPointerAddressSpace();
Value *NewPtr = getNewAllocaSlicePtr(IRB, V->getType()->getPointerTo(AS));
NewSI = IRB.CreateAlignedStore(V, NewPtr, getSliceAlign(V->getType()),
SI.isVolatile());
}
NewSI->copyMetadata(SI, {LLVMContext::MD_mem_parallel_loop_access,
LLVMContext::MD_access_group});
if (AATags)
NewSI->setAAMetadata(AATags);
if (SI.isVolatile())
NewSI->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
Pass.DeadInsts.insert(&SI);
deleteIfTriviallyDead(OldOp);
LLVM_DEBUG(dbgs() << " to: " << *NewSI << "\n");
return NewSI->getPointerOperand() == &NewAI && !SI.isVolatile();
}
/// Compute an integer value from splatting an i8 across the given
/// number of bytes.
///
/// Note that this routine assumes an i8 is a byte. If that isn't true, don't
/// call this routine.
/// FIXME: Heed the advice above.
///
/// \param V The i8 value to splat.
/// \param Size The number of bytes in the output (assuming i8 is one byte)
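///
/// Worked example (illustrative): splatting the i8 value 0xAB across 4
/// bytes zero-extends it to i32 and multiplies by
/// 0xFFFFFFFF udiv 0xFF == 0x01010101, producing 0xABABABAB.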
Value *getIntegerSplat(Value *V, unsigned Size) {
assert(Size > 0 && "Expected a positive number of bytes.");
IntegerType *VTy = cast<IntegerType>(V->getType());
assert(VTy->getBitWidth() == 8 && "Expected an i8 value for the byte");
if (Size == 1)
return V;
Type *SplatIntTy = Type::getIntNTy(VTy->getContext(), Size * 8);
V = IRB.CreateMul(
IRB.CreateZExt(V, SplatIntTy, "zext"),
ConstantExpr::getUDiv(
Constant::getAllOnesValue(SplatIntTy),
ConstantExpr::getZExt(Constant::getAllOnesValue(V->getType()),
SplatIntTy)),
"isplat");
return V;
}
/// Compute a vector splat for a given element value.
Value *getVectorSplat(Value *V, unsigned NumElements) {
V = IRB.CreateVectorSplat(NumElements, V, "vsplat");
LLVM_DEBUG(dbgs() << " splat: " << *V << "\n");
return V;
}
bool visitMemSetInst(MemSetInst &II) {
LLVM_DEBUG(dbgs() << " original: " << II << "\n");
assert(II.getRawDest() == OldPtr);
AAMDNodes AATags;
II.getAAMetadata(AATags);
// If the memset has a variable size, it cannot be split, just adjust the
// pointer to the new alloca.
if (!isa<Constant>(II.getLength())) {
assert(!IsSplit);
assert(NewBeginOffset == BeginOffset);
II.setDest(getNewAllocaSlicePtr(IRB, OldPtr->getType()));
II.setDestAlignment(getSliceAlign());
deleteIfTriviallyDead(OldPtr);
return false;
}
// Record this instruction for deletion.
Pass.DeadInsts.insert(&II);
Type *AllocaTy = NewAI.getAllocatedType();
Type *ScalarTy = AllocaTy->getScalarType();
const bool CanContinue = [&]() {
if (VecTy || IntTy)
return true;
if (BeginOffset > NewAllocaBeginOffset ||
EndOffset < NewAllocaEndOffset)
return false;
auto *C = cast<ConstantInt>(II.getLength());
if (C->getBitWidth() > 64)
return false;
const auto Len = C->getZExtValue();
auto *Int8Ty = IntegerType::getInt8Ty(NewAI.getContext());
auto *SrcTy = VectorType::get(Int8Ty, Len);
return canConvertValue(DL, SrcTy, AllocaTy) &&
DL.isLegalInteger(DL.getTypeSizeInBits(ScalarTy));
}();
// If this doesn't map cleanly onto the alloca type, and that type isn't
// a single value type, just emit a memset.
if (!CanContinue) {
Type *SizeTy = II.getLength()->getType();
Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset);
CallInst *New = IRB.CreateMemSet(
getNewAllocaSlicePtr(IRB, OldPtr->getType()), II.getValue(), Size,
getSliceAlign(), II.isVolatile());
if (AATags)
New->setAAMetadata(AATags);
LLVM_DEBUG(dbgs() << " to: " << *New << "\n");
return false;
}
// If we can represent this as a simple value, we have to build the actual
// value to store, which requires expanding the byte present in memset to
// a sensible representation for the alloca type. This is essentially
// splatting the byte to a sufficiently wide integer, splatting it across
// any desired vector width, and bitcasting to the final type.
Value *V;
if (VecTy) {
// If this is a memset of a vectorized alloca, insert it.
assert(ElementTy == ScalarTy);
unsigned BeginIndex = getIndex(NewBeginOffset);
unsigned EndIndex = getIndex(NewEndOffset);
assert(EndIndex > BeginIndex && "Empty vector!");
unsigned NumElements = EndIndex - BeginIndex;
assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
Value *Splat =
getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ElementTy) / 8);
Splat = convertValue(DL, IRB, Splat, ElementTy);
if (NumElements > 1)
Splat = getVectorSplat(Splat, NumElements);
Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
NewAI.getAlignment(), "oldload");
V = insertVector(IRB, Old, Splat, BeginIndex, "vec");
} else if (IntTy) {
// If this is a memset on an alloca where we can widen stores, insert the
// set integer.
assert(!II.isVolatile());
uint64_t Size = NewEndOffset - NewBeginOffset;
V = getIntegerSplat(II.getValue(), Size);
if (IntTy && (BeginOffset != NewAllocaBeginOffset ||
EndOffset != NewAllocaEndOffset)) {
Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
NewAI.getAlignment(), "oldload");
Old = convertValue(DL, IRB, Old, IntTy);
uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
V = insertInteger(DL, IRB, Old, V, Offset, "insert");
} else {
assert(V->getType() == IntTy &&
"Wrong type for an alloca wide integer!");
}
V = convertValue(DL, IRB, V, AllocaTy);
} else {
// Established these invariants above.
assert(NewBeginOffset == NewAllocaBeginOffset);
assert(NewEndOffset == NewAllocaEndOffset);
V = getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ScalarTy) / 8);
if (VectorType *AllocaVecTy = dyn_cast<VectorType>(AllocaTy))
V = getVectorSplat(V, AllocaVecTy->getNumElements());
V = convertValue(DL, IRB, V, AllocaTy);
}
StoreInst *New = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
II.isVolatile());
if (AATags)
New->setAAMetadata(AATags);
LLVM_DEBUG(dbgs() << " to: " << *New << "\n");
return !II.isVolatile();
}
bool visitMemTransferInst(MemTransferInst &II) {
// Rewriting of memory transfer instructions can be a bit tricky. We break
// them into two categories: split intrinsics and unsplit intrinsics.
LLVM_DEBUG(dbgs() << " original: " << II << "\n");
AAMDNodes AATags;
II.getAAMetadata(AATags);
bool IsDest = &II.getRawDestUse() == OldUse;
assert((IsDest && II.getRawDest() == OldPtr) ||
(!IsDest && II.getRawSource() == OldPtr));
unsigned SliceAlign = getSliceAlign();
// For unsplit intrinsics, we simply modify the source and destination
// pointers in place. This isn't just an optimization, it is a matter of
// correctness. With unsplit intrinsics we may be dealing with transfers
// within a single alloca before SROA ran, or with transfers that have
// a variable length. We may also be dealing with memmove instead of
// memcpy, and so simply updating the pointers is necessary for us to
// update both source and dest of a single call.
if (!IsSplittable) {
Value *AdjustedPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
if (IsDest) {
II.setDest(AdjustedPtr);
II.setDestAlignment(SliceAlign);
}
else {
II.setSource(AdjustedPtr);
II.setSourceAlignment(SliceAlign);
}
LLVM_DEBUG(dbgs() << " to: " << II << "\n");
deleteIfTriviallyDead(OldPtr);
return false;
}
// For split transfer intrinsics we have an incredibly useful assurance:
// the source and destination do not reside within the same alloca, and at
// least one of them does not escape. This means that we can replace
// memmove with memcpy, and we don't need to worry about all manner of
// downsides to splitting and transforming the operations.
// If this doesn't map cleanly onto the alloca type, and that type isn't
// a single value type, just emit a memcpy.
bool EmitMemCpy =
!VecTy && !IntTy &&
(BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset ||
SliceSize != DL.getTypeStoreSize(NewAI.getAllocatedType()) ||
!NewAI.getAllocatedType()->isSingleValueType());
// If we're just going to emit a memcpy, the alloca hasn't changed, and the
// size hasn't been shrunk based on analysis of the viable range, this is
// a no-op.
if (EmitMemCpy && &OldAI == &NewAI) {
// Ensure the start lines up.
assert(NewBeginOffset == BeginOffset);
// Rewrite the size as needed.
if (NewEndOffset != EndOffset)
II.setLength(ConstantInt::get(II.getLength()->getType(),
NewEndOffset - NewBeginOffset));
return false;
}
// Record this instruction for deletion.
Pass.DeadInsts.insert(&II);
// Strip all inbounds GEPs and pointer casts to try to dig out any root
// alloca that should be re-examined after rewriting this instruction.
Value *OtherPtr = IsDest ? II.getRawSource() : II.getRawDest();
if (AllocaInst *AI =
dyn_cast<AllocaInst>(OtherPtr->stripInBoundsOffsets())) {
assert(AI != &OldAI && AI != &NewAI &&
"Splittable transfers cannot reach the same alloca on both ends.");
Pass.Worklist.insert(AI);
}
Type *OtherPtrTy = OtherPtr->getType();
unsigned OtherAS = OtherPtrTy->getPointerAddressSpace();
// Compute the relative offset for the other pointer within the transfer.
unsigned OffsetWidth = DL.getIndexSizeInBits(OtherAS);
APInt OtherOffset(OffsetWidth, NewBeginOffset - BeginOffset);
unsigned OtherAlign =
IsDest ? II.getSourceAlignment() : II.getDestAlignment();
OtherAlign = MinAlign(OtherAlign ? OtherAlign : 1,
OtherOffset.zextOrTrunc(64).getZExtValue());
if (EmitMemCpy) {
// Compute the other pointer, folding as much as possible to produce
// a single, simple GEP in most cases.
OtherPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy,
OtherPtr->getName() + ".");
Value *OurPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
Type *SizeTy = II.getLength()->getType();
Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset);
Value *DestPtr, *SrcPtr;
unsigned DestAlign, SrcAlign;
// Note: IsDest is true iff we're copying into the new alloca slice
if (IsDest) {
DestPtr = OurPtr;
DestAlign = SliceAlign;
SrcPtr = OtherPtr;
SrcAlign = OtherAlign;
} else {
DestPtr = OtherPtr;
DestAlign = OtherAlign;
SrcPtr = OurPtr;
SrcAlign = SliceAlign;
}
CallInst *New = IRB.CreateMemCpy(DestPtr, DestAlign, SrcPtr, SrcAlign,
Size, II.isVolatile());
if (AATags)
New->setAAMetadata(AATags);
LLVM_DEBUG(dbgs() << " to: " << *New << "\n");
return false;
}
bool IsWholeAlloca = NewBeginOffset == NewAllocaBeginOffset &&
NewEndOffset == NewAllocaEndOffset;
uint64_t Size = NewEndOffset - NewBeginOffset;
unsigned BeginIndex = VecTy ? getIndex(NewBeginOffset) : 0;
unsigned EndIndex = VecTy ? getIndex(NewEndOffset) : 0;
unsigned NumElements = EndIndex - BeginIndex;
IntegerType *SubIntTy =
IntTy ? Type::getIntNTy(IntTy->getContext(), Size * 8) : nullptr;
// Reset the other pointer type to match the register type we're going to
// use, but using the address space of the original other pointer.
Type *OtherTy;
if (VecTy && !IsWholeAlloca) {
if (NumElements == 1)
OtherTy = VecTy->getElementType();
else
OtherTy = VectorType::get(VecTy->getElementType(), NumElements);
} else if (IntTy && !IsWholeAlloca) {
OtherTy = SubIntTy;
} else {
OtherTy = NewAllocaTy;
}
OtherPtrTy = OtherTy->getPointerTo(OtherAS);
Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy,
OtherPtr->getName() + ".");
unsigned SrcAlign = OtherAlign;
Value *DstPtr = &NewAI;
unsigned DstAlign = SliceAlign;
if (!IsDest) {
std::swap(SrcPtr, DstPtr);
std::swap(SrcAlign, DstAlign);
}
Value *Src;
if (VecTy && !IsWholeAlloca && !IsDest) {
Src = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
NewAI.getAlignment(), "load");
Src = extractVector(IRB, Src, BeginIndex, EndIndex, "vec");
} else if (IntTy && !IsWholeAlloca && !IsDest) {
Src = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
NewAI.getAlignment(), "load");
Src = convertValue(DL, IRB, Src, IntTy);
uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
Src = extractInteger(DL, IRB, Src, SubIntTy, Offset, "extract");
} else {
LoadInst *Load = IRB.CreateAlignedLoad(OtherTy, SrcPtr, SrcAlign,
II.isVolatile(), "copyload");
if (AATags)
Load->setAAMetadata(AATags);
Src = Load;
}
if (VecTy && !IsWholeAlloca && IsDest) {
Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
NewAI.getAlignment(), "oldload");
Src = insertVector(IRB, Old, Src, BeginIndex, "vec");
} else if (IntTy && !IsWholeAlloca && IsDest) {
Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
NewAI.getAlignment(), "oldload");
Old = convertValue(DL, IRB, Old, IntTy);
uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
Src = insertInteger(DL, IRB, Old, Src, Offset, "insert");
Src = convertValue(DL, IRB, Src, NewAllocaTy);
}
StoreInst *Store = cast<StoreInst>(
IRB.CreateAlignedStore(Src, DstPtr, DstAlign, II.isVolatile()));
if (AATags)
Store->setAAMetadata(AATags);
LLVM_DEBUG(dbgs() << " to: " << *Store << "\n");
return !II.isVolatile();
}
bool visitIntrinsicInst(IntrinsicInst &II) {
assert(II.isLifetimeStartOrEnd());
LLVM_DEBUG(dbgs() << " original: " << II << "\n");
assert(II.getArgOperand(1) == OldPtr);
// Record this instruction for deletion.
Pass.DeadInsts.insert(&II);
// Lifetime intrinsics are only promotable if they cover the whole alloca.
// Therefore, we drop lifetime intrinsics which don't cover the whole
// alloca.
// (In theory, intrinsics which partially cover an alloca could be
// promoted, but PromoteMemToReg doesn't handle that case.)
// FIXME: Check whether the alloca is promotable before dropping the
// lifetime intrinsics?
if (NewBeginOffset != NewAllocaBeginOffset ||
NewEndOffset != NewAllocaEndOffset)
return true;
ConstantInt *Size =
ConstantInt::get(cast<IntegerType>(II.getArgOperand(0)->getType()),
NewEndOffset - NewBeginOffset);
// Lifetime intrinsics always expect an i8* so directly get such a pointer
// for the new alloca slice.
Type *PointerTy = IRB.getInt8PtrTy(OldPtr->getType()->getPointerAddressSpace());
Value *Ptr = getNewAllocaSlicePtr(IRB, PointerTy);
Value *New;
if (II.getIntrinsicID() == Intrinsic::lifetime_start)
New = IRB.CreateLifetimeStart(Ptr, Size);
else
New = IRB.CreateLifetimeEnd(Ptr, Size);
(void)New;
LLVM_DEBUG(dbgs() << " to: " << *New << "\n");
return true;
}
void fixLoadStoreAlign(Instruction &Root) {
// This algorithm implements the same visitor loop as
// hasUnsafePHIOrSelectUse, and fixes the alignment of each load
// or store found.
SmallPtrSet<Instruction *, 4> Visited;
SmallVector<Instruction *, 4> Uses;
Visited.insert(&Root);
Uses.push_back(&Root);
do {
Instruction *I = Uses.pop_back_val();
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
unsigned LoadAlign = LI->getAlignment();
if (!LoadAlign)
LoadAlign = DL.getABITypeAlignment(LI->getType());
LI->setAlignment(std::min(LoadAlign, getSliceAlign()));
continue;
}
if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
unsigned StoreAlign = SI->getAlignment();
if (!StoreAlign) {
Value *Op = SI->getOperand(0);
StoreAlign = DL.getABITypeAlignment(Op->getType());
}
SI->setAlignment(std::min(StoreAlign, getSliceAlign()));
continue;
}
assert(isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I) ||
isa<PHINode>(I) || isa<SelectInst>(I) ||
isa<GetElementPtrInst>(I));
for (User *U : I->users())
if (Visited.insert(cast<Instruction>(U)).second)
Uses.push_back(cast<Instruction>(U));
} while (!Uses.empty());
}
bool visitPHINode(PHINode &PN) {
LLVM_DEBUG(dbgs() << " original: " << PN << "\n");
assert(BeginOffset >= NewAllocaBeginOffset && "PHIs are unsplittable");
assert(EndOffset <= NewAllocaEndOffset && "PHIs are unsplittable");
// We would like to compute a new pointer in only one place, but have it be
// as local as possible to the PHI. To do that, we re-use the location of
// the old pointer, which necessarily must be in the right position to
// dominate the PHI.
IRBuilderTy PtrBuilder(IRB);
if (isa<PHINode>(OldPtr))
PtrBuilder.SetInsertPoint(&*OldPtr->getParent()->getFirstInsertionPt());
else
PtrBuilder.SetInsertPoint(OldPtr);
PtrBuilder.SetCurrentDebugLocation(OldPtr->getDebugLoc());
Value *NewPtr = getNewAllocaSlicePtr(PtrBuilder, OldPtr->getType());
// Replace the operands which were using the old pointer.
std::replace(PN.op_begin(), PN.op_end(), cast<Value>(OldPtr), NewPtr);
LLVM_DEBUG(dbgs() << " to: " << PN << "\n");
deleteIfTriviallyDead(OldPtr);
// Fix the alignment of any loads or stores using this PHI node.
fixLoadStoreAlign(PN);
// PHIs can't be promoted on their own, but often can be speculated. We
// check the speculation outside of the rewriter so that we see the
// fully-rewritten alloca.
PHIUsers.insert(&PN);
return true;
}
bool visitSelectInst(SelectInst &SI) {
LLVM_DEBUG(dbgs() << " original: " << SI << "\n");
assert((SI.getTrueValue() == OldPtr || SI.getFalseValue() == OldPtr) &&
"Pointer isn't an operand!");
assert(BeginOffset >= NewAllocaBeginOffset && "Selects are unsplittable");
assert(EndOffset <= NewAllocaEndOffset && "Selects are unsplittable");
Value *NewPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
// Replace the operands which were using the old pointer.
if (SI.getOperand(1) == OldPtr)
SI.setOperand(1, NewPtr);
if (SI.getOperand(2) == OldPtr)
SI.setOperand(2, NewPtr);
LLVM_DEBUG(dbgs() << " to: " << SI << "\n");
deleteIfTriviallyDead(OldPtr);
// Fix the alignment of any loads or stores using this select.
fixLoadStoreAlign(SI);
// Selects can't be promoted on their own, but often can be speculated. We
// check the speculation outside of the rewriter so that we see the
// fully-rewritten alloca.
SelectUsers.insert(&SI);
return true;
}
};
namespace {
/// Visitor to rewrite aggregate loads and stores as scalar.
///
/// This pass aggressively rewrites all aggregate loads and stores on
/// a particular pointer (or any pointer derived from it which we can identify)
/// with scalar loads and stores.
class AggLoadStoreRewriter : public InstVisitor<AggLoadStoreRewriter, bool> {
// Befriend the base class so it can delegate to private visit methods.
friend class InstVisitor<AggLoadStoreRewriter, bool>;
/// Queue of pointer uses to analyze and potentially rewrite.
SmallVector<Use *, 8> Queue;
/// Set to prevent us from cycling with phi nodes and loops.
SmallPtrSet<User *, 8> Visited;
/// The current pointer use being rewritten. This is used to dig up the used
/// value (as opposed to the user).
Use *U;
/// Used to calculate offsets, and hence alignment, of subobjects.
const DataLayout &DL;
public:
AggLoadStoreRewriter(const DataLayout &DL) : DL(DL) {}
/// Rewrite loads and stores through a pointer and all pointers derived from
/// it.
bool rewrite(Instruction &I) {
LLVM_DEBUG(dbgs() << " Rewriting FCA loads and stores...\n");
enqueueUsers(I);
bool Changed = false;
while (!Queue.empty()) {
U = Queue.pop_back_val();
Changed |= visit(cast<Instruction>(U->getUser()));
}
return Changed;
}
private:
/// Enqueue all the users of the given instruction for further processing.
/// This uses a set to de-duplicate users.
void enqueueUsers(Instruction &I) {
for (Use &U : I.uses())
if (Visited.insert(U.getUser()).second)
Queue.push_back(&U);
}
// Conservative default is to not rewrite anything.
bool visitInstruction(Instruction &I) { return false; }
/// Generic recursive split emission class.
template <typename Derived> class OpSplitter {
protected:
/// The builder used to form new instructions.
IRBuilderTy IRB;
/// The indices to be used with insert- or extractvalue to select the
/// appropriate value within the aggregate.
SmallVector<unsigned, 4> Indices;
/// The indices to a GEP instruction which will move Ptr to the correct slot
/// within the aggregate.
SmallVector<Value *, 4> GEPIndices;
/// The base pointer of the original op, used as a base for GEPing the
/// split operations.
Value *Ptr;
/// The base pointee type being GEPed into.
Type *BaseTy;
/// Known alignment of the base pointer.
unsigned BaseAlign;
/// To calculate offset of each component so we can correctly deduce
/// alignments.
const DataLayout &DL;
/// Initialize the splitter with an insertion point, Ptr and start with a
/// single zero GEP index.
OpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy,
unsigned BaseAlign, const DataLayout &DL)
: IRB(InsertionPoint), GEPIndices(1, IRB.getInt32(0)), Ptr(Ptr),
BaseTy(BaseTy), BaseAlign(BaseAlign), DL(DL) {}
public:
/// Generic recursive split emission routine.
///
/// This method recursively splits an aggregate op (load or store) into
/// scalar or vector ops. It splits recursively until it hits a single value
/// and emits that single value operation via the template argument.
///
/// The logic of this routine relies on GEPs and insertvalue and
/// extractvalue all operating with the same fundamental index list, merely
/// formatted differently (GEPs need actual values).
///
/// \param Ty The type being split recursively into smaller ops.
/// \param Agg The aggregate value being built up or stored, depending on
/// whether this is splitting a load or a store respectively.
void emitSplitOps(Type *Ty, Value *&Agg, const Twine &Name) {
if (Ty->isSingleValueType()) {
unsigned Offset = DL.getIndexedOffsetInType(BaseTy, GEPIndices);
return static_cast<Derived *>(this)->emitFunc(
Ty, Agg, MinAlign(BaseAlign, Offset), Name);
}
if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
unsigned OldSize = Indices.size();
(void)OldSize;
for (unsigned Idx = 0, Size = ATy->getNumElements(); Idx != Size;
++Idx) {
assert(Indices.size() == OldSize && "Did not return to the old size");
Indices.push_back(Idx);
GEPIndices.push_back(IRB.getInt32(Idx));
emitSplitOps(ATy->getElementType(), Agg, Name + "." + Twine(Idx));
GEPIndices.pop_back();
Indices.pop_back();
}
return;
}
if (StructType *STy = dyn_cast<StructType>(Ty)) {
unsigned OldSize = Indices.size();
(void)OldSize;
for (unsigned Idx = 0, Size = STy->getNumElements(); Idx != Size;
++Idx) {
assert(Indices.size() == OldSize && "Did not return to the old size");
Indices.push_back(Idx);
GEPIndices.push_back(IRB.getInt32(Idx));
emitSplitOps(STy->getElementType(Idx), Agg, Name + "." + Twine(Idx));
GEPIndices.pop_back();
Indices.pop_back();
}
return;
}
llvm_unreachable("Only arrays and structs are aggregate loadable types");
}
};
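// Illustrative walk-through (not in the original source): for a base type
//   { i32, [2 x float] }
// emitSplitOps recurses to each leaf, keeping Indices and GEPIndices in
// lockstep (GEPIndices carries the extra leading zero for the pointer):
//   leaf 0:   Indices = {0},    GEP indices = (0, 0)    -> i32
//   leaf 1.0: Indices = {1, 0}, GEP indices = (0, 1, 0) -> float
//   leaf 1.1: Indices = {1, 1}, GEP indices = (0, 1, 1) -> float
// Each leaf reaches the derived class's emitFunc with an alignment of
// MinAlign(BaseAlign, byte offset of the leaf).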
struct LoadOpSplitter : public OpSplitter<LoadOpSplitter> {
AAMDNodes AATags;
LoadOpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy,
AAMDNodes AATags, unsigned BaseAlign, const DataLayout &DL)
: OpSplitter<LoadOpSplitter>(InsertionPoint, Ptr, BaseTy, BaseAlign,
DL), AATags(AATags) {}
/// Emit a leaf load of a single value. This is called at the leaves of the
/// recursive emission to actually load values.
void emitFunc(Type *Ty, Value *&Agg, unsigned Align, const Twine &Name) {
assert(Ty->isSingleValueType());
// Load the single value and insert it using the indices.
Value *GEP =
IRB.CreateInBoundsGEP(BaseTy, Ptr, GEPIndices, Name + ".gep");
LoadInst *Load = IRB.CreateAlignedLoad(Ty, GEP, Align, Name + ".load");
if (AATags)
Load->setAAMetadata(AATags);
Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert");
LLVM_DEBUG(dbgs() << " to: " << *Load << "\n");
}
};
bool visitLoadInst(LoadInst &LI) {
assert(LI.getPointerOperand() == *U);
if (!LI.isSimple() || LI.getType()->isSingleValueType())
return false;
// We have an aggregate being loaded, split it apart.
LLVM_DEBUG(dbgs() << " original: " << LI << "\n");
AAMDNodes AATags;
LI.getAAMetadata(AATags);
LoadOpSplitter Splitter(&LI, *U, LI.getType(), AATags,
getAdjustedAlignment(&LI, 0, DL), DL);
Value *V = UndefValue::get(LI.getType());
Splitter.emitSplitOps(LI.getType(), V, LI.getName() + ".fca");
LI.replaceAllUsesWith(V);
LI.eraseFromParent();
return true;
}
struct StoreOpSplitter : public OpSplitter<StoreOpSplitter> {
StoreOpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy,
AAMDNodes AATags, unsigned BaseAlign, const DataLayout &DL)
: OpSplitter<StoreOpSplitter>(InsertionPoint, Ptr, BaseTy, BaseAlign,
DL),
AATags(AATags) {}
AAMDNodes AATags;
/// Emit a leaf store of a single value. This is called at the leaves of the
/// recursive emission to actually produce stores.
void emitFunc(Type *Ty, Value *&Agg, unsigned Align, const Twine &Name) {
assert(Ty->isSingleValueType());
// Extract the single value and store it using the indices.
//
// The gep and extractvalue values are factored out of the CreateStore
// call to make the output independent of the argument evaluation order.
Value *ExtractValue =
IRB.CreateExtractValue(Agg, Indices, Name + ".extract");
Value *InBoundsGEP =
IRB.CreateInBoundsGEP(BaseTy, Ptr, GEPIndices, Name + ".gep");
StoreInst *Store =
IRB.CreateAlignedStore(ExtractValue, InBoundsGEP, Align);
if (AATags)
Store->setAAMetadata(AATags);
LLVM_DEBUG(dbgs() << " to: " << *Store << "\n");
}
};
bool visitStoreInst(StoreInst &SI) {
if (!SI.isSimple() || SI.getPointerOperand() != *U)
return false;
Value *V = SI.getValueOperand();
if (V->getType()->isSingleValueType())
return false;
// We have an aggregate being stored, split it apart.
LLVM_DEBUG(dbgs() << " original: " << SI << "\n");
AAMDNodes AATags;
SI.getAAMetadata(AATags);
StoreOpSplitter Splitter(&SI, *U, V->getType(), AATags,
getAdjustedAlignment(&SI, 0, DL), DL);
Splitter.emitSplitOps(V->getType(), V, V->getName() + ".fca");
SI.eraseFromParent();
return true;
}
bool visitBitCastInst(BitCastInst &BC) {
enqueueUsers(BC);
return false;
}
bool visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
enqueueUsers(ASC);
return false;
}
bool visitGetElementPtrInst(GetElementPtrInst &GEPI) {
enqueueUsers(GEPI);
return false;
}
bool visitPHINode(PHINode &PN) {
enqueueUsers(PN);
return false;
}
bool visitSelectInst(SelectInst &SI) {
enqueueUsers(SI);
return false;
}
};
} // end anonymous namespace
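// Illustrative sketch (not in the original source) of the rewrite performed
// by AggLoadStoreRewriter: a first-class aggregate load such as
//   %v = load { i32, float }, { i32, float }* %p
// is replaced by per-element scalar loads stitched back together:
//   %v.fca.0.gep    = getelementptr inbounds { i32, float }, { i32, float }* %p, i32 0, i32 0
//   %v.fca.0.load   = load i32, i32* %v.fca.0.gep
//   %v.fca.0.insert = insertvalue { i32, float } undef, i32 %v.fca.0.load, 0
//   %v.fca.1.gep    = getelementptr inbounds { i32, float }, { i32, float }* %p, i32 0, i32 1
//   %v.fca.1.load   = load float, float* %v.fca.1.gep
//   %v.fca.1.insert = insertvalue { i32, float } %v.fca.0.insert, float %v.fca.1.load, 1
// Aggregate stores are handled symmetrically with extractvalue plus scalar
// stores.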
/// Strip aggregate type wrapping.
///
/// This removes no-op aggregate types wrapping an underlying type. It will
/// strip as many layers of types as it can without changing either the type
/// size or the allocated size.
static Type *stripAggregateTypeWrapping(const DataLayout &DL, Type *Ty) {
if (Ty->isSingleValueType())
return Ty;
uint64_t AllocSize = DL.getTypeAllocSize(Ty);
uint64_t TypeSize = DL.getTypeSizeInBits(Ty);
Type *InnerTy;
if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) {
InnerTy = ArrTy->getElementType();
} else if (StructType *STy = dyn_cast<StructType>(Ty)) {
const StructLayout *SL = DL.getStructLayout(STy);
unsigned Index = SL->getElementContainingOffset(0);
InnerTy = STy->getElementType(Index);
} else {
return Ty;
}
if (AllocSize > DL.getTypeAllocSize(InnerTy) ||
TypeSize > DL.getTypeSizeInBits(InnerTy))
return Ty;
return stripAggregateTypeWrapping(DL, InnerTy);
}
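// Illustrative examples (not in the original source), assuming a typical
// 64-bit data layout:
//   stripAggregateTypeWrapping(DL, { { i32 } })  -> i32  (each wrapper covers
//                                                         the full size)
//   stripAggregateTypeWrapping(DL, [1 x i64])    -> i64
//   stripAggregateTypeWrapping(DL, { i32, i32 }) -> unchanged (the first
//                                                   element is strictly smaller)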
/// Try to find a partition of the aggregate type passed in for a given
/// offset and size.
///
/// This recurses through the aggregate type and tries to compute a subtype
/// based on the offset and size. When the offset and size span a sub-section
/// of an array, it will even compute a new array type for that sub-section,
/// and the same for structs.
///
/// Note that this routine is very strict and tries to find a partition of the
/// type which produces the *exact* right offset and size. It is not forgiving
/// when the size or offset causes either end of the type-based partition to be off.
/// Also, this is a best-effort routine. It is reasonable to give up and not
/// return a type if necessary.
static Type *getTypePartition(const DataLayout &DL, Type *Ty, uint64_t Offset,
uint64_t Size) {
if (Offset == 0 && DL.getTypeAllocSize(Ty) == Size)
return stripAggregateTypeWrapping(DL, Ty);
if (Offset > DL.getTypeAllocSize(Ty) ||
(DL.getTypeAllocSize(Ty) - Offset) < Size)
return nullptr;
if (SequentialType *SeqTy = dyn_cast<SequentialType>(Ty)) {
Type *ElementTy = SeqTy->getElementType();
uint64_t ElementSize = DL.getTypeAllocSize(ElementTy);
uint64_t NumSkippedElements = Offset / ElementSize;
if (NumSkippedElements >= SeqTy->getNumElements())
return nullptr;
Offset -= NumSkippedElements * ElementSize;
// First check if we need to recurse.
if (Offset > 0 || Size < ElementSize) {
// Bail if the partition ends in a different array element.
if ((Offset + Size) > ElementSize)
return nullptr;
// Recurse through the element type trying to peel off offset bytes.
return getTypePartition(DL, ElementTy, Offset, Size);
}
assert(Offset == 0);
if (Size == ElementSize)
return stripAggregateTypeWrapping(DL, ElementTy);
assert(Size > ElementSize);
uint64_t NumElements = Size / ElementSize;
if (NumElements * ElementSize != Size)
return nullptr;
return ArrayType::get(ElementTy, NumElements);
}
StructType *STy = dyn_cast<StructType>(Ty);
if (!STy)
return nullptr;
const StructLayout *SL = DL.getStructLayout(STy);
if (Offset >= SL->getSizeInBytes())
return nullptr;
uint64_t EndOffset = Offset + Size;
if (EndOffset > SL->getSizeInBytes())
return nullptr;
unsigned Index = SL->getElementContainingOffset(Offset);
Offset -= SL->getElementOffset(Index);
Type *ElementTy = STy->getElementType(Index);
uint64_t ElementSize = DL.getTypeAllocSize(ElementTy);
if (Offset >= ElementSize)
return nullptr; // The offset points into alignment padding.
// See if any partition must be contained by the element.
if (Offset > 0 || Size < ElementSize) {
if ((Offset + Size) > ElementSize)
return nullptr;
return getTypePartition(DL, ElementTy, Offset, Size);
}
assert(Offset == 0);
if (Size == ElementSize)
return stripAggregateTypeWrapping(DL, ElementTy);
StructType::element_iterator EI = STy->element_begin() + Index,
EE = STy->element_end();
if (EndOffset < SL->getSizeInBytes()) {
unsigned EndIndex = SL->getElementContainingOffset(EndOffset);
if (Index == EndIndex)
return nullptr; // Within a single element and its padding.
// Don't try to form "natural" types if the elements don't line up with the
// expected size.
// FIXME: We could potentially recurse down through the last element in the
// sub-struct to find a natural end point.
if (SL->getElementOffset(EndIndex) != EndOffset)
return nullptr;
assert(Index < EndIndex);
EE = STy->element_begin() + EndIndex;
}
// Try to build up a sub-structure.
StructType *SubTy =
StructType::get(STy->getContext(), makeArrayRef(EI, EE), STy->isPacked());
const StructLayout *SubSL = DL.getStructLayout(SubTy);
if (Size != SubSL->getSizeInBytes())
return nullptr; // The sub-struct doesn't have quite the size needed.
return SubTy;
}
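// Illustrative examples (not in the original source), assuming a typical
// 64-bit data layout:
//   getTypePartition(DL, [12 x i8],         4, 4) -> [4 x i8]
//   getTypePartition(DL, { i32, i32, i32 }, 4, 8) -> { i32, i32 }
//   getTypePartition(DL, { i32, i32, i32 }, 2, 4) -> nullptr (the range
//                                                    straddles two elements)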
/// Pre-split loads and stores to simplify rewriting.
///
/// We want to break up the splittable load+store pairs as much as
/// possible. This is important to do as a preprocessing step, as once we
/// start rewriting the accesses to partitions of the alloca we lose the
/// necessary information to correctly split apart paired loads and stores
/// which both point into this alloca. The case to consider is something like
/// the following:
///
/// %a = alloca [12 x i8]
/// %gep1 = getelementptr [12 x i8]* %a, i32 0, i32 0
/// %gep2 = getelementptr [12 x i8]* %a, i32 0, i32 4
/// %gep3 = getelementptr [12 x i8]* %a, i32 0, i32 8
/// %iptr1 = bitcast i8* %gep1 to i64*
/// %iptr2 = bitcast i8* %gep2 to i64*
/// %fptr1 = bitcast i8* %gep1 to float*
/// %fptr2 = bitcast i8* %gep2 to float*
/// %fptr3 = bitcast i8* %gep3 to float*
/// store float 0.0, float* %fptr1
/// store float 1.0, float* %fptr2
/// %v = load i64* %iptr1
/// store i64 %v, i64* %iptr2
/// %f1 = load float* %fptr2
/// %f2 = load float* %fptr3
///
/// Here we want to form 3 partitions of the alloca, each 4 bytes large, and
/// promote everything so we recover the 2 SSA values that should have been
/// there all along.
///
/// \returns true if any changes are made.
bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
LLVM_DEBUG(dbgs() << "Pre-splitting loads and stores\n");
// Track the loads and stores which are candidates for pre-splitting here, in
// the order they first appear during the partition scan. These give stable
// iteration order and a basis for tracking which loads and stores we
// actually split.
SmallVector<LoadInst *, 4> Loads;
SmallVector<StoreInst *, 4> Stores;
// We need to accumulate the splits required of each load or store where we
// can find them via a direct lookup. This is important to cross-check loads
// and stores against each other. We also track the slice so that we can kill
// all the slices that end up split.
struct SplitOffsets {
Slice *S;
std::vector<uint64_t> Splits;
};
SmallDenseMap<Instruction *, SplitOffsets, 8> SplitOffsetsMap;
// Track loads out of this alloca which cannot, for any reason, be pre-split.
// This is important as we also cannot pre-split stores of those loads!
// FIXME: This is all pretty gross. It means that we can be more aggressive
// in pre-splitting when the load feeding the store happens to come from
// a separate alloca. Put another way, the effectiveness of SROA would be
// decreased by a frontend which just concatenated all of its local allocas
// into one big flat alloca. But defeating such patterns is exactly the job
// SROA is tasked with! Sadly, to avoid this discrepancy we would have to
// change store pre-splitting to actually force pre-splitting of the load
// that feeds it *and all stores*. That makes pre-splitting much harder, but
// maybe it would make it more principled?
SmallPtrSet<LoadInst *, 8> UnsplittableLoads;
LLVM_DEBUG(dbgs() << " Searching for candidate loads and stores\n");
for (auto &P : AS.partitions()) {
for (Slice &S : P) {
Instruction *I = cast<Instruction>(S.getUse()->getUser());
if (!S.isSplittable() || S.endOffset() <= P.endOffset()) {
// If this is a load we have to track that it can't participate in any
// pre-splitting. If this is a store of a load we have to track that
// that load also can't participate in any pre-splitting.
if (auto *LI = dyn_cast<LoadInst>(I))
UnsplittableLoads.insert(LI);
else if (auto *SI = dyn_cast<StoreInst>(I))
if (auto *LI = dyn_cast<LoadInst>(SI->getValueOperand()))
UnsplittableLoads.insert(LI);
continue;
}
assert(P.endOffset() > S.beginOffset() &&
"Empty or backwards partition!");
// Determine if this is a pre-splittable slice.
if (auto *LI = dyn_cast<LoadInst>(I)) {
assert(!LI->isVolatile() && "Cannot split volatile loads!");
// The load must be used exclusively to store into other pointers for
// us to be able to arbitrarily pre-split it. The stores must also be
// simple to avoid changing semantics.
auto IsLoadSimplyStored = [](LoadInst *LI) {
for (User *LU : LI->users()) {
auto *SI = dyn_cast<StoreInst>(LU);
if (!SI || !SI->isSimple())
return false;
}
return true;
};
if (!IsLoadSimplyStored(LI)) {
UnsplittableLoads.insert(LI);
continue;
}
Loads.push_back(LI);
} else if (auto *SI = dyn_cast<StoreInst>(I)) {
if (S.getUse() != &SI->getOperandUse(SI->getPointerOperandIndex()))
// Skip stores *of* pointers. FIXME: This shouldn't even be possible!
continue;
auto *StoredLoad = dyn_cast<LoadInst>(SI->getValueOperand());
if (!StoredLoad || !StoredLoad->isSimple())
continue;
assert(!SI->isVolatile() && "Cannot split volatile stores!");
Stores.push_back(SI);
} else {
// Other uses cannot be pre-split.
continue;
}
// Record the initial split.
LLVM_DEBUG(dbgs() << " Candidate: " << *I << "\n");
auto &Offsets = SplitOffsetsMap[I];
assert(Offsets.Splits.empty() &&
"Should not have splits the first time we see an instruction!");
Offsets.S = &S;
Offsets.Splits.push_back(P.endOffset() - S.beginOffset());
}
// Now scan the already split slices, and add a split for any of them which
// we're going to pre-split.
for (Slice *S : P.splitSliceTails()) {
auto SplitOffsetsMapI =
SplitOffsetsMap.find(cast<Instruction>(S->getUse()->getUser()));
if (SplitOffsetsMapI == SplitOffsetsMap.end())
continue;
auto &Offsets = SplitOffsetsMapI->second;
assert(Offsets.S == S && "Found a mismatched slice!");
assert(!Offsets.Splits.empty() &&
"Cannot have an empty set of splits on the second partition!");
assert(Offsets.Splits.back() ==
P.beginOffset() - Offsets.S->beginOffset() &&
"Previous split does not end where this one begins!");
// Record each split. The last partition's end isn't needed as the size
// of the slice dictates that.
if (S->endOffset() > P.endOffset())
Offsets.Splits.push_back(P.endOffset() - Offsets.S->beginOffset());
}
}
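// Illustrative example (not in the original source): a 12-byte load whose
// slice is [0, 12) and which crosses partitions [0, 4), [4, 8), and [8, 12)
// ends up with Offsets.Splits == {4, 8}: the first partition records the
// split at its end offset and each later partition appends its own end
// offset relative to the slice's begin; the final end (12) is implied by the
// load's size and is never recorded.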
// We may have split loads where some of their stores are split stores. For
// such loads and stores, we can only pre-split them if their splits exactly
// match relative to their starting offset. We have to verify this prior to
// any rewriting.
Stores.erase(
llvm::remove_if(Stores,
[&UnsplittableLoads, &SplitOffsetsMap](StoreInst *SI) {
// Lookup the load we are storing in our map of split
// offsets.
auto *LI = cast<LoadInst>(SI->getValueOperand());
// If it was completely unsplittable, then we're done,
// and this store can't be pre-split.
if (UnsplittableLoads.count(LI))
return true;
auto LoadOffsetsI = SplitOffsetsMap.find(LI);
if (LoadOffsetsI == SplitOffsetsMap.end())
return false; // Unrelated loads are definitely safe.
auto &LoadOffsets = LoadOffsetsI->second;
// Now lookup the store's offsets.
auto &StoreOffsets = SplitOffsetsMap[SI];
// If the relative offsets of each split in the load and
// store match exactly, then we can split them and we
// don't need to remove them here.
if (LoadOffsets.Splits == StoreOffsets.Splits)
return false;
LLVM_DEBUG(
dbgs()
<< " Mismatched splits for load and store:\n"
<< " " << *LI << "\n"
<< " " << *SI << "\n");
// We've found a store and load that we need to split
// with mismatched relative splits. Just give up on them
// and remove both instructions from our list of
// candidates.
UnsplittableLoads.insert(LI);
return true;
}),
Stores.end());
// Now we have to go *back* through all the stores, because a later store may
// have caused an earlier store's load to become unsplittable and if it is
// unsplittable for the later store, then we can't rely on it being split in
// the earlier store either.
Stores.erase(llvm::remove_if(Stores,
[&UnsplittableLoads](StoreInst *SI) {
auto *LI =
cast<LoadInst>(SI->getValueOperand());
return UnsplittableLoads.count(LI);
}),
Stores.end());
// Once we've established all the loads that can't be split for some reason,
// filter out any that made it into our list.
Loads.erase(llvm::remove_if(Loads,
[&UnsplittableLoads](LoadInst *LI) {
return UnsplittableLoads.count(LI);
}),
Loads.end());
// If no loads or stores are left, there is no pre-splitting to be done for
// this alloca.
if (Loads.empty() && Stores.empty())
return false;
// From here on, we can't fail and will be building new accesses, so rig up
// an IR builder.
IRBuilderTy IRB(&AI);
// Collect the new slices which we will merge into the alloca slices.
SmallVector<Slice, 4> NewSlices;
// Track any allocas we end up splitting loads and stores for so we iterate
// on them.
SmallPtrSet<AllocaInst *, 4> ResplitPromotableAllocas;
// At this point, we have collected all of the loads and stores we can
// pre-split, and the specific splits needed for them. We actually do the
// splitting in a specific order to handle the case where one of the loads is
// the value operand to one of the stores.
//
// First, we rewrite all of the split loads, and just accumulate each split
// load in a parallel structure. We also build the slices for them and append
// them to the alloca slices.
SmallDenseMap<LoadInst *, std::vector<LoadInst *>, 1> SplitLoadsMap;
std::vector<LoadInst *> SplitLoads;
const DataLayout &DL = AI.getModule()->getDataLayout();
for (LoadInst *LI : Loads) {
SplitLoads.clear();
IntegerType *Ty = cast<IntegerType>(LI->getType());
uint64_t LoadSize = Ty->getBitWidth() / 8;
assert(LoadSize > 0 && "Cannot have a zero-sized integer load!");
auto &Offsets = SplitOffsetsMap[LI];
assert(LoadSize == Offsets.S->endOffset() - Offsets.S->beginOffset() &&
"Slice size should always match load size exactly!");
uint64_t BaseOffset = Offsets.S->beginOffset();
assert(BaseOffset + LoadSize > BaseOffset &&
"Cannot represent alloca access size using 64-bit integers!");
Instruction *BasePtr = cast<Instruction>(LI->getPointerOperand());
IRB.SetInsertPoint(LI);
LLVM_DEBUG(dbgs() << " Splitting load: " << *LI << "\n");
uint64_t PartOffset = 0, PartSize = Offsets.Splits.front();
int Idx = 0, Size = Offsets.Splits.size();
for (;;) {
auto *PartTy = Type::getIntNTy(Ty->getContext(), PartSize * 8);
auto AS = LI->getPointerAddressSpace();
auto *PartPtrTy = PartTy->getPointerTo(AS);
LoadInst *PLoad = IRB.CreateAlignedLoad(
PartTy,
getAdjustedPtr(IRB, DL, BasePtr,
APInt(DL.getIndexSizeInBits(AS), PartOffset),
PartPtrTy, BasePtr->getName() + "."),
getAdjustedAlignment(LI, PartOffset, DL), /*IsVolatile*/ false,
LI->getName());
PLoad->copyMetadata(*LI, {LLVMContext::MD_mem_parallel_loop_access,
LLVMContext::MD_access_group});
// Append this load onto the list of split loads so we can find it later
// to rewrite the stores.
SplitLoads.push_back(PLoad);
// Now build a new slice for the alloca.
NewSlices.push_back(
Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize,
&PLoad->getOperandUse(PLoad->getPointerOperandIndex()),
/*IsSplittable*/ false));
LLVM_DEBUG(dbgs() << " new slice [" << NewSlices.back().beginOffset()
<< ", " << NewSlices.back().endOffset()
<< "): " << *PLoad << "\n");
// See if we've handled all the splits.
if (Idx >= Size)
break;
// Setup the next partition.
PartOffset = Offsets.Splits[Idx];
++Idx;
PartSize = (Idx < Size ? Offsets.Splits[Idx] : LoadSize) - PartOffset;
}
// Now that we have the split loads, do the slow walk over all uses of the
// load and rewrite them as split stores, or save the split loads to use
// below if the store is going to be split there anyway.
bool DeferredStores = false;
for (User *LU : LI->users()) {
StoreInst *SI = cast<StoreInst>(LU);
if (!Stores.empty() && SplitOffsetsMap.count(SI)) {
DeferredStores = true;
LLVM_DEBUG(dbgs() << " Deferred splitting of store: " << *SI
<< "\n");
continue;
}
Value *StoreBasePtr = SI->getPointerOperand();
IRB.SetInsertPoint(SI);
LLVM_DEBUG(dbgs() << " Splitting store of load: " << *SI << "\n");
for (int Idx = 0, Size = SplitLoads.size(); Idx < Size; ++Idx) {
LoadInst *PLoad = SplitLoads[Idx];
uint64_t PartOffset = Idx == 0 ? 0 : Offsets.Splits[Idx - 1];
auto *PartPtrTy =
PLoad->getType()->getPointerTo(SI->getPointerAddressSpace());
auto AS = SI->getPointerAddressSpace();
StoreInst *PStore = IRB.CreateAlignedStore(
PLoad,
getAdjustedPtr(IRB, DL, StoreBasePtr,
APInt(DL.getIndexSizeInBits(AS), PartOffset),
PartPtrTy, StoreBasePtr->getName() + "."),
getAdjustedAlignment(SI, PartOffset, DL), /*IsVolatile*/ false);
PStore->copyMetadata(*LI, {LLVMContext::MD_mem_parallel_loop_access,
LLVMContext::MD_access_group});
LLVM_DEBUG(dbgs() << " +" << PartOffset << ":" << *PStore << "\n");
}
// We want to immediately iterate on any allocas impacted by splitting
// this store, and we have to track any promotable alloca (indicated by
// a direct store) as needing to be resplit because it is no longer
// promotable.
if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(StoreBasePtr)) {
ResplitPromotableAllocas.insert(OtherAI);
Worklist.insert(OtherAI);
} else if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(
StoreBasePtr->stripInBoundsOffsets())) {
Worklist.insert(OtherAI);
}
// Mark the original store as dead.
DeadInsts.insert(SI);
}
// Save the split loads if there are deferred stores among the users.
if (DeferredStores)
SplitLoadsMap.insert(std::make_pair(LI, std::move(SplitLoads)));
// Mark the original load as dead and kill the original slice.
DeadInsts.insert(LI);
Offsets.S->kill();
}
// Second, we rewrite all of the split stores. At this point, we know that
// all loads from this alloca have been split already. For stores of such
// loads, we can simply look up the pre-existing split loads. For stores of
// other loads, we split those loads first and then write split stores of
// them.
for (StoreInst *SI : Stores) {
auto *LI = cast<LoadInst>(SI->getValueOperand());
IntegerType *Ty = cast<IntegerType>(LI->getType());
uint64_t StoreSize = Ty->getBitWidth() / 8;
assert(StoreSize > 0 && "Cannot have a zero-sized integer store!");
auto &Offsets = SplitOffsetsMap[SI];
assert(StoreSize == Offsets.S->endOffset() - Offsets.S->beginOffset() &&
"Slice size should always match load size exactly!");
uint64_t BaseOffset = Offsets.S->beginOffset();
assert(BaseOffset + StoreSize > BaseOffset &&
"Cannot represent alloca access size using 64-bit integers!");
Value *LoadBasePtr = LI->getPointerOperand();
Instruction *StoreBasePtr = cast<Instruction>(SI->getPointerOperand());
LLVM_DEBUG(dbgs() << " Splitting store: " << *SI << "\n");
// Check whether we have an already split load.
auto SplitLoadsMapI = SplitLoadsMap.find(LI);
std::vector<LoadInst *> *SplitLoads = nullptr;
if (SplitLoadsMapI != SplitLoadsMap.end()) {
SplitLoads = &SplitLoadsMapI->second;
assert(SplitLoads->size() == Offsets.Splits.size() + 1 &&
"Too few split loads for the number of splits in the store!");
} else {
LLVM_DEBUG(dbgs() << " of load: " << *LI << "\n");
}
uint64_t PartOffset = 0, PartSize = Offsets.Splits.front();
int Idx = 0, Size = Offsets.Splits.size();
for (;;) {
auto *PartTy = Type::getIntNTy(Ty->getContext(), PartSize * 8);
auto *LoadPartPtrTy = PartTy->getPointerTo(LI->getPointerAddressSpace());
auto *StorePartPtrTy = PartTy->getPointerTo(SI->getPointerAddressSpace());
// Either lookup a split load or create one.
LoadInst *PLoad;
if (SplitLoads) {
PLoad = (*SplitLoads)[Idx];
} else {
IRB.SetInsertPoint(LI);
auto AS = LI->getPointerAddressSpace();
PLoad = IRB.CreateAlignedLoad(
PartTy,
getAdjustedPtr(IRB, DL, LoadBasePtr,
APInt(DL.getIndexSizeInBits(AS), PartOffset),
LoadPartPtrTy, LoadBasePtr->getName() + "."),
getAdjustedAlignment(LI, PartOffset, DL), /*IsVolatile*/ false,
LI->getName());
}
// And store this partition.
IRB.SetInsertPoint(SI);
auto AS = SI->getPointerAddressSpace();
StoreInst *PStore = IRB.CreateAlignedStore(
PLoad,
getAdjustedPtr(IRB, DL, StoreBasePtr,
APInt(DL.getIndexSizeInBits(AS), PartOffset),
StorePartPtrTy, StoreBasePtr->getName() + "."),
getAdjustedAlignment(SI, PartOffset, DL), /*IsVolatile*/ false);
// Now build a new slice for the alloca.
NewSlices.push_back(
Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize,
&PStore->getOperandUse(PStore->getPointerOperandIndex()),
/*IsSplittable*/ false));
LLVM_DEBUG(dbgs() << " new slice [" << NewSlices.back().beginOffset()
<< ", " << NewSlices.back().endOffset()
<< "): " << *PStore << "\n");
if (!SplitLoads) {
LLVM_DEBUG(dbgs() << " of split load: " << *PLoad << "\n");
}
// See if we've finished all the splits.
if (Idx >= Size)
break;
// Setup the next partition.
PartOffset = Offsets.Splits[Idx];
++Idx;
PartSize = (Idx < Size ? Offsets.Splits[Idx] : StoreSize) - PartOffset;
}
// We want to immediately iterate on any allocas impacted by splitting
// this load, which is only relevant if it isn't a load of this alloca and
// thus we didn't already split the loads above. We also have to keep track
// of any promotable allocas we split loads on as they can no longer be
// promoted.
if (!SplitLoads) {
if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(LoadBasePtr)) {
assert(OtherAI != &AI && "We can't re-split our own alloca!");
ResplitPromotableAllocas.insert(OtherAI);
Worklist.insert(OtherAI);
} else if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(
LoadBasePtr->stripInBoundsOffsets())) {
assert(OtherAI != &AI && "We can't re-split our own alloca!");
Worklist.insert(OtherAI);
}
}
// Mark the original store as dead now that we've split it up and kill its
// slice. Note that we leave the original load in place unless this store
// was its only use. It may in turn be split up if it is an alloca load
// for some other alloca, but it may be a normal load. This may introduce
// redundant loads, but where those can be merged the rest of the optimizer
// should handle the merging, and this uncovers SSA splits which is more
// important. In practice, the original loads will almost always be fully
// split and removed eventually, and the splits will be merged by any
// trivial CSE, including instcombine.
if (LI->hasOneUse()) {
assert(*LI->user_begin() == SI && "Single use isn't this store!");
DeadInsts.insert(LI);
}
DeadInsts.insert(SI);
Offsets.S->kill();
}
// Remove the killed slices that have been pre-split.
AS.erase(llvm::remove_if(AS, [](const Slice &S) { return S.isDead(); }),
AS.end());
// Insert our new slices. This will sort and merge them into the sorted
// sequence.
AS.insert(NewSlices);
LLVM_DEBUG(dbgs() << " Pre-split slices:\n");
#ifndef NDEBUG
for (auto I = AS.begin(), E = AS.end(); I != E; ++I)
LLVM_DEBUG(AS.print(dbgs(), I, " "));
#endif
// Finally, don't try to promote any allocas that now require re-splitting.
// They have already been added to the worklist above.
PromotableAllocas.erase(
llvm::remove_if(
PromotableAllocas,
[&](AllocaInst *AI) { return ResplitPromotableAllocas.count(AI); }),
PromotableAllocas.end());
return true;
}
/// Rewrite an alloca partition's users.
///
/// This routine drives both of the rewriting goals of the SROA pass. It tries
/// to rewrite uses of an alloca partition to be conducive for SSA value
/// promotion. If the partition needs a new, more refined alloca, this will
/// build that new alloca, preserving as much type information as possible, and
/// rewrite the uses of the old alloca to point at the new one and have the
/// appropriate new offsets. It also evaluates how successful the rewrite was
/// at enabling promotion and if it was successful queues the alloca to be
/// promoted.
AllocaInst *SROA::rewritePartition(AllocaInst &AI, AllocaSlices &AS,
Partition &P) {
// Try to compute a friendly type for this partition of the alloca. This
// won't always succeed, in which case we fall back to a legal integer type
// or an i8 array of an appropriate size.
Type *SliceTy = nullptr;
const DataLayout &DL = AI.getModule()->getDataLayout();
if (Type *CommonUseTy = findCommonType(P.begin(), P.end(), P.endOffset()))
if (DL.getTypeAllocSize(CommonUseTy) >= P.size())
SliceTy = CommonUseTy;
if (!SliceTy)
if (Type *TypePartitionTy = getTypePartition(DL, AI.getAllocatedType(),
P.beginOffset(), P.size()))
SliceTy = TypePartitionTy;
if ((!SliceTy || (SliceTy->isArrayTy() &&
SliceTy->getArrayElementType()->isIntegerTy())) &&
DL.isLegalInteger(P.size() * 8))
SliceTy = Type::getIntNTy(*C, P.size() * 8);
if (!SliceTy)
SliceTy = ArrayType::get(Type::getInt8Ty(*C), P.size());
assert(DL.getTypeAllocSize(SliceTy) >= P.size());
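// Illustrative example (not in the original source) of the SliceTy cascade:
// for a partition [0, 8) of "%x = alloca { i64, i32 }" whose uses all load
// or store an i64, the common-use type i64 is chosen; failing that,
// getTypePartition recovers i64 from the struct layout; failing both, a
// legal integer type of the partition's width is used, and [8 x i8] is the
// last resort.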
bool IsIntegerPromotable = isIntegerWideningViable(P, SliceTy, DL);
VectorType *VecTy =
IsIntegerPromotable ? nullptr : isVectorPromotionViable(P, DL);
if (VecTy)
SliceTy = VecTy;
// Check for the case where we're going to rewrite to a new alloca of the
// exact same type as the original, and with the same access offsets. In that
// case, re-use the existing alloca, but still run through the rewriter to
// perform phi and select speculation.
// P.beginOffset() can be non-zero even with the same type in a case with
// out-of-bounds access (e.g. @PR35657 function in SROA/basictest.ll).
AllocaInst *NewAI;
if (SliceTy == AI.getAllocatedType() && P.beginOffset() == 0) {
NewAI = &AI;
// FIXME: We should be able to bail at this point with "nothing changed".
// FIXME: We might want to defer PHI speculation until after here.
// FIXME: return nullptr;
} else {
unsigned Alignment = AI.getAlignment();
if (!Alignment) {
// The minimum alignment which users can rely on when the explicit
// alignment is omitted or zero is that required by the ABI for this
// type.
Alignment = DL.getABITypeAlignment(AI.getAllocatedType());
}
Alignment = MinAlign(Alignment, P.beginOffset());
// If we will get at least this much alignment from the type alone, leave
// the alloca's alignment unconstrained.
if (Alignment <= DL.getABITypeAlignment(SliceTy))
Alignment = 0;
NewAI = new AllocaInst(
SliceTy, AI.getType()->getAddressSpace(), nullptr, Alignment,
AI.getName() + ".sroa." + Twine(P.begin() - AS.begin()), &AI);
// Copy the old AI debug location over to the new one.
NewAI->setDebugLoc(AI.getDebugLoc());
++NumNewAllocas;
}
LLVM_DEBUG(dbgs() << "Rewriting alloca partition "
<< "[" << P.beginOffset() << "," << P.endOffset()
<< ") to: " << *NewAI << "\n");
// Track the high watermark on the worklist as it is only relevant for
// promoted allocas. We will reset it to this point if the alloca is not in
// fact scheduled for promotion.
unsigned PPWOldSize = PostPromotionWorklist.size();
unsigned NumUses = 0;
SmallSetVector<PHINode *, 8> PHIUsers;
SmallSetVector<SelectInst *, 8> SelectUsers;
AllocaSliceRewriter Rewriter(DL, AS, *this, AI, *NewAI, P.beginOffset(),
P.endOffset(), IsIntegerPromotable, VecTy,
PHIUsers, SelectUsers);
bool Promotable = true;
for (Slice *S : P.splitSliceTails()) {
Promotable &= Rewriter.visit(S);
++NumUses;
}
for (Slice &S : P) {
Promotable &= Rewriter.visit(&S);
++NumUses;
}
NumAllocaPartitionUses += NumUses;
MaxUsesPerAllocaPartition.updateMax(NumUses);
// Now that we've processed all the slices in the new partition, check if any
// PHIs or Selects would block promotion.
for (PHINode *PHI : PHIUsers)
if (!isSafePHIToSpeculate(*PHI)) {
Promotable = false;
PHIUsers.clear();
SelectUsers.clear();
break;
}
for (SelectInst *Sel : SelectUsers)
if (!isSafeSelectToSpeculate(*Sel)) {
Promotable = false;
PHIUsers.clear();
SelectUsers.clear();
break;
}
if (Promotable) {
if (PHIUsers.empty() && SelectUsers.empty()) {
// Promote the alloca.
PromotableAllocas.push_back(NewAI);
} else {
// If we have either PHIs or Selects to speculate, add them to those
// worklists and re-queue the new alloca so that we promote it on the
// next iteration.
for (PHINode *PHIUser : PHIUsers)
SpeculatablePHIs.insert(PHIUser);
for (SelectInst *SelectUser : SelectUsers)
SpeculatableSelects.insert(SelectUser);
Worklist.insert(NewAI);
}
} else {
// Drop any post-promotion work items if promotion didn't happen.
while (PostPromotionWorklist.size() > PPWOldSize)
PostPromotionWorklist.pop_back();
// We couldn't promote and we didn't create a new partition; nothing
// happened.
if (NewAI == &AI)
return nullptr;
// If we can't promote the alloca, iterate on it to check for new
// refinements exposed by splitting the current alloca. Don't iterate on an
// alloca which didn't actually change and didn't get promoted.
Worklist.insert(NewAI);
}
return NewAI;
}
/// Walks the slices of an alloca and forms partitions based on them,
/// rewriting each of their uses.
bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) {
if (AS.begin() == AS.end())
return false;
unsigned NumPartitions = 0;
bool Changed = false;
const DataLayout &DL = AI.getModule()->getDataLayout();
// First try to pre-split loads and stores.
Changed |= presplitLoadsAndStores(AI, AS);
// Now that we have identified any pre-splitting opportunities,
// mark loads and stores unsplittable except for the following case.
// We leave a slice splittable if all other slices are disjoint or fully
// included in the slice, such as whole-alloca loads and stores.
// If we fail to split these during pre-splitting, we want to force them
// to be rewritten into a partition.
bool IsSorted = true;
uint64_t AllocaSize = DL.getTypeAllocSize(AI.getAllocatedType());
const uint64_t MaxBitVectorSize = 1024;
if (AllocaSize <= MaxBitVectorSize) {
// If a byte boundary is included in any load or store, a slice starting or
// ending at the boundary is not splittable.
SmallBitVector SplittableOffset(AllocaSize + 1, true);
for (Slice &S : AS)
for (unsigned O = S.beginOffset() + 1;
O < S.endOffset() && O < AllocaSize; O++)
SplittableOffset.reset(O);
for (Slice &S : AS) {
if (!S.isSplittable())
continue;
if ((S.beginOffset() > AllocaSize || SplittableOffset[S.beginOffset()]) &&
(S.endOffset() > AllocaSize || SplittableOffset[S.endOffset()]))
continue;
if (isa<LoadInst>(S.getUse()->getUser()) ||
isa<StoreInst>(S.getUse()->getUser())) {
S.makeUnsplittable();
IsSorted = false;
}
}
}
else {
// We only allow whole-alloca splittable loads and stores
// for a large alloca to avoid creating a BitVector that is too large.
for (Slice &S : AS) {
if (!S.isSplittable())
continue;
if (S.beginOffset() == 0 && S.endOffset() >= AllocaSize)
continue;
if (isa<LoadInst>(S.getUse()->getUser()) ||
isa<StoreInst>(S.getUse()->getUser())) {
S.makeUnsplittable();
IsSorted = false;
}
}
}
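// Illustrative example (not in the original source): for an 8-byte alloca
// with a whole-alloca slice [0, 8) and field slices [0, 4) and [4, 8), the
// [0, 8) slice clears interior offsets 1 through 7, leaving only 0 and 8
// set. The whole-alloca slice begins and ends on set offsets and stays
// splittable; each field slice touches the cleared offset 4, so its load or
// store is made unsplittable and will be rewritten into a single partition.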
if (!IsSorted)
llvm::sort(AS);
/// Describes the allocas introduced by rewritePartition in order to migrate
/// the debug info.
struct Fragment {
AllocaInst *Alloca;
uint64_t Offset;
uint64_t Size;
Fragment(AllocaInst *AI, uint64_t O, uint64_t S)
: Alloca(AI), Offset(O), Size(S) {}
};
SmallVector<Fragment, 4> Fragments;
// Rewrite each partition.
for (auto &P : AS.partitions()) {
if (AllocaInst *NewAI = rewritePartition(AI, AS, P)) {
Changed = true;
if (NewAI != &AI) {
uint64_t SizeOfByte = 8;
uint64_t AllocaSize = DL.getTypeSizeInBits(NewAI->getAllocatedType());
// Don't include any padding.
uint64_t Size = std::min(AllocaSize, P.size() * SizeOfByte);
Fragments.push_back(Fragment(NewAI, P.beginOffset() * SizeOfByte, Size));
}
}
++NumPartitions;
}
NumAllocaPartitions += NumPartitions;
MaxPartitionsPerAlloca.updateMax(NumPartitions);
// Migrate debug information from the old alloca to the new alloca(s)
// and the individual partitions.
TinyPtrVector<DbgVariableIntrinsic *> DbgDeclares = FindDbgAddrUses(&AI);
if (!DbgDeclares.empty()) {
auto *Var = DbgDeclares.front()->getVariable();
auto *Expr = DbgDeclares.front()->getExpression();
auto VarSize = Var->getSizeInBits();
DIBuilder DIB(*AI.getModule(), /*AllowUnresolved*/ false);
uint64_t AllocaSize = DL.getTypeSizeInBits(AI.getAllocatedType());
for (auto Fragment : Fragments) {
// Create a fragment expression describing the new partition or reuse AI's
// expression if there is only one partition.
auto *FragmentExpr = Expr;
if (Fragment.Size < AllocaSize || Expr->isFragment()) {
// If this alloca is already a scalar replacement of a larger aggregate,
// Fragment.Offset describes the offset inside the scalar.
auto ExprFragment = Expr->getFragmentInfo();
uint64_t Offset = ExprFragment ? ExprFragment->OffsetInBits : 0;
uint64_t Start = Offset + Fragment.Offset;
uint64_t Size = Fragment.Size;
if (ExprFragment) {
uint64_t AbsEnd =
ExprFragment->OffsetInBits + ExprFragment->SizeInBits;
if (Start >= AbsEnd)
// No need to describe a SROAed padding.
continue;
Size = std::min(Size, AbsEnd - Start);
}
// The new, smaller fragment is stenciled out from the old fragment.
if (auto OrigFragment = FragmentExpr->getFragmentInfo()) {
assert(Start >= OrigFragment->OffsetInBits &&
"new fragment is outside of original fragment");
Start -= OrigFragment->OffsetInBits;
}
// The alloca may be larger than the variable.
if (VarSize) {
if (Size > *VarSize)
Size = *VarSize;
if (Size == 0 || Start + Size > *VarSize)
continue;
}
// Avoid creating a fragment expression that covers the entire variable.
if (!VarSize || *VarSize != Size) {
if (auto E =
DIExpression::createFragmentExpression(Expr, Start, Size))
FragmentExpr = *E;
else
continue;
}
}
// Remove any existing intrinsics describing the same alloca.
for (DbgVariableIntrinsic *OldDII : FindDbgAddrUses(Fragment.Alloca))
OldDII->eraseFromParent();
DIB.insertDeclare(Fragment.Alloca, Var, FragmentExpr,
DbgDeclares.front()->getDebugLoc(), &AI);
}
}
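// Illustrative example (not in the original source): a 128-bit variable
// whose alloca is split into two 64-bit partitions ends up described by two
// fragment expressions along the lines of
//   call void @llvm.dbg.declare(metadata i64* %x.sroa.0, metadata !var,
//                    metadata !DIExpression(DW_OP_LLVM_fragment, 0, 64))
//   call void @llvm.dbg.declare(metadata i64* %x.sroa.1, metadata !var,
//                    metadata !DIExpression(DW_OP_LLVM_fragment, 64, 64))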
return Changed;
}
/// Clobber a use with undef, deleting the used value if it becomes dead.
void SROA::clobberUse(Use &U) {
Value *OldV = U;
// Replace the use with an undef value.
U = UndefValue::get(OldV->getType());
// Check for this making an instruction dead. We have to garbage collect
// all the dead instructions to ensure the uses of any alloca end up being
// minimal.
if (Instruction *OldI = dyn_cast<Instruction>(OldV))
if (isInstructionTriviallyDead(OldI)) {
DeadInsts.insert(OldI);
}
}
/// Analyze an alloca for SROA.
///
/// This analyzes the alloca to ensure we can reason about it, builds
/// the slices of the alloca, and then hands it off to be split and
/// rewritten as needed.
bool SROA::runOnAlloca(AllocaInst &AI) {
LLVM_DEBUG(dbgs() << "SROA alloca: " << AI << "\n");
++NumAllocasAnalyzed;
// Special case dead allocas, as they're trivial.
if (AI.use_empty()) {
AI.eraseFromParent();
return true;
}
const DataLayout &DL = AI.getModule()->getDataLayout();
// Skip alloca forms that this analysis can't handle.
if (AI.isArrayAllocation() || !AI.getAllocatedType()->isSized() ||
DL.getTypeAllocSize(AI.getAllocatedType()) == 0)
return false;
bool Changed = false;
// First, split any FCA loads and stores touching this alloca to promote
// better splitting and promotion opportunities.
AggLoadStoreRewriter AggRewriter(DL);
Changed |= AggRewriter.rewrite(AI);
// Build the slices using a recursive instruction-visiting builder.
AllocaSlices AS(DL, AI);
LLVM_DEBUG(AS.print(dbgs()));
if (AS.isEscaped())
return Changed;
// Delete all the dead users of this alloca before splitting and rewriting it.
for (Instruction *DeadUser : AS.getDeadUsers()) {
// Free up everything used by this instruction.
for (Use &DeadOp : DeadUser->operands())
clobberUse(DeadOp);
// Now replace the uses of this instruction.
DeadUser->replaceAllUsesWith(UndefValue::get(DeadUser->getType()));
// And mark it for deletion.
DeadInsts.insert(DeadUser);
Changed = true;
}
for (Use *DeadOp : AS.getDeadOperands()) {
clobberUse(*DeadOp);
Changed = true;
}
// No slices to split. Leave the dead alloca for a later pass to clean up.
if (AS.begin() == AS.end())
return Changed;
Changed |= splitAlloca(AI, AS);
LLVM_DEBUG(dbgs() << " Speculating PHIs\n");
while (!SpeculatablePHIs.empty())
speculatePHINodeLoads(*SpeculatablePHIs.pop_back_val());
LLVM_DEBUG(dbgs() << " Speculating Selects\n");
while (!SpeculatableSelects.empty())
speculateSelectInstLoads(*SpeculatableSelects.pop_back_val());
return Changed;
}
/// Delete the dead instructions accumulated in this run.
///
/// Recursively deletes the dead instructions we've accumulated. This is done
/// at the very end to maximize locality of the recursive delete and to
/// minimize the problems of invalidated instruction pointers as such pointers
/// are used heavily in the intermediate stages of the algorithm.
///
/// We also record the alloca instructions deleted here so that they aren't
/// subsequently handed to mem2reg to promote.
bool SROA::deleteDeadInstructions(
SmallPtrSetImpl<AllocaInst *> &DeletedAllocas) {
bool Changed = false;
while (!DeadInsts.empty()) {
Instruction *I = DeadInsts.pop_back_val();
LLVM_DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n");
// If the instruction is an alloca, find the possible dbg.declare connected
// to it, and remove it too. We must do this before calling RAUW or we will
// not be able to find it.
if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
DeletedAllocas.insert(AI);
for (DbgVariableIntrinsic *OldDII : FindDbgAddrUses(AI))
OldDII->eraseFromParent();
}
I->replaceAllUsesWith(UndefValue::get(I->getType()));
for (Use &Operand : I->operands())
if (Instruction *U = dyn_cast<Instruction>(Operand)) {
// Zero out the operand and see if it becomes trivially dead.
Operand = nullptr;
if (isInstructionTriviallyDead(U))
DeadInsts.insert(U);
}
++NumDeleted;
I->eraseFromParent();
Changed = true;
}
return Changed;
}
/// Promote the allocas, using the best available technique.
///
/// This attempts to promote whatever allocas have been identified as viable in
/// the PromotableAllocas list. If that list is empty, there is nothing to do.
/// This function returns whether any promotion occurred.
bool SROA::promoteAllocas(Function &F) {
if (PromotableAllocas.empty())
return false;
NumPromoted += PromotableAllocas.size();
LLVM_DEBUG(dbgs() << "Promoting allocas with mem2reg...\n");
PromoteMemToReg(PromotableAllocas, *DT, AC);
PromotableAllocas.clear();
return true;
}
PreservedAnalyses SROA::runImpl(Function &F, DominatorTree &RunDT,
AssumptionCache &RunAC) {
LLVM_DEBUG(dbgs() << "SROA function: " << F.getName() << "\n");
C = &F.getContext();
DT = &RunDT;
AC = &RunAC;
BasicBlock &EntryBB = F.getEntryBlock();
for (BasicBlock::iterator I = EntryBB.begin(), E = std::prev(EntryBB.end());
I != E; ++I) {
if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
Worklist.insert(AI);
}
bool Changed = false;
// A set of deleted alloca instruction pointers which should be removed from
// the list of promotable allocas.
SmallPtrSet<AllocaInst *, 4> DeletedAllocas;
do {
while (!Worklist.empty()) {
Changed |= runOnAlloca(*Worklist.pop_back_val());
Changed |= deleteDeadInstructions(DeletedAllocas);
// Remove the deleted allocas from various lists so that we don't try to
// continue processing them.
if (!DeletedAllocas.empty()) {
auto IsInSet = [&](AllocaInst *AI) { return DeletedAllocas.count(AI); };
Worklist.remove_if(IsInSet);
PostPromotionWorklist.remove_if(IsInSet);
PromotableAllocas.erase(llvm::remove_if(PromotableAllocas, IsInSet),
PromotableAllocas.end());
DeletedAllocas.clear();
}
}
Changed |= promoteAllocas(F);
Worklist = PostPromotionWorklist;
PostPromotionWorklist.clear();
} while (!Worklist.empty());
if (!Changed)
return PreservedAnalyses::all();
PreservedAnalyses PA;
PA.preserveSet<CFGAnalyses>();
PA.preserve<GlobalsAA>();
return PA;
}
PreservedAnalyses SROA::run(Function &F, FunctionAnalysisManager &AM) {
return runImpl(F, AM.getResult<DominatorTreeAnalysis>(F),
AM.getResult<AssumptionAnalysis>(F));
}
/// A legacy pass for the legacy pass manager that wraps the \c SROA pass.
///
/// This is in the llvm namespace purely to allow it to be a friend of the \c
/// SROA pass.
class llvm::sroa::SROALegacyPass : public FunctionPass {
/// The SROA implementation.
SROA Impl;
public:
static char ID;
SROALegacyPass() : FunctionPass(ID) {
initializeSROALegacyPassPass(*PassRegistry::getPassRegistry());
}
bool runOnFunction(Function &F) override {
if (skipFunction(F))
return false;
auto PA = Impl.runImpl(
F, getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
return !PA.areAllPreserved();
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<AssumptionCacheTracker>();
AU.addRequired<DominatorTreeWrapperPass>();
AU.addPreserved<GlobalsAAWrapperPass>();
AU.setPreservesCFG();
}
StringRef getPassName() const override { return "SROA"; }
};
char SROALegacyPass::ID = 0;
FunctionPass *llvm::createSROAPass() { return new SROALegacyPass(); }
INITIALIZE_PASS_BEGIN(SROALegacyPass, "sroa",
"Scalar Replacement Of Aggregates", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(SROALegacyPass, "sroa", "Scalar Replacement Of Aggregates",
false, false)
Index: stable/12/contrib/llvm-project/llvm/tools/llvm-objcopy/ELF/ELFObjcopy.cpp
===================================================================
--- stable/12/contrib/llvm-project/llvm/tools/llvm-objcopy/ELF/ELFObjcopy.cpp (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/tools/llvm-objcopy/ELF/ELFObjcopy.cpp (revision 356465)
@@ -1,816 +1,822 @@
//===- ELFObjcopy.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "ELFObjcopy.h"
#include "Buffer.h"
#include "CopyConfig.h"
#include "Object.h"
#include "llvm-objcopy.h"
#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Object/Binary.h"
#include "llvm/Object/ELFObjectFile.h"
#include "llvm/Object/ELFTypes.h"
#include "llvm/Object/Error.h"
#include "llvm/Option/Option.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compression.h"
#include "llvm/Support/Errc.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/Memory.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <memory>
#include <string>
#include <system_error>
#include <utility>
namespace llvm {
namespace objcopy {
namespace elf {
using namespace object;
using namespace ELF;
using SectionPred = std::function<bool(const SectionBase &Sec)>;
static bool isDebugSection(const SectionBase &Sec) {
return StringRef(Sec.Name).startswith(".debug") ||
StringRef(Sec.Name).startswith(".zdebug") || Sec.Name == ".gdb_index";
}
static bool isDWOSection(const SectionBase &Sec) {
return StringRef(Sec.Name).endswith(".dwo");
}
static bool onlyKeepDWOPred(const Object &Obj, const SectionBase &Sec) {
// We can't remove the section header string table.
if (&Sec == Obj.SectionNames)
return false;
// Short of keeping the string table we want to keep everything that is a DWO
// section and remove everything else.
return !isDWOSection(Sec);
}
uint64_t getNewShfFlags(SectionFlag AllFlags) {
uint64_t NewFlags = 0;
if (AllFlags & SectionFlag::SecAlloc)
NewFlags |= ELF::SHF_ALLOC;
if (!(AllFlags & SectionFlag::SecReadonly))
NewFlags |= ELF::SHF_WRITE;
if (AllFlags & SectionFlag::SecCode)
NewFlags |= ELF::SHF_EXECINSTR;
if (AllFlags & SectionFlag::SecMerge)
NewFlags |= ELF::SHF_MERGE;
if (AllFlags & SectionFlag::SecStrings)
NewFlags |= ELF::SHF_STRINGS;
return NewFlags;
}
static uint64_t getSectionFlagsPreserveMask(uint64_t OldFlags,
uint64_t NewFlags) {
// Preserve some flags which should not be dropped when setting flags.
// Also, preserve anything OS/processor dependent.
const uint64_t PreserveMask = ELF::SHF_COMPRESSED | ELF::SHF_EXCLUDE |
ELF::SHF_GROUP | ELF::SHF_LINK_ORDER |
ELF::SHF_MASKOS | ELF::SHF_MASKPROC |
ELF::SHF_TLS | ELF::SHF_INFO_LINK;
return (OldFlags & PreserveMask) | (NewFlags & ~PreserveMask);
}
static void setSectionFlagsAndType(SectionBase &Sec, SectionFlag Flags) {
Sec.Flags = getSectionFlagsPreserveMask(Sec.Flags, getNewShfFlags(Flags));
// In GNU objcopy, certain flags promote SHT_NOBITS to SHT_PROGBITS. This rule
// may promote more non-ALLOC sections than GNU objcopy, but it is fine as
// non-ALLOC SHT_NOBITS sections do not make much sense.
if (Sec.Type == SHT_NOBITS &&
(!(Sec.Flags & ELF::SHF_ALLOC) ||
Flags & (SectionFlag::SecContents | SectionFlag::SecLoad)))
Sec.Type = SHT_PROGBITS;
}
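// Illustrative example (not in the original source): for
// "llvm-objcopy --set-section-flags .data=alloc,readonly" the new flags
// include SHF_ALLOC but omit SHF_WRITE (readonly suppresses the default
// writable bit), while bits in the preserve mask, such as SHF_TLS or
// SHF_COMPRESSED, carry over from the section's old flags.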
static ElfType getOutputElfType(const Binary &Bin) {
// Infer output ELF type from the input ELF object
if (isa<ELFObjectFile<ELF32LE>>(Bin))
return ELFT_ELF32LE;
if (isa<ELFObjectFile<ELF64LE>>(Bin))
return ELFT_ELF64LE;
if (isa<ELFObjectFile<ELF32BE>>(Bin))
return ELFT_ELF32BE;
if (isa<ELFObjectFile<ELF64BE>>(Bin))
return ELFT_ELF64BE;
llvm_unreachable("Invalid ELFType");
}
static ElfType getOutputElfType(const MachineInfo &MI) {
// Infer output ELF type from the binary arch specified
if (MI.Is64Bit)
return MI.IsLittleEndian ? ELFT_ELF64LE : ELFT_ELF64BE;
else
return MI.IsLittleEndian ? ELFT_ELF32LE : ELFT_ELF32BE;
}
static std::unique_ptr<Writer> createELFWriter(const CopyConfig &Config,
Object &Obj, Buffer &Buf,
ElfType OutputElfType) {
// Depending on the initial ELFT and OutputFormat we need a different Writer.
switch (OutputElfType) {
case ELFT_ELF32LE:
return llvm::make_unique<ELFWriter<ELF32LE>>(Obj, Buf,
!Config.StripSections);
case ELFT_ELF64LE:
return llvm::make_unique<ELFWriter<ELF64LE>>(Obj, Buf,
!Config.StripSections);
case ELFT_ELF32BE:
return llvm::make_unique<ELFWriter<ELF32BE>>(Obj, Buf,
!Config.StripSections);
case ELFT_ELF64BE:
return llvm::make_unique<ELFWriter<ELF64BE>>(Obj, Buf,
!Config.StripSections);
}
llvm_unreachable("Invalid output format");
}
static std::unique_ptr<Writer> createWriter(const CopyConfig &Config,
Object &Obj, Buffer &Buf,
ElfType OutputElfType) {
switch (Config.OutputFormat) {
case FileFormat::Binary:
return llvm::make_unique<BinaryWriter>(Obj, Buf);
case FileFormat::IHex:
return llvm::make_unique<IHexWriter>(Obj, Buf);
default:
return createELFWriter(Config, Obj, Buf, OutputElfType);
}
}
template <class ELFT>
static Expected<ArrayRef<uint8_t>>
findBuildID(const CopyConfig &Config, const object::ELFFile<ELFT> &In) {
auto PhdrsOrErr = In.program_headers();
if (auto Err = PhdrsOrErr.takeError())
return createFileError(Config.InputFilename, std::move(Err));
for (const auto &Phdr : *PhdrsOrErr) {
if (Phdr.p_type != PT_NOTE)
continue;
Error Err = Error::success();
for (const auto &Note : In.notes(Phdr, Err))
if (Note.getType() == NT_GNU_BUILD_ID && Note.getName() == ELF_NOTE_GNU)
return Note.getDesc();
if (Err)
return createFileError(Config.InputFilename, std::move(Err));
}
return createFileError(
Config.InputFilename,
createStringError(llvm::errc::invalid_argument,
"could not find build ID"));
}
static Expected<ArrayRef<uint8_t>>
findBuildID(const CopyConfig &Config, const object::ELFObjectFileBase &In) {
if (auto *O = dyn_cast<ELFObjectFile<ELF32LE>>(&In))
return findBuildID(Config, *O->getELFFile());
else if (auto *O = dyn_cast<ELFObjectFile<ELF64LE>>(&In))
return findBuildID(Config, *O->getELFFile());
else if (auto *O = dyn_cast<ELFObjectFile<ELF32BE>>(&In))
return findBuildID(Config, *O->getELFFile());
else if (auto *O = dyn_cast<ELFObjectFile<ELF64BE>>(&In))
return findBuildID(Config, *O->getELFFile());
llvm_unreachable("Bad file format");
}
template <class... Ts>
static Error makeStringError(std::error_code EC, const Twine &Msg, Ts &&... Args) {
std::string FullMsg = (EC.message() + ": " + Msg).str();
return createStringError(EC, FullMsg.c_str(), std::forward<Ts>(Args)...);
}
#define MODEL_8 "%%%%%%%%"
#define MODEL_16 MODEL_8 MODEL_8
#define MODEL_32 (MODEL_16 MODEL_16)
static Error linkToBuildIdDir(const CopyConfig &Config, StringRef ToLink,
StringRef Suffix,
ArrayRef<uint8_t> BuildIdBytes) {
SmallString<128> Path = Config.BuildIdLinkDir;
sys::path::append(Path, llvm::toHex(BuildIdBytes[0], /*LowerCase*/ true));
if (auto EC = sys::fs::create_directories(Path))
return createFileError(
Path.str(),
makeStringError(EC, "cannot create build ID link directory"));
sys::path::append(Path,
llvm::toHex(BuildIdBytes.slice(1), /*LowerCase*/ true));
Path += Suffix;
SmallString<128> TmpPath;
// create_hard_link races so we need to link to a temporary path but
// we want to make sure that we choose a filename that does not exist.
// By using 32 model characters we get 128 bits of entropy. It is
// unlikely that this string has ever existed before, much less exists
// on this disk or in the current working directory.
// Additionally, we prepend the original Path for debugging, but also
// because it ensures that we're linking within a directory on the same
// partition on the same device, which is critical. It has the added
// win of yet further decreasing the odds of a conflict.
sys::fs::createUniquePath(Twine(Path) + "-" + MODEL_32 + ".tmp", TmpPath,
/*MakeAbsolute*/ false);
if (auto EC = sys::fs::create_hard_link(ToLink, TmpPath)) {
Path.push_back('\0');
return makeStringError(EC, "cannot link '%s' to '%s'", ToLink.data(),
Path.data());
}
// We then atomically rename the link into place which will just move the
// link. If rename fails, something is more seriously wrong, so just return
// an error.
if (auto EC = sys::fs::rename(TmpPath, Path)) {
Path.push_back('\0');
return makeStringError(EC, "cannot link '%s' to '%s'", ToLink.data(),
Path.data());
}
// If `Path` was already a hard link to the same underlying file, then the
// temp file will be left behind, so we need to remove it. Remove will not
// cause an error by default if the file is already gone, so just blindly
// remove it rather than checking.
if (auto EC = sys::fs::remove(TmpPath)) {
TmpPath.push_back('\0');
return makeStringError(EC, "could not remove '%s'", TmpPath.data());
}
return Error::success();
}
static Error splitDWOToFile(const CopyConfig &Config, const Reader &Reader,
StringRef File, ElfType OutputElfType) {
auto DWOFile = Reader.create();
auto OnlyKeepDWOPred = [&DWOFile](const SectionBase &Sec) {
return onlyKeepDWOPred(*DWOFile, Sec);
};
if (Error E = DWOFile->removeSections(Config.AllowBrokenLinks,
OnlyKeepDWOPred))
return E;
if (Config.OutputArch) {
DWOFile->Machine = Config.OutputArch.getValue().EMachine;
DWOFile->OSABI = Config.OutputArch.getValue().OSABI;
}
FileBuffer FB(File);
auto Writer = createWriter(Config, *DWOFile, FB, OutputElfType);
if (Error E = Writer->finalize())
return E;
return Writer->write();
}
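// Implements --dump-section=<section>=<file>, e.g.
// --dump-section=.text=text.bin: the section's original contents are
// written verbatim to <file>.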
static Error dumpSectionToFile(StringRef SecName, StringRef Filename,
Object &Obj) {
for (auto &Sec : Obj.sections()) {
if (Sec.Name == SecName) {
if (Sec.OriginalData.empty())
return createStringError(object_error::parse_failed,
"cannot dump section '%s': it has no contents",
SecName.str().c_str());
Expected<std::unique_ptr<FileOutputBuffer>> BufferOrErr =
FileOutputBuffer::create(Filename, Sec.OriginalData.size());
if (!BufferOrErr)
return BufferOrErr.takeError();
std::unique_ptr<FileOutputBuffer> Buf = std::move(*BufferOrErr);
std::copy(Sec.OriginalData.begin(), Sec.OriginalData.end(),
Buf->getBufferStart());
if (Error E = Buf->commit())
return E;
return Error::success();
}
}
return createStringError(object_error::parse_failed, "section '%s' not found",
SecName.str().c_str());
}
static bool isCompressable(const SectionBase &Section) {
return !(Section.Flags & ELF::SHF_COMPRESSED) &&
StringRef(Section.Name).startswith(".debug");
}
static void replaceDebugSections(
Object &Obj, SectionPred &RemovePred,
function_ref<bool(const SectionBase &)> shouldReplace,
function_ref<SectionBase *(const SectionBase *)> addSection) {
// Build a list of the debug sections we are going to replace.
// We can't call `addSection` while iterating over sections,
// because it would mutate the sections array.
SmallVector<SectionBase *, 13> ToReplace;
for (auto &Sec : Obj.sections())
if (shouldReplace(Sec))
ToReplace.push_back(&Sec);
// Build a mapping from original section to a new one.
DenseMap<SectionBase *, SectionBase *> FromTo;
for (SectionBase *S : ToReplace)
FromTo[S] = addSection(S);
// Now we want to update the target sections of the relocation
// sections. We also update the relocations themselves so that
// the symbol references remain valid.
for (auto &Sec : Obj.sections())
Sec.replaceSectionReferences(FromTo);
RemovePred = [shouldReplace, RemovePred](const SectionBase &Sec) {
return shouldReplace(Sec) || RemovePred(Sec);
};
}
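// A symbol is "unneeded" (for --strip-unneeded and friends) when nothing
// references it, it is local or undefined, and it is not a section symbol.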
static bool isUnneededSymbol(const Symbol &Sym) {
return !Sym.Referenced &&
(Sym.Binding == STB_LOCAL || Sym.getShndx() == SHN_UNDEF) &&
Sym.Type != STT_SECTION;
}
static Error updateAndRemoveSymbols(const CopyConfig &Config, Object &Obj) {
// TODO: update or remove symbols only if there is an option that affects
// them.
if (!Obj.SymbolTable)
return Error::success();
Obj.SymbolTable->updateSymbols([&](Symbol &Sym) {
// Common and undefined symbols don't make sense as local symbols, and can
// even cause crashes if we localize them, so skip them.
if (!Sym.isCommon() && Sym.getShndx() != SHN_UNDEF &&
((Config.LocalizeHidden &&
(Sym.Visibility == STV_HIDDEN || Sym.Visibility == STV_INTERNAL)) ||
is_contained(Config.SymbolsToLocalize, Sym.Name)))
Sym.Binding = STB_LOCAL;
// Note: these two globalize flags have very similar names but different
// meanings:
//
// --globalize-symbol: promote a symbol to global
// --keep-global-symbol: all symbols except for these should be made local
//
// If --globalize-symbol is specified for a given symbol, it will be
// global in the output file even if it is not included via
// --keep-global-symbol. Because of that, make sure to check
// --globalize-symbol second.
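// For example, with --keep-global-symbol=foo --globalize-symbol=bar, every
// defined symbol except foo is first made local, and bar is then promoted
// back to global by the check below.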
if (!Config.SymbolsToKeepGlobal.empty() &&
!is_contained(Config.SymbolsToKeepGlobal, Sym.Name) &&
Sym.getShndx() != SHN_UNDEF)
Sym.Binding = STB_LOCAL;
if (is_contained(Config.SymbolsToGlobalize, Sym.Name) &&
Sym.getShndx() != SHN_UNDEF)
Sym.Binding = STB_GLOBAL;
if (is_contained(Config.SymbolsToWeaken, Sym.Name) &&
Sym.Binding == STB_GLOBAL)
Sym.Binding = STB_WEAK;
if (Config.Weaken && Sym.Binding == STB_GLOBAL &&
Sym.getShndx() != SHN_UNDEF)
Sym.Binding = STB_WEAK;
const auto I = Config.SymbolsToRename.find(Sym.Name);
if (I != Config.SymbolsToRename.end())
Sym.Name = I->getValue();
if (!Config.SymbolsPrefix.empty() && Sym.Type != STT_SECTION)
Sym.Name = (Config.SymbolsPrefix + Sym.Name).str();
});
// The purpose of this loop is to mark symbols referenced by sections
// (like GroupSection or RelocationSection). This way, we know which
// symbols are still 'needed' and which are not.
if (Config.StripUnneeded || !Config.UnneededSymbolsToRemove.empty() ||
!Config.OnlySection.empty()) {
for (auto &Section : Obj.sections())
Section.markSymbols();
}
auto RemoveSymbolsPred = [&](const Symbol &Sym) {
if (is_contained(Config.SymbolsToKeep, Sym.Name) ||
(Config.KeepFileSymbols && Sym.Type == STT_FILE))
return false;
if ((Config.DiscardMode == DiscardType::All ||
(Config.DiscardMode == DiscardType::Locals &&
StringRef(Sym.Name).startswith(".L"))) &&
Sym.Binding == STB_LOCAL && Sym.getShndx() != SHN_UNDEF &&
Sym.Type != STT_FILE && Sym.Type != STT_SECTION)
return true;
if (Config.StripAll || Config.StripAllGNU)
return true;
if (is_contained(Config.SymbolsToRemove, Sym.Name))
return true;
if ((Config.StripUnneeded ||
is_contained(Config.UnneededSymbolsToRemove, Sym.Name)) &&
isUnneededSymbol(Sym))
return true;
// We want to remove undefined symbols if all references have been stripped.
if (!Config.OnlySection.empty() && !Sym.Referenced &&
Sym.getShndx() == SHN_UNDEF)
return true;
return false;
};
return Obj.removeSymbols(RemoveSymbolsPred);
}
static Error replaceAndRemoveSections(const CopyConfig &Config, Object &Obj) {
SectionPred RemovePred = [](const SectionBase &) { return false; };
// Removes:
if (!Config.ToRemove.empty()) {
RemovePred = [&Config](const SectionBase &Sec) {
return is_contained(Config.ToRemove, Sec.Name);
};
}
if (Config.StripDWO || !Config.SplitDWO.empty())
RemovePred = [RemovePred](const SectionBase &Sec) {
return isDWOSection(Sec) || RemovePred(Sec);
};
if (Config.ExtractDWO)
RemovePred = [RemovePred, &Obj](const SectionBase &Sec) {
return onlyKeepDWOPred(Obj, Sec) || RemovePred(Sec);
};
if (Config.StripAllGNU)
RemovePred = [RemovePred, &Obj](const SectionBase &Sec) {
if (RemovePred(Sec))
return true;
if ((Sec.Flags & SHF_ALLOC) != 0)
return false;
if (&Sec == Obj.SectionNames)
return false;
switch (Sec.Type) {
case SHT_SYMTAB:
case SHT_REL:
case SHT_RELA:
case SHT_STRTAB:
return true;
}
return isDebugSection(Sec);
};
if (Config.StripSections) {
RemovePred = [RemovePred](const SectionBase &Sec) {
return RemovePred(Sec) || Sec.ParentSegment == nullptr;
};
}
if (Config.StripDebug) {
RemovePred = [RemovePred](const SectionBase &Sec) {
return RemovePred(Sec) || isDebugSection(Sec);
};
}
if (Config.StripNonAlloc)
RemovePred = [RemovePred, &Obj](const SectionBase &Sec) {
if (RemovePred(Sec))
return true;
if (&Sec == Obj.SectionNames)
return false;
return (Sec.Flags & SHF_ALLOC) == 0 && Sec.ParentSegment == nullptr;
};
if (Config.StripAll)
RemovePred = [RemovePred, &Obj](const SectionBase &Sec) {
if (RemovePred(Sec))
return true;
if (&Sec == Obj.SectionNames)
return false;
if (StringRef(Sec.Name).startswith(".gnu.warning"))
return false;
+ // We keep the .ARM.attributes section to maintain compatibility
+ // with Debian-derived distributions. This is a bug in their
+ // patchset, as documented here:
+ // https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=943798
+ if (Sec.Type == SHT_ARM_ATTRIBUTES)
+ return false;
if (Sec.ParentSegment != nullptr)
return false;
return (Sec.Flags & SHF_ALLOC) == 0;
};
if (Config.ExtractPartition || Config.ExtractMainPartition) {
RemovePred = [RemovePred](const SectionBase &Sec) {
if (RemovePred(Sec))
return true;
if (Sec.Type == SHT_LLVM_PART_EHDR || Sec.Type == SHT_LLVM_PART_PHDR)
return true;
return (Sec.Flags & SHF_ALLOC) != 0 && !Sec.ParentSegment;
};
}
// Explicit copies:
if (!Config.OnlySection.empty()) {
RemovePred = [&Config, RemovePred, &Obj](const SectionBase &Sec) {
// Explicitly keep these sections regardless of previous removes.
if (is_contained(Config.OnlySection, Sec.Name))
return false;
// Allow all implicit removes.
if (RemovePred(Sec))
return true;
// Keep special sections.
if (Obj.SectionNames == &Sec)
return false;
if (Obj.SymbolTable == &Sec ||
(Obj.SymbolTable && Obj.SymbolTable->getStrTab() == &Sec))
return false;
// Remove everything else.
return true;
};
}
if (!Config.KeepSection.empty()) {
RemovePred = [&Config, RemovePred](const SectionBase &Sec) {
// Explicitly keep these sections regardless of previous removes.
if (is_contained(Config.KeepSection, Sec.Name))
return false;
// Otherwise defer to RemovePred.
return RemovePred(Sec);
};
}
// This has to be the last predicate assignment.
// If the option --keep-symbol has been specified
// and at least one of those symbols is present
// (equivalently, the updated symbol table is not empty),
// the symbol table and the string table should not be removed.
if ((!Config.SymbolsToKeep.empty() || Config.KeepFileSymbols) &&
Obj.SymbolTable && !Obj.SymbolTable->empty()) {
RemovePred = [&Obj, RemovePred](const SectionBase &Sec) {
if (&Sec == Obj.SymbolTable || &Sec == Obj.SymbolTable->getStrTab())
return false;
return RemovePred(Sec);
};
}
if (Config.CompressionType != DebugCompressionType::None)
replaceDebugSections(Obj, RemovePred, isCompressable,
[&Config, &Obj](const SectionBase *S) {
return &Obj.addSection<CompressedSection>(
*S, Config.CompressionType);
});
else if (Config.DecompressDebugSections)
replaceDebugSections(
Obj, RemovePred,
[](const SectionBase &S) { return isa<CompressedSection>(&S); },
[&Obj](const SectionBase *S) {
auto CS = cast<CompressedSection>(S);
return &Obj.addSection<DecompressedSection>(*CS);
});
return Obj.removeSections(Config.AllowBrokenLinks, RemovePred);
}
// This function handles the high-level operations of GNU objcopy, including
// handling command line options. It is important to outline certain properties
// we expect to hold for the command line operations. Any operation that "keeps"
// should keep regardless of a remove. Additionally, any removal should respect
// any previous removals. Lastly, whether or not something is removed shouldn't
// depend (a) on the order in which the options occur or (b) on some opaque
// priority system. The only priority is that keeps/copies overrule removes.
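// For instance, --only-section=.text --remove-section=.text keeps .text:
// the explicit keep in the --only-section predicate is checked before any
// of the accumulated removes.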
static Error handleArgs(const CopyConfig &Config, Object &Obj,
const Reader &Reader, ElfType OutputElfType) {
if (!Config.SplitDWO.empty())
if (Error E =
splitDWOToFile(Config, Reader, Config.SplitDWO, OutputElfType))
return E;
if (Config.OutputArch) {
Obj.Machine = Config.OutputArch.getValue().EMachine;
Obj.OSABI = Config.OutputArch.getValue().OSABI;
}
// It is important to remove the sections first. For example, we want to
// remove the relocation sections before removing the symbols. That allows
// us to avoid reporting inappropriate errors about removing symbols named
// in relocations.
if (Error E = replaceAndRemoveSections(Config, Obj))
return E;
if (Error E = updateAndRemoveSymbols(Config, Obj))
return E;
if (!Config.SectionsToRename.empty() || !Config.AllocSectionsPrefix.empty()) {
DenseSet<SectionBase *> PrefixedSections;
for (auto &Sec : Obj.sections()) {
const auto Iter = Config.SectionsToRename.find(Sec.Name);
if (Iter != Config.SectionsToRename.end()) {
const SectionRename &SR = Iter->second;
Sec.Name = SR.NewName;
if (SR.NewFlags.hasValue())
setSectionFlagsAndType(Sec, SR.NewFlags.getValue());
}
// Add a prefix to allocated sections and their relocation sections. This
// should be done after renaming the section via Config.SectionsToRename, to
// imitate the GNU objcopy behavior.
if (!Config.AllocSectionsPrefix.empty()) {
if (Sec.Flags & SHF_ALLOC) {
Sec.Name = (Config.AllocSectionsPrefix + Sec.Name).str();
PrefixedSections.insert(&Sec);
// Rename relocation sections associated with the allocated sections.
// For example, if we rename .text to .prefix.text, we also rename
// .rel.text to .rel.prefix.text.
//
// Dynamic relocation sections (SHT_REL[A] with SHF_ALLOC) are handled
// above, e.g., .rela.plt is renamed to .prefix.rela.plt, not
// .rela.prefix.plt since GNU objcopy does so.
} else if (auto *RelocSec = dyn_cast<RelocationSectionBase>(&Sec)) {
auto *TargetSec = RelocSec->getSection();
if (TargetSec && (TargetSec->Flags & SHF_ALLOC)) {
StringRef prefix;
switch (Sec.Type) {
case SHT_REL:
prefix = ".rel";
break;
case SHT_RELA:
prefix = ".rela";
break;
default:
continue;
}
// If the relocation section comes *after* the target section, we
// don't add Config.AllocSectionsPrefix because we've already added
// the prefix to TargetSec->Name. Otherwise, if the relocation
// section comes *before* the target section, we add the prefix.
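// For example, with --rename-section=.foo=.bar --prefix-alloc-sections=.p,
// a .rel.foo for an allocated .foo that is visited before .foo itself
// becomes .rel.p.bar.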
if (PrefixedSections.count(TargetSec)) {
Sec.Name = (prefix + TargetSec->Name).str();
} else {
const auto Iter = Config.SectionsToRename.find(TargetSec->Name);
if (Iter != Config.SectionsToRename.end()) {
// Both `--rename-section` and `--prefix-alloc-sections` are
// given but the target section is not yet renamed.
Sec.Name =
(prefix + Config.AllocSectionsPrefix + Iter->second.NewName)
.str();
} else {
Sec.Name =
(prefix + Config.AllocSectionsPrefix + TargetSec->Name)
.str();
}
}
}
}
}
}
}
if (!Config.SetSectionFlags.empty()) {
for (auto &Sec : Obj.sections()) {
const auto Iter = Config.SetSectionFlags.find(Sec.Name);
if (Iter != Config.SetSectionFlags.end()) {
const SectionFlagsUpdate &SFU = Iter->second;
setSectionFlagsAndType(Sec, SFU.NewFlags);
}
}
}
for (const auto &Flag : Config.AddSection) {
std::pair<StringRef, StringRef> SecPair = Flag.split("=");
StringRef SecName = SecPair.first;
StringRef File = SecPair.second;
ErrorOr<std::unique_ptr<MemoryBuffer>> BufOrErr =
MemoryBuffer::getFile(File);
if (!BufOrErr)
return createFileError(File, errorCodeToError(BufOrErr.getError()));
std::unique_ptr<MemoryBuffer> Buf = std::move(*BufOrErr);
ArrayRef<uint8_t> Data(
reinterpret_cast<const uint8_t *>(Buf->getBufferStart()),
Buf->getBufferSize());
OwnedDataSection &NewSection =
Obj.addSection<OwnedDataSection>(SecName, Data);
if (SecName.startswith(".note") && SecName != ".note.GNU-stack")
NewSection.Type = SHT_NOTE;
}
for (const auto &Flag : Config.DumpSection) {
std::pair<StringRef, StringRef> SecPair = Flag.split("=");
StringRef SecName = SecPair.first;
StringRef File = SecPair.second;
if (Error E = dumpSectionToFile(SecName, File, Obj))
return E;
}
if (!Config.AddGnuDebugLink.empty())
Obj.addSection<GnuDebugLinkSection>(Config.AddGnuDebugLink,
Config.GnuDebugLinkCRC32);
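// For --add-symbol: if the named section exists, the new symbol's value is
// the section address plus the given value; otherwise it is created as an
// absolute (SHN_ABS) symbol.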
for (const NewSymbolInfo &SI : Config.SymbolsToAdd) {
SectionBase *Sec = Obj.findSection(SI.SectionName);
uint64_t Value = Sec ? Sec->Addr + SI.Value : SI.Value;
Obj.SymbolTable->addSymbol(
SI.SymbolName, SI.Bind, SI.Type, Sec, Value, SI.Visibility,
Sec ? (uint16_t)SYMBOL_SIMPLE_INDEX : (uint16_t)SHN_ABS, 0);
}
if (Config.EntryExpr)
Obj.Entry = Config.EntryExpr(Obj.Entry);
return Error::success();
}
static Error writeOutput(const CopyConfig &Config, Object &Obj, Buffer &Out,
ElfType OutputElfType) {
std::unique_ptr<Writer> Writer =
createWriter(Config, Obj, Out, OutputElfType);
if (Error E = Writer->finalize())
return E;
return Writer->write();
}
Error executeObjcopyOnIHex(const CopyConfig &Config, MemoryBuffer &In,
Buffer &Out) {
IHexReader Reader(&In);
std::unique_ptr<Object> Obj = Reader.create();
const ElfType OutputElfType =
getOutputElfType(Config.OutputArch.getValueOr(Config.BinaryArch));
if (Error E = handleArgs(Config, *Obj, Reader, OutputElfType))
return E;
return writeOutput(Config, *Obj, Out, OutputElfType);
}
Error executeObjcopyOnRawBinary(const CopyConfig &Config, MemoryBuffer &In,
Buffer &Out) {
BinaryReader Reader(Config.BinaryArch, &In);
std::unique_ptr<Object> Obj = Reader.create();
// Prefer OutputArch (-O<format>) if set, otherwise fallback to BinaryArch
// (-B<arch>).
const ElfType OutputElfType =
getOutputElfType(Config.OutputArch.getValueOr(Config.BinaryArch));
if (Error E = handleArgs(Config, *Obj, Reader, OutputElfType))
return E;
return writeOutput(Config, *Obj, Out, OutputElfType);
}
Error executeObjcopyOnBinary(const CopyConfig &Config,
object::ELFObjectFileBase &In, Buffer &Out) {
ELFReader Reader(&In, Config.ExtractPartition);
std::unique_ptr<Object> Obj = Reader.create();
// Prefer OutputArch (-O<format>) if set, otherwise infer it from the input.
const ElfType OutputElfType =
Config.OutputArch ? getOutputElfType(Config.OutputArch.getValue())
: getOutputElfType(In);
ArrayRef<uint8_t> BuildIdBytes;
if (!Config.BuildIdLinkDir.empty()) {
auto BuildIdBytesOrErr = findBuildID(Config, In);
if (auto E = BuildIdBytesOrErr.takeError())
return E;
BuildIdBytes = *BuildIdBytesOrErr;
if (BuildIdBytes.size() < 2)
return createFileError(
Config.InputFilename,
createStringError(object_error::parse_failed,
"build ID is smaller than two bytes"));
}
if (!Config.BuildIdLinkDir.empty() && Config.BuildIdLinkInput)
if (Error E =
linkToBuildIdDir(Config, Config.InputFilename,
Config.BuildIdLinkInput.getValue(), BuildIdBytes))
return E;
if (Error E = handleArgs(Config, *Obj, Reader, OutputElfType))
return createFileError(Config.InputFilename, std::move(E));
if (Error E = writeOutput(Config, *Obj, Out, OutputElfType))
return createFileError(Config.InputFilename, std::move(E));
if (!Config.BuildIdLinkDir.empty() && Config.BuildIdLinkOutput)
if (Error E =
linkToBuildIdDir(Config, Config.OutputFilename,
Config.BuildIdLinkOutput.getValue(), BuildIdBytes))
return createFileError(Config.OutputFilename, std::move(E));
return Error::success();
}
} // end namespace elf
} // end namespace objcopy
} // end namespace llvm
Index: stable/12/contrib/llvm-project/llvm/tools/llvm-objcopy/ObjcopyOpts.td
===================================================================
--- stable/12/contrib/llvm-project/llvm/tools/llvm-objcopy/ObjcopyOpts.td (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/tools/llvm-objcopy/ObjcopyOpts.td (revision 356465)
@@ -1,288 +1,289 @@
include "llvm/Option/OptParser.td"
multiclass Eq<string name, string help> {
def NAME : Separate<["--"], name>;
def NAME #_eq : Joined<["--"], name #"=">,
Alias<!cast<Separate>(NAME)>,
HelpText<help>;
}
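// Eq<"name", help> defines both a separate form (--name <value>) and a
// joined form (--name=<value>); the joined form is an alias of the separate
// one and carries the help text.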
def help : Flag<["--"], "help">;
def h : Flag<["-"], "h">, Alias<help>;
def allow_broken_links
: Flag<["--"], "allow-broken-links">,
HelpText<"Allow llvm-objcopy to remove sections even if it would leave "
"invalid section references. The appropriate sh_link fields "
"will be set to zero.">;
defm binary_architecture
: Eq<"binary-architecture", "Used when transforming an architecture-less "
"format (such as binary) to another format">;
def B : JoinedOrSeparate<["-"], "B">, Alias<binary_architecture>;
defm target : Eq<"target", "Format of the input and output file">,
Values<"binary">;
def F : JoinedOrSeparate<["-"], "F">, Alias<target>;
defm input_target : Eq<"input-target", "Format of the input file">,
Values<"binary">;
def I : JoinedOrSeparate<["-"], "I">, Alias<input_target>;
defm output_target : Eq<"output-target", "Format of the output file">,
Values<"binary">;
def O : JoinedOrSeparate<["-"], "O">, Alias<output_target>;
def compress_debug_sections : Flag<["--"], "compress-debug-sections">;
def compress_debug_sections_eq
: Joined<["--"], "compress-debug-sections=">,
MetaVarName<"[ zlib | zlib-gnu ]">,
HelpText<"Compress DWARF debug sections using specified style. Supported "
"styles: 'zlib-gnu' and 'zlib'">;
def decompress_debug_sections : Flag<["--"], "decompress-debug-sections">,
HelpText<"Decompress DWARF debug sections.">;
defm split_dwo
: Eq<"split-dwo", "Equivalent to extract-dwo on the input file to "
"<dwo-file>, then strip-dwo on the input file">,
MetaVarName<"dwo-file">;
def enable_deterministic_archives
: Flag<["--"], "enable-deterministic-archives">,
HelpText<"Enable deterministic mode when copying archives (use zero for "
"UIDs, GIDs, and timestamps).">;
def D : Flag<["-"], "D">,
Alias<enable_deterministic_archives>,
HelpText<"Alias for --enable-deterministic-archives">;
def disable_deterministic_archives
: Flag<["--"], "disable-deterministic-archives">,
HelpText<"Disable deterministic mode when copying archives (use real "
"values for UIDs, GIDs, and timestamps).">;
def U : Flag<["-"], "U">,
Alias<disable_deterministic_archives>,
HelpText<"Alias for --disable-deterministic-archives">;
def preserve_dates : Flag<["--"], "preserve-dates">,
HelpText<"Preserve access and modification timestamps">;
def p : Flag<["-"], "p">, Alias<preserve_dates>;
defm add_gnu_debuglink
: Eq<"add-gnu-debuglink", "Add a .gnu_debuglink for <debug-file>">,
MetaVarName<"debug-file">;
defm remove_section : Eq<"remove-section", "Remove <section>">,
MetaVarName<"section">;
def R : JoinedOrSeparate<["-"], "R">, Alias<remove_section>;
defm rename_section
: Eq<"rename-section",
"Renames a section from old to new, optionally with specified flags. "
"Flags supported for GNU compatibility: alloc, load, noload, "
"readonly, debug, code, data, rom, share, contents, merge, strings.">,
MetaVarName<"old=new[,flag1,...]">;
defm redefine_symbol
: Eq<"redefine-sym", "Change the name of a symbol old to new">,
MetaVarName<"old=new">;
defm redefine_symbols
: Eq<"redefine-syms",
"Reads a list of symbol pairs from <filename> and runs as if "
"--redefine-sym=<old>=<new> is set for each one. <filename> "
"contains two symbols per line separated with whitespace and may "
"contain comments beginning with '#'. Leading and trailing "
"whitespace is stripped from each line. May be repeated to read "
"symbols from many files.">,
MetaVarName<"filename">;
defm keep_section : Eq<"keep-section", "Keep <section>">,
MetaVarName<"section">;
defm only_section : Eq<"only-section", "Remove all but <section>">,
MetaVarName<"section">;
def j : JoinedOrSeparate<["-"], "j">, Alias<only_section>;
defm add_section
: Eq<"add-section",
"Make a section named <section> with the contents of <file>.">,
MetaVarName<"section=file">;
defm set_section_flags
: Eq<"set-section-flags",
"Set section flags for a given section. Flags supported for GNU "
"compatibility: alloc, load, noload, readonly, debug, code, data, "
"rom, share, contents, merge, strings.">,
MetaVarName<"section=flag1[,flag2,...]">;
def strip_all : Flag<["--"], "strip-all">,
HelpText<"Remove non-allocated sections outside segments. "
- ".gnu.warning* sections are not removed">;
+ ".gnu.warning* and .ARM.attribute sections are not "
+ "removed">;
def S : Flag<["-"], "S">, Alias<strip_all>;
def strip_all_gnu : Flag<["--"], "strip-all-gnu">,
HelpText<"Compatible with GNU objcopy's --strip-all">;
def strip_debug : Flag<["--"], "strip-debug">,
HelpText<"Remove all debug information">;
def g : Flag<["-"], "g">, Alias<strip_debug>,
HelpText<"Alias for --strip-debug">;
def strip_dwo : Flag<["--"], "strip-dwo">,
HelpText<"Remove all DWARF .dwo sections from file">;
def strip_sections
: Flag<["--"], "strip-sections">,
HelpText<"Remove all section headers and all sections not in segments">;
def strip_non_alloc
: Flag<["--"], "strip-non-alloc">,
HelpText<"Remove all non-allocated sections outside segments">;
def strip_unneeded : Flag<["--"], "strip-unneeded">,
HelpText<"Remove all symbols not needed by relocations">;
defm strip_unneeded_symbol
: Eq<"strip-unneeded-symbol",
"Remove symbol <symbol> if it is not needed by relocations">,
MetaVarName<"symbol">;
defm strip_unneeded_symbols
: Eq<"strip-unneeded-symbols",
"Reads a list of symbols from <filename> and removes them "
"if they are not needed by relocations">,
MetaVarName<"filename">;
def extract_dwo
: Flag<["--"], "extract-dwo">,
HelpText<
"Remove all sections that are not DWARF .dwo sections from file">;
defm extract_partition
: Eq<"extract-partition", "Extract named partition from input file">,
MetaVarName<"name">;
def extract_main_partition
: Flag<["--"], "extract-main-partition">,
HelpText<"Extract main partition from the input file">;
def localize_hidden
: Flag<["--"], "localize-hidden">,
HelpText<
"Mark all symbols that have hidden or internal visibility as local">;
defm localize_symbol : Eq<"localize-symbol", "Mark <symbol> as local">,
MetaVarName<"symbol">;
defm localize_symbols
: Eq<"localize-symbols",
"Reads a list of symbols from <filename> and marks them local.">,
MetaVarName<"filename">;
def L : JoinedOrSeparate<["-"], "L">, Alias<localize_symbol>;
defm globalize_symbol : Eq<"globalize-symbol", "Mark <symbol> as global">,
MetaVarName<"symbol">;
defm globalize_symbols
: Eq<"globalize-symbols",
"Reads a list of symbols from <filename> and marks them global.">,
MetaVarName<"filename">;
defm keep_global_symbol
: Eq<"keep-global-symbol",
"Convert all symbols except <symbol> to local. May be repeated to "
"convert all except a set of symbols to local.">,
MetaVarName<"symbol">;
def G : JoinedOrSeparate<["-"], "G">, Alias<keep_global_symbol>;
defm keep_global_symbols
: Eq<"keep-global-symbols",
"Reads a list of symbols from <filename> and runs as if "
"--keep-global-symbol=<symbol> is set for each one. <filename> "
"contains one symbol per line and may contain comments beginning with "
"'#'. Leading and trailing whitespace is stripped from each line. May "
"be repeated to read symbols from many files.">,
MetaVarName<"filename">;
defm weaken_symbol : Eq<"weaken-symbol", "Mark <symbol> as weak">,
MetaVarName<"symbol">;
defm weaken_symbols
: Eq<"weaken-symbols",
"Reads a list of symbols from <filename> and marks them weak.">,
MetaVarName<"filename">;
def W : JoinedOrSeparate<["-"], "W">, Alias<weaken_symbol>;
def weaken : Flag<["--"], "weaken">,
HelpText<"Mark all global symbols as weak">;
def discard_locals : Flag<["--"], "discard-locals">,
HelpText<"Remove compiler-generated local symbols, (e.g. "
"symbols starting with .L)">;
def X : Flag<["-"], "X">, Alias<discard_locals>;
def discard_all
: Flag<["--"], "discard-all">,
HelpText<"Remove all local symbols except file and section symbols">;
def x : Flag<["-"], "x">, Alias<discard_all>;
defm strip_symbol : Eq<"strip-symbol", "Remove symbol <symbol>">,
MetaVarName<"symbol">;
defm strip_symbols
: Eq<"strip-symbols",
"Reads a list of symbols from <filename> and removes them.">,
MetaVarName<"filename">;
def N : JoinedOrSeparate<["-"], "N">, Alias<strip_symbol>;
defm keep_symbol : Eq<"keep-symbol", "Do not remove symbol <symbol>">,
MetaVarName<"symbol">;
def K : JoinedOrSeparate<["-"], "K">, Alias<keep_symbol>;
defm keep_symbols
: Eq<"keep-symbols",
"Reads a list of symbols from <filename> and runs as if "
"--keep-symbol=<symbol> is set for each one. <filename> "
"contains one symbol per line and may contain comments beginning with "
"'#'. Leading and trailing whitespace is stripped from each line. May "
"be repeated to read symbols from many files.">,
MetaVarName<"filename">;
def only_keep_debug
: Flag<["--"], "only-keep-debug">,
HelpText<"Clear sections that would not be stripped by --strip-debug. "
"Currently only implemented for COFF.">;
def keep_file_symbols : Flag<["--"], "keep-file-symbols">,
HelpText<"Do not remove file symbols">;
defm dump_section
: Eq<"dump-section",
"Dump contents of section named <section> into file <file>">,
MetaVarName<"section=file">;
defm prefix_symbols
: Eq<"prefix-symbols", "Add <prefix> to the start of every symbol name">,
MetaVarName<"prefix">;
defm prefix_alloc_sections
: Eq<"prefix-alloc-sections", "Add <prefix> to the start of every allocated section name">,
MetaVarName<"prefix">;
def version : Flag<["--"], "version">,
HelpText<"Print the version and exit.">;
def V : Flag<["-"], "V">, Alias<version>;
defm build_id_link_dir
: Eq<"build-id-link-dir", "Set directory for --build-id-link-input and "
"--build-id-link-output to <dir>">,
MetaVarName<"dir">;
defm build_id_link_input
: Eq<"build-id-link-input", "Hard-link the input to <dir>/xx/xxx<suffix> "
"name derived from hex build ID">,
MetaVarName<"suffix">;
defm build_id_link_output
: Eq<"build-id-link-output", "Hard-link the output to <dir>/xx/xxx<suffix> "
"name derived from hex build ID">,
MetaVarName<"suffix">;
def regex
: Flag<["--"], "regex">,
HelpText<"Permit regular expressions in name comparison">;
defm set_start : Eq<"set-start", "Set the start address to <addr>. Overrides "
"any previous --change-start or --adjust-start values.">,
MetaVarName<"addr">;
defm change_start : Eq<"change-start", "Add <incr> to the start address. Can be "
"specified multiple times, all values will be applied "
"cumulatively.">,
MetaVarName<"incr">;
def adjust_start : JoinedOrSeparate<["--"], "adjust-start">,
Alias<change_start>;
defm add_symbol
: Eq<"add-symbol", "Add new symbol <name> to .symtab. Accepted flags: "
"global, local, weak, default, hidden, file, section, object, "
"function, indirect-function. Accepted but ignored for "
"compatibility: debug, constructor, warning, indirect, synthetic, "
"unique-object, before.">,
MetaVarName<"name=[section:]value[,flags]">;
Index: stable/12/contrib/llvm-project/llvm/tools/llvm-objcopy/StripOpts.td
===================================================================
--- stable/12/contrib/llvm-project/llvm/tools/llvm-objcopy/StripOpts.td (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/tools/llvm-objcopy/StripOpts.td (revision 356465)
@@ -1,96 +1,97 @@
include "llvm/Option/OptParser.td"
multiclass Eq<string name, string help> {
def NAME : Separate<["--"], name>;
def NAME #_eq : Joined<["--"], name #"=">,
Alias<!cast<Separate>(NAME)>,
HelpText<help>;
}
def help : Flag<["--"], "help">;
def h : Flag<["-"], "h">, Alias<help>;
def allow_broken_links
: Flag<["--"], "allow-broken-links">,
HelpText<"Allow llvm-strip to remove sections even if it would leave "
"invalid section references. The appropriate sh_link fields "
"will be set to zero.">;
def enable_deterministic_archives
: Flag<["--"], "enable-deterministic-archives">,
HelpText<"Enable deterministic mode when stripping archives (use zero "
"for UIDs, GIDs, and timestamps).">;
def D : Flag<["-"], "D">,
Alias<enable_deterministic_archives>,
HelpText<"Alias for --enable-deterministic-archives">;
def disable_deterministic_archives
: Flag<["--"], "disable-deterministic-archives">,
HelpText<"Disable deterministic mode when stripping archives (use real "
"values for UIDs, GIDs, and timestamps).">;
def U : Flag<["-"], "U">,
Alias<disable_deterministic_archives>,
HelpText<"Alias for --disable-deterministic-archives">;
def output : JoinedOrSeparate<["-"], "o">, HelpText<"Write output to <file>">;
def preserve_dates : Flag<["--"], "preserve-dates">,
HelpText<"Preserve access and modification timestamps">;
def p : Flag<["-"], "p">, Alias<preserve_dates>;
def strip_all : Flag<["--"], "strip-all">,
HelpText<"Remove non-allocated sections outside segments. "
- ".gnu.warning* sections are not removed">;
+ ".gnu.warning* and .ARM.attribute sections are not "
+ "removed">;
def s : Flag<["-"], "s">, Alias<strip_all>;
def no_strip_all : Flag<["--"], "no-strip-all">,
HelpText<"Disable --strip-all">;
def strip_all_gnu : Flag<["--"], "strip-all-gnu">,
HelpText<"Compatible with GNU strip's --strip-all">;
def strip_debug : Flag<["--"], "strip-debug">,
HelpText<"Remove debugging symbols only">;
def d : Flag<["-"], "d">, Alias<strip_debug>;
def g : Flag<["-"], "g">, Alias<strip_debug>;
def S : Flag<["-"], "S">, Alias<strip_debug>;
def strip_unneeded : Flag<["--"], "strip-unneeded">,
HelpText<"Remove all symbols not needed by relocations">;
defm remove_section : Eq<"remove-section", "Remove <section>">,
MetaVarName<"section">;
def R : JoinedOrSeparate<["-"], "R">, Alias<remove_section>;
defm strip_symbol : Eq<"strip-symbol", "Strip <symbol>">,
MetaVarName<"symbol">;
def N : JoinedOrSeparate<["-"], "N">, Alias<strip_symbol>;
defm keep_section : Eq<"keep-section", "Keep <section>">,
MetaVarName<"section">;
defm keep_symbol : Eq<"keep-symbol", "Do not remove symbol <symbol>">,
MetaVarName<"symbol">;
def keep_file_symbols : Flag<["--"], "keep-file-symbols">,
HelpText<"Do not remove file symbols">;
def K : JoinedOrSeparate<["-"], "K">, Alias<keep_symbol>;
def only_keep_debug
: Flag<["--"], "only-keep-debug">,
HelpText<"Clear sections that would not be stripped by --strip-debug. "
"Currently only implemented for COFF.">;
def discard_locals : Flag<["--"], "discard-locals">,
HelpText<"Remove compiler-generated local symbols, (e.g. "
"symbols starting with .L)">;
def X : Flag<["-"], "X">, Alias<discard_locals>;
def discard_all
: Flag<["--"], "discard-all">,
HelpText<"Remove all local symbols except file and section symbols">;
def x : Flag<["-"], "x">, Alias<discard_all>;
def regex
: Flag<["--"], "regex">,
HelpText<"Permit regular expressions in name comparison">;
def version : Flag<["--"], "version">,
HelpText<"Print the version and exit.">;
def V : Flag<["-"], "V">, Alias<version>;
Index: stable/12/contrib/llvm-project/llvm/tools/opt/opt.cpp
===================================================================
--- stable/12/contrib/llvm-project/llvm/tools/opt/opt.cpp (revision 356464)
+++ stable/12/contrib/llvm-project/llvm/tools/opt/opt.cpp (revision 356465)
@@ -1,937 +1,936 @@
//===- opt.cpp - The LLVM Modular Optimizer -------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Optimizations may be specified an arbitrary number of times on the command
// line; they are run in the order specified.
//
//===----------------------------------------------------------------------===//
#include "BreakpointPrinter.h"
#include "Debugify.h"
#include "NewPMDriver.h"
#include "PassPrinters.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/RegionPass.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Bitcode/BitcodeWriterPass.h"
#include "llvm/CodeGen/CommandFlags.inc"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/IRPrintingPasses.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/LegacyPassNameParser.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/RemarkStreamer.h"
#include "llvm/IR/Verifier.h"
#include "llvm/IRReader/IRReader.h"
#include "llvm/InitializePasses.h"
#include "llvm/LinkAllIR.h"
#include "llvm/LinkAllPasses.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Host.h"
#include "llvm/Support/InitLLVM.h"
#include "llvm/Support/PluginLoader.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/SystemUtils.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/ToolOutputFile.h"
#include "llvm/Support/YAMLTraits.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Coroutines.h"
#include "llvm/Transforms/IPO/AlwaysInliner.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include <algorithm>
#include <memory>
using namespace llvm;
using namespace opt_tool;
// The OptimizationList is automatically populated with registered Passes by the
// PassNameParser.
//
static cl::list<const PassInfo*, bool, PassNameParser>
PassList(cl::desc("Optimizations available:"));
// This flag specifies a textual description of the optimization pass pipeline
// to run over the module. This flag switches opt to use the new pass manager
// infrastructure, completely disabling all of the flags specific to the old
// pass management.
static cl::opt<std::string> PassPipeline(
"passes",
cl::desc("A textual description of the pass pipeline for optimizing"),
cl::Hidden);
// Other command line options...
//
static cl::opt<std::string>
InputFilename(cl::Positional, cl::desc("<input bitcode file>"),
cl::init("-"), cl::value_desc("filename"));
static cl::opt<std::string>
OutputFilename("o", cl::desc("Override output filename"),
cl::value_desc("filename"));
static cl::opt<bool>
Force("f", cl::desc("Enable binary output on terminals"));
static cl::opt<bool>
PrintEachXForm("p", cl::desc("Print module after each transformation"));
static cl::opt<bool>
NoOutput("disable-output",
cl::desc("Do not write result bitcode file"), cl::Hidden);
static cl::opt<bool>
OutputAssembly("S", cl::desc("Write output as LLVM assembly"));
static cl::opt<bool>
OutputThinLTOBC("thinlto-bc",
cl::desc("Write output as ThinLTO-ready bitcode"));
static cl::opt<bool>
SplitLTOUnit("thinlto-split-lto-unit",
cl::desc("Enable splitting of a ThinLTO LTOUnit"));
static cl::opt<std::string> ThinLinkBitcodeFile(
"thin-link-bitcode-file", cl::value_desc("filename"),
cl::desc(
"A file in which to write minimized bitcode for the thin link only"));
static cl::opt<bool>
NoVerify("disable-verify", cl::desc("Do not run the verifier"), cl::Hidden);
static cl::opt<bool>
VerifyEach("verify-each", cl::desc("Verify after each transform"));
static cl::opt<bool>
DisableDITypeMap("disable-debug-info-type-map",
cl::desc("Don't use a uniquing type map for debug info"));
static cl::opt<bool>
StripDebug("strip-debug",
cl::desc("Strip debugger symbol info from translation unit"));
static cl::opt<bool>
StripNamedMetadata("strip-named-metadata",
cl::desc("Strip module-level named metadata"));
static cl::opt<bool> DisableInline("disable-inlining",
cl::desc("Do not run the inliner pass"));
static cl::opt<bool>
DisableOptimizations("disable-opt",
cl::desc("Do not run any optimization passes"));
static cl::opt<bool>
StandardLinkOpts("std-link-opts",
cl::desc("Include the standard link time optimizations"));
static cl::opt<bool>
OptLevelO0("O0",
cl::desc("Optimization level 0. Similar to clang -O0"));
static cl::opt<bool>
OptLevelO1("O1",
cl::desc("Optimization level 1. Similar to clang -O1"));
static cl::opt<bool>
OptLevelO2("O2",
cl::desc("Optimization level 2. Similar to clang -O2"));
static cl::opt<bool>
OptLevelOs("Os",
cl::desc("Like -O2 with extra optimizations for size. Similar to clang -Os"));
static cl::opt<bool>
OptLevelOz("Oz",
cl::desc("Like -Os but reduces code size further. Similar to clang -Oz"));
static cl::opt<bool>
OptLevelO3("O3",
cl::desc("Optimization level 3. Similar to clang -O3"));
static cl::opt<unsigned>
CodeGenOptLevel("codegen-opt-level",
cl::desc("Override optimization level for codegen hooks"));
static cl::opt<std::string>
TargetTriple("mtriple", cl::desc("Override target triple for module"));
static cl::opt<bool>
DisableLoopUnrolling("disable-loop-unrolling",
cl::desc("Disable loop unrolling in all relevant passes"),
cl::init(false));
static cl::opt<bool>
DisableSLPVectorization("disable-slp-vectorization",
cl::desc("Disable the slp vectorization pass"),
cl::init(false));
static cl::opt<bool> EmitSummaryIndex("module-summary",
cl::desc("Emit module summary index"),
cl::init(false));
static cl::opt<bool> EmitModuleHash("module-hash", cl::desc("Emit module hash"),
cl::init(false));
static cl::opt<bool>
DisableSimplifyLibCalls("disable-simplify-libcalls",
cl::desc("Disable simplify-libcalls"));
static cl::opt<bool>
Quiet("q", cl::desc("Obsolete option"), cl::Hidden);
static cl::alias
QuietA("quiet", cl::desc("Alias for -q"), cl::aliasopt(Quiet));
static cl::opt<bool>
AnalyzeOnly("analyze", cl::desc("Only perform analysis, no optimization"));
static cl::opt<bool> EnableDebugify(
"enable-debugify",
cl::desc(
"Start the pipeline with debugify and end it with check-debugify"));
static cl::opt<bool> DebugifyEach(
"debugify-each",
cl::desc(
"Start each pass with debugify and end it with check-debugify"));
static cl::opt<std::string>
DebugifyExport("debugify-export",
cl::desc("Export per-pass debugify statistics to this file"),
cl::value_desc("filename"), cl::init(""));
static cl::opt<bool>
PrintBreakpoints("print-breakpoints-for-testing",
cl::desc("Print select breakpoints location for testing"));
static cl::opt<std::string> ClDataLayout("data-layout",
cl::desc("data layout string to use"),
cl::value_desc("layout-string"),
cl::init(""));
static cl::opt<bool> PreserveBitcodeUseListOrder(
"preserve-bc-uselistorder",
cl::desc("Preserve use-list order when writing LLVM bitcode."),
cl::init(true), cl::Hidden);
static cl::opt<bool> PreserveAssemblyUseListOrder(
"preserve-ll-uselistorder",
cl::desc("Preserve use-list order when writing LLVM assembly."),
cl::init(false), cl::Hidden);
static cl::opt<bool>
RunTwice("run-twice",
cl::desc("Run all passes twice, re-using the same pass manager."),
cl::init(false), cl::Hidden);
static cl::opt<bool> DiscardValueNames(
"discard-value-names",
cl::desc("Discard names from Value (other than GlobalValue)."),
cl::init(false), cl::Hidden);
static cl::opt<bool> Coroutines(
"enable-coroutines",
cl::desc("Enable coroutine passes."),
cl::init(false), cl::Hidden);
static cl::opt<bool> RemarksWithHotness(
"pass-remarks-with-hotness",
cl::desc("With PGO, include profile count in optimization remarks"),
cl::Hidden);
static cl::opt<unsigned>
RemarksHotnessThreshold("pass-remarks-hotness-threshold",
cl::desc("Minimum profile count required for "
"an optimization remark to be output"),
cl::Hidden);
static cl::opt<std::string>
RemarksFilename("pass-remarks-output",
cl::desc("Output filename for pass remarks"),
cl::value_desc("filename"));
static cl::opt<std::string>
RemarksPasses("pass-remarks-filter",
cl::desc("Only record optimization remarks from passes whose "
"names match the given regular expression"),
cl::value_desc("regex"));
static cl::opt<std::string> RemarksFormat(
"pass-remarks-format",
cl::desc("The format used for serializing remarks (default: YAML)"),
cl::value_desc("format"), cl::init("yaml"));
cl::opt<PGOKind>
PGOKindFlag("pgo-kind", cl::init(NoPGO), cl::Hidden,
cl::desc("The kind of profile guided optimization"),
cl::values(clEnumValN(NoPGO, "nopgo", "Do not use PGO."),
clEnumValN(InstrGen, "pgo-instr-gen-pipeline",
"Instrument the IR to generate profile."),
clEnumValN(InstrUse, "pgo-instr-use-pipeline",
"Use instrumented profile to guide PGO."),
clEnumValN(SampleUse, "pgo-sample-use-pipeline",
"Use sampled profile to guide PGO.")));
cl::opt<std::string> ProfileFile("profile-file",
cl::desc("Path to the profile."), cl::Hidden);
cl::opt<CSPGOKind> CSPGOKindFlag(
"cspgo-kind", cl::init(NoCSPGO), cl::Hidden,
cl::desc("The kind of context sensitive profile guided optimization"),
cl::values(
clEnumValN(NoCSPGO, "nocspgo", "Do not use CSPGO."),
clEnumValN(
CSInstrGen, "cspgo-instr-gen-pipeline",
"Instrument (context sensitive) the IR to generate profile."),
clEnumValN(
CSInstrUse, "cspgo-instr-use-pipeline",
"Use instrumented (context sensitive) profile to guide PGO.")));
cl::opt<std::string> CSProfileGenFile(
"cs-profilegen-file",
cl::desc("Path to the instrumented context sensitive profile."),
cl::Hidden);
class OptCustomPassManager : public legacy::PassManager {
DebugifyStatsMap DIStatsMap;
public:
using super = legacy::PassManager;
void add(Pass *P) override {
// Wrap each pass with (-check)-debugify passes if requested, making
// exceptions for passes which shouldn't see -debugify instrumentation.
bool WrapWithDebugify = DebugifyEach && !P->getAsImmutablePass() &&
!isIRPrintingPass(P) && !isBitcodeWriterPass(P);
if (!WrapWithDebugify) {
super::add(P);
return;
}
// Apply -debugify/-check-debugify before/after each pass and collect
// debug info loss statistics.
PassKind Kind = P->getPassKind();
StringRef Name = P->getPassName();
// TODO: Implement Debugify for BasicBlockPass, LoopPass.
switch (Kind) {
case PT_Function:
super::add(createDebugifyFunctionPass());
super::add(P);
super::add(createCheckDebugifyFunctionPass(true, Name, &DIStatsMap));
break;
case PT_Module:
super::add(createDebugifyModulePass());
super::add(P);
super::add(createCheckDebugifyModulePass(true, Name, &DIStatsMap));
break;
default:
super::add(P);
break;
}
}
const DebugifyStatsMap &getDebugifyStatsMap() const { return DIStatsMap; }
};
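// With -debugify-each, each module or function pass P effectively runs as
// debugify -> P -> check-debugify, with debug-info loss recorded in
// DIStatsMap; other pass kinds are currently added unwrapped.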
static inline void addPass(legacy::PassManagerBase &PM, Pass *P) {
// Add the pass to the pass manager...
PM.add(P);
// If we are verifying all of the intermediate steps, add the verifier...
if (VerifyEach)
PM.add(createVerifierPass());
}
/// This routine adds optimization passes based on the selected optimization
/// level, OptLevel, and size level, SizeLevel.
///
/// OptLevel - Optimization level, corresponding to -O0 through -O3.
/// SizeLevel - Size optimization level: 1 for -Os, 2 for -Oz.
static void AddOptimizationPasses(legacy::PassManagerBase &MPM,
legacy::FunctionPassManager &FPM,
TargetMachine *TM, unsigned OptLevel,
unsigned SizeLevel) {
if (!NoVerify || VerifyEach)
FPM.add(createVerifierPass()); // Verify that input is correct
PassManagerBuilder Builder;
Builder.OptLevel = OptLevel;
Builder.SizeLevel = SizeLevel;
if (DisableInline) {
// No inlining pass
} else if (OptLevel > 1) {
Builder.Inliner = createFunctionInliningPass(OptLevel, SizeLevel, false);
} else {
Builder.Inliner = createAlwaysInlinerLegacyPass();
}
Builder.DisableUnrollLoops = (DisableLoopUnrolling.getNumOccurrences() > 0) ?
DisableLoopUnrolling : OptLevel == 0;
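// An explicit -disable-loop-unrolling on the command line takes precedence;
// otherwise unrolling is disabled only at -O0.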
// Check if vectorization is explicitly disabled via -vectorize-loops=false.
// That flag enables vectorization in the LoopVectorize pass; it is on by
// default, and if it was disabled, leave it disabled here.
// A separate flag, -loop-vectorize, controls adding the pass to the pass
// manager. If set, the pass is added, and there is no additional check here
// for it.
if (Builder.LoopVectorize)
Builder.LoopVectorize = OptLevel > 1 && SizeLevel < 2;
// When #pragma vectorize is on for SLP, do the same as above
Builder.SLPVectorize =
DisableSLPVectorization ? false : OptLevel > 1 && SizeLevel < 2;
if (TM)
TM->adjustPassManager(Builder);
if (Coroutines)
addCoroutinePassesToExtensionPoints(Builder);
switch (PGOKindFlag) {
case InstrGen:
Builder.EnablePGOInstrGen = true;
Builder.PGOInstrGen = ProfileFile;
break;
case InstrUse:
Builder.PGOInstrUse = ProfileFile;
break;
case SampleUse:
Builder.PGOSampleUse = ProfileFile;
break;
default:
break;
}
switch (CSPGOKindFlag) {
case CSInstrGen:
Builder.EnablePGOCSInstrGen = true;
break;
case CSInstrUse:
Builder.EnablePGOCSInstrUse = true;
break;
default:
break;
}
Builder.populateFunctionPassManager(FPM);
Builder.populateModulePassManager(MPM);
}
static void AddStandardLinkPasses(legacy::PassManagerBase &PM) {
PassManagerBuilder Builder;
Builder.VerifyInput = true;
if (DisableOptimizations)
Builder.OptLevel = 0;
if (!DisableInline)
Builder.Inliner = createFunctionInliningPass();
Builder.populateLTOPassManager(PM);
}
//===----------------------------------------------------------------------===//
// CodeGen-related helper functions.
//
static CodeGenOpt::Level GetCodeGenOptLevel() {
if (CodeGenOptLevel.getNumOccurrences())
return static_cast<CodeGenOpt::Level>(unsigned(CodeGenOptLevel));
if (OptLevelO1)
return CodeGenOpt::Less;
if (OptLevelO2)
return CodeGenOpt::Default;
if (OptLevelO3)
return CodeGenOpt::Aggressive;
return CodeGenOpt::None;
}
// Returns the TargetMachine instance or nullptr if no triple is provided.
static TargetMachine* GetTargetMachine(Triple TheTriple, StringRef CPUStr,
StringRef FeaturesStr,
const TargetOptions &Options) {
std::string Error;
const Target *TheTarget = TargetRegistry::lookupTarget(MArch, TheTriple,
Error);
// Some modules don't specify a triple, and this is okay.
if (!TheTarget) {
return nullptr;
}
return TheTarget->createTargetMachine(TheTriple.getTriple(), CPUStr,
FeaturesStr, Options, getRelocModel(),
getCodeModel(), GetCodeGenOptLevel());
}
#ifdef LINK_POLLY_INTO_TOOLS
namespace polly {
void initializePollyPasses(llvm::PassRegistry &Registry);
}
#endif
//===----------------------------------------------------------------------===//
// main for opt
//
int main(int argc, char **argv) {
InitLLVM X(argc, argv);
// Enable debug stream buffering.
EnableDebugBuffering = true;
LLVMContext Context;
InitializeAllTargets();
InitializeAllTargetMCs();
InitializeAllAsmPrinters();
InitializeAllAsmParsers();
// Initialize passes
PassRegistry &Registry = *PassRegistry::getPassRegistry();
initializeCore(Registry);
initializeCoroutines(Registry);
initializeScalarOpts(Registry);
initializeObjCARCOpts(Registry);
initializeVectorization(Registry);
initializeIPO(Registry);
initializeAnalysis(Registry);
initializeTransformUtils(Registry);
initializeInstCombine(Registry);
initializeAggressiveInstCombine(Registry);
initializeInstrumentation(Registry);
initializeTarget(Registry);
// For codegen passes, only passes that do IR to IR transformation are
// supported.
initializeExpandMemCmpPassPass(Registry);
initializeScalarizeMaskedMemIntrinPass(Registry);
initializeCodeGenPreparePass(Registry);
initializeAtomicExpandPass(Registry);
initializeRewriteSymbolsLegacyPassPass(Registry);
initializeWinEHPreparePass(Registry);
initializeDwarfEHPreparePass(Registry);
initializeSafeStackLegacyPassPass(Registry);
initializeSjLjEHPreparePass(Registry);
- initializeStackProtectorPass(Registry);
initializePreISelIntrinsicLoweringLegacyPassPass(Registry);
initializeGlobalMergePass(Registry);
initializeIndirectBrExpandPassPass(Registry);
initializeInterleavedLoadCombinePass(Registry);
initializeInterleavedAccessPass(Registry);
initializeEntryExitInstrumenterPass(Registry);
initializePostInlineEntryExitInstrumenterPass(Registry);
initializeUnreachableBlockElimLegacyPassPass(Registry);
initializeExpandReductionsPass(Registry);
initializeWasmEHPreparePass(Registry);
initializeWriteBitcodePassPass(Registry);
initializeHardwareLoopsPass(Registry);
#ifdef LINK_POLLY_INTO_TOOLS
polly::initializePollyPasses(Registry);
#endif
cl::ParseCommandLineOptions(argc, argv,
"llvm .bc -> .bc modular optimizer and analysis printer\n");
if (AnalyzeOnly && NoOutput) {
errs() << argv[0] << ": analyze mode conflicts with no-output mode.\n";
return 1;
}
SMDiagnostic Err;
Context.setDiscardValueNames(DiscardValueNames);
if (!DisableDITypeMap)
Context.enableDebugTypeODRUniquing();
Expected<std::unique_ptr<ToolOutputFile>> RemarksFileOrErr =
setupOptimizationRemarks(Context, RemarksFilename, RemarksPasses,
RemarksFormat, RemarksWithHotness,
RemarksHotnessThreshold);
if (Error E = RemarksFileOrErr.takeError()) {
errs() << toString(std::move(E)) << '\n';
return 1;
}
std::unique_ptr<ToolOutputFile> RemarksFile = std::move(*RemarksFileOrErr);
// Load the input module...
std::unique_ptr<Module> M =
parseIRFile(InputFilename, Err, Context, !NoVerify, ClDataLayout);
if (!M) {
Err.print(argv[0], errs());
return 1;
}
// Strip debug info before running the verifier.
if (StripDebug)
StripDebugInfo(*M);
// Erase module-level named metadata, if requested.
if (StripNamedMetadata) {
while (!M->named_metadata_empty()) {
NamedMDNode *NMD = &*M->named_metadata_begin();
M->eraseNamedMetadata(NMD);
}
}
// If we are supposed to override the target triple or data layout, do so now.
if (!TargetTriple.empty())
M->setTargetTriple(Triple::normalize(TargetTriple));
// Immediately run the verifier to catch any problems before starting up the
// pass pipelines. Otherwise we can crash on broken code during
// doInitialization().
if (!NoVerify && verifyModule(*M, &errs())) {
errs() << argv[0] << ": " << InputFilename
<< ": error: input module is broken!\n";
return 1;
}
// Figure out what stream we are supposed to write to...
std::unique_ptr<ToolOutputFile> Out;
std::unique_ptr<ToolOutputFile> ThinLinkOut;
if (NoOutput) {
if (!OutputFilename.empty())
errs() << "WARNING: The -o (output filename) option is ignored when\n"
"the --disable-output option is used.\n";
} else {
// Default to standard output.
if (OutputFilename.empty())
OutputFilename = "-";
std::error_code EC;
Out.reset(new ToolOutputFile(OutputFilename, EC, sys::fs::F_None));
if (EC) {
errs() << EC.message() << '\n';
return 1;
}
if (!ThinLinkBitcodeFile.empty()) {
ThinLinkOut.reset(
new ToolOutputFile(ThinLinkBitcodeFile, EC, sys::fs::F_None));
if (EC) {
errs() << EC.message() << '\n';
return 1;
}
}
}
Triple ModuleTriple(M->getTargetTriple());
std::string CPUStr, FeaturesStr;
TargetMachine *Machine = nullptr;
const TargetOptions Options = InitTargetOptionsFromCodeGenFlags();
if (ModuleTriple.getArch()) {
CPUStr = getCPUStr();
FeaturesStr = getFeaturesStr();
Machine = GetTargetMachine(ModuleTriple, CPUStr, FeaturesStr, Options);
} else if (ModuleTriple.getArchName() != "unknown" &&
ModuleTriple.getArchName() != "") {
errs() << argv[0] << ": unrecognized architecture '"
<< ModuleTriple.getArchName() << "' provided.\n";
return 1;
}
std::unique_ptr<TargetMachine> TM(Machine);
// Override function attributes based on CPUStr, FeaturesStr, and command line
// flags.
setFunctionAttributes(CPUStr, FeaturesStr, *M);
// If the output is set to be emitted to standard out, and standard out is a
// console, print out a warning message and refuse to do it. We don't
// impress anyone by spewing tons of binary goo to a terminal.
if (!Force && !NoOutput && !AnalyzeOnly && !OutputAssembly)
if (CheckBitcodeOutputToConsole(Out->os(), !Quiet))
NoOutput = true;
if (OutputThinLTOBC)
M->addModuleFlag(Module::Error, "EnableSplitLTOUnit", SplitLTOUnit);
if (PassPipeline.getNumOccurrences() > 0) {
OutputKind OK = OK_NoOutput;
if (!NoOutput)
OK = OutputAssembly
? OK_OutputAssembly
: (OutputThinLTOBC ? OK_OutputThinLTOBitcode : OK_OutputBitcode);
VerifierKind VK = VK_VerifyInAndOut;
if (NoVerify)
VK = VK_NoVerifier;
else if (VerifyEach)
VK = VK_VerifyEachPass;
// The user has asked to use the new pass manager and provided a pipeline
// string. Hand off the rest of the functionality to the new code for that
// layer.
return runPassPipeline(argv[0], *M, TM.get(), Out.get(), ThinLinkOut.get(),
RemarksFile.get(), PassPipeline, OK, VK,
PreserveAssemblyUseListOrder,
PreserveBitcodeUseListOrder, EmitSummaryIndex,
EmitModuleHash, EnableDebugify)
? 0
: 1;
}
// Create a PassManager to hold and optimize the collection of passes we are
// about to build.
OptCustomPassManager Passes;
bool AddOneTimeDebugifyPasses = EnableDebugify && !DebugifyEach;
// Add an appropriate TargetLibraryInfo pass for the module's triple.
TargetLibraryInfoImpl TLII(ModuleTriple);
// The -disable-simplify-libcalls flag actually disables all builtin optzns.
if (DisableSimplifyLibCalls)
TLII.disableAllFunctions();
Passes.add(new TargetLibraryInfoWrapperPass(TLII));
// Add internal analysis passes from the target machine.
Passes.add(createTargetTransformInfoWrapperPass(TM ? TM->getTargetIRAnalysis()
: TargetIRAnalysis()));
if (AddOneTimeDebugifyPasses)
Passes.add(createDebugifyModulePass());
std::unique_ptr<legacy::FunctionPassManager> FPasses;
if (OptLevelO0 || OptLevelO1 || OptLevelO2 || OptLevelOs || OptLevelOz ||
OptLevelO3) {
FPasses.reset(new legacy::FunctionPassManager(M.get()));
FPasses->add(createTargetTransformInfoWrapperPass(
TM ? TM->getTargetIRAnalysis() : TargetIRAnalysis()));
}
if (PrintBreakpoints) {
// Default to standard output.
if (!Out) {
if (OutputFilename.empty())
OutputFilename = "-";
std::error_code EC;
Out = llvm::make_unique<ToolOutputFile>(OutputFilename, EC,
sys::fs::F_None);
if (EC) {
errs() << EC.message() << '\n';
return 1;
}
}
Passes.add(createBreakpointPrinter(Out->os()));
NoOutput = true;
}
if (TM) {
// FIXME: We should dyn_cast this when supported.
auto &LTM = static_cast<LLVMTargetMachine &>(*TM);
Pass *TPC = LTM.createPassConfig(Passes);
Passes.add(TPC);
}
// Create a new optimization pass for each one specified on the command line
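// The -O flags are interleaved with the named passes by command-line
// position, so "opt -instcombine -O2" adds instcombine first and the -O2
// pipeline afterwards.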
for (unsigned i = 0; i < PassList.size(); ++i) {
if (StandardLinkOpts &&
StandardLinkOpts.getPosition() < PassList.getPosition(i)) {
AddStandardLinkPasses(Passes);
StandardLinkOpts = false;
}
if (OptLevelO0 && OptLevelO0.getPosition() < PassList.getPosition(i)) {
AddOptimizationPasses(Passes, *FPasses, TM.get(), 0, 0);
OptLevelO0 = false;
}
if (OptLevelO1 && OptLevelO1.getPosition() < PassList.getPosition(i)) {
AddOptimizationPasses(Passes, *FPasses, TM.get(), 1, 0);
OptLevelO1 = false;
}
if (OptLevelO2 && OptLevelO2.getPosition() < PassList.getPosition(i)) {
AddOptimizationPasses(Passes, *FPasses, TM.get(), 2, 0);
OptLevelO2 = false;
}
if (OptLevelOs && OptLevelOs.getPosition() < PassList.getPosition(i)) {
AddOptimizationPasses(Passes, *FPasses, TM.get(), 2, 1);
OptLevelOs = false;
}
if (OptLevelOz && OptLevelOz.getPosition() < PassList.getPosition(i)) {
AddOptimizationPasses(Passes, *FPasses, TM.get(), 2, 2);
OptLevelOz = false;
}
if (OptLevelO3 && OptLevelO3.getPosition() < PassList.getPosition(i)) {
AddOptimizationPasses(Passes, *FPasses, TM.get(), 3, 0);
OptLevelO3 = false;
}
const PassInfo *PassInf = PassList[i];
Pass *P = nullptr;
if (PassInf->getNormalCtor())
P = PassInf->getNormalCtor()();
else
errs() << argv[0] << ": cannot create pass: "
<< PassInf->getPassName() << "\n";
if (P) {
PassKind Kind = P->getPassKind();
addPass(Passes, P);
if (AnalyzeOnly) {
switch (Kind) {
case PT_BasicBlock:
Passes.add(createBasicBlockPassPrinter(PassInf, Out->os(), Quiet));
break;
case PT_Region:
Passes.add(createRegionPassPrinter(PassInf, Out->os(), Quiet));
break;
case PT_Loop:
Passes.add(createLoopPassPrinter(PassInf, Out->os(), Quiet));
break;
case PT_Function:
Passes.add(createFunctionPassPrinter(PassInf, Out->os(), Quiet));
break;
case PT_CallGraphSCC:
Passes.add(createCallGraphPassPrinter(PassInf, Out->os(), Quiet));
break;
default:
Passes.add(createModulePassPrinter(PassInf, Out->os(), Quiet));
break;
}
}
}
if (PrintEachXForm)
Passes.add(
createPrintModulePass(errs(), "", PreserveAssemblyUseListOrder));
}
if (StandardLinkOpts) {
AddStandardLinkPasses(Passes);
StandardLinkOpts = false;
}
if (OptLevelO0)
AddOptimizationPasses(Passes, *FPasses, TM.get(), 0, 0);
if (OptLevelO1)
AddOptimizationPasses(Passes, *FPasses, TM.get(), 1, 0);
if (OptLevelO2)
AddOptimizationPasses(Passes, *FPasses, TM.get(), 2, 0);
if (OptLevelOs)
AddOptimizationPasses(Passes, *FPasses, TM.get(), 2, 1);
if (OptLevelOz)
AddOptimizationPasses(Passes, *FPasses, TM.get(), 2, 2);
if (OptLevelO3)
AddOptimizationPasses(Passes, *FPasses, TM.get(), 3, 0);
if (FPasses) {
FPasses->doInitialization();
for (Function &F : *M)
FPasses->run(F);
FPasses->doFinalization();
}
// Check that the module is well formed on completion of optimization
if (!NoVerify && !VerifyEach)
Passes.add(createVerifierPass());
if (AddOneTimeDebugifyPasses)
Passes.add(createCheckDebugifyModulePass(false));
// In run twice mode, we want to make sure the output is bit-by-bit
// equivalent if we run the pass manager again, so setup two buffers and
// a stream to write to them. Note that llc does something similar and it
// may be worth to abstract this out in the future.
SmallVector<char, 0> Buffer;
SmallVector<char, 0> FirstRunBuffer;
std::unique_ptr<raw_svector_ostream> BOS;
raw_ostream *OS = nullptr;
// Write bitcode or assembly to the output as the last step...
if (!NoOutput && !AnalyzeOnly) {
assert(Out);
OS = &Out->os();
if (RunTwice) {
BOS = make_unique<raw_svector_ostream>(Buffer);
OS = BOS.get();
}
if (OutputAssembly) {
if (EmitSummaryIndex)
report_fatal_error("Text output is incompatible with -module-summary");
if (EmitModuleHash)
report_fatal_error("Text output is incompatible with -module-hash");
Passes.add(createPrintModulePass(*OS, "", PreserveAssemblyUseListOrder));
} else if (OutputThinLTOBC)
Passes.add(createWriteThinLTOBitcodePass(
*OS, ThinLinkOut ? &ThinLinkOut->os() : nullptr));
else
Passes.add(createBitcodeWriterPass(*OS, PreserveBitcodeUseListOrder,
EmitSummaryIndex, EmitModuleHash));
}
// Before executing passes, print the final values of the LLVM options.
cl::PrintOptionValues();
if (!RunTwice) {
// Now that we have all of the passes ready, run them.
Passes.run(*M);
} else {
// If requested, run all passes twice with the same pass manager to catch
// bugs caused by persistent state in the passes.
std::unique_ptr<Module> M2(CloneModule(*M));
// Run all passes on the original module first, so the second run processes
// the clone to catch CloneModule bugs.
Passes.run(*M);
FirstRunBuffer = Buffer;
Buffer.clear();
Passes.run(*M2);
// Compare the two outputs and make sure they're the same
assert(Out);
if (Buffer.size() != FirstRunBuffer.size() ||
(memcmp(Buffer.data(), FirstRunBuffer.data(), Buffer.size()) != 0)) {
errs()
<< "Running the pass manager twice changed the output.\n"
"Writing the result of the second run to the specified output.\n"
"To generate the one-run comparison binary, just run without\n"
"the compile-twice option\n";
Out->os() << BOS->str();
Out->keep();
if (RemarksFile)
RemarksFile->keep();
return 1;
}
Out->os() << BOS->str();
}
if (DebugifyEach && !DebugifyExport.empty())
exportDebugifyStats(DebugifyExport, Passes.getDebugifyStatsMap());
// Declare success.
if (!NoOutput || PrintBreakpoints)
Out->keep();
if (RemarksFile)
RemarksFile->keep();
if (ThinLinkOut)
ThinLinkOut->keep();
return 0;
}
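
The -run-twice logic above is a compact determinism check: the bitcode writer is redirected into an in-memory buffer, the pipeline runs once on the original module and once on a clone (the first run mutates the module in place), and the two serializations are compared byte for byte, so any persistent state inside a pass surfaces as a diff. Below is a minimal, LLVM-free sketch of the same pattern; the Pipeline callback is a hypothetical stand-in for Passes.run plus the bitcode writer, not opt's actual interface.

#include <cstdio>
#include <cstring>
#include <functional>
#include <string>

// Stand-in for "module + pass pipeline": the callback appends its
// serialized result to Out, the way opt points the bitcode writer at a
// SmallVector-backed stream when -run-twice is given.
using Pipeline = std::function<void(std::string &Out)>;

// True when two runs produce bit-for-bit identical output, i.e. no
// pass leaked state from the first run into the second.
static bool runTwiceMatches(const Pipeline &Run) {
  std::string First, Second;
  Run(First);  // first run: the "original module"
  Run(Second); // second run: opt hands a CloneModule'd copy to this one
  return First.size() == Second.size() &&
         std::memcmp(First.data(), Second.data(), First.size()) == 0;
}

int main() {
  int HiddenState = 0; // emulates persistent state inside a pass
  Pipeline Stateful = [&](std::string &Out) {
    Out += std::to_string(++HiddenState); // output depends on the run count
  };
  // Prints "deterministic: no", the condition opt reports as an error.
  std::printf("deterministic: %s\n", runTwiceMatches(Stateful) ? "yes" : "no");
  return 0;
}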
Index: stable/12/contrib/llvm-project/llvm
===================================================================
--- stable/12/contrib/llvm-project/llvm (revision 356464)
+++ stable/12/contrib/llvm-project/llvm (revision 356465)
Property changes on: stable/12/contrib/llvm-project/llvm
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,2 ##
Merged /head/contrib/llvm-project/llvm:r356004
Merged /vendor/llvm-project/release-9.x/llvm:r355951-355987
Index: stable/12/etc/mtree/BSD.debug.dist
===================================================================
--- stable/12/etc/mtree/BSD.debug.dist (revision 356464)
+++ stable/12/etc/mtree/BSD.debug.dist (revision 356465)
@@ -1,62 +1,62 @@
# $FreeBSD$
#
# Please see the file src/etc/mtree/README before making changes to this file.
#
/set type=dir uname=root gname=wheel mode=0755
.
debug
bin
..
boot
kernel
..
modules
..
..
lib
casper
..
geom
..
..
libexec
..
sbin
..
usr
bin
..
lib
clang
- 9.0.0
+ 9.0.1
lib
freebsd
..
..
..
..
engines
..
i18n
..
..
libexec
bsdinstall
..
lpr
ru
..
..
sendmail
..
sm.bin
..
..
sbin
..
tests
..
..
..
..
Index: stable/12/etc/mtree/BSD.usr.dist
===================================================================
--- stable/12/etc/mtree/BSD.usr.dist (revision 356464)
+++ stable/12/etc/mtree/BSD.usr.dist (revision 356465)
@@ -1,1260 +1,1260 @@
# $FreeBSD$
#
# Please see the file src/etc/mtree/README before making changes to this file.
#
/set type=dir uname=root gname=wheel mode=0755
.
bin
..
include
private
bsdstat
..
event
..
gmock
internal
custom
..
..
..
gtest
internal
custom
..
..
..
sqlite3
..
ucl
..
zstd
..
..
..
lib
aout
..
clang
- 9.0.0
+ 9.0.1
include
cuda_wrappers
..
openmp_wrappers
..
ppc_wrappers
..
sanitizer
..
..
lib
freebsd
..
..
..
..
compat
aout
..
..
dtrace
..
engines
..
i18n
..
libxo
encoder
..
..
..
libdata
gcc
..
ldscripts
..
pkgconfig
..
..
libexec
bsdconfig
020.docsinstall
include
..
..
030.packages
include
..
..
040.password
include
..
..
050.diskmgmt
include
..
..
070.usermgmt
include
..
..
080.console
include
..
..
090.timezone
include
..
..
110.mouse
include
..
..
120.networking
include
..
..
130.security
include
..
..
140.startup
include
..
..
150.ttys
include
..
..
dot
include
..
..
include
..
includes
include
..
..
..
bsdinstall
..
dwatch
..
hyperv
..
lpr
ru
..
..
sendmail
..
sm.bin
..
..
local
..
obj nochange
..
sbin
..
share
atf
..
bsdconfig
media
..
networking
..
packages
..
password
..
startup
..
timezone
..
usermgmt
..
..
calendar
de_AT.ISO_8859-15
..
de_DE.ISO8859-1
..
fr_FR.ISO8859-1
..
hr_HR.ISO8859-2
..
hu_HU.ISO8859-2
..
pt_BR.ISO8859-1
..
pt_BR.UTF-8
..
ru_RU.KOI8-R
..
ru_RU.UTF-8
..
uk_UA.KOI8-U
..
..
dict
..
doc
IPv6
..
atf
..
legal
..
llvm
clang
..
..
ncurses
..
ntp
drivers
icons
..
scripts
..
..
hints
..
icons
..
pic
..
scripts
..
..
pjdfstest
..
..
dtrace
..
examples
BSD_daemon
..
FreeBSD_version
..
IPv6
..
bhyve
..
bootforth
..
bsdconfig
..
csh
..
diskless
..
dma
..
drivers
..
dwatch
..
etc
defaults
..
..
find_interface
..
hast
..
hostapd
..
ibcs2
..
indent
..
ipfilter
..
ipfw
..
jails
..
kld
cdev
module
..
test
..
..
dyn_sysctl
..
firmware
fwconsumer
..
fwimage
..
..
khelp
..
syscall
module
..
test
..
..
..
libusb20
..
libvgl
..
mdoc
..
netgraph
..
pc-sysinstall
..
perfmon
..
pf
..
ppi
..
ppp
..
printing
..
scsi_target
..
ses
getencstat
..
sesd
..
setencstat
..
setobjstat
..
srcs
..
..
smbfs
print
..
..
sunrpc
dir
..
msg
..
sort
..
..
tcsh
..
uefisign
..
ypldap
..
..
firmware
..
games
fortune
..
..
i18n
csmapper
APPLE
..
AST
..
BIG5
..
CNS
..
CP
..
EBCDIC
..
GB
..
GEORGIAN
..
ISO-8859
..
ISO646
..
JIS
..
KAZAKH
..
KOI
..
KS
..
MISC
..
TCVN
..
..
esdb
APPLE
..
AST
..
BIG5
..
CP
..
DEC
..
EBCDIC
..
EUC
..
GB
..
GEORGIAN
..
ISO-2022
..
ISO-8859
..
ISO646
..
KAZAKH
..
KOI
..
MISC
..
TCVN
..
UTF
..
..
..
keys
pkg
revoked
..
trusted
..
..
..
locale
af_ZA.ISO8859-1
..
af_ZA.ISO8859-15
..
af_ZA.UTF-8
..
ar_AE.UTF-8
..
ar_EG.UTF-8
..
ar_JO.UTF-8
..
ar_MA.UTF-8
..
ar_QA.UTF-8
..
ar_SA.UTF-8
..
am_ET.UTF-8
..
be_BY.CP1131
..
be_BY.CP1251
..
be_BY.ISO8859-5
..
be_BY.UTF-8
..
bg_BG.CP1251
..
bg_BG.UTF-8
..
ca_AD.ISO8859-1
..
ca_AD.ISO8859-15
..
ca_ES.ISO8859-1
..
ca_ES.ISO8859-15
..
ca_FR.ISO8859-1
..
ca_FR.ISO8859-15
..
ca_IT.ISO8859-1
..
ca_IT.ISO8859-15
..
ca_AD.UTF-8
..
ca_ES.UTF-8
..
ca_FR.UTF-8
..
ca_IT.UTF-8
..
cs_CZ.ISO8859-2
..
cs_CZ.UTF-8
..
da_DK.ISO8859-1
..
da_DK.ISO8859-15
..
da_DK.UTF-8
..
de_AT.ISO8859-1
..
de_AT.ISO8859-15
..
de_AT.UTF-8
..
de_CH.ISO8859-1
..
de_CH.ISO8859-15
..
de_CH.UTF-8
..
de_DE.ISO8859-1
..
de_DE.ISO8859-15
..
de_DE.UTF-8
..
el_GR.ISO8859-7
..
el_GR.UTF-8
..
en_AU.ISO8859-1
..
en_AU.ISO8859-15
..
en_AU.US-ASCII
..
en_AU.UTF-8
..
en_CA.ISO8859-1
..
en_CA.ISO8859-15
..
en_CA.US-ASCII
..
en_CA.UTF-8
..
en_GB.ISO8859-1
..
en_GB.ISO8859-15
..
en_GB.US-ASCII
..
en_GB.UTF-8
..
en_HK.ISO8859-1
..
en_HK.UTF-8
..
en_IE.ISO8859-1
..
en_IE.ISO8859-15
..
en_IE.UTF-8
..
en_NZ.ISO8859-1
..
en_NZ.ISO8859-15
..
en_NZ.US-ASCII
..
en_NZ.UTF-8
..
en_PH.UTF-8
..
en_SG.ISO8859-1
..
en_SG.UTF-8
..
en_US.ISO8859-1
..
en_US.ISO8859-15
..
en_US.US-ASCII
..
en_US.UTF-8
..
en_ZA.ISO8859-1
..
en_ZA.ISO8859-15
..
en_ZA.US-ASCII
..
en_ZA.UTF-8
..
es_AR.ISO8859-1
..
es_AR.UTF-8
..
es_CR.UTF-8
..
es_ES.ISO8859-1
..
es_ES.ISO8859-15
..
es_ES.UTF-8
..
es_MX.ISO8859-1
..
es_MX.UTF-8
..
et_EE.ISO8859-1
..
et_EE.ISO8859-15
..
et_EE.UTF-8
..
eu_ES.ISO8859-1
..
eu_ES.ISO8859-15
..
eu_ES.UTF-8
..
fi_FI.ISO8859-1
..
fi_FI.ISO8859-15
..
fi_FI.UTF-8
..
fr_BE.ISO8859-1
..
fr_BE.ISO8859-15
..
fr_BE.UTF-8
..
fr_CA.ISO8859-1
..
fr_CA.ISO8859-15
..
fr_CA.UTF-8
..
fr_CH.ISO8859-1
..
fr_CH.ISO8859-15
..
fr_CH.UTF-8
..
fr_FR.ISO8859-1
..
fr_FR.ISO8859-15
..
fr_FR.UTF-8
..
he_IL.UTF-8
..
hi_IN.ISCII-DEV
..
hi_IN.UTF-8
..
hr_HR.ISO8859-2
..
hr_HR.UTF-8
..
hu_HU.ISO8859-2
..
hu_HU.UTF-8
..
hy_AM.ARMSCII-8
..
hy_AM.UTF-8
..
is_IS.ISO8859-1
..
is_IS.ISO8859-15
..
is_IS.UTF-8
..
it_CH.ISO8859-1
..
it_CH.ISO8859-15
..
it_CH.UTF-8
..
it_IT.ISO8859-1
..
it_IT.ISO8859-15
..
it_IT.UTF-8
..
ja_JP.SJIS
..
ja_JP.UTF-8
..
ja_JP.eucJP
..
kk_KZ.UTF-8
..
ko_KR.CP949
..
ko_KR.UTF-8
..
ko_KR.eucKR
..
lt_LT.ISO8859-13
..
lt_LT.UTF-8
..
lv_LV.ISO8859-13
..
lv_LV.UTF-8
..
mn_MN.UTF-8
..
nb_NO.ISO8859-1
..
nb_NO.ISO8859-15
..
nb_NO.UTF-8
..
nl_BE.ISO8859-1
..
nl_BE.ISO8859-15
..
nl_BE.UTF-8
..
nl_NL.ISO8859-1
..
nl_NL.ISO8859-15
..
nl_NL.UTF-8
..
nn_NO.ISO8859-1
..
nn_NO.ISO8859-15
..
nn_NO.UTF-8
..
pl_PL.ISO8859-2
..
pl_PL.UTF-8
..
pt_BR.ISO8859-1
..
pt_BR.UTF-8
..
pt_PT.ISO8859-1
..
pt_PT.ISO8859-15
..
pt_PT.UTF-8
..
ro_RO.ISO8859-2
..
ro_RO.UTF-8
..
ru_RU.CP1251
..
ru_RU.CP866
..
ru_RU.ISO8859-5
..
ru_RU.KOI8-R
..
ru_RU.UTF-8
..
se_FI.UTF-8
..
se_NO.UTF-8
..
sk_SK.ISO8859-2
..
sk_SK.UTF-8
..
sl_SI.ISO8859-2
..
sl_SI.UTF-8
..
sr_RS.ISO8859-5
..
sr_RS.UTF-8
..
sr_RS.ISO8859-2
..
sr_RS.UTF-8@latin
..
sv_FI.ISO8859-1
..
sv_FI.ISO8859-15
..
sv_FI.UTF-8
..
sv_SE.ISO8859-1
..
sv_SE.ISO8859-15
..
sv_SE.UTF-8
..
tr_TR.ISO8859-9
..
tr_TR.UTF-8
..
uk_UA.CP1251
..
uk_UA.ISO8859-5
..
uk_UA.KOI8-U
..
uk_UA.UTF-8
..
zh_CN.GB18030
..
zh_CN.GB2312
..
zh_CN.GBK
..
zh_CN.eucCN
..
zh_CN.UTF-8
..
zh_HK.UTF-8
..
zh_TW.Big5
..
zh_TW.UTF-8
..
..
man
man1
..
man2
..
man3
..
man4
aarch64
..
amd64
..
arm
..
i386
..
powerpc
..
sparc64
..
..
man5
..
man6
..
man7
..
man8
amd64
..
i386
..
powerpc
..
sparc64
..
..
man9
..
..
misc
fonts
..
..
mk
..
nls
C
..
af_ZA.ISO8859-1
..
af_ZA.ISO8859-15
..
af_ZA.UTF-8
..
am_ET.UTF-8
..
be_BY.CP1131
..
be_BY.CP1251
..
be_BY.ISO8859-5
..
be_BY.UTF-8
..
bg_BG.CP1251
..
bg_BG.UTF-8
..
ca_ES.ISO8859-1
..
ca_ES.ISO8859-15
..
ca_ES.UTF-8
..
cs_CZ.ISO8859-2
..
cs_CZ.UTF-8
..
da_DK.ISO8859-1
..
da_DK.ISO8859-15
..
da_DK.UTF-8
..
de_AT.ISO8859-1
..
de_AT.ISO8859-15
..
de_AT.UTF-8
..
de_CH.ISO8859-1
..
de_CH.ISO8859-15
..
de_CH.UTF-8
..
de_DE.ISO8859-1
..
de_DE.ISO8859-15
..
de_DE.UTF-8
..
el_GR.ISO8859-7
..
el_GR.UTF-8
..
en_AU.ISO8859-1
..
en_AU.ISO8859-15
..
en_AU.US-ASCII
..
en_AU.UTF-8
..
en_CA.ISO8859-1
..
en_CA.ISO8859-15
..
en_CA.US-ASCII
..
en_CA.UTF-8
..
en_GB.ISO8859-1
..
en_GB.ISO8859-15
..
en_GB.US-ASCII
..
en_GB.UTF-8
..
en_IE.UTF-8
..
en_NZ.ISO8859-1
..
en_NZ.ISO8859-15
..
en_NZ.US-ASCII
..
en_NZ.UTF-8
..
en_US.ISO8859-1
..
en_US.ISO8859-15
..
en_US.UTF-8
..
es_ES.ISO8859-1
..
es_ES.ISO8859-15
..
es_ES.UTF-8
..
et_EE.ISO8859-15
..
et_EE.UTF-8
..
fi_FI.ISO8859-1
..
fi_FI.ISO8859-15
..
fi_FI.UTF-8
..
fr_BE.ISO8859-1
..
fr_BE.ISO8859-15
..
fr_BE.UTF-8
..
fr_CA.ISO8859-1
..
fr_CA.ISO8859-15
..
fr_CA.UTF-8
..
fr_CH.ISO8859-1
..
fr_CH.ISO8859-15
..
fr_CH.UTF-8
..
fr_FR.ISO8859-1
..
fr_FR.ISO8859-15
..
fr_FR.UTF-8
..
gl_ES.ISO8859-1
..
he_IL.UTF-8
..
hi_IN.ISCII-DEV
..
hr_HR.ISO8859-2
..
hr_HR.UTF-8
..
hu_HU.ISO8859-2
..
hu_HU.UTF-8
..
hy_AM.ARMSCII-8
..
hy_AM.UTF-8
..
is_IS.ISO8859-1
..
is_IS.ISO8859-15
..
is_IS.UTF-8
..
it_CH.ISO8859-1
..
it_CH.ISO8859-15
..
it_CH.UTF-8
..
it_IT.ISO8859-1
..
it_IT.ISO8859-15
..
it_IT.UTF-8
..
ja_JP.SJIS
..
ja_JP.UTF-8
..
ja_JP.eucJP
..
kk_KZ.PT154
..
kk_KZ.UTF-8
..
ko_KR.CP949
..
ko_KR.UTF-8
..
ko_KR.eucKR
..
lt_LT.ISO8859-13
..
lt_LT.UTF-8
..
lv_LV.ISO8859-13
..
lv_LV.UTF-8
..
mn_MN.UTF-8
..
nl_BE.ISO8859-1
..
nl_BE.ISO8859-15
..
nl_BE.UTF-8
..
nl_NL.ISO8859-1
..
nl_NL.ISO8859-15
..
nl_NL.UTF-8
..
no_NO.ISO8859-1
..
no_NO.ISO8859-15
..
no_NO.UTF-8
..
pl_PL.ISO8859-2
..
pl_PL.UTF-8
..
pt_BR.ISO8859-1
..
pt_BR.UTF-8
..
pt_PT.ISO8859-1
..
pt_PT.ISO8859-15
..
pt_PT.UTF-8
..
ro_RO.ISO8859-2
..
ro_RO.UTF-8
..
ru_RU.CP1251
..
ru_RU.CP866
..
ru_RU.ISO8859-5
..
ru_RU.KOI8-R
..
ru_RU.UTF-8
..
sk_SK.ISO8859-2
..
sk_SK.UTF-8
..
sl_SI.ISO8859-2
..
sl_SI.UTF-8
..
sr_YU.ISO8859-2
..
sr_YU.ISO8859-5
..
sr_YU.UTF-8
..
sv_SE.ISO8859-1
..
sv_SE.ISO8859-15
..
sv_SE.UTF-8
..
tr_TR.ISO8859-9
..
tr_TR.UTF-8
..
uk_UA.ISO8859-5
..
uk_UA.KOI8-U
..
uk_UA.UTF-8
..
zh_CN.GB18030
..
zh_CN.GB2312
..
zh_CN.GBK
..
zh_CN.UTF-8
..
zh_CN.eucCN
..
zh_HK.UTF-8
..
zh_TW.UTF-8
..
..
openssl
man
man1
..
man3
..
..
..
pc-sysinstall
backend
..
backend-partmanager
..
backend-query
..
conf
license
..
..
doc
..
..
security
..
sendmail
..
skel
..
snmp
defs
..
mibs
..
..
syscons
fonts
..
keymaps
..
scrnmaps
..
..
tabset
..
vi
catalog
..
..
vt
fonts
..
keymaps
..
..
zoneinfo
Africa
..
America
Argentina
..
Indiana
..
Kentucky
..
North_Dakota
..
..
Antarctica
..
Arctic
..
Asia
..
Atlantic
..
Australia
..
Etc
..
Europe
..
Indian
..
Pacific
..
SystemV
..
..
..
src nochange
..
..
Index: stable/12/lib/clang/freebsd_cc_version.h
===================================================================
--- stable/12/lib/clang/freebsd_cc_version.h (revision 356464)
+++ stable/12/lib/clang/freebsd_cc_version.h (revision 356465)
@@ -1,3 +1,3 @@
/* $FreeBSD$ */
-#define FREEBSD_CC_VERSION 1200020
+#define FREEBSD_CC_VERSION 1200021
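
The FREEBSD_CC_VERSION bump is what lets source code tell the 9.0.0- and 9.0.1-based system compilers apart at preprocess time. A hedged sketch, assuming the value is surfaced as the __FreeBSD_cc_version predefined macro (the name mirrors the __FreeBSD_version-style numbering):

#include <cstdio>

int main() {
#if defined(__FreeBSD_cc_version) && __FreeBSD_cc_version >= 1200021
  std::puts("system clang from the 9.0.1 import (or newer)");
#elif defined(__FreeBSD_cc_version)
  std::printf("older system compiler: %ld\n", (long)__FreeBSD_cc_version);
#else
  std::puts("not a FreeBSD system compiler");
#endif
  return 0;
}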
Index: stable/12/lib/clang/headers/Makefile
===================================================================
--- stable/12/lib/clang/headers/Makefile (revision 356464)
+++ stable/12/lib/clang/headers/Makefile (revision 356465)
@@ -1,169 +1,170 @@
+
# $FreeBSD$
.include "../clang.pre.mk"
.PATH: ${CLANG_SRCS}/lib/Headers
INCSGROUPS= INCS CUDA OMP PPC
-INCSDIR= ${LIBDIR}/clang/9.0.0/include
+INCSDIR= ${LIBDIR}/clang/9.0.1/include
CUDADIR= ${INCSDIR}/cuda_wrappers
OMPDIR= ${INCSDIR}/openmp_wrappers
PPCDIR= ${INCSDIR}/ppc_wrappers
GENINCS+= arm_fp16.h
GENINCS+= arm_neon.h
INCS+= __clang_cuda_builtin_vars.h
INCS+= __clang_cuda_cmath.h
INCS+= __clang_cuda_complex_builtins.h
INCS+= __clang_cuda_device_functions.h
INCS+= __clang_cuda_intrinsics.h
INCS+= __clang_cuda_libdevice_declares.h
INCS+= __clang_cuda_math_forward_declares.h
INCS+= __clang_cuda_runtime_wrapper.h
INCS+= __stddef_max_align_t.h
INCS+= __wmmintrin_aes.h
INCS+= __wmmintrin_pclmul.h
INCS+= adxintrin.h
INCS+= altivec.h
INCS+= ammintrin.h
INCS+= arm64intr.h
INCS+= arm_acle.h
INCS+= armintr.h
INCS+= avx2intrin.h
INCS+= avx512bf16intrin.h
INCS+= avx512bitalgintrin.h
INCS+= avx512bwintrin.h
INCS+= avx512cdintrin.h
INCS+= avx512dqintrin.h
INCS+= avx512erintrin.h
INCS+= avx512fintrin.h
INCS+= avx512ifmaintrin.h
INCS+= avx512ifmavlintrin.h
INCS+= avx512pfintrin.h
INCS+= avx512vbmi2intrin.h
INCS+= avx512vbmiintrin.h
INCS+= avx512vbmivlintrin.h
INCS+= avx512vlbf16intrin.h
INCS+= avx512vlbitalgintrin.h
INCS+= avx512vlbwintrin.h
INCS+= avx512vlcdintrin.h
INCS+= avx512vldqintrin.h
INCS+= avx512vlintrin.h
INCS+= avx512vlvbmi2intrin.h
INCS+= avx512vlvnniintrin.h
INCS+= avx512vlvp2intersectintrin.h
INCS+= avx512vnniintrin.h
INCS+= avx512vp2intersectintrin.h
INCS+= avx512vpopcntdqintrin.h
INCS+= avx512vpopcntdqvlintrin.h
INCS+= avxintrin.h
INCS+= bmi2intrin.h
INCS+= bmiintrin.h
INCS+= cetintrin.h
INCS+= cldemoteintrin.h
INCS+= clflushoptintrin.h
INCS+= clwbintrin.h
INCS+= clzerointrin.h
INCS+= cpuid.h
INCS+= emmintrin.h
INCS+= enqcmdintrin.h
INCS+= f16cintrin.h
INCS+= fma4intrin.h
INCS+= fmaintrin.h
INCS+= fxsrintrin.h
INCS+= gfniintrin.h
INCS+= htmintrin.h
INCS+= htmxlintrin.h
INCS+= ia32intrin.h
INCS+= immintrin.h
INCS+= invpcidintrin.h
INCS+= lwpintrin.h
INCS+= lzcntintrin.h
INCS+= mm3dnow.h
INCS+= mm_malloc.h
INCS+= mmintrin.h
INCS+= module.modulemap
INCS+= movdirintrin.h
INCS+= msa.h
INCS+= mwaitxintrin.h
INCS+= nmmintrin.h
INCS+= opencl-c-base.h
INCS+= opencl-c.h
INCS+= pconfigintrin.h
INCS+= pkuintrin.h
INCS+= pmmintrin.h
INCS+= popcntintrin.h
INCS+= prfchwintrin.h
INCS+= ptwriteintrin.h
INCS+= rdseedintrin.h
INCS+= rtmintrin.h
INCS+= s390intrin.h
INCS+= sgxintrin.h
INCS+= shaintrin.h
INCS+= smmintrin.h
INCS+= tbmintrin.h
INCS+= tmmintrin.h
INCS+= vadefs.h
INCS+= vaesintrin.h
INCS+= vecintrin.h
INCS+= vpclmulqdqintrin.h
INCS+= waitpkgintrin.h
INCS+= wbnoinvdintrin.h
INCS+= wmmintrin.h
INCS+= x86intrin.h
INCS+= xmmintrin.h
INCS+= xopintrin.h
INCS+= xsavecintrin.h
INCS+= xsaveintrin.h
INCS+= xsaveoptintrin.h
INCS+= xsavesintrin.h
INCS+= xtestintrin.h
INCS+= ${GENINCS}
# Headers which possibly conflict with our own versions:
.if defined(INSTALL_CONFLICTING_CLANG_HEADERS)
INCS+= float.h
INCS+= intrin.h
INCS+= inttypes.h
INCS+= iso646.h
INCS+= limits.h
INCS+= stdalign.h
INCS+= stdarg.h
INCS+= stdatomic.h
INCS+= stdbool.h
INCS+= stddef.h
INCS+= stdint.h
INCS+= stdnoreturn.h
INCS+= tgmath.h
INCS+= unwind.h
INCS+= varargs.h
.endif
CUDA+= cuda_wrappers/algorithm
CUDA+= cuda_wrappers/complex
CUDA+= cuda_wrappers/new
OMP+= openmp_wrappers/__clang_openmp_math.h
OMP+= openmp_wrappers/__clang_openmp_math_declares.h
OMP+= openmp_wrappers/cmath
OMP+= openmp_wrappers/math.h
PPC+= ppc_wrappers/emmintrin.h
PPC+= ppc_wrappers/mm_malloc.h
PPC+= ppc_wrappers/mmintrin.h
PPC+= ppc_wrappers/xmmintrin.h
arm_fp16.h: ${CLANG_SRCS}/include/clang/Basic/arm_fp16.td
	${CLANG_TBLGEN} -gen-arm-fp16 \
	    -I ${CLANG_SRCS}/include/clang/Basic -d ${.TARGET:C/$/.d/} \
	    -o ${.TARGET} ${CLANG_SRCS}/include/clang/Basic/arm_fp16.td

arm_neon.h: ${CLANG_SRCS}/include/clang/Basic/arm_neon.td
	${CLANG_TBLGEN} -gen-arm-neon \
	    -I ${CLANG_SRCS}/include/clang/Basic -d ${.TARGET:C/$/.d/} \
	    -o ${.TARGET} ${CLANG_SRCS}/include/clang/Basic/arm_neon.td
CLEANFILES= ${GENINCS} ${GENINCS:C/$/.d/}
.include <bsd.prog.mk>
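
After this change the compiler's resource headers install under ${LIBDIR}/clang/9.0.1/include, where clang locates them on its own. A quick smoke test using immintrin.h from the INCS list above; the file name and the -mavx flag are illustrative, and an x86 target is assumed:

// smoke.cpp -- compile with: clang++ -mavx smoke.cpp
#include <immintrin.h> // resolved from the clang resource directory
#include <cstdio>

int main() {
  __m256 V = _mm256_set1_ps(2.0f); // eight packed floats, all 2.0
  __m256 Sq = _mm256_mul_ps(V, V); // elementwise square
  alignas(32) float Out[8];
  _mm256_store_ps(Out, Sq);        // aligned store back to memory
  std::printf("%f\n", Out[0]);     // prints 4.000000
  return 0;
}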
Index: stable/12/lib/clang/include/VCSVersion.inc
===================================================================
--- stable/12/lib/clang/include/VCSVersion.inc (revision 356464)
+++ stable/12/lib/clang/include/VCSVersion.inc (revision 356465)
@@ -1,14 +1,14 @@
// $FreeBSD$
-#define LLVM_REVISION "372316"
-#define LLVM_REPOSITORY "https://llvm.org/svn/llvm-project/llvm/tags/RELEASE_900/final"
+#define LLVM_REVISION "c1a0a213378a458fbea1a5c77b315c7dce08fd05"
+#define LLVM_REPOSITORY "git@github.com:llvm/llvm-project.git"
-#define CLANG_REVISION "372316"
-#define CLANG_REPOSITORY "https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_900/final"
+#define CLANG_REVISION "c1a0a213378a458fbea1a5c77b315c7dce08fd05"
+#define CLANG_REPOSITORY "git@github.com:llvm/llvm-project.git"
// <Upstream revision at import>-<Local identifier in __FreeBSD_version style>
-#define LLD_REVISION "372316-1200009"
+#define LLD_REVISION "c1a0a213378a458fbea1a5c77b315c7dce08fd05-1200010"
#define LLD_REPOSITORY "FreeBSD"
-#define LLDB_REVISION "372316"
-#define LLDB_REPOSITORY "https://llvm.org/svn/llvm-project/lldb/tags/RELEASE_900/final"
+#define LLDB_REVISION "c1a0a213378a458fbea1a5c77b315c7dce08fd05"
+#define LLDB_REPOSITORY "git@github.com:llvm/llvm-project.git"
Index: stable/12/lib/clang/include/clang/Basic/Version.inc
===================================================================
--- stable/12/lib/clang/include/clang/Basic/Version.inc (revision 356464)
+++ stable/12/lib/clang/include/clang/Basic/Version.inc (revision 356465)
@@ -1,9 +1,9 @@
/* $FreeBSD$ */
-#define CLANG_VERSION 9.0.0
-#define CLANG_VERSION_STRING "9.0.0"
+#define CLANG_VERSION 9.0.1
+#define CLANG_VERSION_STRING "9.0.1"
#define CLANG_VERSION_MAJOR 9
#define CLANG_VERSION_MINOR 0
-#define CLANG_VERSION_PATCHLEVEL 0
+#define CLANG_VERSION_PATCHLEVEL 1
#define CLANG_VENDOR "FreeBSD "
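
The CLANG_VERSION_* values above are what the compiler reports through its predefined version macros, so a translation unit can check which system clang is building it:

#include <cstdio>

int main() {
#ifdef __clang__
  // On stable/12 this reports 9.0.1 after the import.
  std::printf("clang %d.%d.%d\n", __clang_major__, __clang_minor__,
              __clang_patchlevel__);
#else
  std::puts("not built with clang");
#endif
  return 0;
}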
Index: stable/12/lib/clang/include/clang/Config/config.h
===================================================================
--- stable/12/lib/clang/include/clang/Config/config.h (revision 356464)
+++ stable/12/lib/clang/include/clang/Config/config.h (revision 356465)
@@ -1,84 +1,84 @@
/* $FreeBSD$ */
/* This generated file is for internal use. Do not include it from headers. */
#ifdef CLANG_CONFIG_H
#error config.h can only be included once
#else
#define CLANG_CONFIG_H
/* Bug report URL. */
#define BUG_REPORT_URL "https://bugs.freebsd.org/submit/"
/* Default linker to use. */
#define CLANG_DEFAULT_LINKER ""
/* Default C/ObjC standard to use. */
/* #undef CLANG_DEFAULT_STD_C */
/* Default C++/ObjC++ standard to use. */
/* #undef CLANG_DEFAULT_STD_CXX */
/* Default C++ stdlib to use. */
#define CLANG_DEFAULT_CXX_STDLIB ""
/* Default runtime library to use. */
#define CLANG_DEFAULT_RTLIB ""
/* Default unwind library to use. */
#define CLANG_DEFAULT_UNWINDLIB ""
/* Default objcopy to use */
#define CLANG_DEFAULT_OBJCOPY "objcopy"
/* Default OpenMP runtime used by -fopenmp. */
#define CLANG_DEFAULT_OPENMP_RUNTIME "libomp"
/* Default architecture for OpenMP offloading to Nvidia GPUs. */
#define CLANG_OPENMP_NVPTX_DEFAULT_ARCH "sm_35"
/* Multilib suffix for libdir. */
#define CLANG_LIBDIR_SUFFIX ""
/* Relative directory for resource files */
#define CLANG_RESOURCE_DIR ""
/* Directories clang will search for headers */
#define C_INCLUDE_DIRS ""
/* Directories clang will search for configuration files */
/* #undef CLANG_CONFIG_FILE_SYSTEM_DIR */
/* #undef CLANG_CONFIG_FILE_USER_DIR */
/* Default <path> to all compiler invocations for --sysroot=<path>. */
/* #undef DEFAULT_SYSROOT */
/* Directory where gcc is installed. */
#define GCC_INSTALL_PREFIX ""
/* Define if we have libxml2 */
/* #undef CLANG_HAVE_LIBXML */
/* Define if we have sys/resource.h (rlimits) */
#define CLANG_HAVE_RLIMITS 1
/* The LLVM product name and version */
-#define BACKEND_PACKAGE_STRING "LLVM 9.0.0"
+#define BACKEND_PACKAGE_STRING "LLVM 9.0.1"
/* Linker version detected at compile time. */
/* #undef HOST_LINK_VERSION */
/* pass --build-id to ld */
/* #undef ENABLE_LINKER_BUILD_ID */
/* enable x86 relax relocations by default */
#define ENABLE_X86_RELAX_RELOCATIONS 0
/* Enable the experimental new pass manager by default */
#define ENABLE_EXPERIMENTAL_NEW_PASS_MANAGER 0
/* Enable each functionality of modules */
/* #undef CLANG_ENABLE_ARCMT */
/* #undef CLANG_ENABLE_OBJC_REWRITER */
/* #undef CLANG_ENABLE_STATIC_ANALYZER */
#endif
Index: stable/12/lib/clang/include/lld/Common/Version.inc
===================================================================
--- stable/12/lib/clang/include/lld/Common/Version.inc (revision 356464)
+++ stable/12/lib/clang/include/lld/Common/Version.inc (revision 356465)
@@ -1,6 +1,6 @@
// $FreeBSD$
-#define LLD_VERSION 9.0.0
-#define LLD_VERSION_STRING "9.0.0"
+#define LLD_VERSION 9.0.1
+#define LLD_VERSION_STRING "9.0.1"
#define LLD_VERSION_MAJOR 9
#define LLD_VERSION_MINOR 0
Index: stable/12/lib/clang/include/llvm/Config/config.h
===================================================================
--- stable/12/lib/clang/include/llvm/Config/config.h (revision 356464)
+++ stable/12/lib/clang/include/llvm/Config/config.h (revision 356465)
@@ -1,357 +1,357 @@
/* $FreeBSD$ */
#ifndef CONFIG_H
#define CONFIG_H
/* Exported configuration */
#include "llvm/Config/llvm-config.h"
/* Bug report URL. */
#define BUG_REPORT_URL "https://bugs.freebsd.org/submit/"
/* Define to 1 to enable backtraces, and to 0 otherwise. */
#define ENABLE_BACKTRACES 1
/* Define to 1 to enable crash overrides, and to 0 otherwise. */
#define ENABLE_CRASH_OVERRIDES 1
/* Define to 1 to enable crash memory dumps, and to 0 otherwise. */
#define LLVM_ENABLE_CRASH_DUMPS 0
/* Define to 1 if you have the `backtrace' function. */
#define HAVE_BACKTRACE TRUE
#define BACKTRACE_HEADER <execinfo.h>
/* Define to 1 if you have the <CrashReporterClient.h> header file. */
/* #undef HAVE_CRASHREPORTERCLIENT_H */
/* can use __crashreporter_info__ */
#define HAVE_CRASHREPORTER_INFO 0
/* Define to 1 if you have the declaration of `arc4random', and to 0 if you
don't. */
#define HAVE_DECL_ARC4RANDOM 1
/* Define to 1 if you have the declaration of `FE_ALL_EXCEPT', and to 0 if you
don't. */
#define HAVE_DECL_FE_ALL_EXCEPT 1
/* Define to 1 if you have the declaration of `FE_INEXACT', and to 0 if you
don't. */
#define HAVE_DECL_FE_INEXACT 1
/* Define to 1 if you have the declaration of `strerror_s', and to 0 if you
don't. */
#define HAVE_DECL_STRERROR_S 0
/* Define to 1 if you have the DIA SDK installed, and to 0 if you don't. */
#define LLVM_ENABLE_DIA_SDK 0
/* Define to 1 if you have the <dlfcn.h> header file. */
#define HAVE_DLFCN_H 1
/* Define if dlopen() is available on this platform. */
#define HAVE_DLOPEN 1
/* Define if dladdr() is available on this platform. */
#define HAVE_DLADDR 1
/* Define to 1 if you have the <errno.h> header file. */
#define HAVE_ERRNO_H 1
/* Define to 1 if you have the <fcntl.h> header file. */
#define HAVE_FCNTL_H 1
/* Define to 1 if you have the <fenv.h> header file. */
#define HAVE_FENV_H 1
/* Define if libffi is available on this platform. */
/* #undef HAVE_FFI_CALL */
/* Define to 1 if you have the <ffi/ffi.h> header file. */
/* #undef HAVE_FFI_FFI_H */
/* Define to 1 if you have the <ffi.h> header file. */
/* #undef HAVE_FFI_H */
/* Define to 1 if you have the `futimens' function. */
#define HAVE_FUTIMENS 1
/* Define to 1 if you have the `futimes' function. */
#define HAVE_FUTIMES 1
/* Define to 1 if you have the `getpagesize' function. */
#define HAVE_GETPAGESIZE 1
/* Define to 1 if you have the `getrlimit' function. */
#define HAVE_GETRLIMIT 1
/* Define to 1 if you have the `getrusage' function. */
#define HAVE_GETRUSAGE 1
/* Define to 1 if you have the `isatty' function. */
#define HAVE_ISATTY 1
/* Define to 1 if you have the `edit' library (-ledit). */
#define HAVE_LIBEDIT 1
/* Define to 1 if you have the `pfm' library (-lpfm). */
/* #undef HAVE_LIBPFM */
/* Define to 1 if you have the `psapi' library (-lpsapi). */
/* #undef HAVE_LIBPSAPI */
/* Define to 1 if you have the `pthread' library (-lpthread). */
#define HAVE_LIBPTHREAD 1
/* Define to 1 if you have the `pthread_getname_np' function. */
/* #undef HAVE_PTHREAD_GETNAME_NP */
/* Define to 1 if you have the `pthread_setname_np' function. */
/* #undef HAVE_PTHREAD_SETNAME_NP */
/* Define to 1 if you have the `z' library (-lz). */
#define HAVE_LIBZ 1
/* Define to 1 if you have the <link.h> header file. */
#define HAVE_LINK_H 1
/* Define to 1 if you have the `lseek64' function. */
/* #undef HAVE_LSEEK64 */
/* Define to 1 if you have the <mach/mach.h> header file. */
/* #undef HAVE_MACH_MACH_H */
/* Define to 1 if you have the `mallctl' function. */
#define HAVE_MALLCTL 1
/* Define to 1 if you have the `mallinfo' function. */
/* #undef HAVE_MALLINFO */
/* Define to 1 if you have the <malloc/malloc.h> header file. */
/* #undef HAVE_MALLOC_MALLOC_H */
/* Define to 1 if you have the `malloc_zone_statistics' function. */
/* #undef HAVE_MALLOC_ZONE_STATISTICS */
/* Define to 1 if you have the `posix_fallocate' function. */
#define HAVE_POSIX_FALLOCATE 1
/* Define to 1 if you have the `posix_spawn' function. */
#define HAVE_POSIX_SPAWN 1
/* Define to 1 if you have the `pread' function. */
#define HAVE_PREAD 1
/* Have pthread_getspecific */
#define HAVE_PTHREAD_GETSPECIFIC 1
/* Define to 1 if you have the <pthread.h> header file. */
#define HAVE_PTHREAD_H 1
/* Have pthread_mutex_lock */
#define HAVE_PTHREAD_MUTEX_LOCK 1
/* Have pthread_rwlock_init */
#define HAVE_PTHREAD_RWLOCK_INIT 1
/* Define to 1 if you have the `sbrk' function. */
#define HAVE_SBRK 1
/* Define to 1 if you have the `setenv' function. */
#define HAVE_SETENV 1
/* Define to 1 if you have the `sched_getaffinity' function. */
/* #undef HAVE_SCHED_GETAFFINITY */
/* Define to 1 if you have the `CPU_COUNT' macro. */
/* #undef HAVE_CPU_COUNT */
/* Define to 1 if you have the `setrlimit' function. */
#define HAVE_SETRLIMIT 1
/* Define to 1 if you have the `sigaltstack' function. */
#define HAVE_SIGALTSTACK 1
/* Define to 1 if you have the <signal.h> header file. */
#define HAVE_SIGNAL_H 1
/* Define to 1 if you have the `strerror' function. */
#define HAVE_STRERROR 1
/* Define to 1 if you have the `strerror_r' function. */
#define HAVE_STRERROR_R 1
/* Define to 1 if you have the `sysconf' function. */
#define HAVE_SYSCONF 1
/* Define to 1 if you have the <sys/ioctl.h> header file. */
#define HAVE_SYS_IOCTL_H 1
/* Define to 1 if you have the <sys/mman.h> header file. */
#define HAVE_SYS_MMAN_H 1
/* Define to 1 if you have the <sys/param.h> header file. */
#define HAVE_SYS_PARAM_H 1
/* Define to 1 if you have the <sys/resource.h> header file. */
#define HAVE_SYS_RESOURCE_H 1
/* Define to 1 if you have the <sys/stat.h> header file. */
#define HAVE_SYS_STAT_H 1
/* Define to 1 if you have the <sys/time.h> header file. */
#define HAVE_SYS_TIME_H 1
/* Define to 1 if stat struct has st_mtimespec member. */
#define HAVE_STRUCT_STAT_ST_MTIMESPEC_TV_NSEC 1
/* Define to 1 if stat struct has st_mtim member. */
#define HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC 1
/* Define to 1 if you have the <sys/types.h> header file. */
#define HAVE_SYS_TYPES_H 1
/* Define if the setupterm() function is supported on this platform. */
#define HAVE_TERMINFO 1
/* Define if the xar_open() function is supported on this platform. */
/* #undef HAVE_LIBXAR */
/* Define to 1 if you have the <termios.h> header file. */
#define HAVE_TERMIOS_H 1
/* Define to 1 if you have the <unistd.h> header file. */
#define HAVE_UNISTD_H 1
/* Define to 1 if you have the <valgrind/valgrind.h> header file. */
/* #undef HAVE_VALGRIND_VALGRIND_H */
/* Define to 1 if you have the <zlib.h> header file. */
#define HAVE_ZLIB_H 1
/* Have host's _alloca */
/* #undef HAVE__ALLOCA */
/* Define to 1 if you have the `_chsize_s' function. */
/* #undef HAVE__CHSIZE_S */
/* Define to 1 if you have the `_Unwind_Backtrace' function. */
/* #undef HAVE__UNWIND_BACKTRACE */
/* Have host's __alloca */
/* #undef HAVE___ALLOCA */
/* Have host's __ashldi3 */
/* #undef HAVE___ASHLDI3 */
/* Have host's __ashrdi3 */
/* #undef HAVE___ASHRDI3 */
/* Have host's __chkstk */
/* #undef HAVE___CHKSTK */
/* Have host's __chkstk_ms */
/* #undef HAVE___CHKSTK_MS */
/* Have host's __cmpdi2 */
/* #undef HAVE___CMPDI2 */
/* Have host's __divdi3 */
/* #undef HAVE___DIVDI3 */
/* Have host's __fixdfdi */
/* #undef HAVE___FIXDFDI */
/* Have host's __fixsfdi */
/* #undef HAVE___FIXSFDI */
/* Have host's __floatdidf */
/* #undef HAVE___FLOATDIDF */
/* Have host's __lshrdi3 */
/* #undef HAVE___LSHRDI3 */
/* Have host's __main */
/* #undef HAVE___MAIN */
/* Have host's __moddi3 */
/* #undef HAVE___MODDI3 */
/* Have host's __udivdi3 */
/* #undef HAVE___UDIVDI3 */
/* Have host's __umoddi3 */
/* #undef HAVE___UMODDI3 */
/* Have host's ___chkstk */
/* #undef HAVE____CHKSTK */
/* Have host's ___chkstk_ms */
/* #undef HAVE____CHKSTK_MS */
/* Linker version detected at compile time. */
/* #undef HOST_LINK_VERSION */
/* Target triple LLVM will generate code for by default */
/* Doesn't use `cmakedefine` because it is allowed to be empty. */
/* #undef LLVM_DEFAULT_TARGET_TRIPLE */
/* Define if zlib compression is available */
#define LLVM_ENABLE_ZLIB 1
/* Define if overriding target triple is enabled */
/* #undef LLVM_TARGET_TRIPLE_ENV */
/* LLVM version information */
/* #undef LLVM_VERSION_INFO */
/* Whether tools show host and target info when invoked with --version */
#define LLVM_VERSION_PRINTER_SHOW_HOST_TARGET_INFO 1
/* Define if libxml2 is supported on this platform. */
/* #undef LLVM_LIBXML2_ENABLED */
/* Define to the extension used for shared libraries, say, ".so". */
#define LTDL_SHLIB_EXT ".so"
/* Define to the address where bug reports for this package should be sent. */
#define PACKAGE_BUGREPORT "https://bugs.freebsd.org/submit/"
/* Define to the full name of this package. */
#define PACKAGE_NAME "LLVM"
/* Define to the full name and version of this package. */
-#define PACKAGE_STRING "LLVM 9.0.0"
+#define PACKAGE_STRING "LLVM 9.0.1"
/* Define to the version of this package. */
-#define PACKAGE_VERSION "9.0.0"
+#define PACKAGE_VERSION "9.0.1"
/* Define to the vendor of this package. */
/* #undef PACKAGE_VENDOR */
/* Define as the return type of signal handlers (`int' or `void'). */
#define RETSIGTYPE void
/* Define if std::is_trivially_copyable is supported */
#define HAVE_STD_IS_TRIVIALLY_COPYABLE 1
/* Define to a function implementing stricmp */
/* #undef stricmp */
/* Define to a function implementing strdup */
/* #undef strdup */
/* Whether GlobalISel rule coverage is being collected */
#define LLVM_GISEL_COV_ENABLED 0
/* Define if we have z3 and want to build it */
/* #undef LLVM_WITH_Z3 */
/* Define to the default GlobalISel coverage file prefix */
/* #undef LLVM_GISEL_COV_PREFIX */
/* Whether Timers signpost passes in Xcode Instruments */
#define LLVM_SUPPORT_XCODE_SIGNPOSTS 0
#endif
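
Feature macros like the ones above are consumed with guards of the following shape; this standalone sketch hard-codes the two FreeBSD answers from the hunk (HAVE_BACKTRACE and BACKTRACE_HEADER) so it compiles without the llvm tree. On FreeBSD, link with -lexecinfo:

#include <cstdio>
#define HAVE_BACKTRACE 1 // from the config.h above
#define BACKTRACE_HEADER <execinfo.h>

#ifdef HAVE_BACKTRACE
#include BACKTRACE_HEADER // computed #include, as llvm's Signals code does

static void dumpStack() {
  void *Frames[64];
  int Depth = backtrace(Frames, 64);      // capture return addresses
  backtrace_symbols_fd(Frames, Depth, 2); // symbolize to stderr (fd 2)
}
#endif

int main() {
#ifdef HAVE_BACKTRACE
  dumpStack();
#else
  std::puts("built without backtrace support");
#endif
  return 0;
}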
Index: stable/12/lib/clang/include/llvm/Config/llvm-config.h
===================================================================
--- stable/12/lib/clang/include/llvm/Config/llvm-config.h (revision 356464)
+++ stable/12/lib/clang/include/llvm/Config/llvm-config.h (revision 356465)
@@ -1,86 +1,86 @@
/* $FreeBSD$ */
/*===------- llvm/Config/llvm-config.h - llvm configuration -------*- C -*-===*/
/* */
/* Part of the LLVM Project, under the Apache License v2.0 with LLVM */
/* Exceptions. */
/* See https://llvm.org/LICENSE.txt for license information. */
/* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception */
/* */
/*===----------------------------------------------------------------------===*/
/* This file enumerates variables from the LLVM configuration so that they
can be in exported headers and won't override package specific directives.
This is a C header that can be included in the llvm-c headers. */
#ifndef LLVM_CONFIG_H
#define LLVM_CONFIG_H
/* Define if LLVM_ENABLE_DUMP is enabled */
/* #undef LLVM_ENABLE_DUMP */
/* Define if we link Polly to the tools */
/* #undef LINK_POLLY_INTO_TOOLS */
/* Target triple LLVM will generate code for by default */
/* #undef LLVM_DEFAULT_TARGET_TRIPLE */
/* Define if threads enabled */
#define LLVM_ENABLE_THREADS 1
/* Has gcc/MSVC atomic intrinsics */
#define LLVM_HAS_ATOMICS 1
/* Host triple LLVM will be executed on */
/* #undef LLVM_HOST_TRIPLE */
/* LLVM architecture name for the native architecture, if available */
/* #undef LLVM_NATIVE_ARCH */
/* LLVM name for the native AsmParser init function, if available */
/* #undef LLVM_NATIVE_ASMPARSER */
/* LLVM name for the native AsmPrinter init function, if available */
/* #undef LLVM_NATIVE_ASMPRINTER */
/* LLVM name for the native Disassembler init function, if available */
/* #undef LLVM_NATIVE_DISASSEMBLER */
/* LLVM name for the native Target init function, if available */
/* #undef LLVM_NATIVE_TARGET */
/* LLVM name for the native TargetInfo init function, if available */
/* #undef LLVM_NATIVE_TARGETINFO */
/* LLVM name for the native target MC init function, if available */
/* #undef LLVM_NATIVE_TARGETMC */
/* Define if this is Unixish platform */
#define LLVM_ON_UNIX 1
/* Define if we have the Intel JIT API runtime support library */
#define LLVM_USE_INTEL_JITEVENTS 0
/* Define if we have the oprofile JIT-support library */
#define LLVM_USE_OPROFILE 0
/* Define if we have the perf JIT-support library */
#define LLVM_USE_PERF 0
/* Major version of the LLVM API */
#define LLVM_VERSION_MAJOR 9
/* Minor version of the LLVM API */
#define LLVM_VERSION_MINOR 0
/* Patch version of the LLVM API */
-#define LLVM_VERSION_PATCH 0
+#define LLVM_VERSION_PATCH 1
/* LLVM version string */
-#define LLVM_VERSION_STRING "9.0.0"
+#define LLVM_VERSION_STRING "9.0.1"
/* Whether LLVM records statistics for use with GetStatistics(),
* PrintStatistics() or PrintStatisticsJSON()
*/
#define LLVM_FORCE_ENABLE_STATS 0
#endif
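
Downstream code that links against these llvm libraries typically gates API differences on the version macros above; a minimal sketch, assuming the llvm headers are on the include path:

#include "llvm/Config/llvm-config.h"
#include <cstdio>

int main() {
#if LLVM_VERSION_MAJOR == 9 && LLVM_VERSION_PATCH >= 1
  std::printf("built against llvm %s\n", LLVM_VERSION_STRING); // "9.0.1" here
#else
  std::printf("built against llvm %d.%d.%d\n", LLVM_VERSION_MAJOR,
              LLVM_VERSION_MINOR, LLVM_VERSION_PATCH);
#endif
  return 0;
}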
Index: stable/12/lib/clang/include/llvm/Support/VCSRevision.h
===================================================================
--- stable/12/lib/clang/include/llvm/Support/VCSRevision.h (revision 356464)
+++ stable/12/lib/clang/include/llvm/Support/VCSRevision.h (revision 356465)
@@ -1,3 +1,3 @@
/* $FreeBSD$ */
-#define LLVM_REVISION "372316"
-#define LLVM_REPOSITORY "https://llvm.org/svn/llvm-project/llvm/tags/RELEASE_900/final"
+#define LLVM_REVISION "c1a0a213378a458fbea1a5c77b315c7dce08fd05"
+#define LLVM_REPOSITORY "git@github.com:llvm/llvm-project.git"
Index: stable/12/lib/libclang_rt/Makefile.inc
===================================================================
--- stable/12/lib/libclang_rt/Makefile.inc (revision 356464)
+++ stable/12/lib/libclang_rt/Makefile.inc (revision 356465)
@@ -1,45 +1,45 @@
# $FreeBSD$
.include <bsd.compiler.mk>
# armv[67] is a bit special since we allow a soft-floating version via
# CPUTYPE matching *soft*. This variant may not actually work though.
.if ${MACHINE_ARCH:Marmv[67]*} != "" && \
    (!defined(CPUTYPE) || ${CPUTYPE:M*soft*} == "")
CRTARCH?= armhf
.else
CRTARCH?= ${MACHINE_CPUARCH:C/amd64/x86_64/}
.endif
CRTSRC= ${SRCTOP}/contrib/llvm-project/compiler-rt
.PATH: ${CRTSRC}/lib
-CLANGDIR= /usr/lib/clang/9.0.0
+CLANGDIR= /usr/lib/clang/9.0.1
LIBDIR= ${CLANGDIR}/lib/freebsd
SHLIBDIR= ${LIBDIR}
NO_PIC=
MK_PROFILE= no
WARNS?= 0
SSP_CFLAGS=
CFLAGS+= -DNDEBUG
CFLAGS+= -DHAVE_RPC_XDR_H=0
CFLAGS+= -DHAVE_TIRPC_RPC_XDR_H=0
CFLAGS+= -DSANITIZER_SUPPORTS_WEAK_HOOKS=0
CFLAGS+= -DUBSAN_CAN_USE_CXXABI
CFLAGS+= ${PICFLAG}
CFLAGS+= -fno-builtin
CFLAGS+= -fno-exceptions
CXXFLAGS+= -fno-rtti
.if ${COMPILER_TYPE} == clang && ${COMPILER_VERSION} >= 30700
CFLAGS+= -fno-sanitize=safe-stack
.endif
CFLAGS+= -fno-stack-protector
CFLAGS+= -funwind-tables
CXXFLAGS+= -fvisibility-inlines-hidden
CXXFLAGS+= -fvisibility=hidden
CFLAGS+= -I${CRTSRC}/include
CFLAGS+= -I${CRTSRC}/lib
CXXSTD= c++11
Index: stable/12/tools/build/mk/OptionalObsoleteFiles.inc
===================================================================
--- stable/12/tools/build/mk/OptionalObsoleteFiles.inc (revision 356464)
+++ stable/12/tools/build/mk/OptionalObsoleteFiles.inc (revision 356465)
@@ -1,10628 +1,10629 @@
#
# $FreeBSD$
#
# This file adds support for the WITHOUT_* and WITH_* knobs in src.conf(5) to
# the check-old and delete-old* targets.
#
.if ${MK_ACCT} == no
OLD_FILES+=etc/rc.d/accounting
OLD_FILES+=etc/periodic/daily/310.accounting
OLD_FILES+=usr/sbin/accton
OLD_FILES+=usr/sbin/sa
OLD_FILES+=usr/share/man/man8/accton.8.gz
OLD_FILES+=usr/share/man/man8/sa.8.gz
OLD_FILES+=usr/tests/usr.sbin/sa/Kyuafile
OLD_FILES+=usr/tests/usr.sbin/sa/legacy_test
OLD_FILES+=usr/tests/usr.sbin/sa/v1-amd64-sav.in
OLD_FILES+=usr/tests/usr.sbin/sa/v1-amd64-sav.out
OLD_FILES+=usr/tests/usr.sbin/sa/v1-amd64-u.out
OLD_FILES+=usr/tests/usr.sbin/sa/v1-amd64-usr.in
OLD_FILES+=usr/tests/usr.sbin/sa/v1-amd64-usr.out
OLD_FILES+=usr/tests/usr.sbin/sa/v1-i386-sav.in
OLD_FILES+=usr/tests/usr.sbin/sa/v1-i386-sav.out
OLD_FILES+=usr/tests/usr.sbin/sa/v1-i386-u.out
OLD_FILES+=usr/tests/usr.sbin/sa/v1-i386-usr.in
OLD_FILES+=usr/tests/usr.sbin/sa/v1-i386-usr.out
OLD_FILES+=usr/tests/usr.sbin/sa/v1-sparc64-sav.in
OLD_FILES+=usr/tests/usr.sbin/sa/v1-sparc64-sav.out
OLD_FILES+=usr/tests/usr.sbin/sa/v1-sparc64-u.out
OLD_FILES+=usr/tests/usr.sbin/sa/v1-sparc64-usr.in
OLD_FILES+=usr/tests/usr.sbin/sa/v1-sparc64-usr.out
OLD_FILES+=usr/tests/usr.sbin/sa/v2-amd64-sav.in
OLD_FILES+=usr/tests/usr.sbin/sa/v2-amd64-u.out
OLD_FILES+=usr/tests/usr.sbin/sa/v2-amd64-usr.in
OLD_FILES+=usr/tests/usr.sbin/sa/v2-i386-sav.in
OLD_FILES+=usr/tests/usr.sbin/sa/v2-i386-u.out
OLD_FILES+=usr/tests/usr.sbin/sa/v2-i386-usr.in
OLD_FILES+=usr/tests/usr.sbin/sa/v2-sparc64-sav.in
OLD_FILES+=usr/tests/usr.sbin/sa/v2-sparc64-u.out
OLD_FILES+=usr/tests/usr.sbin/sa/v2-sparc64-usr.in
OLD_DIRS+=usr/tests/usr.sbin/sa
.endif
.if ${MK_ACPI} == no
OLD_FILES+=etc/devd/asus.conf
OLD_FILES+=etc/rc.d/power_profile
OLD_FILES+=usr/sbin/acpiconf
OLD_FILES+=usr/sbin/acpidb
OLD_FILES+=usr/sbin/acpidump
OLD_FILES+=usr/sbin/iasl
OLD_FILES+=usr/share/man/man8/acpiconf.8.gz
OLD_FILES+=usr/share/man/man8/acpidb.8.gz
OLD_FILES+=usr/share/man/man8/acpidump.8.gz
OLD_FILES+=usr/share/man/man8/iasl.8.gz
.endif
.if ${MK_AMD} == no
OLD_FILES+=etc/amd.map
OLD_FILES+=etc/newsyslog.conf.d/amd.conf
OLD_FILES+=etc/rc.d/amd
OLD_FILES+=usr/bin/pawd
OLD_FILES+=usr/sbin/amd
OLD_FILES+=usr/sbin/amq
OLD_FILES+=usr/sbin/fixmount
OLD_FILES+=usr/sbin/fsinfo
OLD_FILES+=usr/sbin/hlfsd
OLD_FILES+=usr/sbin/mk-amd-map
OLD_FILES+=usr/sbin/wire-test
OLD_FILES+=usr/share/examples/etc/amd.map
OLD_FILES+=usr/share/man/man1/pawd.1.gz
OLD_FILES+=usr/share/man/man5/amd.conf.5.gz
OLD_FILES+=usr/share/man/man8/amd.8.gz
OLD_FILES+=usr/share/man/man8/amq.8.gz
OLD_FILES+=usr/share/man/man8/fixmount.8.gz
OLD_FILES+=usr/share/man/man8/fsinfo.8.gz
OLD_FILES+=usr/share/man/man8/hlfsd.8.gz
OLD_FILES+=usr/share/man/man8/mk-amd-map.8.gz
OLD_FILES+=usr/share/man/man8/wire-test.8.gz
.endif
.if ${MK_APM} == no
OLD_FILES+=etc/rc.d/apm
OLD_FILES+=etc/rc.d/apmd
OLD_FILES+=etc/apmd.conf
OLD_FILES+=usr/sbin/apm
OLD_FILES+=usr/share/examples/etc/apmd.conf
OLD_FILES+=usr/share/man/man8/amd64/apm.8.gz
OLD_FILES+=usr/share/man/man8/amd64/apmconf.8.gz
.endif
.if ${MK_AT} == no
OLD_FILES+=etc/pam.d/atrun
OLD_FILES+=usr/bin/at
OLD_FILES+=usr/bin/atq
OLD_FILES+=usr/bin/atrm
OLD_FILES+=usr/bin/batch
OLD_FILES+=usr/libexec/atrun
OLD_FILES+=usr/share/man/man1/at.1.gz
OLD_FILES+=usr/share/man/man1/atq.1.gz
OLD_FILES+=usr/share/man/man1/atrm.1.gz
OLD_FILES+=usr/share/man/man1/batch.1.gz
OLD_FILES+=usr/share/man/man8/atrun.8.gz
.endif
.if ${MK_ATM} == no
OLD_FILES+=usr/bin/sscop
OLD_FILES+=usr/include/netnatm/addr.h
OLD_FILES+=usr/include/netnatm/api/atmapi.h
OLD_FILES+=usr/include/netnatm/api/ccatm.h
OLD_FILES+=usr/include/netnatm/api/unisap.h
OLD_DIRS+=usr/include/netnatm/api
OLD_FILES+=usr/include/netnatm/msg/uni_config.h
OLD_FILES+=usr/include/netnatm/msg/uni_hdr.h
OLD_FILES+=usr/include/netnatm/msg/uni_ie.h
OLD_FILES+=usr/include/netnatm/msg/uni_msg.h
OLD_FILES+=usr/include/netnatm/msg/unimsglib.h
OLD_FILES+=usr/include/netnatm/msg/uniprint.h
OLD_FILES+=usr/include/netnatm/msg/unistruct.h
OLD_DIRS+=usr/include/netnatm/msg
OLD_FILES+=usr/include/netnatm/saal/sscfu.h
OLD_FILES+=usr/include/netnatm/saal/sscfudef.h
OLD_FILES+=usr/include/netnatm/saal/sscop.h
OLD_FILES+=usr/include/netnatm/saal/sscopdef.h
OLD_DIRS+=usr/include/netnatm/saal
OLD_FILES+=usr/include/netnatm/sig/uni.h
OLD_FILES+=usr/include/netnatm/sig/unidef.h
OLD_FILES+=usr/include/netnatm/sig/unisig.h
OLD_DIRS+=usr/include/netnatm/sig
OLD_FILES+=usr/include/netnatm/unimsg.h
OLD_FILES+=usr/lib/libngatm.a
OLD_FILES+=usr/lib/libngatm.so
OLD_LIBS+=usr/lib/libngatm.so.4
OLD_FILES+=usr/lib/libngatm_p.a
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64"
OLD_FILES+=usr/lib32/libngatm.a
OLD_FILES+=usr/lib32/libngatm.so
OLD_LIBS+=usr/lib32/libngatm.so.4
OLD_FILES+=usr/lib32/libngatm_p.a
.endif
OLD_FILES+=usr/share/man/man1/sscop.1.gz
OLD_FILES+=usr/share/man/man3/libngatm.3.gz
OLD_FILES+=usr/share/man/man3/uniaddr.3.gz
OLD_FILES+=usr/share/man/man3/unifunc.3.gz
OLD_FILES+=usr/share/man/man3/unimsg.3.gz
OLD_FILES+=usr/share/man/man3/unisap.3.gz
OLD_FILES+=usr/share/man/man3/unistruct.3.gz
.endif
.if ${MK_AUDIT} == no
OLD_FILES+=etc/rc.d/auditd
OLD_FILES+=etc/rc.d/auditdistd
OLD_FILES+=usr/sbin/audit
OLD_FILES+=usr/sbin/auditd
OLD_FILES+=usr/sbin/auditdistd
OLD_FILES+=usr/sbin/auditreduce
OLD_FILES+=usr/sbin/praudit
OLD_FILES+=usr/share/man/man1/auditreduce.1.gz
OLD_FILES+=usr/share/man/man1/praudit.1.gz
OLD_FILES+=usr/share/man/man5/auditdistd.conf.5.gz
OLD_FILES+=usr/share/man/man8/audit.8.gz
OLD_FILES+=usr/share/man/man8/auditd.8.gz
OLD_FILES+=usr/share/man/man8/auditdistd.8.gz
OLD_FILES+=usr/tests/sys/audit/process-control
OLD_FILES+=usr/tests/sys/audit/open
OLD_FILES+=usr/tests/sys/audit/network
OLD_FILES+=usr/tests/sys/audit/miscellaneous
OLD_FILES+=usr/tests/sys/audit/Kyuafile
OLD_FILES+=usr/tests/sys/audit/ioctl
OLD_FILES+=usr/tests/sys/audit/inter-process
OLD_FILES+=usr/tests/sys/audit/file-write
OLD_FILES+=usr/tests/sys/audit/file-read
OLD_FILES+=usr/tests/sys/audit/file-delete
OLD_FILES+=usr/tests/sys/audit/file-create
OLD_FILES+=usr/tests/sys/audit/file-close
OLD_FILES+=usr/tests/sys/audit/file-attribute-modify
OLD_FILES+=usr/tests/sys/audit/file-attribute-access
OLD_FILES+=usr/tests/sys/audit/administrative
OLD_DIRS+=usr/tests/sys/audit
.endif
.if ${MK_AUTHPF} == no
OLD_FILES+=usr/sbin/authpf
OLD_FILES+=usr/sbin/authpf-noip
OLD_FILES+=usr/share/man/man8/authpf.8.gz
OLD_FILES+=usr/share/man/man8/authpf-noip.8.gz
.endif
.if ${MK_AUTOFS} == no
OLD_FILES+=etc/autofs/include_ldap
OLD_FILES+=etc/autofs/special_hosts
OLD_FILES+=etc/autofs/special_media
OLD_FILES+=etc/autofs/special_noauto
OLD_FILES+=etc/autofs/special_null
OLD_FILES+=etc/auto_master
OLD_FILES+=etc/rc.d/automount
OLD_FILES+=etc/rc.d/automountd
OLD_FILES+=etc/rc.d/autounmountd
OLD_FILES+=usr/sbin/automount
OLD_FILES+=usr/sbin/automountd
OLD_FILES+=usr/sbin/autounmountd
OLD_FILES+=usr/share/man/man5/autofs.5.gz
OLD_FILES+=usr/share/man/man5/auto_master.5.gz
OLD_FILES+=usr/share/man/man8/automount.8.gz
OLD_FILES+=usr/share/man/man8/automountd.8.gz
OLD_FILES+=usr/share/man/man8/autounmountd.8.gz
OLD_DIRS+=etc/autofs
.endif
.if ${MK_BHYVE} == no
OLD_FILES+=usr/lib/libvmmapi.a
OLD_FILES+=usr/lib/libvmmapi.so
OLD_LIBS+=usr/lib/libvmmapi.so.5
OLD_FILES+=usr/include/vmmapi.h
OLD_FILES+=usr/sbin/bhyve
OLD_FILES+=usr/sbin/bhyvectl
OLD_FILES+=usr/sbin/bhyveload
OLD_FILES+=usr/share/examples/bhyve/vmrun.sh
OLD_FILES+=usr/share/man/man8/bhyve.8.gz
OLD_FILES+=usr/share/man/man8/bhyveload.8.gz
OLD_DIRS+=usr/share/examples/bhyve
.endif
.if ${MK_BINUTILS} == no
OLD_FILES+=usr/bin/as
.if ${MK_LLD_IS_LD} == no
OLD_FILES+=usr/bin/ld
OLD_FILES+=usr/share/man/man1/ld.1.gz
.endif
OLD_FILES+=usr/bin/objdump
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.xw
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.xw
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.x
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xbn
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xc
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xd
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xdc
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xdw
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xn
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xr
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xs
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xsc
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xsw
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xu
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xw
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.xw
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.xw
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.xw
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.xw
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.xw
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.x
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xbn
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xc
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xd
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xdc
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xdw
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xn
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xr
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xs
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xsc
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xsw
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xu
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xw
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.xw
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.xw
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.xw
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.xw
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.xw
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.xw
OLD_FILES+=usr/share/man/man1/as.1.gz
OLD_FILES+=usr/share/man/man1/objdump.1.gz
OLD_FILES+=usr/share/man/man7/as.7.gz
OLD_FILES+=usr/share/man/man7/ld.7.gz
OLD_FILES+=usr/share/man/man7/ldint.7.gz
OLD_FILES+=usr/share/man/man7/binutils.7.gz
.endif
.if ${MK_BINUTILS} == no || ${MK_LLD_IS_LD} == yes
OLD_FILES+=usr/bin/ld.bfd
.endif
.if ${MK_BLACKLIST} == no
OLD_FILES+=etc/rc.d/blacklistd
OLD_FILES+=usr/include/blacklist.h
OLD_FILES+=usr/lib/libblacklist.a
OLD_FILES+=usr/lib/libblacklist_p.a
OLD_FILES+=usr/lib/libblacklist.so
OLD_LIBS+=usr/lib/libblacklist.so.0
OLD_FILES+=usr/libexec/blacklistd-helper
OLD_FILES+=usr/sbin/blacklistctl
OLD_FILES+=usr/sbin/blacklistd
OLD_FILES+=usr/share/man/man3/blacklist.3.gz
OLD_FILES+=usr/share/man/man3/blacklist_close.3.gz
OLD_FILES+=usr/share/man/man3/blacklist_open.3.gz
OLD_FILES+=usr/share/man/man3/blacklist_r.3.gz
OLD_FILES+=usr/share/man/man3/blacklist_sa.3.gz
OLD_FILES+=usr/share/man/man3/blacklist_sa_r.3.gz
OLD_FILES+=usr/share/man/man5/blacklistd.conf.5.gz
OLD_FILES+=usr/share/man/man8/blacklistctl.8.gz
OLD_FILES+=usr/share/man/man8/blacklistd.8.gz
.endif
.if ${MK_BLUETOOTH} == no
OLD_FILES+=etc/bluetooth/hcsecd.conf
OLD_FILES+=etc/bluetooth/hosts
OLD_FILES+=etc/bluetooth/protocols
OLD_FILES+=etc/defaults/bluetooth.device.conf
OLD_FILES+=etc/devd/iwmbtfw.conf
OLD_DIRS+=etc/bluetooth
OLD_FILES+=etc/rc.d/bluetooth
OLD_FILES+=etc/rc.d/bthidd
OLD_FILES+=etc/rc.d/hcsecd
OLD_FILES+=etc/rc.d/rfcomm_pppd_server
OLD_FILES+=etc/rc.d/sdpd
OLD_FILES+=etc/rc.d/ubthidhci
OLD_FILES+=usr/bin/bthost
OLD_FILES+=usr/bin/btsockstat
OLD_FILES+=usr/bin/rfcomm_sppd
OLD_FILES+=usr/include/bluetooth.h
OLD_FILES+=usr/include/netgraph/bluetooth/include/ng_bluetooth.h
OLD_FILES+=usr/include/netgraph/bluetooth/include/ng_bt3c.h
OLD_FILES+=usr/include/netgraph/bluetooth/include/ng_btsocket.h
OLD_FILES+=usr/include/netgraph/bluetooth/include/ng_btsocket_hci_raw.h
OLD_FILES+=usr/include/netgraph/bluetooth/include/ng_btsocket_l2cap.h
OLD_FILES+=usr/include/netgraph/bluetooth/include/ng_btsocket_rfcomm.h
OLD_FILES+=usr/include/netgraph/bluetooth/include/ng_btsocket_sco.h
OLD_FILES+=usr/include/netgraph/bluetooth/include/ng_h4.h
OLD_FILES+=usr/include/netgraph/bluetooth/include/ng_hci.h
OLD_FILES+=usr/include/netgraph/bluetooth/include/ng_l2cap.h
OLD_FILES+=usr/include/netgraph/bluetooth/include/ng_ubt.h
OLD_DIRS+=usr/include/netgraph/bluetooth/include
OLD_DIRS+=usr/include/netgraph/bluetooth
OLD_FILES+=usr/include/sdp.h
OLD_FILES+=usr/lib/libbluetooth.a
OLD_FILES+=usr/lib/libbluetooth.so
OLD_LIBS+=usr/lib/libbluetooth.so.4
OLD_FILES+=usr/lib/libbluetooth_p.a
OLD_FILES+=usr/lib/libsdp.a
OLD_FILES+=usr/lib/libsdp.so
OLD_LIBS+=usr/lib/libsdp.so.4
OLD_FILES+=usr/lib/libsdp_p.a
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64"
OLD_FILES+=usr/lib32/libbluetooth.a
OLD_FILES+=usr/lib32/libbluetooth.so
OLD_LIBS+=usr/lib32/libbluetooth.so.4
OLD_FILES+=usr/lib32/libbluetooth_p.a
OLD_FILES+=usr/lib32/libsdp.a
OLD_FILES+=usr/lib32/libsdp.so
OLD_LIBS+=usr/lib32/libsdp.so.4
OLD_FILES+=usr/lib32/libsdp_p.a
.endif
OLD_FILES+=usr/sbin/ath3kfw
OLD_FILES+=usr/sbin/bcmfw
OLD_FILES+=usr/sbin/bluetooth-config
OLD_FILES+=usr/sbin/bt3cfw
OLD_FILES+=usr/sbin/bthidcontrol
OLD_FILES+=usr/sbin/bthidd
OLD_FILES+=usr/sbin/btpand
OLD_FILES+=usr/sbin/hccontrol
OLD_FILES+=usr/sbin/hcsecd
OLD_FILES+=usr/sbin/hcseriald
OLD_FILES+=usr/sbin/iwmbtfw
OLD_FILES+=usr/sbin/l2control
OLD_FILES+=usr/sbin/l2ping
OLD_FILES+=usr/sbin/rfcomm_pppd
OLD_FILES+=usr/sbin/sdpcontrol
OLD_FILES+=usr/sbin/sdpd
OLD_FILES+=usr/share/examples/etc/defaults/bluetooth.device.conf
OLD_FILES+=usr/share/man/man1/bthost.1.gz
OLD_FILES+=usr/share/man/man1/btsockstat.1.gz
OLD_FILES+=usr/share/man/man1/rfcomm_sppd.1.gz
OLD_FILES+=usr/share/man/man3/SDP_GET128.3.gz
OLD_FILES+=usr/share/man/man3/SDP_GET16.3.gz
OLD_FILES+=usr/share/man/man3/SDP_GET32.3.gz
OLD_FILES+=usr/share/man/man3/SDP_GET64.3.gz
OLD_FILES+=usr/share/man/man3/SDP_GET8.3.gz
OLD_FILES+=usr/share/man/man3/SDP_PUT128.3.gz
OLD_FILES+=usr/share/man/man3/SDP_PUT16.3.gz
OLD_FILES+=usr/share/man/man3/SDP_PUT32.3.gz
OLD_FILES+=usr/share/man/man3/SDP_PUT64.3.gz
OLD_FILES+=usr/share/man/man3/SDP_PUT8.3.gz
OLD_FILES+=usr/share/man/man3/bdaddr_any.3.gz
OLD_FILES+=usr/share/man/man3/bdaddr_copy.3.gz
OLD_FILES+=usr/share/man/man3/bdaddr_same.3.gz
OLD_FILES+=usr/share/man/man3/bluetooth.3.gz
OLD_FILES+=usr/share/man/man3/bt_aton.3.gz
OLD_FILES+=usr/share/man/man3/bt_devaddr.3.gz
OLD_FILES+=usr/share/man/man3/bt_devclose.3.gz
OLD_FILES+=usr/share/man/man3/bt_devenum.3.gz
OLD_FILES+=usr/share/man/man3/bt_devfilter.3.gz
OLD_FILES+=usr/share/man/man3/bt_devfilter_evt_clr.3.gz
OLD_FILES+=usr/share/man/man3/bt_devfilter_evt_set.3.gz
OLD_FILES+=usr/share/man/man3/bt_devfilter_evt_tst.3.gz
OLD_FILES+=usr/share/man/man3/bt_devfilter_pkt_clr.3.gz
OLD_FILES+=usr/share/man/man3/bt_devfilter_pkt_set.3.gz
OLD_FILES+=usr/share/man/man3/bt_devfilter_pkt_tst.3.gz
OLD_FILES+=usr/share/man/man3/bt_devinfo.3.gz
OLD_FILES+=usr/share/man/man3/bt_devinquiry.3.gz
OLD_FILES+=usr/share/man/man3/bt_devname.3.gz
OLD_FILES+=usr/share/man/man3/bt_devopen.3.gz
OLD_FILES+=usr/share/man/man3/bt_devreq.3.gz
OLD_FILES+=usr/share/man/man3/bt_devsend.3.gz
OLD_FILES+=usr/share/man/man3/bt_endhostent.3.gz
OLD_FILES+=usr/share/man/man3/bt_endprotoent.3.gz
OLD_FILES+=usr/share/man/man3/bt_gethostbyaddr.3.gz
OLD_FILES+=usr/share/man/man3/bt_gethostbyname.3.gz
OLD_FILES+=usr/share/man/man3/bt_gethostent.3.gz
OLD_FILES+=usr/share/man/man3/bt_getprotobyname.3.gz
OLD_FILES+=usr/share/man/man3/bt_getprotobynumber.3.gz
OLD_FILES+=usr/share/man/man3/bt_getprotoent.3.gz
OLD_FILES+=usr/share/man/man3/bt_ntoa.3.gz
OLD_FILES+=usr/share/man/man3/bt_sethostent.3.gz
OLD_FILES+=usr/share/man/man3/bt_setprotoent.3.gz
OLD_FILES+=usr/share/man/man3/sdp.3.gz
OLD_FILES+=usr/share/man/man3/sdp_attr2desc.3.gz
OLD_FILES+=usr/share/man/man3/sdp_change_service.3.gz
OLD_FILES+=usr/share/man/man3/sdp_close.3.gz
OLD_FILES+=usr/share/man/man3/sdp_error.3.gz
OLD_FILES+=usr/share/man/man3/sdp_open.3.gz
OLD_FILES+=usr/share/man/man3/sdp_open_local.3.gz
OLD_FILES+=usr/share/man/man3/sdp_register_service.3.gz
OLD_FILES+=usr/share/man/man3/sdp_search.3.gz
OLD_FILES+=usr/share/man/man3/sdp_unregister_service.3.gz
OLD_FILES+=usr/share/man/man3/sdp_uuid2desc.3.gz
OLD_FILES+=usr/share/man/man4/ng_bluetooth.4.gz
OLD_FILES+=usr/share/man/man5/bluetooth.device.conf.5.gz
OLD_FILES+=usr/share/man/man5/bluetooth.hosts.5.gz
OLD_FILES+=usr/share/man/man5/bluetooth.protocols.5.gz
OLD_FILES+=usr/share/man/man5/hcsecd.conf.5.gz
OLD_FILES+=usr/share/man/man8/ath3kfw.8.gz
OLD_FILES+=usr/share/man/man8/bcmfw.8.gz
OLD_FILES+=usr/share/man/man8/bluetooth-config.8.gz
OLD_FILES+=usr/share/man/man8/bt3cfw.8.gz
OLD_FILES+=usr/share/man/man8/bthidcontrol.8.gz
OLD_FILES+=usr/share/man/man8/bthidd.8.gz
OLD_FILES+=usr/share/man/man8/btpand.8.gz
OLD_FILES+=usr/share/man/man8/hccontrol.8.gz
OLD_FILES+=usr/share/man/man8/hcsecd.8.gz
OLD_FILES+=usr/share/man/man8/hcseriald.8.gz
OLD_FILES+=usr/share/man/man8/iwmbtfw.8.gz
OLD_FILES+=usr/share/man/man8/l2control.8.gz
OLD_FILES+=usr/share/man/man8/l2ping.8.gz
OLD_FILES+=usr/share/man/man8/rfcomm_pppd.8.gz
OLD_FILES+=usr/share/man/man8/sdpcontrol.8.gz
OLD_FILES+=usr/share/man/man8/sdpd.8.gz
.endif
.if ${MK_BOOT} == no
OLD_FILES+=boot/beastie.4th
OLD_FILES+=boot/boot
OLD_FILES+=boot/boot0
OLD_FILES+=boot/boot0sio
OLD_FILES+=boot/boot1
OLD_FILES+=boot/boot1.efi
OLD_FILES+=boot/boot1.efifat
OLD_FILES+=boot/boot2
OLD_FILES+=boot/brand.4th
OLD_FILES+=boot/cdboot
OLD_FILES+=boot/check-password.4th
OLD_FILES+=boot/color.4th
OLD_FILES+=boot/defaults/loader.conf
OLD_FILES+=boot/delay.4th
OLD_FILES+=boot/device.hints
OLD_FILES+=boot/frames.4th
OLD_FILES+=boot/gptboot
OLD_FILES+=boot/gptzfsboot
OLD_FILES+=boot/loader
OLD_FILES+=boot/loader.4th
OLD_FILES+=boot/loader.efi
OLD_FILES+=boot/loader.help
OLD_FILES+=boot/loader.rc
OLD_FILES+=boot/mbr
OLD_FILES+=boot/menu-commands.4th
OLD_FILES+=boot/menu.4th
OLD_FILES+=boot/menu.rc
OLD_FILES+=boot/menusets.4th
OLD_FILES+=boot/pcibios.4th
OLD_FILES+=boot/pmbr
OLD_FILES+=boot/pxeboot
OLD_FILES+=boot/screen.4th
OLD_FILES+=boot/shortcuts.4th
OLD_FILES+=boot/support.4th
OLD_FILES+=boot/userboot.so
OLD_FILES+=boot/version.4th
OLD_FILES+=boot/zfsboot
OLD_FILES+=boot/zfsloader
OLD_FILES+=usr/lib/kgzldr.o
OLD_FILES+=usr/share/man/man5/loader.conf.5.gz
OLD_FILES+=usr/share/man/man8/beastie.4th.8.gz
OLD_FILES+=usr/share/man/man8/brand.4th.8.gz
OLD_FILES+=usr/share/man/man8/check-password.4th.8.gz
OLD_FILES+=usr/share/man/man8/color.4th.8.gz
OLD_FILES+=usr/share/man/man8/delay.4th.8.gz
OLD_FILES+=usr/share/man/man8/gptboot.8.gz
OLD_FILES+=usr/share/man/man8/gptzfsboot.8.gz
OLD_FILES+=usr/share/man/man8/loader.4th.8.gz
OLD_FILES+=usr/share/man/man8/loader.8.gz
OLD_FILES+=usr/share/man/man8/menu.4th.8.gz
OLD_FILES+=usr/share/man/man8/menusets.4th.8.gz
OLD_FILES+=usr/share/man/man8/pxeboot.8.gz
OLD_FILES+=usr/share/man/man8/version.4th.8.gz
OLD_FILES+=usr/share/man/man8/zfsboot.8.gz
OLD_FILES+=usr/share/man/man8/zfsloader.8.gz
.endif
.if ${MK_BOOTPARAMD} == no
OLD_FILES+=usr/sbin/bootparamd
OLD_FILES+=usr/share/man/man5/bootparams.5.gz
OLD_FILES+=usr/share/man/man8/bootparamd.8.gz
OLD_FILES+=usr/sbin/callbootd
.endif
.if ${MK_BOOTPD} == no
OLD_FILES+=usr/libexec/bootpd
OLD_FILES+=usr/share/man/man5/bootptab.5.gz
OLD_FILES+=usr/share/man/man8/bootpd.8.gz
OLD_FILES+=usr/libexec/bootpgw
OLD_FILES+=usr/sbin/bootpef
OLD_FILES+=usr/share/man/man8/bootpef.8.gz
OLD_FILES+=usr/sbin/bootptest
OLD_FILES+=usr/share/man/man8/bootptest.8.gz
.endif
.if ${MK_BSD_CPIO} == no
OLD_FILES+=usr/bin/bsdcpio
OLD_FILES+=usr/bin/cpio
OLD_FILES+=usr/share/man/man1/bsdcpio.1.gz
OLD_FILES+=usr/share/man/man1/cpio.1.gz
.endif
.if ${MK_BSDINSTALL} == no
OLD_FILES+=usr/libexec/bsdinstall/adduser
OLD_FILES+=usr/libexec/bsdinstall/auto
OLD_FILES+=usr/libexec/bsdinstall/autopart
OLD_FILES+=usr/libexec/bsdinstall/bootconfig
OLD_FILES+=usr/libexec/bsdinstall/checksum
OLD_FILES+=usr/libexec/bsdinstall/config
OLD_FILES+=usr/libexec/bsdinstall/distextract
OLD_FILES+=usr/libexec/bsdinstall/distfetch
OLD_FILES+=usr/libexec/bsdinstall/docsinstall
OLD_FILES+=usr/libexec/bsdinstall/entropy
OLD_FILES+=usr/libexec/bsdinstall/hardening
OLD_FILES+=usr/libexec/bsdinstall/hostname
OLD_FILES+=usr/libexec/bsdinstall/jail
OLD_FILES+=usr/libexec/bsdinstall/keymap
OLD_FILES+=usr/libexec/bsdinstall/mirrorselect
OLD_FILES+=usr/libexec/bsdinstall/mount
OLD_FILES+=usr/libexec/bsdinstall/netconfig
OLD_FILES+=usr/libexec/bsdinstall/netconfig_ipv4
OLD_FILES+=usr/libexec/bsdinstall/netconfig_ipv6
OLD_FILES+=usr/libexec/bsdinstall/partedit
OLD_FILES+=usr/libexec/bsdinstall/rootpass
OLD_FILES+=usr/libexec/bsdinstall/script
OLD_FILES+=usr/libexec/bsdinstall/scriptedpart
OLD_FILES+=usr/libexec/bsdinstall/services
OLD_FILES+=usr/libexec/bsdinstall/time
OLD_FILES+=usr/libexec/bsdinstall/umount
OLD_FILES+=usr/libexec/bsdinstall/wlanconfig
OLD_FILES+=usr/libexec/bsdinstall/zfsboot
OLD_FILES+=usr/sbin/bsdinstall
OLD_FILES+=usr/share/man/man8/bsdinstall.8.gz
OLD_FILES+=usr/share/man/man8/sade.8.gz
OLD_DIRS+=usr/libexec/bsdinstall
.endif
.if ${MK_BSNMP} == no
OLD_FILES+=etc/snmpd.config
OLD_FILES+=etc/rc.d/bsnmpd
OLD_FILES+=usr/bin/bsnmpget
OLD_FILES+=usr/bin/bsnmpset
OLD_FILES+=usr/bin/bsnmpwalk
OLD_FILES+=usr/include/bsnmp/asn1.h
OLD_FILES+=usr/include/bsnmp/bridge_snmp.h
OLD_FILES+=usr/include/bsnmp/snmp.h
OLD_FILES+=usr/include/bsnmp/snmp_mibII.h
OLD_FILES+=usr/include/bsnmp/snmp_netgraph.h
OLD_FILES+=usr/include/bsnmp/snmpagent.h
OLD_FILES+=usr/include/bsnmp/snmpclient.h
OLD_FILES+=usr/include/bsnmp/snmpmod.h
OLD_FILES+=usr/lib/libbsnmp.a
OLD_FILES+=usr/lib/libbsnmp.so
OLD_LIBS+=usr/lib/libbsnmp.so.6
OLD_FILES+=usr/lib/libbsnmp_p.a
OLD_FILES+=usr/lib/libbsnmptools.a
OLD_FILES+=usr/lib/libbsnmptools.so
OLD_LIBS+=usr/lib/libbsnmptools.so.0
OLD_FILES+=usr/lib/libbsnmptools_p.a
OLD_FILES+=usr/lib/snmp_bridge.so
OLD_LIBS+=usr/lib/snmp_bridge.so.6
OLD_FILES+=usr/lib/snmp_hast.so
OLD_LIBS+=usr/lib/snmp_hast.so.6
OLD_FILES+=usr/lib/snmp_hostres.so
OLD_LIBS+=usr/lib/snmp_hostres.so.6
OLD_FILES+=usr/lib/snmp_lm75.so
OLD_LIBS+=usr/lib/snmp_lm75.so.6
OLD_FILES+=usr/lib/snmp_mibII.so
OLD_LIBS+=usr/lib/snmp_mibII.so.6
OLD_FILES+=usr/lib/snmp_netgraph.so
OLD_LIBS+=usr/lib/snmp_netgraph.so.6
OLD_FILES+=usr/lib/snmp_pf.so
OLD_LIBS+=usr/lib/snmp_pf.so.6
OLD_FILES+=usr/lib/snmp_target.so
OLD_LIBS+=usr/lib/snmp_target.so.6
OLD_FILES+=usr/lib/snmp_usm.so
OLD_LIBS+=usr/lib/snmp_usm.so.6
OLD_FILES+=usr/lib/snmp_vacm.so
OLD_LIBS+=usr/lib/snmp_vacm.so.6
OLD_FILES+=usr/lib/snmp_wlan.so
OLD_LIBS+=usr/lib/snmp_wlan.so.6
OLD_FILES+=usr/lib32/libbsnmp.a
OLD_FILES+=usr/lib32/libbsnmp.so
OLD_LIBS+=usr/lib32/libbsnmp.so.6
OLD_FILES+=usr/lib32/libbsnmp_p.a
OLD_FILES+=usr/sbin/bsnmpd
OLD_FILES+=usr/sbin/gensnmptree
OLD_FILES+=usr/share/examples/etc/snmpd.config
OLD_FILES+=usr/share/man/man1/bsnmpd.1.gz
OLD_FILES+=usr/share/man/man1/bsnmpget.1.gz
OLD_FILES+=usr/share/man/man1/bsnmpset.1.gz
OLD_FILES+=usr/share/man/man1/bsnmpwalk.1.gz
OLD_FILES+=usr/share/man/man1/gensnmptree.1.gz
# lib/libbsnmp/libbsnmp
OLD_FILES+=usr/share/man/man3/TRUTH_GET.3.gz
OLD_FILES+=usr/share/man/man3/TRUTH_MK.3.gz
OLD_FILES+=usr/share/man/man3/TRUTH_OK.3.gz
OLD_FILES+=usr/share/man/man3/asn1.3.gz
OLD_FILES+=usr/share/man/man3/asn_append_oid.3.gz
OLD_FILES+=usr/share/man/man3/asn_commit_header.3.gz
OLD_FILES+=usr/share/man/man3/asn_compare_oid.3.gz
OLD_FILES+=usr/share/man/man3/asn_get_counter64_raw.3.gz
OLD_FILES+=usr/share/man/man3/asn_get_header.3.gz
OLD_FILES+=usr/share/man/man3/asn_get_integer.3.gz
OLD_FILES+=usr/share/man/man3/asn_get_integer_raw.3.gz
OLD_FILES+=usr/share/man/man3/asn_get_ipaddress.3.gz
OLD_FILES+=usr/share/man/man3/asn_get_ipaddress_raw.3.gz
OLD_FILES+=usr/share/man/man3/asn_get_null.3.gz
OLD_FILES+=usr/share/man/man3/asn_get_null_raw.3.gz
OLD_FILES+=usr/share/man/man3/asn_get_objid.3.gz
OLD_FILES+=usr/share/man/man3/asn_get_objid_raw.3.gz
OLD_FILES+=usr/share/man/man3/asn_get_octetstring.3.gz
OLD_FILES+=usr/share/man/man3/asn_get_octetstring_raw.3.gz
OLD_FILES+=usr/share/man/man3/asn_get_sequence.3.gz
OLD_FILES+=usr/share/man/man3/asn_get_timeticks.3.gz
OLD_FILES+=usr/share/man/man3/asn_get_uint32_raw.3.gz
OLD_FILES+=usr/share/man/man3/asn_is_suboid.3.gz
OLD_FILES+=usr/share/man/man3/asn_oid2str.3.gz
OLD_FILES+=usr/share/man/man3/asn_oid2str_r.3.gz
OLD_FILES+=usr/share/man/man3/asn_put_counter64.3.gz
OLD_FILES+=usr/share/man/man3/asn_put_exception.3.gz
OLD_FILES+=usr/share/man/man3/asn_put_header.3.gz
OLD_FILES+=usr/share/man/man3/asn_put_integer.3.gz
OLD_FILES+=usr/share/man/man3/asn_put_ipaddress.3.gz
OLD_FILES+=usr/share/man/man3/asn_put_null.3.gz
OLD_FILES+=usr/share/man/man3/asn_put_objid.3.gz
OLD_FILES+=usr/share/man/man3/asn_put_octetstring.3.gz
OLD_FILES+=usr/share/man/man3/asn_put_temp_header.3.gz
OLD_FILES+=usr/share/man/man3/asn_put_timeticks.3.gz
OLD_FILES+=usr/share/man/man3/asn_put_uint32.3.gz
OLD_FILES+=usr/share/man/man3/asn_skip.3.gz
OLD_FILES+=usr/share/man/man3/asn_slice_oid.3.gz
OLD_FILES+=usr/share/man/man3/snmp_add_binding.3.gz
OLD_FILES+=usr/share/man/man3/snmp_calc_keychange.3.gz
OLD_FILES+=usr/share/man/man3/snmp_client.3.gz
OLD_FILES+=usr/share/man/man3/snmp_client_init.3.gz
OLD_FILES+=usr/share/man/man3/snmp_client_set_host.3.gz
OLD_FILES+=usr/share/man/man3/snmp_client_set_port.3.gz
OLD_FILES+=usr/share/man/man3/snmp_close.3.gz
OLD_FILES+=usr/share/man/man3/snmp_debug.3.gz
OLD_FILES+=usr/share/man/man3/snmp_dep_commit.3.gz
OLD_FILES+=usr/share/man/man3/snmp_dep_finish.3.gz
OLD_FILES+=usr/share/man/man3/snmp_dep_lookup.3.gz
OLD_FILES+=usr/share/man/man3/snmp_dep_rollback.3.gz
OLD_FILES+=usr/share/man/man3/snmp_depop_t.3.gz
OLD_FILES+=usr/share/man/man3/snmp_dialog.3.gz
OLD_FILES+=usr/share/man/man3/snmp_discover_engine.3.gz
OLD_FILES+=usr/share/man/man3/snmp_get.3.gz
OLD_FILES+=usr/share/man/man3/snmp_get_local_keys.3.gz
OLD_FILES+=usr/share/man/man3/snmp_getbulk.3.gz
OLD_FILES+=usr/share/man/man3/snmp_getnext.3.gz
OLD_FILES+=usr/share/man/man3/snmp_init_context.3.gz
OLD_FILES+=usr/share/man/man3/snmp_make_errresp.3.gz
OLD_FILES+=usr/share/man/man3/snmp_oid_append.3.gz
OLD_FILES+=usr/share/man/man3/snmp_op_t.3.gz
OLD_FILES+=usr/share/man/man3/snmp_open.3.gz
OLD_FILES+=usr/share/man/man3/snmp_parse_server.3.gz
OLD_FILES+=usr/share/man/man3/snmp_passwd_to_keys.3.gz
OLD_FILES+=usr/share/man/man3/snmp_pdu_check.3.gz
OLD_FILES+=usr/share/man/man3/snmp_pdu_create.3.gz
OLD_FILES+=usr/share/man/man3/snmp_pdu_decode.3.gz
OLD_FILES+=usr/share/man/man3/snmp_pdu_decode_header.3.gz
OLD_FILES+=usr/share/man/man3/snmp_pdu_decode_scoped.3.gz
OLD_FILES+=usr/share/man/man3/snmp_pdu_decode_secmode.3.gz
OLD_FILES+=usr/share/man/man3/snmp_pdu_dump.3.gz
OLD_FILES+=usr/share/man/man3/snmp_pdu_encode.3.gz
OLD_FILES+=usr/share/man/man3/snmp_pdu_free.3.gz
OLD_FILES+=usr/share/man/man3/snmp_pdu_init_secparams.3.gz
OLD_FILES+=usr/share/man/man3/snmp_pdu_send.3.gz
OLD_FILES+=usr/share/man/man3/snmp_receive.3.gz
OLD_FILES+=usr/share/man/man3/snmp_send_cb_f.3.gz
OLD_FILES+=usr/share/man/man3/snmp_set.3.gz
OLD_FILES+=usr/share/man/man3/snmp_table_cb_f.3.gz
OLD_FILES+=usr/share/man/man3/snmp_table_fetch.3.gz
OLD_FILES+=usr/share/man/man3/snmp_table_fetch_async.3.gz
OLD_FILES+=usr/share/man/man3/snmp_timeout_cb_f.3.gz
OLD_FILES+=usr/share/man/man3/snmp_timeout_start_f.3.gz
OLD_FILES+=usr/share/man/man3/snmp_timeout_stop_f.3.gz
OLD_FILES+=usr/share/man/man3/snmp_trace.3.gz
OLD_FILES+=usr/share/man/man3/snmp_value_copy.3.gz
OLD_FILES+=usr/share/man/man3/snmp_value_free.3.gz
OLD_FILES+=usr/share/man/man3/snmp_value_parse.3.gz
OLD_FILES+=usr/share/man/man3/tree_size.3.gz
# usr.sbin/bsnmpd/bsnmpd
OLD_FILES+=usr/share/man/man3/FIND_OBJECT_INT.3.gz
OLD_FILES+=usr/share/man/man3/FIND_OBJECT_INT_LINK.3.gz
OLD_FILES+=usr/share/man/man3/FIND_OBJECT_INT_LINK_INDEX.3.gz
OLD_FILES+=usr/share/man/man3/FIND_OBJECT_OID.3.gz
OLD_FILES+=usr/share/man/man3/FIND_OBJECT_OID_LINK.3.gz
OLD_FILES+=usr/share/man/man3/FIND_OBJECT_OID_LINK_INDEX.3.gz
OLD_FILES+=usr/share/man/man3/INSERT_OBJECT_INT.3.gz
OLD_FILES+=usr/share/man/man3/INSERT_OBJECT_INT_LINK.3.gz
OLD_FILES+=usr/share/man/man3/INSERT_OBJECT_INT_LINK_INDEX.3.gz
OLD_FILES+=usr/share/man/man3/INSERT_OBJECT_OID.3.gz
OLD_FILES+=usr/share/man/man3/INSERT_OBJECT_OID_LINK.3.gz
OLD_FILES+=usr/share/man/man3/INSERT_OBJECT_OID_LINK_INDEX.3.gz
OLD_FILES+=usr/share/man/man3/NEXT_OBJECT_INT.3.gz
OLD_FILES+=usr/share/man/man3/NEXT_OBJECT_INT_LINK.3.gz
OLD_FILES+=usr/share/man/man3/NEXT_OBJECT_INT_LINK_INDEX.3.gz
OLD_FILES+=usr/share/man/man3/NEXT_OBJECT_OID.3.gz
OLD_FILES+=usr/share/man/man3/NEXT_OBJECT_OID_LINK.3.gz
OLD_FILES+=usr/share/man/man3/NEXT_OBJECT_OID_LINK_INDEX.3.gz
OLD_FILES+=usr/share/man/man3/bsnmpagent.3.gz
OLD_FILES+=usr/share/man/man3/bsnmpclient.3.gz
OLD_FILES+=usr/share/man/man3/bsnmpd_get_target_stats.3.gz
OLD_FILES+=usr/share/man/man3/bsnmpd_get_usm_stats.3.gz
OLD_FILES+=usr/share/man/man3/bsnmpd_reset_usm_stats.3.gz
OLD_FILES+=usr/share/man/man3/bsnmplib.3.gz
OLD_FILES+=usr/share/man/man3/buf_alloc.3.gz
OLD_FILES+=usr/share/man/man3/buf_size.3.gz
OLD_FILES+=usr/share/man/man3/comm_define.3.gz
OLD_FILES+=usr/share/man/man3/community.3.gz
OLD_FILES+=usr/share/man/man3/fd_deselect.3.gz
OLD_FILES+=usr/share/man/man3/fd_resume.3.gz
OLD_FILES+=usr/share/man/man3/fd_select.3.gz
OLD_FILES+=usr/share/man/man3/fd_suspend.3.gz
OLD_FILES+=usr/share/man/man3/get_ticks.3.gz
OLD_FILES+=usr/share/man/man3/index_append.3.gz
OLD_FILES+=usr/share/man/man3/index_append_off.3.gz
OLD_FILES+=usr/share/man/man3/index_compare.3.gz
OLD_FILES+=usr/share/man/man3/index_compare_off.3.gz
OLD_FILES+=usr/share/man/man3/index_decode.3.gz
OLD_FILES+=usr/share/man/man3/ip_commit.3.gz
OLD_FILES+=usr/share/man/man3/ip_get.3.gz
OLD_FILES+=usr/share/man/man3/ip_rollback.3.gz
OLD_FILES+=usr/share/man/man3/ip_save.3.gz
OLD_FILES+=usr/share/man/man3/or_register.3.gz
OLD_FILES+=usr/share/man/man3/or_unregister.3.gz
OLD_FILES+=usr/share/man/man3/oid_commit.3.gz
OLD_FILES+=usr/share/man/man3/oid_get.3.gz
OLD_FILES+=usr/share/man/man3/oid_rollback.3.gz
OLD_FILES+=usr/share/man/man3/oid_save.3.gz
OLD_FILES+=usr/share/man/man3/oid_usmNotInTimeWindows.3.gz
OLD_FILES+=usr/share/man/man3/oid_usmUnknownEngineIDs.3.gz
OLD_FILES+=usr/share/man/man3/oid_zeroDotZero.3.gz
OLD_FILES+=usr/share/man/man3/reqid_allocate.3.gz
OLD_FILES+=usr/share/man/man3/reqid_base.3.gz
OLD_FILES+=usr/share/man/man3/reqid_istype.3.gz
OLD_FILES+=usr/share/man/man3/reqid_next.3.gz
OLD_FILES+=usr/share/man/man3/reqid_type.3.gz
OLD_FILES+=usr/share/man/man3/snmp_bridge.3.gz
OLD_FILES+=usr/share/man/man3/snmp_hast.3.gz
OLD_FILES+=usr/share/man/man3/snmp_hostres.3.gz
OLD_FILES+=usr/share/man/man3/snmp_input_finish.3.gz
OLD_FILES+=usr/share/man/man3/snmp_input_start.3.gz
OLD_FILES+=usr/share/man/man3/snmp_lm75.3.gz
OLD_FILES+=usr/share/man/man3/snmp_mibII.3.gz
OLD_FILES+=usr/share/man/man3/snmp_netgraph.3.gz
OLD_FILES+=usr/share/man/man3/snmp_output.3.gz
OLD_FILES+=usr/share/man/man3/snmp_pdu_auth_access.3.gz
OLD_FILES+=usr/share/man/man3/snmp_send_port.3.gz
OLD_FILES+=usr/share/man/man3/snmp_send_trap.3.gz
OLD_FILES+=usr/share/man/man3/snmp_target.3.gz
OLD_FILES+=usr/share/man/man3/snmp_usm.3.gz
OLD_FILES+=usr/share/man/man3/snmp_vacm.3.gz
OLD_FILES+=usr/share/man/man3/snmp_wlan.3.gz
OLD_FILES+=usr/share/man/man3/snmpd_target_stat.3.gz
OLD_FILES+=usr/share/man/man3/snmpd_usmstats.3.gz
OLD_FILES+=usr/share/man/man3/snmpmod.3.gz
OLD_FILES+=usr/share/man/man3/start_tick.3.gz
OLD_FILES+=usr/share/man/man3/string_commit.3.gz
OLD_FILES+=usr/share/man/man3/string_free.3.gz
OLD_FILES+=usr/share/man/man3/string_get.3.gz
OLD_FILES+=usr/share/man/man3/string_get_max.3.gz
OLD_FILES+=usr/share/man/man3/string_rollback.3.gz
OLD_FILES+=usr/share/man/man3/string_save.3.gz
OLD_FILES+=usr/share/man/man3/systemg.3.gz
OLD_FILES+=usr/share/man/man3/this_tick.3.gz
OLD_FILES+=usr/share/man/man3/timer_start.3.gz
OLD_FILES+=usr/share/man/man3/timer_start_repeat.3.gz
OLD_FILES+=usr/share/man/man3/timer_stop.3.gz
OLD_FILES+=usr/share/man/man3/target_activate_address.3.gz
OLD_FILES+=usr/share/man/man3/target_address.3.gz
OLD_FILES+=usr/share/man/man3/target_delete_address.3.gz
OLD_FILES+=usr/share/man/man3/target_delete_notify.3.gz
OLD_FILES+=usr/share/man/man3/target_delete_param.3.gz
OLD_FILES+=usr/share/man/man3/target_first_address.3.gz
OLD_FILES+=usr/share/man/man3/target_first_notify.3.gz
OLD_FILES+=usr/share/man/man3/target_first_param.3.gz
OLD_FILES+=usr/share/man/man3/target_flush_all.3.gz
OLD_FILES+=usr/share/man/man3/target_next_address.3.gz
OLD_FILES+=usr/share/man/man3/target_next_notify.3.gz
OLD_FILES+=usr/share/man/man3/target_next_param.3.gz
OLD_FILES+=usr/share/man/man3/target_new_address.3.gz
OLD_FILES+=usr/share/man/man3/target_new_notify.3.gz
OLD_FILES+=usr/share/man/man3/target_new_param.3.gz
OLD_FILES+=usr/share/man/man3/target_notify.3.gz
OLD_FILES+=usr/share/man/man3/target_param.3.gz
OLD_FILES+=usr/share/man/man3/usm_delete_user.3.gz
OLD_FILES+=usr/share/man/man3/usm_find_user.3.gz
OLD_FILES+=usr/share/man/man3/usm_first_user.3.gz
OLD_FILES+=usr/share/man/man3/usm_flush_users.3.gz
OLD_FILES+=usr/share/man/man3/usm_next_user.3.gz
OLD_FILES+=usr/share/man/man3/usm_new_user.3.gz
OLD_FILES+=usr/share/man/man3/usm_user.3.gz
OLD_FILES+=usr/share/snmp/defs/bridge_tree.def
OLD_FILES+=usr/share/snmp/defs/hast_tree.def
OLD_FILES+=usr/share/snmp/defs/hostres_tree.def
OLD_FILES+=usr/share/snmp/defs/lm75_tree.def
OLD_FILES+=usr/share/snmp/defs/mibII_tree.def
OLD_FILES+=usr/share/snmp/defs/netgraph_tree.def
OLD_FILES+=usr/share/snmp/defs/pf_tree.def
OLD_FILES+=usr/share/snmp/defs/target_tree.def
OLD_FILES+=usr/share/snmp/defs/tree.def
OLD_FILES+=usr/share/snmp/defs/usm_tree.def
OLD_FILES+=usr/share/snmp/defs/vacm_tree.def
OLD_FILES+=usr/share/snmp/defs/wlan_tree.def
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-ATM-FREEBSD-MIB.txt
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-ATM.txt
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-BRIDGE-MIB.txt
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-HAST-MIB.txt
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-HOSTRES-MIB.txt
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-IP-MIB.txt
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-LM75-MIB.txt
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-MIB.txt
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-MIB2-MIB.txt
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-NETGRAPH.txt
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-PF-MIB.txt
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-SNMPD.txt
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-WIRELESS-MIB.txt
OLD_FILES+=usr/share/snmp/mibs/BRIDGE-MIB.txt
OLD_FILES+=usr/share/snmp/mibs/FOKUS-MIB.txt
OLD_FILES+=usr/share/snmp/mibs/FREEBSD-MIB.txt
OLD_FILES+=usr/share/snmp/mibs/RSTP-MIB.txt
OLD_DIRS+=usr/include/bsnmp
OLD_DIRS+=usr/share/snmp/defs
OLD_DIRS+=usr/share/snmp/mibs
OLD_DIRS+=usr/share/snmp
.endif
.if ${MK_CALENDAR} == no
OLD_FILES+=etc/periodic/daily/300.calendar
OLD_FILES+=usr/bin/calendar
OLD_FILES+=usr/share/calendar/calendar.all
OLD_FILES+=usr/share/calendar/calendar.australia
OLD_FILES+=usr/share/calendar/calendar.birthday
OLD_FILES+=usr/share/calendar/calendar.brazilian
OLD_FILES+=usr/share/calendar/calendar.christian
OLD_FILES+=usr/share/calendar/calendar.computer
OLD_FILES+=usr/share/calendar/calendar.croatian
OLD_FILES+=usr/share/calendar/calendar.dutch
OLD_FILES+=usr/share/calendar/calendar.freebsd
OLD_FILES+=usr/share/calendar/calendar.french
OLD_FILES+=usr/share/calendar/calendar.german
OLD_FILES+=usr/share/calendar/calendar.history
OLD_FILES+=usr/share/calendar/calendar.holiday
OLD_FILES+=usr/share/calendar/calendar.hungarian
OLD_FILES+=usr/share/calendar/calendar.judaic
OLD_FILES+=usr/share/calendar/calendar.lotr
OLD_FILES+=usr/share/calendar/calendar.music
OLD_FILES+=usr/share/calendar/calendar.newzealand
OLD_FILES+=usr/share/calendar/calendar.russian
OLD_FILES+=usr/share/calendar/calendar.southafrica
OLD_FILES+=usr/share/calendar/calendar.ukrainian
OLD_FILES+=usr/share/calendar/calendar.usholiday
OLD_FILES+=usr/share/calendar/calendar.world
OLD_FILES+=usr/share/calendar/de_AT.ISO_8859-15/calendar.feiertag
OLD_DIRS+=usr/share/calendar/de_AT.ISO_8859-15
OLD_FILES+=usr/share/calendar/de_DE.ISO8859-1/calendar.all
OLD_FILES+=usr/share/calendar/de_DE.ISO8859-1/calendar.feiertag
OLD_FILES+=usr/share/calendar/de_DE.ISO8859-1/calendar.geschichte
OLD_FILES+=usr/share/calendar/de_DE.ISO8859-1/calendar.kirche
OLD_FILES+=usr/share/calendar/de_DE.ISO8859-1/calendar.literatur
OLD_FILES+=usr/share/calendar/de_DE.ISO8859-1/calendar.musik
OLD_FILES+=usr/share/calendar/de_DE.ISO8859-1/calendar.wissenschaft
OLD_DIRS+=usr/share/calendar/de_DE.ISO8859-1
OLD_FILES+=usr/share/calendar/de_DE.ISO8859-15
OLD_FILES+=usr/share/calendar/fr_FR.ISO8859-1/calendar.all
OLD_FILES+=usr/share/calendar/fr_FR.ISO8859-1/calendar.fetes
OLD_FILES+=usr/share/calendar/fr_FR.ISO8859-1/calendar.french
OLD_FILES+=usr/share/calendar/fr_FR.ISO8859-1/calendar.jferies
OLD_FILES+=usr/share/calendar/fr_FR.ISO8859-1/calendar.proverbes
OLD_DIRS+=usr/share/calendar/fr_FR.ISO8859-1
OLD_FILES+=usr/share/calendar/fr_FR.ISO8859-15
OLD_FILES+=usr/share/calendar/hr_HR.ISO8859-2/calendar.all
OLD_FILES+=usr/share/calendar/hr_HR.ISO8859-2/calendar.praznici
OLD_DIRS+=usr/share/calendar/hr_HR.ISO8859-2
OLD_FILES+=usr/share/calendar/hu_HU.ISO8859-2/calendar.all
OLD_FILES+=usr/share/calendar/hu_HU.ISO8859-2/calendar.nevnapok
OLD_FILES+=usr/share/calendar/hu_HU.ISO8859-2/calendar.unnepek
OLD_DIRS+=usr/share/calendar/hu_HU.ISO8859-2
OLD_FILES+=usr/share/calendar/pt_BR.ISO8859-1/calendar.all
OLD_FILES+=usr/share/calendar/pt_BR.ISO8859-1/calendar.commemorative
OLD_FILES+=usr/share/calendar/pt_BR.ISO8859-1/calendar.holidays
OLD_FILES+=usr/share/calendar/pt_BR.ISO8859-1/calendar.mcommemorative
OLD_DIRS+=usr/share/calendar/pt_BR.ISO8859-1
OLD_FILES+=usr/share/calendar/pt_BR.UTF-8/calendar.all
OLD_FILES+=usr/share/calendar/pt_BR.UTF-8/calendar.commemorative
OLD_FILES+=usr/share/calendar/pt_BR.UTF-8/calendar.holidays
OLD_FILES+=usr/share/calendar/pt_BR.UTF-8/calendar.mcommemorative
OLD_DIRS+=usr/share/calendar/pt_BR.UTF-8
OLD_FILES+=usr/share/calendar/ru_RU.KOI8-R/calendar.all
OLD_FILES+=usr/share/calendar/ru_RU.KOI8-R/calendar.common
OLD_FILES+=usr/share/calendar/ru_RU.KOI8-R/calendar.holiday
OLD_FILES+=usr/share/calendar/ru_RU.KOI8-R/calendar.military
OLD_FILES+=usr/share/calendar/ru_RU.KOI8-R/calendar.orthodox
OLD_FILES+=usr/share/calendar/ru_RU.KOI8-R/calendar.pagan
OLD_DIRS+=usr/share/calendar/ru_RU.KOI8-R
OLD_FILES+=usr/share/calendar/ru_RU.UTF-8/calendar.all
OLD_FILES+=usr/share/calendar/ru_RU.UTF-8/calendar.common
OLD_FILES+=usr/share/calendar/ru_RU.UTF-8/calendar.holiday
OLD_FILES+=usr/share/calendar/ru_RU.UTF-8/calendar.military
OLD_FILES+=usr/share/calendar/ru_RU.UTF-8/calendar.orthodox
OLD_FILES+=usr/share/calendar/ru_RU.UTF-8/calendar.pagan
OLD_DIRS+=usr/share/calendar/ru_RU.UTF-8
OLD_FILES+=usr/share/calendar/uk_UA.KOI8-U/calendar.all
OLD_FILES+=usr/share/calendar/uk_UA.KOI8-U/calendar.holiday
OLD_FILES+=usr/share/calendar/uk_UA.KOI8-U/calendar.misc
OLD_FILES+=usr/share/calendar/uk_UA.KOI8-U/calendar.orthodox
OLD_DIRS+=usr/share/calendar/uk_UA.KOI8-U
OLD_DIRS+=usr/share/calendar
OLD_FILES+=usr/share/man/man1/calendar.1.gz
OLD_FILES+=usr/tests/usr.bin/calendar/Kyuafile
OLD_FILES+=usr/tests/usr.bin/calendar/calendar.calibrate
OLD_FILES+=usr/tests/usr.bin/calendar/legacy_test
OLD_FILES+=usr/tests/usr.bin/calendar/regress.a1.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.a2.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.a3.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.a4.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.a5.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.b1.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.b2.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.b3.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.b4.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.b5.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.s1.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.s2.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.s3.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.s4.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.sh
OLD_FILES+=usr/tests/usr.bin/calendar/regress.w0-1.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.w0-2.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.w0-3.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.w0-4.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.w0-5.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.w0-6.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.w0-7.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.wn-1.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.wn-2.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.wn-3.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.wn-4.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.wn-5.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.wn-6.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.wn-7.out
OLD_DIRS+=usr/tests/usr.bin/calendar
.endif
.if ${MK_CASPER} == no
OLD_FILES+=etc/casper/system.dns
OLD_FILES+=etc/casper/system.grp
OLD_FILES+=etc/casper/system.pwd
OLD_FILES+=etc/casper/system.random
OLD_FILES+=etc/casper/system.sysctl
OLD_FILES+=etc/rc.d/casperd
OLD_LIBS+=lib/libcapsicum.so.0
OLD_LIBS+=lib/libcasper.so.0
OLD_FILES+=libexec/casper/dns
OLD_FILES+=libexec/casper/grp
OLD_FILES+=libexec/casper/pwd
OLD_FILES+=libexec/casper/random
OLD_FILES+=libexec/casper/sysctl
OLD_FILES+=sbin/casper
OLD_FILES+=sbin/casperd
OLD_FILES+=usr/include/libcapsicum.h
OLD_FILES+=usr/include/libcapsicum_dns.h
OLD_FILES+=usr/include/libcapsicum_grp.h
OLD_FILES+=usr/include/libcapsicum_pwd.h
OLD_FILES+=usr/include/libcapsicum_random.h
OLD_FILES+=usr/include/libcapsicum_service.h
OLD_FILES+=usr/include/libcapsicum_sysctl.h
OLD_FILES+=usr/include/libcasper.h
OLD_FILES+=usr/lib/libcapsicum.a
OLD_FILES+=usr/lib/libcapsicum.so
OLD_FILES+=usr/lib/libcapsicum_p.a
OLD_FILES+=usr/lib/libcasper.a
OLD_FILES+=usr/lib/libcasper.so
OLD_FILES+=usr/lib/libcasper_p.a
OLD_FILES+=usr/lib32/libcapsicum.a
OLD_FILES+=usr/lib32/libcapsicum.so
OLD_LIBS+=usr/lib32/libcapsicum.so.0
OLD_FILES+=usr/lib32/libcapsicum_p.a
OLD_FILES+=usr/lib32/libcasper.a
OLD_FILES+=usr/lib32/libcasper.so
OLD_LIBS+=usr/lib32/libcasper.so.0
OLD_FILES+=usr/lib32/libcasper_p.a
OLD_FILES+=usr/share/man/man3/cap_clone.3.gz
OLD_FILES+=usr/share/man/man3/cap_close.3.gz
OLD_FILES+=usr/share/man/man3/cap_init.3.gz
OLD_FILES+=usr/share/man/man3/cap_limit_get.3.gz
OLD_FILES+=usr/share/man/man3/cap_limit_set.3.gz
OLD_FILES+=usr/share/man/man3/cap_recv_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/cap_send_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/cap_service_open.3.gz
OLD_FILES+=usr/share/man/man3/cap_sock.3.gz
OLD_FILES+=usr/share/man/man3/cap_unwrap.3.gz
OLD_FILES+=usr/share/man/man3/cap_wrap.3.gz
OLD_FILES+=usr/share/man/man3/cap_xfer_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/libcapsicum.3.gz
OLD_FILES+=usr/share/man/man8/casperd.8.gz
.endif
.if ${MK_CCD} == no
OLD_FILES+=etc/rc.d/ccd
OLD_FILES+=rescue/ccdconfig
OLD_FILES+=sbin/ccdconfig
OLD_FILES+=usr/share/man/man4/ccd.4.gz
OLD_FILES+=usr/share/man/man8/ccdconfig.8.gz
.endif
.if ${MK_CDDL} == no
OLD_LIBS+=lib/libavl.so.2
OLD_LIBS+=lib/libctf.so.2
OLD_LIBS+=lib/libdtrace.so.2
OLD_LIBS+=lib/libnvpair.so.2
OLD_LIBS+=lib/libumem.so.2
OLD_LIBS+=lib/libuutil.so.2
OLD_FILES+=usr/bin/ctfconvert
OLD_FILES+=usr/bin/ctfdump
OLD_FILES+=usr/bin/ctfmerge
OLD_FILES+=usr/lib/dtrace/drti.o
OLD_FILES+=usr/lib/dtrace/errno.d
OLD_FILES+=usr/lib/dtrace/io.d
OLD_FILES+=usr/lib/dtrace/ip.d
OLD_FILES+=usr/lib/dtrace/psinfo.d
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/lib/dtrace/regs_x86.d
.endif
OLD_FILES+=usr/lib/dtrace/signal.d
OLD_FILES+=usr/lib/dtrace/tcp.d
OLD_FILES+=usr/lib/dtrace/udp.d
OLD_FILES+=usr/lib/dtrace/unistd.d
OLD_FILES+=usr/lib/libavl.a
OLD_FILES+=usr/lib/libavl.so
OLD_FILES+=usr/lib/libavl_p.a
OLD_FILES+=usr/lib/libctf.a
OLD_FILES+=usr/lib/libctf.so
OLD_FILES+=usr/lib/libctf_p.a
OLD_FILES+=usr/lib/libdtrace.a
OLD_FILES+=usr/lib/libdtrace.so
OLD_FILES+=usr/lib/libdtrace_p.a
OLD_FILES+=usr/lib/libnvpair.a
OLD_FILES+=usr/lib/libnvpair.so
OLD_FILES+=usr/lib/libnvpair_p.a
OLD_FILES+=usr/lib/libumem.a
OLD_FILES+=usr/lib/libumem.so
OLD_FILES+=usr/lib/libumem_p.a
OLD_FILES+=usr/lib/libuutil.a
OLD_FILES+=usr/lib/libuutil.so
OLD_FILES+=usr/lib/libuutil_p.a
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64"
OLD_FILES+=usr/lib32/dtrace/drti.o
OLD_FILES+=usr/lib32/libavl.a
OLD_FILES+=usr/lib32/libavl.so
OLD_LIBS+=usr/lib32/libavl.so.2
OLD_FILES+=usr/lib32/libavl_p.a
OLD_FILES+=usr/lib32/libctf.a
OLD_FILES+=usr/lib32/libctf.so
OLD_LIBS+=usr/lib32/libctf.so.2
OLD_FILES+=usr/lib32/libctf_p.a
OLD_FILES+=usr/lib32/libdtrace.a
OLD_FILES+=usr/lib32/libdtrace.so
OLD_LIBS+=usr/lib32/libdtrace.so.2
OLD_FILES+=usr/lib32/libdtrace_p.a
OLD_FILES+=usr/lib32/libnvpair.a
OLD_FILES+=usr/lib32/libnvpair.so
OLD_LIBS+=usr/lib32/libnvpair.so.2
OLD_FILES+=usr/lib32/libnvpair_p.a
OLD_FILES+=usr/lib32/libumem.a
OLD_FILES+=usr/lib32/libumem.so
OLD_LIBS+=usr/lib32/libumem.so.2
OLD_FILES+=usr/lib32/libumem_p.a
OLD_FILES+=usr/lib32/libuutil.a
OLD_FILES+=usr/lib32/libuutil.so
OLD_LIBS+=usr/lib32/libuutil.so.2
OLD_FILES+=usr/lib32/libuutil_p.a
.endif
OLD_FILES+=usr/sbin/dtrace
OLD_FILES+=usr/sbin/lockstat
OLD_FILES+=usr/sbin/plockstat
OLD_FILES+=usr/share/man/man1/dtrace.1.gz
OLD_FILES+=usr/share/man/man1/dtruss.1.gz
OLD_FILES+=usr/share/man/man1/lockstat.1.gz
OLD_FILES+=usr/share/man/man1/plockstat.1.gz
OLD_FILES+=usr/share/dtrace/disklatency
OLD_FILES+=usr/share/dtrace/disklatencycmd
OLD_FILES+=usr/share/dtrace/hotopen
OLD_FILES+=usr/share/dtrace/nfsclienttime
OLD_FILES+=usr/share/dtrace/toolkit/execsnoop
OLD_FILES+=usr/share/dtrace/toolkit/hotkernel
OLD_FILES+=usr/share/dtrace/toolkit/hotuser
OLD_FILES+=usr/share/dtrace/toolkit/opensnoop
OLD_FILES+=usr/share/dtrace/toolkit/procsystime
OLD_FILES+=usr/share/dtrace/tcpconn
OLD_FILES+=usr/share/dtrace/tcpstate
OLD_FILES+=usr/share/dtrace/tcptrack
OLD_FILES+=usr/share/dtrace/udptrack
OLD_DIRS+=usr/lib/dtrace
OLD_DIRS+=usr/lib32/dtrace
OLD_DIRS+=usr/share/dtrace/toolkit
OLD_DIRS+=usr/share/dtrace
.endif
.if ${MK_ZFS} == no
OLD_FILES+=boot/gptzfsboot
OLD_FILES+=boot/zfsboot
OLD_FILES+=boot/zfsloader
OLD_FILES+=etc/rc.d/zfs
OLD_FILES+=etc/rc.d/zfsd
OLD_FILES+=etc/rc.d/zfsbe
OLD_FILES+=etc/rc.d/zvol
OLD_FILES+=etc/devd/zfs.conf
OLD_FILES+=etc/periodic/daily/404.status-zfs
OLD_FILES+=etc/periodic/daily/800.scrub-zfs
OLD_LIBS+=lib/libzfs.so.2
OLD_LIBS+=lib/libzfs.so.3
OLD_LIBS+=lib/libzfs_core.so.2
OLD_LIBS+=lib/libzpool.so.2
OLD_FILES+=rescue/zdb
OLD_FILES+=rescue/zfs
OLD_FILES+=rescue/zpool
OLD_FILES+=sbin/bectl
OLD_FILES+=sbin/zfs
OLD_FILES+=sbin/zpool
OLD_FILES+=sbin/zfsbootcfg
OLD_FILES+=usr/bin/zinject
OLD_FILES+=usr/bin/zstreamdump
OLD_FILES+=usr/bin/ztest
OLD_FILES+=usr/lib/libbe.a
OLD_FILES+=usr/lib/libbe_p.a
OLD_FILES+=usr/lib/libbe.so
OLD_LIBS+=lib/libbe.so.1
OLD_FILES+=usr/lib/libzfs.a
OLD_FILES+=usr/lib/libzfs.so
OLD_FILES+=usr/lib/libzfs_core.a
OLD_FILES+=usr/lib/libzfs_core.so
OLD_FILES+=usr/lib/libzfs_core_p.a
OLD_FILES+=usr/lib/libzfs_p.a
OLD_FILES+=usr/lib/libzpool.a
OLD_FILES+=usr/lib/libzpool.so
OLD_LIBS+=usr/lib/libzpool.so.2
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64"
OLD_FILES+=usr/lib32/libzfs.a
OLD_FILES+=usr/lib32/libzfs.so
OLD_LIBS+=usr/lib32/libzfs.so.2
OLD_LIBS+=usr/lib32/libzfs.so.3
OLD_FILES+=usr/lib32/libzfs_core.a
OLD_FILES+=usr/lib32/libzfs_core.so
OLD_LIBS+=usr/lib32/libzfs_core.so.2
OLD_FILES+=usr/lib32/libzfs_core_p.a
OLD_FILES+=usr/lib32/libzfs_p.a
OLD_FILES+=usr/lib32/libzpool.a
OLD_FILES+=usr/lib32/libzpool.so
OLD_LIBS+=usr/lib32/libzpool.so.2
.endif
OLD_FILES+=usr/sbin/zfsd
OLD_FILES+=usr/sbin/zhack
OLD_FILES+=usr/sbin/zdb
OLD_FILES+=usr/share/man/man3/libbe.3.gz
OLD_FILES+=usr/share/man/man7/zpool-features.7.gz
OLD_FILES+=usr/share/man/man8/bectl.8.gz
OLD_FILES+=usr/share/man/man8/gptzfsboot.8.gz
OLD_FILES+=usr/share/man/man8/zdb.8.gz
OLD_FILES+=usr/share/man/man8/zfs-program.8.gz
OLD_FILES+=usr/share/man/man8/zfs.8.gz
OLD_FILES+=usr/share/man/man8/zfsboot.8.gz
OLD_FILES+=usr/share/man/man8/zfsbootcfg.8.gz
OLD_FILES+=usr/share/man/man8/zfsd.8.gz
OLD_FILES+=usr/share/man/man8/zfsloader.8.gz
OLD_FILES+=usr/share/man/man8/zpool.8.gz
.endif
.if ${MK_CLANG} == no
OLD_FILES+=usr/bin/clang
OLD_FILES+=usr/bin/clang++
OLD_FILES+=usr/bin/clang-cpp
OLD_FILES+=usr/bin/clang-tblgen
OLD_FILES+=usr/bin/llvm-objdump
OLD_FILES+=usr/bin/llvm-symbolizer
OLD_FILES+=usr/bin/llvm-tblgen
-OLD_FILES+=usr/lib/clang/9.0.0/include/cuda_wrappers/algorithm
-OLD_FILES+=usr/lib/clang/9.0.0/include/cuda_wrappers/complex
-OLD_FILES+=usr/lib/clang/9.0.0/include/cuda_wrappers/new
-OLD_DIRS+=usr/lib/clang/9.0.0/include/cuda_wrappers
-OLD_FILES+=usr/lib/clang/9.0.0/include/openmp_wrappers/__clang_openmp_math.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/openmp_wrappers/__clang_openmp_math_declares.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/openmp_wrappers/cmath
-OLD_FILES+=usr/lib/clang/9.0.0/include/openmp_wrappers/math.h
-OLD_DIRS+=usr/lib/clang/9.0.0/include/openmp_wrappers
-OLD_FILES+=usr/lib/clang/9.0.0/include/ppc_wrappers/emmintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/ppc_wrappers/mm_malloc.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/ppc_wrappers/mmintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/ppc_wrappers/xmmintrin.h
-OLD_DIRS+=usr/lib/clang/9.0.0/include/ppc_wrappers
-OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/allocator_interface.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/asan_interface.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/common_interface_defs.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/coverage_interface.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/dfsan_interface.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/hwasan_interface.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/linux_syscall_hooks.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/lsan_interface.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/msan_interface.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/netbsd_syscall_hooks.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/scudo_interface.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/tsan_interface.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/sanitizer/tsan_interface_atomic.h
-OLD_DIRS+=usr/lib/clang/9.0.0/include/sanitizer
-OLD_FILES+=usr/lib/clang/9.0.0/include/__clang_cuda_builtin_vars.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/__clang_cuda_cmath.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/__clang_cuda_complex_builtins.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/__clang_cuda_device_functions.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/__clang_cuda_intrinsics.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/__clang_cuda_libdevice_declares.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/__clang_cuda_math_forward_declares.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/__clang_cuda_runtime_wrapper.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/__stddef_max_align_t.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/__wmmintrin_aes.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/__wmmintrin_pclmul.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/adxintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/altivec.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/ammintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/arm64intr.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/arm_acle.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/arm_fp16.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/arm_neon.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/armintr.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/avx2intrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/avx512bf16intrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/avx512bitalgintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/avx512bwintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/avx512cdintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/avx512dqintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/avx512erintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/avx512fintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/avx512ifmaintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/avx512ifmavlintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/avx512pfintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vbmi2intrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vbmiintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vbmivlintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vlbf16intrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vlbitalgintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vlbwintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vlcdintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vldqintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vlintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vlvbmi2intrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vlvnniintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vlvp2intersectintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vnniintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vp2intersectintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vpopcntdqintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/avx512vpopcntdqvlintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/avxintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/bmi2intrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/bmiintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/cetintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/cldemoteintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/clflushoptintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/clwbintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/clzerointrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/cpuid.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/emmintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/enqcmdintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/f16cintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/float.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/fma4intrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/fmaintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/fxsrintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/gfniintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/htmintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/htmxlintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/ia32intrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/immintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/intrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/inttypes.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/invpcidintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/iso646.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/limits.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/lwpintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/lzcntintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/mm3dnow.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/mm_malloc.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/mmintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/module.modulemap
-OLD_FILES+=usr/lib/clang/9.0.0/include/movdirintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/msa.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/mwaitxintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/nmmintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/opencl-c-base.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/opencl-c.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/pconfigintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/pkuintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/pmmintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/popcntintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/prfchwintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/ptwriteintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/rdseedintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/rtmintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/s390intrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/sgxintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/shaintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/smmintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/stdalign.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/stdarg.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/stdatomic.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/stdbool.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/stddef.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/stdint.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/stdnoreturn.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/tbmintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/tgmath.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/tmmintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/unwind.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/vadefs.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/vaesintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/varargs.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/vecintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/vpclmulqdqintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/waitpkgintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/wbnoinvdintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/wmmintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/x86intrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/xmmintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/xopintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/xsavecintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/xsaveintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/xsaveoptintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/xsavesintrin.h
-OLD_FILES+=usr/lib/clang/9.0.0/include/xtestintrin.h
-OLD_DIRS+=usr/lib/clang/9.0.0/include
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-aarch64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-aarch64.so
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-arm.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-arm.so
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-armhf.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-armhf.so
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-i386.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-i386.so
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-preinit-aarch64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-preinit-arm.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-preinit-armhf.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-preinit-i386.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-x86_64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan-x86_64.so
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan_cxx-aarch64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan_cxx-arm.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan_cxx-armhf.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.cfi-aarch64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.cfi-arm.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.cfi-armhf.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.cfi-i386.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.cfi-x86_64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.cfi_diag-aarch64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.cfi_diag-arm.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.cfi_diag-armhf.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.cfi_diag-i386.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.cfi_diag-x86_64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.dd-aarch64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.dd-x86_64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.fuzzer-aarch64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.fuzzer-x86_64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.fuzzer_no_main-aarch64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.fuzzer_no_main-x86_64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.msan-aarch64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.msan-x86_64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.msan_cxx-aarch64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.msan_cxx-x86_64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.profile-aarch64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.profile-arm.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.profile-armhf.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.profile-i386.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.profile-x86_64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.safestack-aarch64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.safestack-i386.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.safestack-x86_64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.stats-aarch64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.stats-arm.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.stats-armhf.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.stats-i386.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.stats-x86_64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.stats_client-aarch64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.stats_client-arm.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.stats_client-armhf.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.stats_client-i386.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.stats_client-x86_64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.tsan-aarch64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.tsan-x86_64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.tsan_cxx-aarch64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.tsan_cxx-x86_64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_minimal-aarch64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_minimal-arm.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_minimal-armhf.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_minimal-i386.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_minimal-x86_64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_standalone-aarch64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_standalone-arm.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_standalone-armhf.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-aarch64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-arm.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-armhf.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-aarch64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-arm.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-armhf.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-basic-aarch64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-basic-arm.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-basic-armhf.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-basic-x86_64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-fdr-aarch64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-fdr-arm.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-fdr-armhf.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-fdr-x86_64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-profiling-aarch64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-profiling-arm.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-profiling-armhf.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-profiling-x86_64.a
-OLD_FILES+=usr/lib/clang/9.0.0/lib/freebsd/libclang_rt.xray-x86_64.a
-
-OLD_DIRS+=usr/lib/clang/9.0.0/lib/freebsd
-OLD_DIRS+=usr/lib/clang/9.0.0/lib
-OLD_DIRS+=usr/lib/clang/9.0.0
+OLD_FILES+=usr/lib/clang/9.0.1/include/cuda_wrappers/algorithm
+OLD_FILES+=usr/lib/clang/9.0.1/include/cuda_wrappers/complex
+OLD_FILES+=usr/lib/clang/9.0.1/include/cuda_wrappers/new
+OLD_DIRS+=usr/lib/clang/9.0.1/include/cuda_wrappers
+OLD_FILES+=usr/lib/clang/9.0.1/include/openmp_wrappers/__clang_openmp_math.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/openmp_wrappers/__clang_openmp_math_declares.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/openmp_wrappers/cmath
+OLD_FILES+=usr/lib/clang/9.0.1/include/openmp_wrappers/math.h
+OLD_DIRS+=usr/lib/clang/9.0.1/include/openmp_wrappers
+OLD_FILES+=usr/lib/clang/9.0.1/include/ppc_wrappers/emmintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/ppc_wrappers/mm_malloc.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/ppc_wrappers/mmintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/ppc_wrappers/xmmintrin.h
+OLD_DIRS+=usr/lib/clang/9.0.1/include/ppc_wrappers
+OLD_FILES+=usr/lib/clang/9.0.1/include/sanitizer/allocator_interface.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/sanitizer/asan_interface.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/sanitizer/common_interface_defs.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/sanitizer/coverage_interface.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/sanitizer/dfsan_interface.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/sanitizer/hwasan_interface.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/sanitizer/linux_syscall_hooks.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/sanitizer/lsan_interface.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/sanitizer/msan_interface.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/sanitizer/netbsd_syscall_hooks.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/sanitizer/scudo_interface.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/sanitizer/tsan_interface.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/sanitizer/tsan_interface_atomic.h
+OLD_DIRS+=usr/lib/clang/9.0.1/include/sanitizer
+OLD_FILES+=usr/lib/clang/9.0.1/include/__clang_cuda_builtin_vars.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/__clang_cuda_cmath.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/__clang_cuda_complex_builtins.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/__clang_cuda_device_functions.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/__clang_cuda_intrinsics.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/__clang_cuda_libdevice_declares.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/__clang_cuda_math_forward_declares.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/__clang_cuda_runtime_wrapper.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/__stddef_max_align_t.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/__wmmintrin_aes.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/__wmmintrin_pclmul.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/adxintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/altivec.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/ammintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/arm64intr.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/arm_acle.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/arm_fp16.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/arm_neon.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/armintr.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/avx2intrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/avx512bf16intrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/avx512bitalgintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/avx512bwintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/avx512cdintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/avx512dqintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/avx512erintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/avx512fintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/avx512ifmaintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/avx512ifmavlintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/avx512pfintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/avx512vbmi2intrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/avx512vbmiintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/avx512vbmivlintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/avx512vlbf16intrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/avx512vlbitalgintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/avx512vlbwintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/avx512vlcdintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/avx512vldqintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/avx512vlintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/avx512vlvbmi2intrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/avx512vlvnniintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/avx512vlvp2intersectintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/avx512vnniintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/avx512vp2intersectintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/avx512vpopcntdqintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/avx512vpopcntdqvlintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/avxintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/bmi2intrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/bmiintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/cetintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/cldemoteintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/clflushoptintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/clwbintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/clzerointrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/cpuid.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/emmintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/enqcmdintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/f16cintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/float.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/fma4intrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/fmaintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/fxsrintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/gfniintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/htmintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/htmxlintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/ia32intrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/immintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/intrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/inttypes.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/invpcidintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/iso646.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/limits.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/lwpintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/lzcntintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/mm3dnow.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/mm_malloc.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/mmintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/module.modulemap
+OLD_FILES+=usr/lib/clang/9.0.1/include/movdirintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/msa.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/mwaitxintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/nmmintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/opencl-c-base.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/opencl-c.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/pconfigintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/pkuintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/pmmintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/popcntintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/prfchwintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/ptwriteintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/rdseedintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/rtmintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/s390intrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/sgxintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/shaintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/smmintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/stdalign.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/stdarg.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/stdatomic.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/stdbool.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/stddef.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/stdint.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/stdnoreturn.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/tbmintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/tgmath.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/tmmintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/unwind.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/vadefs.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/vaesintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/varargs.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/vecintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/vpclmulqdqintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/waitpkgintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/wbnoinvdintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/wmmintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/x86intrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/xmmintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/xopintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/xsavecintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/xsaveintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/xsaveoptintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/xsavesintrin.h
+OLD_FILES+=usr/lib/clang/9.0.1/include/xtestintrin.h
+OLD_DIRS+=usr/lib/clang/9.0.1/include
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan-aarch64.so
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan-arm.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan-arm.so
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan-armhf.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan-armhf.so
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan-i386.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan-i386.so
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan-preinit-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan-preinit-arm.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan-preinit-armhf.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan-preinit-i386.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan-x86_64.so
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan_cxx-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan_cxx-arm.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan_cxx-armhf.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan_cxx-i386.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.cfi-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.cfi-arm.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.cfi-armhf.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.cfi-i386.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.cfi-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.cfi_diag-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.cfi_diag-arm.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.cfi_diag-armhf.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.cfi_diag-i386.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.cfi_diag-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.dd-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.dd-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.fuzzer-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.fuzzer-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.fuzzer_no_main-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.fuzzer_no_main-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.msan-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.msan-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.msan_cxx-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.msan_cxx-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.profile-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.profile-arm.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.profile-armhf.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.profile-i386.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.profile-powerpc.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.profile-powerpc64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.profile-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.safestack-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.safestack-i386.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.safestack-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.stats-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.stats-arm.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.stats-armhf.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.stats-i386.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.stats-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.stats_client-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.stats_client-arm.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.stats_client-armhf.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.stats_client-i386.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.stats_client-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.tsan-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.tsan-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.tsan_cxx-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.tsan_cxx-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.ubsan_minimal-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.ubsan_minimal-arm.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.ubsan_minimal-armhf.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.ubsan_minimal-i386.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.ubsan_minimal-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.ubsan_standalone-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.ubsan_standalone-arm.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.ubsan_standalone-armhf.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-arm.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-armhf.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.xray-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.xray-arm.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.xray-armhf.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.xray-basic-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.xray-basic-arm.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.xray-basic-armhf.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.xray-basic-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.xray-fdr-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.xray-fdr-arm.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.xray-fdr-armhf.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.xray-fdr-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.xray-profiling-aarch64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.xray-profiling-arm.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.xray-profiling-armhf.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.xray-profiling-x86_64.a
+OLD_FILES+=usr/lib/clang/9.0.1/lib/freebsd/libclang_rt.xray-x86_64.a
+OLD_DIRS+=usr/lib/clang/9.0.1/lib/freebsd
+OLD_DIRS+=usr/lib/clang/9.0.1/lib
+OLD_DIRS+=usr/lib/clang/9.0.1
OLD_DIRS+=usr/lib/clang
OLD_FILES+=usr/share/doc/llvm/clang/LICENSE.TXT
OLD_DIRS+=usr/share/doc/llvm/clang
OLD_FILES+=usr/share/doc/llvm/COPYRIGHT.regex
OLD_FILES+=usr/share/doc/llvm/LICENSE.TXT
OLD_DIRS+=usr/share/doc/llvm
OLD_FILES+=usr/share/man/man1/clang.1.gz
OLD_FILES+=usr/share/man/man1/clang++.1.gz
OLD_FILES+=usr/share/man/man1/clang-cpp.1.gz
OLD_FILES+=usr/share/man/man1/llvm-symbolizer.1.gz
OLD_FILES+=usr/share/man/man1/llvm-tblgen.1.gz
.endif
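# The clang entries above are regenerated at every compiler version bump.
# A minimal sketch of how such a list can be produced from a populated
# destination tree (the version and the reverse sort, which yields the
# depth-first directory order used in this file, are assumptions):
# (cd ${DESTDIR} && \
#     find usr/lib/clang/9.0.1 ! -type d | sort | sed -e 's,^,OLD_FILES+=,' && \
#     find usr/lib/clang/9.0.1 -type d | sort -r | sed -e 's,^,OLD_DIRS+=,')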
.if ${MK_CLANG_EXTRAS} == no
OLD_FILES+=usr/bin/bugpoint
OLD_FILES+=usr/bin/clang-format
OLD_FILES+=usr/bin/llc
OLD_FILES+=usr/bin/lli
OLD_FILES+=usr/bin/llvm-ar
OLD_FILES+=usr/bin/llvm-as
OLD_FILES+=usr/bin/llvm-bcanalyzer
OLD_FILES+=usr/bin/llvm-cxxdump
OLD_FILES+=usr/bin/llvm-cxxfilt
OLD_FILES+=usr/bin/llvm-diff
OLD_FILES+=usr/bin/llvm-dis
OLD_FILES+=usr/bin/llvm-dwarfdump
OLD_FILES+=usr/bin/llvm-extract
OLD_FILES+=usr/bin/llvm-link
OLD_FILES+=usr/bin/llvm-lto
OLD_FILES+=usr/bin/llvm-lto2
OLD_FILES+=usr/bin/llvm-mc
OLD_FILES+=usr/bin/llvm-mca
OLD_FILES+=usr/bin/llvm-modextract
OLD_FILES+=usr/bin/llvm-nm
OLD_FILES+=usr/bin/llvm-objcopy
OLD_FILES+=usr/bin/llvm-pdbutil
OLD_FILES+=usr/bin/llvm-ranlib
OLD_FILES+=usr/bin/llvm-rtdyld
OLD_FILES+=usr/bin/llvm-xray
OLD_FILES+=usr/bin/opt
OLD_FILES+=usr/share/man/man1/bugpoint.1.gz
OLD_FILES+=usr/share/man/man1/llc.1.gz
OLD_FILES+=usr/share/man/man1/lli.1.gz
OLD_FILES+=usr/share/man/man1/llvm-ar.1.gz
OLD_FILES+=usr/share/man/man1/llvm-as.1.gz
OLD_FILES+=usr/share/man/man1/llvm-bcanalyzer.1.gz
OLD_FILES+=usr/share/man/man1/llvm-diff.1.gz
OLD_FILES+=usr/share/man/man1/llvm-dis.1.gz
OLD_FILES+=usr/share/man/man1/llvm-dwarfdump.1.gz
OLD_FILES+=usr/share/man/man1/llvm-extract.1.gz
OLD_FILES+=usr/share/man/man1/llvm-link.1.gz
OLD_FILES+=usr/share/man/man1/llvm-nm.1.gz
OLD_FILES+=usr/share/man/man1/llvm-pdbutil.1.gz
OLD_FILES+=usr/share/man/man1/opt.1.gz
.endif
.if ${MK_CPP} == no
OLD_FILES+=usr/bin/cpp
OLD_FILES+=usr/share/man/man1/cpp.1.gz
.endif
.if ${MK_CTM} == no
OLD_FILES+=usr/sbin/ctm
OLD_FILES+=usr/sbin/ctm_dequeue
OLD_FILES+=usr/sbin/ctm_rmail
OLD_FILES+=usr/sbin/ctm_smail
OLD_FILES+=usr/share/man/man1/ctm.1.gz
OLD_FILES+=usr/share/man/man1/ctm_dequeue.1.gz
OLD_FILES+=usr/share/man/man1/ctm_rmail.1.gz
OLD_FILES+=usr/share/man/man1/ctm_smail.1.gz
OLD_FILES+=usr/share/man/man5/ctm.5.gz
.endif
.if ${MK_CUSE} == no
OLD_FILES+=usr/include/fs/cuse/cuse_defs.h
OLD_FILES+=usr/include/fs/cuse/cuse_ioctl.h
OLD_FILES+=usr/include/cuse.h
OLD_FILES+=usr/lib/libcuse.a
OLD_LIBS+=usr/lib/libcuse.so.1
OLD_FILES+=usr/lib/libcuse_p.a
OLD_FILES+=usr/share/man/man3/cuse.3.gz
OLD_FILES+=usr/share/man/man3/cuse_alloc_unit_number.3.gz
OLD_FILES+=usr/share/man/man3/cuse_alloc_unit_number_by_id.3.gz
OLD_FILES+=usr/share/man/man3/cuse_copy_in.3.gz
OLD_FILES+=usr/share/man/man3/cuse_copy_out.3.gz
OLD_FILES+=usr/share/man/man3/cuse_dev_create.3.gz
OLD_FILES+=usr/share/man/man3/cuse_dev_destroy.3.gz
OLD_FILES+=usr/share/man/man3/cuse_dev_get_current.3.gz
OLD_FILES+=usr/share/man/man3/cuse_dev_get_per_file_handle.3.gz
OLD_FILES+=usr/share/man/man3/cuse_dev_get_priv0.3.gz
OLD_FILES+=usr/share/man/man3/cuse_dev_get_priv1.3.gz
OLD_FILES+=usr/share/man/man3/cuse_dev_set_per_file_handle.3.gz
OLD_FILES+=usr/share/man/man3/cuse_dev_set_priv0.3.gz
OLD_FILES+=usr/share/man/man3/cuse_dev_set_priv1.3.gz
OLD_FILES+=usr/share/man/man3/cuse_free_unit_number.3.gz
OLD_FILES+=usr/share/man/man3/cuse_free_unit_number_by_id.3.gz
OLD_FILES+=usr/share/man/man3/cuse_get_local.3.gz
OLD_FILES+=usr/share/man/man3/cuse_got_peer_signal.3.gz
OLD_FILES+=usr/share/man/man3/cuse_init.3.gz
OLD_FILES+=usr/share/man/man3/cuse_is_vmalloc_addr.3.gz
OLD_FILES+=usr/share/man/man3/cuse_poll_wakeup.3.gz
OLD_FILES+=usr/share/man/man3/cuse_set_local.3.gz
OLD_FILES+=usr/share/man/man3/cuse_uninit.3.gz
OLD_FILES+=usr/share/man/man3/cuse_vmalloc.3.gz
OLD_FILES+=usr/share/man/man3/cuse_vmfree.3.gz
OLD_FILES+=usr/share/man/man3/cuse_vmoffset.3.gz
OLD_FILES+=usr/share/man/man3/cuse_wait_and_process.3.gz
OLD_DIRS+=usr/include/fs/cuse
.endif
# devd(8), although written in C++, is deliberately not listed here
.if ${MK_CXX} == no
OLD_FILES+=usr/bin/CC
OLD_FILES+=usr/bin/c++
OLD_FILES+=usr/bin/g++
OLD_FILES+=usr/libexec/cc1plus
.endif
.if ${MK_DEBUG_FILES} == no
.if exists(${DESTDIR}/usr/lib/debug)
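# Everything installed under usr/lib/debug is collected dynamically and
# scheduled for removal: directories end up in OLD_DIRS, regular files in
# OLD_FILES and the lib*.so* debug files in OLD_LIBS, while the boot debug
# files are deliberately skipped.  The trailing "echo" keeps each !=
# assignment succeeding even when find produces no output.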
DEBUG_DIRS!=find ${DESTDIR}/usr/lib/debug -mindepth 1 \
-type d \! -path "${DESTDIR}/usr/lib/debug/boot/*" \
| sed -e 's,^${DESTDIR}/,,'; echo
DEBUG_FILES!=find ${DESTDIR}/usr/lib/debug \
\! -type d \! -path "${DESTDIR}/usr/lib/debug/boot/*" \! -name "lib*.so*" \
| sed -e 's,^${DESTDIR}/,,'; echo
DEBUG_LIBS!=find ${DESTDIR}/usr/lib/debug \! -type d -name "lib*.so*" \
| sed -e 's,^${DESTDIR}/,,'; echo
OLD_DIRS+=${DEBUG_DIRS}
OLD_FILES+=${DEBUG_FILES}
OLD_LIBS+=${DEBUG_LIBS}
.endif
.endif
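# To preview what turning a knob off would remove from an installed
# system, the check-old target can be pointed at the destination tree;
# a sketch, assuming a completed installworld under /path/to/destdir:
# make -f Makefile.inc1 -DWITHOUT_DEBUG_FILES \
#     DESTDIR=/path/to/destdir check-old
# The companion delete-old and delete-old-libs targets do the removal.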
.if ${MK_DIALOG} == no
OLD_FILES+=usr/bin/dialog
OLD_FILES+=usr/bin/dpv
OLD_FILES+=usr/lib/libdialog.a
OLD_FILES+=usr/lib/libdialog.so
OLD_LIBS+=usr/lib/libdialog.so.8
OLD_FILES+=usr/lib/libdialog_p.a
OLD_FILES+=usr/lib/libdpv.a
OLD_FILES+=usr/lib/libdpv.so
OLD_LIBS+=usr/lib/libdpv.so.1
OLD_FILES+=usr/lib/libdpv_p.a
OLD_FILES+=usr/sbin/bsdconfig
OLD_FILES+=usr/share/man/man1/dialog.1.gz
OLD_FILES+=usr/share/man/man1/dpv.1.gz
OLD_FILES+=usr/share/man/man3/dialog.3.gz
OLD_FILES+=usr/share/man/man3/dpv.3.gz
OLD_FILES+=usr/share/man/man8/bsdconfig.8.gz
OLD_DIRS+=usr/share/bsdconfig/media
OLD_DIRS+=usr/share/bsdconfig/networking
OLD_DIRS+=usr/share/bsdconfig/packages
OLD_DIRS+=usr/share/bsdconfig/password
OLD_DIRS+=usr/share/bsdconfig/startup
OLD_DIRS+=usr/share/bsdconfig/timezone
OLD_DIRS+=usr/share/bsdconfig/usermgmt
OLD_DIRS+=usr/share/bsdconfig
.endif
.if ${MK_EFI} == no
OLD_FILES+=usr/sbin/efibootmgr
OLD_FILES+=usr/sbin/efidp
OLD_FILES+=usr/sbin/efivar
OLD_FILES+=usr/sbin/uefisign
OLD_FILES+=usr/share/examples/uefisign/uefikeys
.endif
.if ${MK_FMTREE} == no
OLD_FILES+=usr/sbin/fmtree
OLD_FILES+=usr/share/man/man8/fmtree.8.gz
.endif
.if ${MK_FTP} == no
OLD_FILES+=etc/ftpusers
OLD_FILES+=etc/newsyslog.conf.d/ftp.conf
OLD_FILES+=etc/pam.d/ftp
OLD_FILES+=etc/pam.d/ftpd
OLD_FILES+=etc/rc.d/ftpd
OLD_FILES+=etc/syslog.d/ftp.conf
OLD_FILES+=usr/bin/ftp
OLD_FILES+=usr/bin/gate-ftp
OLD_FILES+=usr/bin/pftp
OLD_FILES+=usr/libexec/ftpd
OLD_FILES+=usr/share/man/man1/ftp.1.gz
OLD_FILES+=usr/share/man/man1/gate-ftp.1.gz
OLD_FILES+=usr/share/man/man1/pftp.1.gz
OLD_FILES+=usr/share/man/man5/ftpchroot.5.gz
OLD_FILES+=usr/share/man/man8/ftpd.8.gz
.endif
.if ${MK_GNUCXX} == no
OLD_FILES+=usr/bin/g++
OLD_FILES+=usr/include/c++/4.2/algorithm
OLD_FILES+=usr/include/c++/4.2/backward/algo.h
OLD_FILES+=usr/include/c++/4.2/backward/algobase.h
OLD_FILES+=usr/include/c++/4.2/backward/alloc.h
OLD_FILES+=usr/include/c++/4.2/backward/backward_warning.h
OLD_FILES+=usr/include/c++/4.2/backward/bvector.h
OLD_FILES+=usr/include/c++/4.2/backward/complex.h
OLD_FILES+=usr/include/c++/4.2/backward/defalloc.h
OLD_FILES+=usr/include/c++/4.2/backward/deque.h
OLD_FILES+=usr/include/c++/4.2/backward/fstream.h
OLD_FILES+=usr/include/c++/4.2/backward/function.h
OLD_FILES+=usr/include/c++/4.2/backward/hash_map.h
OLD_FILES+=usr/include/c++/4.2/backward/hash_set.h
OLD_FILES+=usr/include/c++/4.2/backward/hashtable.h
OLD_FILES+=usr/include/c++/4.2/backward/heap.h
OLD_FILES+=usr/include/c++/4.2/backward/iomanip.h
OLD_FILES+=usr/include/c++/4.2/backward/iostream.h
OLD_FILES+=usr/include/c++/4.2/backward/istream.h
OLD_FILES+=usr/include/c++/4.2/backward/iterator.h
OLD_FILES+=usr/include/c++/4.2/backward/list.h
OLD_FILES+=usr/include/c++/4.2/backward/map.h
OLD_FILES+=usr/include/c++/4.2/backward/multimap.h
OLD_FILES+=usr/include/c++/4.2/backward/multiset.h
OLD_FILES+=usr/include/c++/4.2/backward/new.h
OLD_FILES+=usr/include/c++/4.2/backward/ostream.h
OLD_FILES+=usr/include/c++/4.2/backward/pair.h
OLD_FILES+=usr/include/c++/4.2/backward/queue.h
OLD_FILES+=usr/include/c++/4.2/backward/rope.h
OLD_FILES+=usr/include/c++/4.2/backward/set.h
OLD_FILES+=usr/include/c++/4.2/backward/slist.h
OLD_FILES+=usr/include/c++/4.2/backward/stack.h
OLD_FILES+=usr/include/c++/4.2/backward/stream.h
OLD_FILES+=usr/include/c++/4.2/backward/streambuf.h
OLD_FILES+=usr/include/c++/4.2/backward/strstream
OLD_FILES+=usr/include/c++/4.2/backward/tempbuf.h
OLD_FILES+=usr/include/c++/4.2/backward/tree.h
OLD_FILES+=usr/include/c++/4.2/backward/vector.h
OLD_FILES+=usr/include/c++/4.2/bits/allocator.h
OLD_FILES+=usr/include/c++/4.2/bits/atomic_word.h
OLD_FILES+=usr/include/c++/4.2/bits/basic_file.h
OLD_FILES+=usr/include/c++/4.2/bits/basic_ios.h
OLD_FILES+=usr/include/c++/4.2/bits/basic_ios.tcc
OLD_FILES+=usr/include/c++/4.2/bits/basic_string.h
OLD_FILES+=usr/include/c++/4.2/bits/basic_string.tcc
OLD_FILES+=usr/include/c++/4.2/bits/boost_concept_check.h
OLD_FILES+=usr/include/c++/4.2/bits/c++allocator.h
OLD_FILES+=usr/include/c++/4.2/bits/c++config.h
OLD_FILES+=usr/include/c++/4.2/bits/c++io.h
OLD_FILES+=usr/include/c++/4.2/bits/c++locale.h
OLD_FILES+=usr/include/c++/4.2/bits/c++locale_internal.h
OLD_FILES+=usr/include/c++/4.2/bits/char_traits.h
OLD_FILES+=usr/include/c++/4.2/bits/cmath.tcc
OLD_FILES+=usr/include/c++/4.2/bits/codecvt.h
OLD_FILES+=usr/include/c++/4.2/bits/compatibility.h
OLD_FILES+=usr/include/c++/4.2/bits/concept_check.h
OLD_FILES+=usr/include/c++/4.2/bits/cpp_type_traits.h
OLD_FILES+=usr/include/c++/4.2/bits/cpu_defines.h
OLD_FILES+=usr/include/c++/4.2/bits/ctype_base.h
OLD_FILES+=usr/include/c++/4.2/bits/ctype_inline.h
OLD_FILES+=usr/include/c++/4.2/bits/ctype_noninline.h
OLD_FILES+=usr/include/c++/4.2/bits/cxxabi_tweaks.h
OLD_FILES+=usr/include/c++/4.2/bits/deque.tcc
OLD_FILES+=usr/include/c++/4.2/bits/fstream.tcc
OLD_FILES+=usr/include/c++/4.2/bits/functexcept.h
OLD_FILES+=usr/include/c++/4.2/bits/gslice.h
OLD_FILES+=usr/include/c++/4.2/bits/gslice_array.h
OLD_FILES+=usr/include/c++/4.2/bits/gthr-default.h
OLD_FILES+=usr/include/c++/4.2/bits/gthr-posix.h
OLD_FILES+=usr/include/c++/4.2/bits/gthr-single.h
OLD_FILES+=usr/include/c++/4.2/bits/gthr-tpf.h
OLD_FILES+=usr/include/c++/4.2/bits/gthr.h
OLD_FILES+=usr/include/c++/4.2/bits/indirect_array.h
OLD_FILES+=usr/include/c++/4.2/bits/ios_base.h
OLD_FILES+=usr/include/c++/4.2/bits/istream.tcc
OLD_FILES+=usr/include/c++/4.2/bits/list.tcc
OLD_FILES+=usr/include/c++/4.2/bits/locale_classes.h
OLD_FILES+=usr/include/c++/4.2/bits/locale_facets.h
OLD_FILES+=usr/include/c++/4.2/bits/locale_facets.tcc
OLD_FILES+=usr/include/c++/4.2/bits/localefwd.h
OLD_FILES+=usr/include/c++/4.2/bits/mask_array.h
OLD_FILES+=usr/include/c++/4.2/bits/messages_members.h
OLD_FILES+=usr/include/c++/4.2/bits/os_defines.h
OLD_FILES+=usr/include/c++/4.2/bits/ostream.tcc
OLD_FILES+=usr/include/c++/4.2/bits/ostream_insert.h
OLD_FILES+=usr/include/c++/4.2/bits/postypes.h
OLD_FILES+=usr/include/c++/4.2/bits/slice_array.h
OLD_FILES+=usr/include/c++/4.2/bits/sstream.tcc
OLD_FILES+=usr/include/c++/4.2/bits/stl_algo.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_algobase.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_bvector.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_construct.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_deque.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_function.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_heap.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_iterator.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_iterator_base_funcs.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_iterator_base_types.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_list.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_map.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_multimap.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_multiset.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_numeric.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_pair.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_queue.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_raw_storage_iter.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_relops.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_set.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_stack.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_tempbuf.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_tree.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_uninitialized.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_vector.h
OLD_FILES+=usr/include/c++/4.2/bits/stream_iterator.h
OLD_FILES+=usr/include/c++/4.2/bits/streambuf.tcc
OLD_FILES+=usr/include/c++/4.2/bits/streambuf_iterator.h
OLD_FILES+=usr/include/c++/4.2/bits/stringfwd.h
OLD_FILES+=usr/include/c++/4.2/bits/time_members.h
OLD_FILES+=usr/include/c++/4.2/bits/valarray_after.h
OLD_FILES+=usr/include/c++/4.2/bits/valarray_array.h
OLD_FILES+=usr/include/c++/4.2/bits/valarray_array.tcc
OLD_FILES+=usr/include/c++/4.2/bits/valarray_before.h
OLD_FILES+=usr/include/c++/4.2/bits/vector.tcc
OLD_FILES+=usr/include/c++/4.2/bitset
OLD_FILES+=usr/include/c++/4.2/cassert
OLD_FILES+=usr/include/c++/4.2/cctype
OLD_FILES+=usr/include/c++/4.2/cerrno
OLD_FILES+=usr/include/c++/4.2/cfloat
OLD_FILES+=usr/include/c++/4.2/ciso646
OLD_FILES+=usr/include/c++/4.2/climits
OLD_FILES+=usr/include/c++/4.2/clocale
OLD_FILES+=usr/include/c++/4.2/cmath
OLD_FILES+=usr/include/c++/4.2/complex
OLD_FILES+=usr/include/c++/4.2/csetjmp
OLD_FILES+=usr/include/c++/4.2/csignal
OLD_FILES+=usr/include/c++/4.2/cstdarg
OLD_FILES+=usr/include/c++/4.2/cstddef
OLD_FILES+=usr/include/c++/4.2/cstdio
OLD_FILES+=usr/include/c++/4.2/cstdlib
OLD_FILES+=usr/include/c++/4.2/cstring
OLD_FILES+=usr/include/c++/4.2/ctime
OLD_FILES+=usr/include/c++/4.2/cwchar
OLD_FILES+=usr/include/c++/4.2/cwctype
OLD_FILES+=usr/include/c++/4.2/cxxabi.h
OLD_FILES+=usr/include/c++/4.2/debug/bitset
OLD_FILES+=usr/include/c++/4.2/debug/debug.h
OLD_FILES+=usr/include/c++/4.2/debug/deque
OLD_FILES+=usr/include/c++/4.2/debug/formatter.h
OLD_FILES+=usr/include/c++/4.2/debug/functions.h
OLD_FILES+=usr/include/c++/4.2/debug/hash_map
OLD_FILES+=usr/include/c++/4.2/debug/hash_map.h
OLD_FILES+=usr/include/c++/4.2/debug/hash_multimap.h
OLD_FILES+=usr/include/c++/4.2/debug/hash_multiset.h
OLD_FILES+=usr/include/c++/4.2/debug/hash_set
OLD_FILES+=usr/include/c++/4.2/debug/hash_set.h
OLD_FILES+=usr/include/c++/4.2/debug/list
OLD_FILES+=usr/include/c++/4.2/debug/macros.h
OLD_FILES+=usr/include/c++/4.2/debug/map
OLD_FILES+=usr/include/c++/4.2/debug/map.h
OLD_FILES+=usr/include/c++/4.2/debug/multimap.h
OLD_FILES+=usr/include/c++/4.2/debug/multiset.h
OLD_FILES+=usr/include/c++/4.2/debug/safe_base.h
OLD_FILES+=usr/include/c++/4.2/debug/safe_iterator.h
OLD_FILES+=usr/include/c++/4.2/debug/safe_iterator.tcc
OLD_FILES+=usr/include/c++/4.2/debug/safe_sequence.h
OLD_FILES+=usr/include/c++/4.2/debug/set
OLD_FILES+=usr/include/c++/4.2/debug/set.h
OLD_FILES+=usr/include/c++/4.2/debug/string
OLD_FILES+=usr/include/c++/4.2/debug/vector
OLD_FILES+=usr/include/c++/4.2/deque
OLD_FILES+=usr/include/c++/4.2/exception
OLD_FILES+=usr/include/c++/4.2/exception_defines.h
OLD_FILES+=usr/include/c++/4.2/ext/algorithm
OLD_FILES+=usr/include/c++/4.2/ext/array_allocator.h
OLD_FILES+=usr/include/c++/4.2/ext/atomicity.h
OLD_FILES+=usr/include/c++/4.2/ext/bitmap_allocator.h
OLD_FILES+=usr/include/c++/4.2/ext/codecvt_specializations.h
OLD_FILES+=usr/include/c++/4.2/ext/concurrence.h
OLD_FILES+=usr/include/c++/4.2/ext/debug_allocator.h
OLD_FILES+=usr/include/c++/4.2/ext/functional
OLD_FILES+=usr/include/c++/4.2/ext/hash_fun.h
OLD_FILES+=usr/include/c++/4.2/ext/hash_map
OLD_FILES+=usr/include/c++/4.2/ext/hash_set
OLD_FILES+=usr/include/c++/4.2/ext/hashtable.h
OLD_FILES+=usr/include/c++/4.2/ext/iterator
OLD_FILES+=usr/include/c++/4.2/ext/malloc_allocator.h
OLD_FILES+=usr/include/c++/4.2/ext/memory
OLD_FILES+=usr/include/c++/4.2/ext/mt_allocator.h
OLD_FILES+=usr/include/c++/4.2/ext/new_allocator.h
OLD_FILES+=usr/include/c++/4.2/ext/numeric
OLD_FILES+=usr/include/c++/4.2/ext/numeric_traits.h
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/assoc_container.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/basic_tree_policy/basic_tree_policy_base.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/basic_tree_policy/null_node_metadata.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/basic_tree_policy/traits.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/basic_types.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/bin_search_tree_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/cond_dtor_entry_dealtor.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/cond_key_dtor_entry_dealtor.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/find_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/info_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/iterators_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/node_iterators.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/point_iterators.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/policy_access_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/r_erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/rotate_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/split_join_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/traits.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/binary_heap_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/const_iterator.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/const_point_iterator.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/entry_cmp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/entry_pred.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/find_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/info_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/iterators_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/policy_access_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/resize_policy.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/split_join_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/trace_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_/binomial_heap_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_base_/binomial_heap_base_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_base_/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_base_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_base_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_base_/find_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_base_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_base_/split_join_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/cc_ht_map_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/cmp_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/cond_key_dtor_entry_dealtor.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/constructor_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/constructor_destructor_no_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/constructor_destructor_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/debug_no_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/debug_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/entry_list_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/erase_no_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/erase_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/find_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/find_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/info_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/insert_no_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/insert_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/iterators_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/policy_access_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/resize_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/resize_no_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/resize_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/size_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/standard_policies.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/trace_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cond_dealtor.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/container_base_dispatch.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/eq_fn/eq_by_less.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/eq_fn/hash_eq_fn.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/constructor_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/constructor_destructor_no_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/constructor_destructor_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/debug_no_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/debug_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/erase_no_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/erase_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/find_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/find_no_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/find_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/gp_ht_map_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/info_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/insert_no_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/insert_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/iterator_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/policy_access_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/resize_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/resize_no_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/resize_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/standard_policies.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/trace_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/direct_mask_range_hashing_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/direct_mod_range_hashing_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/linear_probe_fn_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/mask_based_range_hashing.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/mod_based_range_hashing.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/probe_fn_base.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/quadratic_probe_fn_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/ranged_hash_fn.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/ranged_probe_fn.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/sample_probe_fn.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/sample_range_hashing.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/sample_ranged_hash_fn.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/sample_ranged_probe_fn.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/const_iterator.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/const_point_iterator.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/info_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/iterators_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/left_child_next_sibling_heap_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/node.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/null_metadata.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/policy_access_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/trace_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/constructor_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/entry_metadata_base.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/find_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/info_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/iterators_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/lu_map_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/trace_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_policy/counter_lu_metadata.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_policy/counter_lu_policy_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_policy/mtf_lu_policy_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_policy/sample_update_policy.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/map_debug_base.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/cond_dtor.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/info_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/iterators_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/node_iterators.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/ov_tree_map_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/policy_access_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/split_join_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/traits.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pairing_heap_/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pairing_heap_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pairing_heap_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pairing_heap_/find_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pairing_heap_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pairing_heap_/pairing_heap_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pairing_heap_/split_join_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/child_iterator.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/cond_dtor_entry_dealtor.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/const_child_iterator.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/find_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/head.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/info_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/insert_join_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/internal_node.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/iterators_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/leaf.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/node_base.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/node_iterators.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/node_metadata_base.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/pat_trie_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/point_iterators.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/policy_access_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/r_erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/rotate_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/split_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/split_join_branch_bag.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/synth_e_access_traits.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/trace_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/traits.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/update_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/priority_queue_base_dispatch.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/find_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/info_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/node.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/rb_tree_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/split_join_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/traits.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rc_binomial_heap_/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rc_binomial_heap_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rc_binomial_heap_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rc_binomial_heap_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rc_binomial_heap_/rc.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rc_binomial_heap_/rc_binomial_heap_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rc_binomial_heap_/split_join_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rc_binomial_heap_/trace_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy/cc_hash_max_collision_check_resize_trigger_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy/hash_exponential_size_policy_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy/hash_load_check_resize_trigger_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy/hash_load_check_resize_trigger_size_base.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy/hash_prime_size_policy_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy/hash_standard_resize_policy_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy/sample_resize_policy.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy/sample_resize_trigger.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy/sample_size_policy.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/find_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/info_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/node.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/splay_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/splay_tree_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/split_join_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/traits.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/standard_policies.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/thin_heap_/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/thin_heap_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/thin_heap_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/thin_heap_/find_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/thin_heap_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/thin_heap_/split_join_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/thin_heap_/thin_heap_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/thin_heap_/trace_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/tree_policy/node_metadata_selector.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/tree_policy/null_node_update_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/tree_policy/order_statistics_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/tree_policy/sample_tree_node_update.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/tree_trace_base.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/trie_policy/node_metadata_selector.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/trie_policy/null_node_update_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/trie_policy/order_statistics_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/trie_policy/prefix_search_node_update_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/trie_policy/sample_trie_e_access_traits.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/trie_policy/sample_trie_node_update.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/trie_policy/string_trie_e_access_traits_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/trie_policy/trie_policy_base.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/type_utils.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/types_traits.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/unordered_iterator/const_iterator.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/unordered_iterator/const_point_iterator.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/unordered_iterator/iterator.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/unordered_iterator/point_iterator.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/exception.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/hash_policy.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/list_update_policy.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/priority_queue.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/tag_and_trait.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/tree_policy.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/trie_policy.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pod_char_traits.h
OLD_FILES+=usr/include/c++/4.2/ext/pool_allocator.h
OLD_FILES+=usr/include/c++/4.2/ext/rb_tree
OLD_FILES+=usr/include/c++/4.2/ext/rc_string_base.h
OLD_FILES+=usr/include/c++/4.2/ext/rope
OLD_FILES+=usr/include/c++/4.2/ext/ropeimpl.h
OLD_FILES+=usr/include/c++/4.2/ext/slist
OLD_FILES+=usr/include/c++/4.2/ext/sso_string_base.h
OLD_FILES+=usr/include/c++/4.2/ext/stdio_filebuf.h
OLD_FILES+=usr/include/c++/4.2/ext/stdio_sync_filebuf.h
OLD_FILES+=usr/include/c++/4.2/ext/throw_allocator.h
OLD_FILES+=usr/include/c++/4.2/ext/type_traits.h
OLD_FILES+=usr/include/c++/4.2/ext/typelist.h
OLD_FILES+=usr/include/c++/4.2/ext/vstring.h
OLD_FILES+=usr/include/c++/4.2/ext/vstring.tcc
OLD_FILES+=usr/include/c++/4.2/ext/vstring_fwd.h
OLD_FILES+=usr/include/c++/4.2/ext/vstring_util.h
OLD_FILES+=usr/include/c++/4.2/fstream
OLD_FILES+=usr/include/c++/4.2/functional
OLD_FILES+=usr/include/c++/4.2/iomanip
OLD_FILES+=usr/include/c++/4.2/ios
OLD_FILES+=usr/include/c++/4.2/iosfwd
OLD_FILES+=usr/include/c++/4.2/iostream
OLD_FILES+=usr/include/c++/4.2/istream
OLD_FILES+=usr/include/c++/4.2/iterator
OLD_FILES+=usr/include/c++/4.2/limits
OLD_FILES+=usr/include/c++/4.2/list
OLD_FILES+=usr/include/c++/4.2/locale
OLD_FILES+=usr/include/c++/4.2/map
OLD_FILES+=usr/include/c++/4.2/memory
OLD_FILES+=usr/include/c++/4.2/new
OLD_FILES+=usr/include/c++/4.2/numeric
OLD_FILES+=usr/include/c++/4.2/ostream
OLD_FILES+=usr/include/c++/4.2/queue
OLD_FILES+=usr/include/c++/4.2/set
OLD_FILES+=usr/include/c++/4.2/sstream
OLD_FILES+=usr/include/c++/4.2/stack
OLD_FILES+=usr/include/c++/4.2/stdexcept
OLD_FILES+=usr/include/c++/4.2/streambuf
OLD_FILES+=usr/include/c++/4.2/string
OLD_FILES+=usr/include/c++/4.2/tr1/array
OLD_FILES+=usr/include/c++/4.2/tr1/bind_iterate.h
OLD_FILES+=usr/include/c++/4.2/tr1/bind_repeat.h
OLD_FILES+=usr/include/c++/4.2/tr1/boost_shared_ptr.h
OLD_FILES+=usr/include/c++/4.2/tr1/cctype
OLD_FILES+=usr/include/c++/4.2/tr1/cfenv
OLD_FILES+=usr/include/c++/4.2/tr1/cfloat
OLD_FILES+=usr/include/c++/4.2/tr1/cinttypes
OLD_FILES+=usr/include/c++/4.2/tr1/climits
OLD_FILES+=usr/include/c++/4.2/tr1/cmath
OLD_FILES+=usr/include/c++/4.2/tr1/common.h
OLD_FILES+=usr/include/c++/4.2/tr1/complex
OLD_FILES+=usr/include/c++/4.2/tr1/cstdarg
OLD_FILES+=usr/include/c++/4.2/tr1/cstdbool
OLD_FILES+=usr/include/c++/4.2/tr1/cstdint
OLD_FILES+=usr/include/c++/4.2/tr1/cstdio
OLD_FILES+=usr/include/c++/4.2/tr1/cstdlib
OLD_FILES+=usr/include/c++/4.2/tr1/ctgmath
OLD_FILES+=usr/include/c++/4.2/tr1/ctime
OLD_FILES+=usr/include/c++/4.2/tr1/ctype.h
OLD_FILES+=usr/include/c++/4.2/tr1/cwchar
OLD_FILES+=usr/include/c++/4.2/tr1/cwctype
OLD_FILES+=usr/include/c++/4.2/tr1/fenv.h
OLD_FILES+=usr/include/c++/4.2/tr1/float.h
OLD_FILES+=usr/include/c++/4.2/tr1/functional
OLD_FILES+=usr/include/c++/4.2/tr1/functional_hash.h
OLD_FILES+=usr/include/c++/4.2/tr1/functional_iterate.h
OLD_FILES+=usr/include/c++/4.2/tr1/hashtable
OLD_FILES+=usr/include/c++/4.2/tr1/hashtable_policy.h
OLD_FILES+=usr/include/c++/4.2/tr1/inttypes.h
OLD_FILES+=usr/include/c++/4.2/tr1/limits.h
OLD_FILES+=usr/include/c++/4.2/tr1/math.h
OLD_FILES+=usr/include/c++/4.2/tr1/memory
OLD_FILES+=usr/include/c++/4.2/tr1/mu_iterate.h
OLD_FILES+=usr/include/c++/4.2/tr1/random
OLD_FILES+=usr/include/c++/4.2/tr1/random.tcc
OLD_FILES+=usr/include/c++/4.2/tr1/ref_fwd.h
OLD_FILES+=usr/include/c++/4.2/tr1/ref_wrap_iterate.h
OLD_FILES+=usr/include/c++/4.2/tr1/repeat.h
OLD_FILES+=usr/include/c++/4.2/tr1/stdarg.h
OLD_FILES+=usr/include/c++/4.2/tr1/stdbool.h
OLD_FILES+=usr/include/c++/4.2/tr1/stdint.h
OLD_FILES+=usr/include/c++/4.2/tr1/stdio.h
OLD_FILES+=usr/include/c++/4.2/tr1/stdlib.h
OLD_FILES+=usr/include/c++/4.2/tr1/tgmath.h
OLD_FILES+=usr/include/c++/4.2/tr1/tuple
OLD_FILES+=usr/include/c++/4.2/tr1/tuple_defs.h
OLD_FILES+=usr/include/c++/4.2/tr1/tuple_iterate.h
OLD_FILES+=usr/include/c++/4.2/tr1/type_traits
OLD_FILES+=usr/include/c++/4.2/tr1/type_traits_fwd.h
OLD_FILES+=usr/include/c++/4.2/tr1/unordered_map
OLD_FILES+=usr/include/c++/4.2/tr1/unordered_set
OLD_FILES+=usr/include/c++/4.2/tr1/utility
OLD_FILES+=usr/include/c++/4.2/tr1/wchar.h
OLD_FILES+=usr/include/c++/4.2/tr1/wctype.h
OLD_FILES+=usr/include/c++/4.2/typeinfo
OLD_FILES+=usr/include/c++/4.2/utility
OLD_FILES+=usr/include/c++/4.2/valarray
OLD_FILES+=usr/include/c++/4.2/vector
OLD_FILES+=usr/lib/libstdc++.a
OLD_FILES+=usr/lib/libstdc++.so
OLD_LIBS+=usr/lib/libstdc++.so.6
OLD_FILES+=usr/lib/libstdc++_p.a
OLD_FILES+=usr/lib/libsupc++.a
OLD_FILES+=usr/lib/libsupc++.so
OLD_LIBS+=usr/lib/libsupc++.so.1
OLD_FILES+=usr/lib/libsupc++_p.a
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64"
OLD_FILES+=usr/lib32/libstdc++.a
OLD_FILES+=usr/lib32/libstdc++.so
OLD_LIBS+=usr/lib32/libstdc++.so.6
OLD_FILES+=usr/lib32/libstdc++_p.a
OLD_FILES+=usr/lib32/libsupc++.a
OLD_FILES+=usr/lib32/libsupc++.so
OLD_LIBS+=usr/lib32/libsupc++.so.1
OLD_FILES+=usr/lib32/libsupc++_p.a
.endif
OLD_FILES+=usr/libexec/cc1plus
.endif
.if ${MK_DICT} == no
OLD_FILES+=usr/share/dict/README
OLD_FILES+=usr/share/dict/eign
OLD_FILES+=usr/share/dict/freebsd
OLD_FILES+=usr/share/dict/propernames
OLD_FILES+=usr/share/dict/web2
OLD_FILES+=usr/share/dict/web2a
OLD_FILES+=usr/share/dict/words
OLD_DIRS+=usr/share/dict
.endif
.if ${MK_DMAGENT} == no
OLD_FILES+=etc/dma/dma.conf
OLD_FILES+=usr/libexec/dma
OLD_FILES+=usr/libexec/dma-mbox-create
OLD_FILES+=usr/share/man/man8/dma.8.gz
OLD_FILES+=usr/share/examples/dma/mailer.conf
.endif
.if ${MK_EE} == no
OLD_FILES+=usr/bin/edit
OLD_FILES+=usr/bin/ee
OLD_FILES+=usr/bin/ree
OLD_FILES+=usr/share/man/man1/edit.1.gz
OLD_FILES+=usr/share/man/man1/ee.1.gz
OLD_FILES+=usr/share/man/man1/ree.1.gz
OLD_FILES+=usr/share/nls/C/ee.cat
OLD_FILES+=usr/share/nls/de_DE.ISO8859-1/ee.cat
OLD_FILES+=usr/share/nls/fr_FR.ISO8859-1/ee.cat
OLD_FILES+=usr/share/nls/hu_HU.ISO8859-2/ee.cat
OLD_FILES+=usr/share/nls/pl_PL.ISO8859-2/ee.cat
OLD_FILES+=usr/share/nls/pt_BR.ISO8859-1/ee.cat
OLD_FILES+=usr/share/nls/ru_RU.KOI8-R/ee.cat
OLD_FILES+=usr/share/nls/uk_UA.KOI8-U/ee.cat
.endif
.if ${MK_EXAMPLES} == no
OLD_FILES+=usr/share/examples/BSD_daemon/FreeBSD.pfa
OLD_FILES+=usr/share/examples/BSD_daemon/README
OLD_FILES+=usr/share/examples/BSD_daemon/beastie.eps
OLD_FILES+=usr/share/examples/BSD_daemon/beastie.fig
OLD_FILES+=usr/share/examples/BSD_daemon/eps.patch
OLD_FILES+=usr/share/examples/BSD_daemon/poster.sh
OLD_FILES+=usr/share/examples/FreeBSD_version/FreeBSD_version.c
OLD_FILES+=usr/share/examples/FreeBSD_version/Makefile
OLD_FILES+=usr/share/examples/FreeBSD_version/README
OLD_FILES+=usr/share/examples/IPv6/USAGE
OLD_FILES+=usr/share/examples/bhyve/vmrun.sh
OLD_FILES+=usr/share/examples/bootforth/README
OLD_FILES+=usr/share/examples/bootforth/boot.4th
OLD_FILES+=usr/share/examples/bootforth/frames.4th
OLD_FILES+=usr/share/examples/bootforth/loader.rc
OLD_FILES+=usr/share/examples/bootforth/menu.4th
OLD_FILES+=usr/share/examples/bootforth/menuconf.4th
OLD_FILES+=usr/share/examples/bootforth/screen.4th
OLD_FILES+=usr/share/examples/bsdconfig/add_some_packages.sh
OLD_FILES+=usr/share/examples/bsdconfig/browse_packages_http.sh
OLD_FILES+=usr/share/examples/bsdconfig/bsdconfigrc
OLD_FILES+=usr/share/examples/csh/dot.cshrc
OLD_FILES+=usr/share/examples/diskless/ME
OLD_FILES+=usr/share/examples/diskless/README.BOOTP
OLD_FILES+=usr/share/examples/diskless/README.TEMPLATING
OLD_FILES+=usr/share/examples/diskless/clone_root
OLD_FILES+=usr/share/examples/dma/mailer.conf
OLD_FILES+=usr/share/examples/drivers/README
OLD_FILES+=usr/share/examples/drivers/make_device_driver.sh
OLD_FILES+=usr/share/examples/drivers/make_pseudo_driver.sh
OLD_FILES+=usr/share/examples/dwatch/profile_template
OLD_FILES+=usr/share/examples/etc/README.examples
OLD_FILES+=usr/share/examples/etc/bsd-style-copyright
OLD_FILES+=usr/share/examples/etc/group
OLD_FILES+=usr/share/examples/etc/login.access
OLD_FILES+=usr/share/examples/etc/make.conf
OLD_FILES+=usr/share/examples/etc/rc.bsdextended
OLD_FILES+=usr/share/examples/etc/rc.firewall
OLD_FILES+=usr/share/examples/etc/rc.sendmail
OLD_FILES+=usr/share/examples/etc/termcap.small
OLD_FILES+=usr/share/examples/etc/wpa_supplicant.conf
OLD_FILES+=usr/share/examples/find_interface/Makefile
OLD_FILES+=usr/share/examples/find_interface/README
OLD_FILES+=usr/share/examples/find_interface/find_interface.c
OLD_FILES+=usr/share/examples/hast/ucarp.sh
OLD_FILES+=usr/share/examples/hast/ucarp_down.sh
OLD_FILES+=usr/share/examples/hast/ucarp_up.sh
OLD_FILES+=usr/share/examples/hast/vip-down.sh
OLD_FILES+=usr/share/examples/hast/vip-up.sh
OLD_FILES+=usr/share/examples/hostapd/hostapd.conf
OLD_FILES+=usr/share/examples/hostapd/hostapd.eap_user
OLD_FILES+=usr/share/examples/hostapd/hostapd.wpa_psk
OLD_FILES+=usr/share/examples/indent/indent.pro
OLD_FILES+=usr/share/examples/ipfilter/BASIC.NAT
OLD_FILES+=usr/share/examples/ipfilter/BASIC_1.FW
OLD_FILES+=usr/share/examples/ipfilter/BASIC_2.FW
OLD_FILES+=usr/share/examples/ipfilter/README
OLD_FILES+=usr/share/examples/ipfilter/example.1
OLD_FILES+=usr/share/examples/ipfilter/example.10
OLD_FILES+=usr/share/examples/ipfilter/example.11
OLD_FILES+=usr/share/examples/ipfilter/example.12
OLD_FILES+=usr/share/examples/ipfilter/example.13
OLD_FILES+=usr/share/examples/ipfilter/example.14
OLD_FILES+=usr/share/examples/ipfilter/example.2
OLD_FILES+=usr/share/examples/ipfilter/example.3
OLD_FILES+=usr/share/examples/ipfilter/example.4
OLD_FILES+=usr/share/examples/ipfilter/example.5
OLD_FILES+=usr/share/examples/ipfilter/example.6
OLD_FILES+=usr/share/examples/ipfilter/example.7
OLD_FILES+=usr/share/examples/ipfilter/example.8
OLD_FILES+=usr/share/examples/ipfilter/example.9
OLD_FILES+=usr/share/examples/ipfilter/example.sr
OLD_FILES+=usr/share/examples/ipfilter/examples.txt
OLD_FILES+=usr/share/examples/ipfilter/firewall
OLD_FILES+=usr/share/examples/ipfilter/firewall.1
OLD_FILES+=usr/share/examples/ipfilter/firewall.2
OLD_FILES+=usr/share/examples/ipfilter/ftp-proxy
OLD_FILES+=usr/share/examples/ipfilter/ftppxy
OLD_FILES+=usr/share/examples/ipfilter/ipf-howto.txt
OLD_FILES+=usr/share/examples/ipfilter/ipf.conf.permissive
OLD_FILES+=usr/share/examples/ipfilter/ipf.conf.restrictive
OLD_FILES+=usr/share/examples/ipfilter/ipf.conf.sample
OLD_FILES+=usr/share/examples/ipfilter/ipnat.conf.sample
OLD_FILES+=usr/share/examples/ipfilter/mkfilters
OLD_FILES+=usr/share/examples/ipfilter/nat-setup
OLD_FILES+=usr/share/examples/ipfilter/nat.eg
OLD_FILES+=usr/share/examples/ipfilter/rules.txt
OLD_FILES+=usr/share/examples/ipfilter/server
OLD_FILES+=usr/share/examples/ipfilter/tcpstate
OLD_FILES+=usr/share/examples/ipfw/change_rules.sh
OLD_FILES+=usr/share/examples/jails/README
OLD_FILES+=usr/share/examples/jails/VIMAGE
OLD_FILES+=usr/share/examples/jails/jail.xxx.conf
OLD_FILES+=usr/share/examples/jails/jib
OLD_FILES+=usr/share/examples/jails/jng
OLD_FILES+=usr/share/examples/jails/rc.conf.jails
OLD_FILES+=usr/share/examples/jails/rcjail.xxx.conf
OLD_FILES+=usr/share/examples/kld/Makefile
OLD_FILES+=usr/share/examples/kld/cdev/Makefile
OLD_FILES+=usr/share/examples/kld/cdev/README
OLD_FILES+=usr/share/examples/kld/cdev/module/Makefile
OLD_FILES+=usr/share/examples/kld/cdev/module/cdev.c
OLD_FILES+=usr/share/examples/kld/cdev/module/cdev.h
OLD_FILES+=usr/share/examples/kld/cdev/module/cdevmod.c
OLD_FILES+=usr/share/examples/kld/cdev/test/Makefile
OLD_FILES+=usr/share/examples/kld/cdev/test/testcdev.c
OLD_FILES+=usr/share/examples/kld/dyn_sysctl/Makefile
OLD_FILES+=usr/share/examples/kld/dyn_sysctl/README
OLD_FILES+=usr/share/examples/kld/dyn_sysctl/dyn_sysctl.c
OLD_FILES+=usr/share/examples/kld/firmware/Makefile
OLD_FILES+=usr/share/examples/kld/firmware/README
OLD_FILES+=usr/share/examples/kld/firmware/fwconsumer/Makefile
OLD_FILES+=usr/share/examples/kld/firmware/fwconsumer/fw_consumer.c
OLD_FILES+=usr/share/examples/kld/firmware/fwimage/Makefile
OLD_FILES+=usr/share/examples/kld/firmware/fwimage/firmware.img.uu
OLD_FILES+=usr/share/examples/kld/khelp/Makefile
OLD_FILES+=usr/share/examples/kld/khelp/README
OLD_FILES+=usr/share/examples/kld/khelp/h_example.c
OLD_FILES+=usr/share/examples/kld/syscall/Makefile
OLD_FILES+=usr/share/examples/kld/syscall/module/Makefile
OLD_FILES+=usr/share/examples/kld/syscall/module/syscall.c
OLD_FILES+=usr/share/examples/kld/syscall/test/Makefile
OLD_FILES+=usr/share/examples/kld/syscall/test/call.c
OLD_FILES+=usr/share/examples/libusb20/Makefile
OLD_FILES+=usr/share/examples/libusb20/README
OLD_FILES+=usr/share/examples/libusb20/bulk.c
OLD_FILES+=usr/share/examples/libusb20/control.c
OLD_FILES+=usr/share/examples/libusb20/util.c
OLD_FILES+=usr/share/examples/libusb20/util.h
OLD_FILES+=usr/share/examples/libvgl/Makefile
OLD_FILES+=usr/share/examples/libvgl/demo.c
OLD_FILES+=usr/share/examples/mdoc/POSIX-copyright
OLD_FILES+=usr/share/examples/mdoc/deshallify.sh
OLD_FILES+=usr/share/examples/mdoc/example.1
OLD_FILES+=usr/share/examples/mdoc/example.3
OLD_FILES+=usr/share/examples/mdoc/example.4
OLD_FILES+=usr/share/examples/mdoc/example.9
OLD_FILES+=usr/share/examples/netgraph/ether.bridge
OLD_FILES+=usr/share/examples/netgraph/frame_relay
OLD_FILES+=usr/share/examples/netgraph/ngctl
OLD_FILES+=usr/share/examples/netgraph/raw
OLD_FILES+=usr/share/examples/netgraph/udp.tunnel
OLD_FILES+=usr/share/examples/netgraph/virtual.chain
OLD_FILES+=usr/share/examples/netgraph/virtual.lan
OLD_FILES+=usr/share/examples/pc-sysinstall/README
OLD_FILES+=usr/share/examples/pc-sysinstall/pc-autoinstall.conf
OLD_FILES+=usr/share/examples/pc-sysinstall/pcinstall.cfg.fbsd-netinstall
OLD_FILES+=usr/share/examples/pc-sysinstall/pcinstall.cfg.geli
OLD_FILES+=usr/share/examples/pc-sysinstall/pcinstall.cfg.gmirror
OLD_FILES+=usr/share/examples/pc-sysinstall/pcinstall.cfg.netinstall
OLD_FILES+=usr/share/examples/pc-sysinstall/pcinstall.cfg.restore
OLD_FILES+=usr/share/examples/pc-sysinstall/pcinstall.cfg.rsync
OLD_FILES+=usr/share/examples/pc-sysinstall/pcinstall.cfg.upgrade
OLD_FILES+=usr/share/examples/pc-sysinstall/pcinstall.cfg.zfs
OLD_FILES+=usr/share/examples/perfmon/Makefile
OLD_FILES+=usr/share/examples/perfmon/README
OLD_FILES+=usr/share/examples/perfmon/perfmon.c
OLD_FILES+=usr/share/examples/pf/ackpri
OLD_FILES+=usr/share/examples/pf/faq-example1
OLD_FILES+=usr/share/examples/pf/faq-example2
OLD_FILES+=usr/share/examples/pf/faq-example3
OLD_FILES+=usr/share/examples/pf/pf.conf
OLD_FILES+=usr/share/examples/pf/queue1
OLD_FILES+=usr/share/examples/pf/queue2
OLD_FILES+=usr/share/examples/pf/queue3
OLD_FILES+=usr/share/examples/pf/queue4
OLD_FILES+=usr/share/examples/pf/spamd
OLD_FILES+=usr/share/examples/ppi/Makefile
OLD_FILES+=usr/share/examples/ppi/ppilcd.c
OLD_FILES+=usr/share/examples/ppp/chap-auth
OLD_FILES+=usr/share/examples/ppp/login-auth
OLD_FILES+=usr/share/examples/ppp/ppp.conf.sample
OLD_FILES+=usr/share/examples/ppp/ppp.conf.span-isp
OLD_FILES+=usr/share/examples/ppp/ppp.conf.span-isp.working
OLD_FILES+=usr/share/examples/ppp/ppp.linkdown.sample
OLD_FILES+=usr/share/examples/ppp/ppp.linkdown.span-isp
OLD_FILES+=usr/share/examples/ppp/ppp.linkdown.span-isp.working
OLD_FILES+=usr/share/examples/ppp/ppp.linkup.sample
OLD_FILES+=usr/share/examples/ppp/ppp.linkup.span-isp
OLD_FILES+=usr/share/examples/ppp/ppp.linkup.span-isp.working
OLD_FILES+=usr/share/examples/ppp/ppp.secret.sample
OLD_FILES+=usr/share/examples/ppp/ppp.secret.span-isp
OLD_FILES+=usr/share/examples/ppp/ppp.secret.span-isp.working
OLD_FILES+=usr/share/examples/printing/diablo-if-net
OLD_FILES+=usr/share/examples/printing/hpdf
OLD_FILES+=usr/share/examples/printing/hpif
OLD_FILES+=usr/share/examples/printing/hpof
OLD_FILES+=usr/share/examples/printing/hprf
OLD_FILES+=usr/share/examples/printing/hpvf
OLD_FILES+=usr/share/examples/printing/if-simple
OLD_FILES+=usr/share/examples/printing/if-simpleX
OLD_FILES+=usr/share/examples/printing/ifhp
OLD_FILES+=usr/share/examples/printing/make-ps-header
OLD_FILES+=usr/share/examples/printing/netprint
OLD_FILES+=usr/share/examples/printing/psdf
OLD_FILES+=usr/share/examples/printing/psdfX
OLD_FILES+=usr/share/examples/printing/psif
OLD_FILES+=usr/share/examples/printing/pstf
OLD_FILES+=usr/share/examples/printing/pstfX
OLD_FILES+=usr/share/examples/scsi_target/Makefile
OLD_FILES+=usr/share/examples/scsi_target/scsi_cmds.c
OLD_FILES+=usr/share/examples/scsi_target/scsi_target.8
OLD_FILES+=usr/share/examples/scsi_target/scsi_target.c
OLD_FILES+=usr/share/examples/scsi_target/scsi_target.h
OLD_FILES+=usr/share/examples/ses/Makefile
OLD_FILES+=usr/share/examples/ses/Makefile.inc
OLD_FILES+=usr/share/examples/ses/getencstat/Makefile
OLD_FILES+=usr/share/examples/ses/getencstat/getencstat.0
OLD_FILES+=usr/share/examples/ses/sesd/Makefile
OLD_FILES+=usr/share/examples/ses/sesd/sesd.0
OLD_FILES+=usr/share/examples/ses/setencstat/Makefile
OLD_FILES+=usr/share/examples/ses/setencstat/setencstat.0
OLD_FILES+=usr/share/examples/ses/setobjstat/Makefile
OLD_FILES+=usr/share/examples/ses/setobjstat/setobjstat.0
OLD_FILES+=usr/share/examples/ses/srcs/chpmon.c
OLD_FILES+=usr/share/examples/ses/srcs/eltsub.c
OLD_FILES+=usr/share/examples/ses/srcs/eltsub.h
OLD_FILES+=usr/share/examples/ses/srcs/getencstat.c
OLD_FILES+=usr/share/examples/ses/srcs/getnobj.c
OLD_FILES+=usr/share/examples/ses/srcs/getobjmap.c
OLD_FILES+=usr/share/examples/ses/srcs/getobjstat.c
OLD_FILES+=usr/share/examples/ses/srcs/inienc.c
OLD_FILES+=usr/share/examples/ses/srcs/sesd.c
OLD_FILES+=usr/share/examples/ses/srcs/setencstat.c
OLD_FILES+=usr/share/examples/ses/srcs/setobjstat.c
OLD_FILES+=usr/share/examples/smbfs/dot.nsmbrc
OLD_FILES+=usr/share/examples/smbfs/print/lj6l
OLD_FILES+=usr/share/examples/smbfs/print/ljspool
OLD_FILES+=usr/share/examples/smbfs/print/printcap.sample
OLD_FILES+=usr/share/examples/smbfs/print/tolj
OLD_FILES+=usr/share/examples/sunrpc/Makefile
OLD_FILES+=usr/share/examples/sunrpc/dir/Makefile
OLD_FILES+=usr/share/examples/sunrpc/dir/dir.x
OLD_FILES+=usr/share/examples/sunrpc/dir/dir_proc.c
OLD_FILES+=usr/share/examples/sunrpc/dir/rls.c
OLD_FILES+=usr/share/examples/sunrpc/msg/Makefile
OLD_FILES+=usr/share/examples/sunrpc/msg/msg.x
OLD_FILES+=usr/share/examples/sunrpc/msg/msg_proc.c
OLD_FILES+=usr/share/examples/sunrpc/msg/printmsg.c
OLD_FILES+=usr/share/examples/sunrpc/msg/rprintmsg.c
OLD_FILES+=usr/share/examples/sunrpc/sort/Makefile
OLD_FILES+=usr/share/examples/sunrpc/sort/rsort.c
OLD_FILES+=usr/share/examples/sunrpc/sort/sort.x
OLD_FILES+=usr/share/examples/sunrpc/sort/sort_proc.c
OLD_FILES+=usr/share/examples/tcsh/complete.tcsh
OLD_FILES+=usr/share/examples/tcsh/csh-mode.el
OLD_FILES+=usr/share/examples/uefisign/uefikeys
OLD_FILES+=usr/share/examples/ypldap/ypldap.conf
OLD_DIRS+=usr/share/examples
OLD_DIRS+=usr/share/examples/BSD_daemon
OLD_DIRS+=usr/share/examples/FreeBSD_version
OLD_DIRS+=usr/share/examples/IPv6
OLD_DIRS+=usr/share/examples/bhyve
OLD_DIRS+=usr/share/examples/bootforth
OLD_DIRS+=usr/share/examples/bsdconfig
OLD_DIRS+=usr/share/examples/csh
OLD_DIRS+=usr/share/examples/diskless
OLD_DIRS+=usr/share/examples/dma
OLD_DIRS+=usr/share/examples/drivers
OLD_DIRS+=usr/share/examples/dwatch
OLD_DIRS+=usr/share/examples/etc
OLD_DIRS+=usr/share/examples/etc/defaults
OLD_DIRS+=usr/share/examples/find_interface
OLD_DIRS+=usr/share/examples/hast
OLD_DIRS+=usr/share/examples/hostapd
OLD_DIRS+=usr/share/examples/ibcs2
OLD_DIRS+=usr/share/examples/indent
OLD_DIRS+=usr/share/examples/ipfilter
OLD_DIRS+=usr/share/examples/ipfw
OLD_DIRS+=usr/share/examples/jails
OLD_DIRS+=usr/share/examples/kld
OLD_DIRS+=usr/share/examples/kld/cdev
OLD_DIRS+=usr/share/examples/kld/cdev/module
OLD_DIRS+=usr/share/examples/kld/cdev/test
OLD_DIRS+=usr/share/examples/kld/dyn_sysctl
OLD_DIRS+=usr/share/examples/kld/firmware
OLD_DIRS+=usr/share/examples/kld/firmware/fwconsumer
OLD_DIRS+=usr/share/examples/kld/firmware/fwimage
OLD_DIRS+=usr/share/examples/kld/khelp
OLD_DIRS+=usr/share/examples/kld/syscall
OLD_DIRS+=usr/share/examples/kld/syscall/module
OLD_DIRS+=usr/share/examples/kld/syscall/test
OLD_DIRS+=usr/share/examples/libusb20
OLD_DIRS+=usr/share/examples/libvgl
OLD_DIRS+=usr/share/examples/mdoc
OLD_DIRS+=usr/share/examples/netgraph
OLD_DIRS+=usr/share/examples/pc-sysinstall
OLD_DIRS+=usr/share/examples/perfmon
OLD_DIRS+=usr/share/examples/pf
OLD_DIRS+=usr/share/examples/ppi
OLD_DIRS+=usr/share/examples/ppp
OLD_DIRS+=usr/share/examples/printing
OLD_DIRS+=usr/share/examples/scsi_target
OLD_DIRS+=usr/share/examples/ses
OLD_DIRS+=usr/share/examples/ses/getencstat
OLD_DIRS+=usr/share/examples/ses/sesd
OLD_DIRS+=usr/share/examples/ses/setencstat
OLD_DIRS+=usr/share/examples/ses/setobjstat
OLD_DIRS+=usr/share/examples/ses/srcs
OLD_DIRS+=usr/share/examples/smbfs
OLD_DIRS+=usr/share/examples/smbfs/print
OLD_DIRS+=usr/share/examples/sunrpc
OLD_DIRS+=usr/share/examples/sunrpc/dir
OLD_DIRS+=usr/share/examples/sunrpc/msg
OLD_DIRS+=usr/share/examples/sunrpc/sort
OLD_DIRS+=usr/share/examples/tcsh
OLD_DIRS+=usr/share/examples/uefisign
OLD_DIRS+=usr/share/examples/ypldap
.endif
.if ${MK_FINGER} == no
OLD_FILES+=usr/bin/finger
OLD_FILES+=usr/share/man/man1/finger.1.gz
OLD_FILES+=usr/share/man/man5/finger.conf.5.gz
OLD_FILES+=usr/libexec/fingerd
OLD_FILES+=usr/share/man/man8/fingerd.8.gz
.endif
.if ${MK_FLOPPY} == no
OLD_FILES+=usr/sbin/fdcontrol
OLD_FILES+=usr/sbin/fdformat
OLD_FILES+=usr/sbin/fdread
OLD_FILES+=usr/sbin/fdwrite
OLD_FILES+=usr/share/man/man1/fdformat.1.gz
OLD_FILES+=usr/share/man/man1/fdread.1.gz
OLD_FILES+=usr/share/man/man1/fdwrite.1.gz
OLD_FILES+=usr/share/man/man8/fdcontrol.8.gz
.endif
.if ${MK_FORTH} == no
OLD_FILES+=usr/share/man/man8/beastie.4th.8.gz
OLD_FILES+=usr/share/man/man8/brand.4th.8.gz
OLD_FILES+=usr/share/man/man8/check-password.4th.8.gz
OLD_FILES+=usr/share/man/man8/color.4th.8.gz
OLD_FILES+=usr/share/man/man8/delay.4th.8.gz
OLD_FILES+=usr/share/man/man8/loader.4th.8.gz
OLD_FILES+=usr/share/man/man8/menu.4th.8.gz
OLD_FILES+=usr/share/man/man8/menusets.4th.8.gz
OLD_FILES+=usr/share/man/man8/version.4th.8.gz
.endif
.if ${MK_FREEBSD_UPDATE} == no
OLD_FILES+=etc/freebsd-update.conf
OLD_FILES+=usr/sbin/freebsd-update
OLD_FILES+=usr/share/examples/etc/freebsd-update.conf
OLD_FILES+=usr/share/man/man5/freebsd-update.conf.5.gz
OLD_FILES+=usr/share/man/man8/freebsd-update.8.gz
.endif
.if ${MK_GAMES} == no
OLD_FILES+=usr/bin/caesar
OLD_FILES+=usr/bin/factor
OLD_FILES+=usr/bin/fortune
OLD_FILES+=usr/bin/grdc
OLD_FILES+=usr/bin/morse
OLD_FILES+=usr/bin/number
OLD_FILES+=usr/bin/pom
OLD_FILES+=usr/bin/primes
OLD_FILES+=usr/bin/random
OLD_FILES+=usr/bin/rot13
OLD_FILES+=usr/bin/strfile
OLD_FILES+=usr/bin/unstr
OLD_FILES+=usr/share/games/fortune/fortunes
OLD_FILES+=usr/share/games/fortune/fortunes.dat
OLD_FILES+=usr/share/games/fortune/freebsd-tips
OLD_FILES+=usr/share/games/fortune/freebsd-tips.dat
OLD_FILES+=usr/share/games/fortune/gerrold.limerick
OLD_FILES+=usr/share/games/fortune/gerrold.limerick.dat
OLD_FILES+=usr/share/games/fortune/limerick
OLD_FILES+=usr/share/games/fortune/limerick.dat
OLD_FILES+=usr/share/games/fortune/murphy
OLD_FILES+=usr/share/games/fortune/murphy-o
OLD_FILES+=usr/share/games/fortune/murphy-o.dat
OLD_FILES+=usr/share/games/fortune/murphy.dat
OLD_FILES+=usr/share/games/fortune/startrek
OLD_FILES+=usr/share/games/fortune/startrek.dat
OLD_FILES+=usr/share/games/fortune/zippy
OLD_FILES+=usr/share/games/fortune/zippy.dat
OLD_DIRS+=usr/share/games/fortune
OLD_DIRS+=usr/share/games
OLD_FILES+=usr/share/man/man6/caesar.6.gz
OLD_FILES+=usr/share/man/man6/factor.6.gz
OLD_FILES+=usr/share/man/man6/fortune.6.gz
OLD_FILES+=usr/share/man/man6/grdc.6.gz
OLD_FILES+=usr/share/man/man6/morse.6.gz
OLD_FILES+=usr/share/man/man6/number.6.gz
OLD_FILES+=usr/share/man/man6/pom.6.gz
OLD_FILES+=usr/share/man/man6/primes.6.gz
OLD_FILES+=usr/share/man/man6/random.6.gz
OLD_FILES+=usr/share/man/man6/rot13.6.gz
OLD_FILES+=usr/share/man/man8/strfile.8.gz
OLD_FILES+=usr/share/man/man8/unstr.8.gz
.endif
.if ${MK_GCC} == no
OLD_FILES+=usr/bin/g++
OLD_FILES+=usr/bin/gcc
OLD_FILES+=usr/bin/gcpp
OLD_FILES+=usr/bin/gperf
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/include/gcc/4.2/__wmmintrin_aes.h
OLD_FILES+=usr/include/gcc/4.2/__wmmintrin_pclmul.h
OLD_FILES+=usr/include/gcc/4.2/ammintrin.h
OLD_FILES+=usr/include/gcc/4.2/emmintrin.h
OLD_FILES+=usr/include/gcc/4.2/mm3dnow.h
OLD_FILES+=usr/include/gcc/4.2/mm_malloc.h
OLD_FILES+=usr/include/gcc/4.2/mmintrin.h
OLD_FILES+=usr/include/gcc/4.2/pmmintrin.h
OLD_FILES+=usr/include/gcc/4.2/tmmintrin.h
OLD_FILES+=usr/include/gcc/4.2/wmmintrin.h
OLD_FILES+=usr/include/gcc/4.2/xmmintrin.h
.elif ${TARGET_ARCH} == "arm"
OLD_FILES+=usr/include/gcc/4.2/mmintrin.h
.elif ${TARGET_ARCH} == "powerpc" || ${TARGET_ARCH} == "powerpc64"
OLD_FILES+=usr/include/gcc/4.2/altivec.h
OLD_FILES+=usr/include/gcc/4.2/ppc-asm.h
OLD_FILES+=usr/include/gcc/4.2/spe.h
.endif
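# omp.h and the libgomp.so link are shared with the LLVM OpenMP runtime,
# so they are only obsolete when OpenMP is disabled as well.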
.if ${MK_OPENMP} == no
OLD_FILES+=usr/include/omp.h
.endif
OLD_FILES+=usr/lib/libgcov.a
OLD_FILES+=usr/lib/libgomp.a
.if ${MK_OPENMP} == no
OLD_FILES+=usr/lib/libgomp.so
.endif
OLD_LIBS+=usr/lib/libgomp.so.1
OLD_FILES+=usr/lib/libgomp_p.a
OLD_FILES+=usr/lib32/libgcov.a
OLD_FILES+=usr/lib32/libgomp.a
.if ${MK_OPENMP} == no
OLD_FILES+=usr/lib32/libgomp.so
.endif
OLD_LIBS+=usr/lib32/libgomp.so.1
OLD_FILES+=usr/lib32/libgomp_p.a
OLD_FILES+=usr/libexec/cc1
OLD_FILES+=usr/libexec/cc1plus
OLD_FILES+=usr/share/man/man1/g++.1.gz
OLD_FILES+=usr/share/man/man1/gcc.1.gz
OLD_FILES+=usr/share/man/man1/gcpp.1.gz
OLD_FILES+=usr/share/man/man1/gperf.1.gz
OLD_FILES+=usr/share/man/man7/gperf.7.gz
.endif
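# gcov(1) may be installed either by GCC or as an LLVM coverage front end,
# so it is only obsolete when no enabled option would provide it.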
.if (${MK_GCOV} == no || ${MK_GCC} == no) && ${MK_LLVM_COV} == no
OLD_FILES+=usr/bin/gcov
OLD_FILES+=usr/share/man/man1/gcov.1.gz
.endif
.if ${MK_LLVM_COV} == no
OLD_FILES+=usr/bin/llvm-cov
OLD_FILES+=usr/bin/llvm-profdata
OLD_FILES+=usr/share/man/man1/llvm-cov.1.gz
.endif
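# The next two blocks are complementary: the /usr/bin copies of gdb go away
# when GDB is disabled or relocated to libexec, and the /usr/libexec copies
# go away when GDB is disabled or kept in /usr/bin.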
.if ${MK_GDB} == no || ${MK_GDB_LIBEXEC} == yes
OLD_FILES+=usr/bin/gdb
OLD_FILES+=usr/bin/gdbserver
OLD_FILES+=usr/bin/kgdb
OLD_FILES+=usr/share/man/man1/gdb.1.gz
OLD_FILES+=usr/share/man/man1/gdbserver.1.gz
OLD_FILES+=usr/share/man/man1/kgdb.1.gz
.endif
.if ${MK_GDB} == no || ${MK_GDB_LIBEXEC} == no
OLD_FILES+=usr/libexec/gdb
OLD_FILES+=usr/libexec/kgdb
.endif
.if ${MK_GOOGLETEST} == no
OLD_FILES+=usr/include/gmock/gmock-actions.h
OLD_FILES+=usr/include/gmock/gmock-cardinalities.h
OLD_FILES+=usr/include/gmock/gmock-generated-actions.h
OLD_FILES+=usr/include/gmock/gmock-generated-function-mockers.h
OLD_FILES+=usr/include/gmock/gmock-generated-matchers.h
OLD_FILES+=usr/include/gmock/gmock-generated-nice-strict.h
OLD_FILES+=usr/include/gmock/gmock-matchers.h
OLD_FILES+=usr/include/gmock/gmock-more-actions.h
OLD_FILES+=usr/include/gmock/gmock-more-matchers.h
OLD_FILES+=usr/include/gmock/gmock-spec-builders.h
OLD_FILES+=usr/include/gmock/gmock.h
OLD_FILES+=usr/include/gmock/internal/custom/gmock-generated-actions.h
OLD_FILES+=usr/include/gmock/internal/custom/gmock-matchers.h
OLD_FILES+=usr/include/gmock/internal/custom/gmock-port.h
OLD_FILES+=usr/include/gmock/internal/gmock-generated-internal-utils.h
OLD_FILES+=usr/include/gmock/internal/gmock-internal-utils.h
OLD_FILES+=usr/include/gmock/internal/gmock-port.h
OLD_DIRS+=usr/include/gmock
OLD_FILES+=usr/include/gtest/gtest_pred_impl.h
OLD_FILES+=usr/include/gtest/gtest_prod.h
OLD_FILES+=usr/include/gtest/gtest-death-test.h
OLD_FILES+=usr/include/gtest/gtest-message.h
OLD_FILES+=usr/include/gtest/gtest-param-test.h
OLD_FILES+=usr/include/gtest/gtest-printers.h
OLD_FILES+=usr/include/gtest/gtest-spi.h
OLD_FILES+=usr/include/gtest/gtest-test-part.h
OLD_FILES+=usr/include/gtest/gtest-typed-test.h
OLD_FILES+=usr/include/gtest/gtest.h
OLD_FILES+=usr/include/gtest/internal/custom/gtest-port.h
OLD_FILES+=usr/include/gtest/internal/custom/gtest-printers.h
OLD_FILES+=usr/include/gtest/internal/custom/gtest.h
OLD_FILES+=usr/include/gtest/internal/gtest-death-test-internal.h
OLD_FILES+=usr/include/gtest/internal/gtest-filepath.h
OLD_FILES+=usr/include/gtest/internal/gtest-internal.h
OLD_FILES+=usr/include/gtest/internal/gtest-linked_ptr.h
OLD_FILES+=usr/include/gtest/internal/gtest-param-util-generated.h
OLD_FILES+=usr/include/gtest/internal/gtest-param-util.h
OLD_FILES+=usr/include/gtest/internal/gtest-port-arch.h
OLD_FILES+=usr/include/gtest/internal/gtest-port.h
OLD_FILES+=usr/include/gtest/internal/gtest-string.h
OLD_FILES+=usr/include/gtest/internal/gtest-tuple.h
OLD_FILES+=usr/include/gtest/internal/gtest-type-util.h
OLD_DIRS+=usr/include/gtest
OLD_FILES+=usr/lib/libprivategmock_main.a
OLD_FILES+=usr/lib/libprivategmock_main.so
OLD_LIBS+=usr/lib/libprivategmock_main.so.0
OLD_FILES+=usr/lib/libprivategmock_main_p.a
OLD_FILES+=usr/lib/libprivategmock.a
OLD_FILES+=usr/lib/libprivategmock.so
OLD_LIBS+=usr/lib/libprivategmock.so.0
OLD_FILES+=usr/lib/libprivategmock_p.a
OLD_FILES+=usr/lib/libprivategtest_main.a
OLD_FILES+=usr/lib/libprivategtest_main.so
OLD_LIBS+=usr/lib/libprivategtest_main.so.0
OLD_FILES+=usr/lib/libprivategtest_main_p.a
OLD_FILES+=usr/lib/libprivategtest.a
OLD_FILES+=usr/lib/libprivategtest.so
OLD_LIBS+=usr/lib/libprivategtest.so.0
OLD_FILES+=usr/lib/libprivategtest_p.a
OLD_FILES+=usr/tests/lib/googletest/gmock/gmock_stress_test
OLD_FILES+=usr/tests/lib/googletest/gmock/Kyuafile
OLD_DIRS+=usr/tests/lib/googletest/gmock
OLD_FILES+=usr/tests/lib/googletest/gmock_main/gmock_ex_test
OLD_FILES+=usr/tests/lib/googletest/gmock_main/gmock_link_test
OLD_FILES+=usr/tests/lib/googletest/gmock_main/gmock_test
OLD_FILES+=usr/tests/lib/googletest/gmock_main/gmock-actions_test
OLD_FILES+=usr/tests/lib/googletest/gmock_main/gmock-cardinalities_test
OLD_FILES+=usr/tests/lib/googletest/gmock_main/gmock-ex_test
OLD_FILES+=usr/tests/lib/googletest/gmock_main/gmock-generated-actions_test
OLD_FILES+=usr/tests/lib/googletest/gmock_main/gmock-generated-function-mockers_test
OLD_FILES+=usr/tests/lib/googletest/gmock_main/gmock-generated-internal-utils_test
OLD_FILES+=usr/tests/lib/googletest/gmock_main/gmock-generated-matchers_test
OLD_FILES+=usr/tests/lib/googletest/gmock_main/gmock-internal-utils_test
OLD_FILES+=usr/tests/lib/googletest/gmock_main/gmock-matchers_test
OLD_FILES+=usr/tests/lib/googletest/gmock_main/gmock-more-actions_test
OLD_FILES+=usr/tests/lib/googletest/gmock_main/gmock-nice-strict_test
OLD_FILES+=usr/tests/lib/googletest/gmock_main/gmock-port_test
OLD_FILES+=usr/tests/lib/googletest/gmock_main/gmock-spec-builders_test
OLD_FILES+=usr/tests/lib/googletest/gmock_main/Kyuafile
OLD_DIRS+=usr/tests/lib/googletest/gmock_main
OLD_FILES+=usr/tests/lib/googletest/gtest/googletest-param-test-test
OLD_FILES+=usr/tests/lib/googletest/gtest/gtest_all_test
OLD_FILES+=usr/tests/lib/googletest/gtest/gtest_environment_test
OLD_FILES+=usr/tests/lib/googletest/gtest/gtest_no_test_unittest
OLD_FILES+=usr/tests/lib/googletest/gtest/gtest_premature_exit_test
OLD_FILES+=usr/tests/lib/googletest/gtest/gtest_repeat_test
OLD_FILES+=usr/tests/lib/googletest/gtest/gtest_stress_test
OLD_FILES+=usr/tests/lib/googletest/gtest/gtest_throw_on_failure_ex_test
OLD_FILES+=usr/tests/lib/googletest/gtest/gtest-death-test_ex_catch_test
OLD_FILES+=usr/tests/lib/googletest/gtest/gtest-death-test_ex_nocatch_test
OLD_FILES+=usr/tests/lib/googletest/gtest/gtest-unittest-api_test
OLD_FILES+=usr/tests/lib/googletest/gtest/Kyuafile
OLD_DIRS+=usr/tests/lib/googletest/gtest
OLD_FILES+=usr/tests/lib/googletest/gtest_main/googletest-death-test-test
OLD_FILES+=usr/tests/lib/googletest/gtest_main/googletest-filepath-test
OLD_FILES+=usr/tests/lib/googletest/gtest_main/googletest-linked-ptr-test
OLD_FILES+=usr/tests/lib/googletest/gtest_main/googletest-listener-test
OLD_FILES+=usr/tests/lib/googletest/gtest_main/googletest-message-test
OLD_FILES+=usr/tests/lib/googletest/gtest_main/googletest-options-test
OLD_FILES+=usr/tests/lib/googletest/gtest_main/googletest-port-test
OLD_FILES+=usr/tests/lib/googletest/gtest_main/googletest-printers-test
OLD_FILES+=usr/tests/lib/googletest/gtest_main/googletest-test-part-test
OLD_FILES+=usr/tests/lib/googletest/gtest_main/gtest_help_test_
OLD_FILES+=usr/tests/lib/googletest/gtest_main/gtest_main_unittest
OLD_FILES+=usr/tests/lib/googletest/gtest_main/gtest_pred_impl_unittest
OLD_FILES+=usr/tests/lib/googletest/gtest_main/gtest_prod_test
OLD_FILES+=usr/tests/lib/googletest/gtest_main/gtest_sole_header_test
OLD_FILES+=usr/tests/lib/googletest/gtest_main/gtest_unittest
OLD_FILES+=usr/tests/lib/googletest/gtest_main/gtest_xml_outfile1_test_
OLD_FILES+=usr/tests/lib/googletest/gtest_main/gtest_xml_outfile2_test_
OLD_FILES+=usr/tests/lib/googletest/gtest_main/gtest-typed-test_test
OLD_FILES+=usr/tests/lib/googletest/gtest_main/Kyuafile
OLD_DIRS+=usr/tests/lib/googletest/gtest_main
OLD_FILES+=usr/tests/lib/googletest/Kyuafile
OLD_DIRS+=usr/tests/lib/googletest
OLD_FILES+=usr/tests/share/examples/tests/googletest/Kyuafile
OLD_FILES+=usr/tests/share/examples/tests/googletest/sample1_unittest
OLD_FILES+=usr/tests/share/examples/tests/googletest/sample10_unittest
OLD_FILES+=usr/tests/share/examples/tests/googletest/sample2_unittest
OLD_FILES+=usr/tests/share/examples/tests/googletest/sample3_unittest
OLD_FILES+=usr/tests/share/examples/tests/googletest/sample4_unittest
OLD_FILES+=usr/tests/share/examples/tests/googletest/sample5_unittest
OLD_FILES+=usr/tests/share/examples/tests/googletest/sample6_unittest
OLD_FILES+=usr/tests/share/examples/tests/googletest/sample7_unittest
OLD_FILES+=usr/tests/share/examples/tests/googletest/sample8_unittest
OLD_DIRS+=usr/tests/share/examples/tests/googletest
.endif
.if ${MK_GPIO} == no
OLD_FILES+=usr/include/libgpio.h
OLD_FILES+=usr/lib/libgpio.a
OLD_FILES+=usr/lib/libgpio.so
OLD_LIBS+=usr/lib/libgpio.so.0
OLD_FILES+=usr/lib/libgpio_p.a
OLD_FILES+=usr/lib32/libgpio.a
OLD_FILES+=usr/lib32/libgpio.so
OLD_LIBS+=usr/lib32/libgpio.so.0
OLD_FILES+=usr/lib32/libgpio_p.a
OLD_FILES+=usr/sbin/gpioctl
OLD_FILES+=usr/share/man/man3/gpio.3.gz
OLD_FILES+=usr/share/man/man3/gpio_close.3.gz
OLD_FILES+=usr/share/man/man3/gpio_open.3.gz
OLD_FILES+=usr/share/man/man3/gpio_open_device.3.gz
OLD_FILES+=usr/share/man/man3/gpio_pin_config.3.gz
OLD_FILES+=usr/share/man/man3/gpio_pin_get.3.gz
OLD_FILES+=usr/share/man/man3/gpio_pin_high.3.gz
OLD_FILES+=usr/share/man/man3/gpio_pin_input.3.gz
OLD_FILES+=usr/share/man/man3/gpio_pin_invin.3.gz
OLD_FILES+=usr/share/man/man3/gpio_pin_invout.3.gz
OLD_FILES+=usr/share/man/man3/gpio_pin_list.3.gz
OLD_FILES+=usr/share/man/man3/gpio_pin_low.3.gz
OLD_FILES+=usr/share/man/man3/gpio_pin_opendrain.3.gz
OLD_FILES+=usr/share/man/man3/gpio_pin_output.3.gz
OLD_FILES+=usr/share/man/man3/gpio_pin_pulldown.3.gz
OLD_FILES+=usr/share/man/man3/gpio_pin_pullup.3.gz
OLD_FILES+=usr/share/man/man3/gpio_pin_pulsate.3.gz
OLD_FILES+=usr/share/man/man3/gpio_pin_pushpull.3.gz
OLD_FILES+=usr/share/man/man3/gpio_pin_set.3.gz
OLD_FILES+=usr/share/man/man3/gpio_pin_set_flags.3.gz
OLD_FILES+=usr/share/man/man3/gpio_pin_tristate.3.gz
OLD_FILES+=usr/share/man/man8/gpioctl.8.gz
.endif
.if ${MK_GNU_DIFF} == no
OLD_FILES+=usr/bin/diff3
OLD_FILES+=usr/share/man/man1/diff3.1.gz
.endif
.if ${MK_GNU_GREP} == no
OLD_FILES+=usr/bin/gnugrep
OLD_FILES+=usr/share/man/man1/gnugrep.1.gz
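# grep itself is only obsolete once both the GNU and BSD implementations
# are disabled, hence the nested condition.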
.if ${MK_BSD_GREP} == no
OLD_FILES+=usr/bin/bzgrep
OLD_FILES+=usr/bin/bzegrep
OLD_FILES+=usr/bin/bzfgrep
OLD_FILES+=usr/bin/egrep
OLD_FILES+=usr/bin/fgrep
OLD_FILES+=usr/bin/grep
OLD_FILES+=usr/bin/zegrep
OLD_FILES+=usr/bin/zfgrep
OLD_FILES+=usr/bin/zgrep
OLD_FILES+=usr/share/man/man1/bzegrep.1.gz
OLD_FILES+=usr/share/man/man1/bzfgrep.1.gz
OLD_FILES+=usr/share/man/man1/bzgrep.1.gz
OLD_FILES+=usr/share/man/man1/egrep.1.gz
OLD_FILES+=usr/share/man/man1/fgrep.1.gz
OLD_FILES+=usr/share/man/man1/grep.1.gz
OLD_FILES+=usr/share/man/man1/zegrep.1.gz
OLD_FILES+=usr/share/man/man1/zfgrep.1.gz
OLD_FILES+=usr/share/man/man1/zgrep.1.gz
.endif
.endif
.if ${MK_GSSAPI} == no
OLD_FILES+=usr/include/gssapi/gssapi.h
OLD_DIRS+=usr/include/gssapi
OLD_FILES+=usr/include/gssapi.h
OLD_FILES+=usr/lib/libgssapi.a
OLD_FILES+=usr/lib/libgssapi.so
OLD_LIBS+=usr/lib/libgssapi.so.10
OLD_FILES+=usr/lib/libgssapi_p.a
OLD_FILES+=usr/lib/librpcsec_gss.a
OLD_FILES+=usr/lib/librpcsec_gss.so
OLD_LIBS+=usr/lib/librpcsec_gss.so.1
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64"
OLD_FILES+=usr/lib32/libgssapi.a
OLD_FILES+=usr/lib32/libgssapi.so
OLD_LIBS+=usr/lib32/libgssapi.so.10
OLD_FILES+=usr/lib32/libgssapi_p.a
OLD_FILES+=usr/lib32/librpcsec_gss.a
OLD_FILES+=usr/lib32/librpcsec_gss.so
OLD_LIBS+=usr/lib32/librpcsec_gss.so.1
.endif
OLD_FILES+=usr/sbin/gssd
OLD_FILES+=usr/share/man/man3/gss_accept_sec_context.3.gz
OLD_FILES+=usr/share/man/man3/gss_acquire_cred.3.gz
OLD_FILES+=usr/share/man/man3/gss_add_cred.3.gz
OLD_FILES+=usr/share/man/man3/gss_add_oid_set_member.3.gz
OLD_FILES+=usr/share/man/man3/gss_canonicalize_name.3.gz
OLD_FILES+=usr/share/man/man3/gss_compare_name.3.gz
OLD_FILES+=usr/share/man/man3/gss_context_time.3.gz
OLD_FILES+=usr/share/man/man3/gss_create_empty_oid_set.3.gz
OLD_FILES+=usr/share/man/man3/gss_delete_sec_context.3.gz
OLD_FILES+=usr/share/man/man3/gss_display_name.3.gz
OLD_FILES+=usr/share/man/man3/gss_display_status.3.gz
OLD_FILES+=usr/share/man/man3/gss_duplicate_name.3.gz
OLD_FILES+=usr/share/man/man3/gss_export_name.3.gz
OLD_FILES+=usr/share/man/man3/gss_export_sec_context.3.gz
OLD_FILES+=usr/share/man/man3/gss_get_mic.3.gz
OLD_FILES+=usr/share/man/man3/gss_import_name.3.gz
OLD_FILES+=usr/share/man/man3/gss_import_sec_context.3.gz
OLD_FILES+=usr/share/man/man3/gss_indicate_mechs.3.gz
OLD_FILES+=usr/share/man/man3/gss_init_sec_context.3.gz
OLD_FILES+=usr/share/man/man3/gss_inquire_context.3.gz
OLD_FILES+=usr/share/man/man3/gss_inquire_cred.3.gz
OLD_FILES+=usr/share/man/man3/gss_inquire_cred_by_mech.3.gz
OLD_FILES+=usr/share/man/man3/gss_inquire_mechs_for_name.3.gz
OLD_FILES+=usr/share/man/man3/gss_inquire_names_for_mech.3.gz
OLD_FILES+=usr/share/man/man3/gss_process_context_token.3.gz
OLD_FILES+=usr/share/man/man3/gss_release_buffer.3.gz
OLD_FILES+=usr/share/man/man3/gss_release_cred.3.gz
OLD_FILES+=usr/share/man/man3/gss_release_name.3.gz
OLD_FILES+=usr/share/man/man3/gss_release_oid_set.3.gz
OLD_FILES+=usr/share/man/man3/gss_seal.3.gz
OLD_FILES+=usr/share/man/man3/gss_sign.3.gz
OLD_FILES+=usr/share/man/man3/gss_test_oid_set_member.3.gz
OLD_FILES+=usr/share/man/man3/gss_unseal.3.gz
OLD_FILES+=usr/share/man/man3/gss_unwrap.3.gz
OLD_FILES+=usr/share/man/man3/gss_verify.3.gz
OLD_FILES+=usr/share/man/man3/gss_verify_mic.3.gz
OLD_FILES+=usr/share/man/man3/gss_wrap.3.gz
OLD_FILES+=usr/share/man/man3/gss_wrap_size_limit.3.gz
OLD_FILES+=usr/share/man/man3/gssapi.3.gz
OLD_FILES+=usr/share/man/man3/rpc_gss_get_error.3.gz
OLD_FILES+=usr/share/man/man3/rpc_gss_get_mech_info.3.gz
OLD_FILES+=usr/share/man/man3/rpc_gss_get_mechanisms.3.gz
OLD_FILES+=usr/share/man/man3/rpc_gss_get_principal_name.3.gz
OLD_FILES+=usr/share/man/man3/rpc_gss_get_versions.3.gz
OLD_FILES+=usr/share/man/man3/rpc_gss_getcred.3.gz
OLD_FILES+=usr/share/man/man3/rpc_gss_is_installed.3.gz
OLD_FILES+=usr/share/man/man3/rpc_gss_max_data_length.3.gz
OLD_FILES+=usr/share/man/man3/rpc_gss_mech_to_oid.3.gz
OLD_FILES+=usr/share/man/man3/rpc_gss_oid_to_mech.3.gz
OLD_FILES+=usr/share/man/man3/rpc_gss_qop_to_num.3.gz
OLD_FILES+=usr/share/man/man3/rpc_gss_seccreate.3.gz
OLD_FILES+=usr/share/man/man3/rpc_gss_set_callback.3.gz
OLD_FILES+=usr/share/man/man3/rpc_gss_set_defaults.3.gz
OLD_FILES+=usr/share/man/man3/rpc_gss_set_svc_name.3.gz
OLD_FILES+=usr/share/man/man3/rpc_gss_svc_max_data_length.3.gz
OLD_FILES+=usr/share/man/man3/rpcsec_gss.3.gz
OLD_FILES+=usr/share/man/man5/mech.5.gz
OLD_FILES+=usr/share/man/man5/qop.5.gz
OLD_FILES+=usr/share/man/man8/gssd.8.gz
.endif
.if ${MK_HAST} == no
OLD_FILES+=sbin/hastctl
OLD_FILES+=sbin/hastd
OLD_FILES+=usr/share/examples/hast/ucarp.sh
OLD_FILES+=usr/share/examples/hast/ucarp_down.sh
OLD_FILES+=usr/share/examples/hast/ucarp_up.sh
OLD_FILES+=usr/share/examples/hast/vip-down.sh
OLD_FILES+=usr/share/examples/hast/vip-up.sh
OLD_FILES+=usr/share/man/man5/hast.conf.5.gz
OLD_FILES+=usr/share/man/man8/hastctl.8.gz
OLD_FILES+=usr/share/man/man8/hastd.8.gz
OLD_DIRS+=usr/share/examples/hast
# bsnmp module and MIB for hastd
OLD_FILES+=usr/lib/snmp_hast.so
OLD_LIBS+=usr/lib/snmp_hast.so.6
OLD_FILES+=usr/share/man/man3/snmp_hast.3.gz
OLD_FILES+=usr/share/snmp/defs/hast_tree.def
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-HAST-MIB.txt
.endif
.if ${MK_HESIOD} == no
OLD_FILES+=usr/bin/hesinfo
OLD_FILES+=usr/include/hesiod.h
OLD_FILES+=usr/share/man/man1/hesinfo.1.gz
OLD_FILES+=usr/share/man/man3/hesiod.3.gz
OLD_FILES+=usr/share/man/man5/hesiod.conf.5.gz
.endif
.if ${MK_HTML} == no
OLD_FILES+=usr/share/doc/ncurses/hackguide.html
OLD_FILES+=usr/share/doc/ncurses/ncurses-intro.html
OLD_DIRS+=usr/share/doc/ncurses
OLD_FILES+=usr/share/doc/ntp/accopt.html
OLD_FILES+=usr/share/doc/ntp/assoc.html
OLD_FILES+=usr/share/doc/ntp/audio.html
OLD_FILES+=usr/share/doc/ntp/authopt.html
OLD_FILES+=usr/share/doc/ntp/build.html
OLD_FILES+=usr/share/doc/ntp/clockopt.html
OLD_FILES+=usr/share/doc/ntp/config.html
OLD_FILES+=usr/share/doc/ntp/confopt.html
OLD_FILES+=usr/share/doc/ntp/copyright.html
OLD_FILES+=usr/share/doc/ntp/debug.html
OLD_FILES+=usr/share/doc/ntp/driver1.html
OLD_FILES+=usr/share/doc/ntp/driver10.html
OLD_FILES+=usr/share/doc/ntp/driver11.html
OLD_FILES+=usr/share/doc/ntp/driver12.html
OLD_FILES+=usr/share/doc/ntp/driver16.html
OLD_FILES+=usr/share/doc/ntp/driver18.html
OLD_FILES+=usr/share/doc/ntp/driver19.html
OLD_FILES+=usr/share/doc/ntp/driver2.html
OLD_FILES+=usr/share/doc/ntp/driver20.html
OLD_FILES+=usr/share/doc/ntp/driver22.html
OLD_FILES+=usr/share/doc/ntp/driver26.html
OLD_FILES+=usr/share/doc/ntp/driver27.html
OLD_FILES+=usr/share/doc/ntp/driver28.html
OLD_FILES+=usr/share/doc/ntp/driver29.html
OLD_FILES+=usr/share/doc/ntp/driver3.html
OLD_FILES+=usr/share/doc/ntp/driver30.html
OLD_FILES+=usr/share/doc/ntp/driver32.html
OLD_FILES+=usr/share/doc/ntp/driver33.html
OLD_FILES+=usr/share/doc/ntp/driver34.html
OLD_FILES+=usr/share/doc/ntp/driver35.html
OLD_FILES+=usr/share/doc/ntp/driver36.html
OLD_FILES+=usr/share/doc/ntp/driver37.html
OLD_FILES+=usr/share/doc/ntp/driver4.html
OLD_FILES+=usr/share/doc/ntp/driver5.html
OLD_FILES+=usr/share/doc/ntp/driver6.html
OLD_FILES+=usr/share/doc/ntp/driver7.html
OLD_FILES+=usr/share/doc/ntp/driver8.html
OLD_FILES+=usr/share/doc/ntp/driver9.html
OLD_FILES+=usr/share/doc/ntp/extern.html
OLD_FILES+=usr/share/doc/ntp/hints.html
OLD_FILES+=usr/share/doc/ntp/howto.html
OLD_FILES+=usr/share/doc/ntp/index.html
OLD_FILES+=usr/share/doc/ntp/kern.html
OLD_FILES+=usr/share/doc/ntp/ldisc.html
OLD_FILES+=usr/share/doc/ntp/measure.html
OLD_FILES+=usr/share/doc/ntp/miscopt.html
OLD_FILES+=usr/share/doc/ntp/monopt.html
OLD_FILES+=usr/share/doc/ntp/mx4200data.html
OLD_FILES+=usr/share/doc/ntp/notes.html
OLD_FILES+=usr/share/doc/ntp/ntpd.html
OLD_FILES+=usr/share/doc/ntp/ntpdate.html
OLD_FILES+=usr/share/doc/ntp/ntpdc.html
OLD_FILES+=usr/share/doc/ntp/ntpq.html
OLD_FILES+=usr/share/doc/ntp/ntptime.html
OLD_FILES+=usr/share/doc/ntp/ntptrace.html
OLD_FILES+=usr/share/doc/ntp/parsedata.html
OLD_FILES+=usr/share/doc/ntp/parsenew.html
OLD_FILES+=usr/share/doc/ntp/patches.html
OLD_FILES+=usr/share/doc/ntp/porting.html
OLD_FILES+=usr/share/doc/ntp/pps.html
OLD_FILES+=usr/share/doc/ntp/prefer.html
OLD_FILES+=usr/share/doc/ntp/quick.html
OLD_FILES+=usr/share/doc/ntp/rdebug.html
OLD_FILES+=usr/share/doc/ntp/refclock.html
OLD_FILES+=usr/share/doc/ntp/release.html
OLD_FILES+=usr/share/doc/ntp/tickadj.html
.endif
.if ${MK_ICONV} == no
OLD_FILES+=usr/bin/iconv
OLD_FILES+=usr/bin/mkcsmapper
OLD_FILES+=usr/bin/mkesdb
OLD_FILES+=usr/include/_libiconv_compat.h
OLD_FILES+=usr/include/iconv.h
OLD_FILES+=usr/share/man/man1/iconv.1.gz
OLD_FILES+=usr/share/man/man1/mkcsmapper.1.gz
OLD_FILES+=usr/share/man/man1/mkesdb.1.gz
OLD_FILES+=usr/share/man/man3/__iconv.3.gz
OLD_FILES+=usr/share/man/man3/__iconv_free_list.3.gz
OLD_FILES+=usr/share/man/man3/__iconv_get_list.3.gz
OLD_FILES+=usr/share/man/man3/iconv.3.gz
OLD_FILES+=usr/share/man/man3/iconv_canonicalize.3.gz
OLD_FILES+=usr/share/man/man3/iconv_close.3.gz
OLD_FILES+=usr/share/man/man3/iconv_open.3.gz
OLD_FILES+=usr/share/man/man3/iconv_open_into.3.gz
OLD_FILES+=usr/share/man/man3/iconvctl.3.gz
OLD_FILES+=usr/share/man/man3/iconvlist.3.gz
OLD_DIRS+=usr/share/i18n
OLD_DIRS+=usr/share/i18n/esdb
OLD_DIRS+=usr/share/i18n/esdb/ISO-2022
OLD_DIRS+=usr/share/i18n/esdb/BIG5
OLD_DIRS+=usr/share/i18n/esdb/MISC
OLD_DIRS+=usr/share/i18n/esdb/TCVN
OLD_DIRS+=usr/share/i18n/esdb/EBCDIC
OLD_DIRS+=usr/share/i18n/esdb/ISO-8859
OLD_DIRS+=usr/share/i18n/esdb/GEORGIAN
OLD_DIRS+=usr/share/i18n/esdb/AST
OLD_DIRS+=usr/share/i18n/esdb/KAZAKH
OLD_DIRS+=usr/share/i18n/esdb/APPLE
OLD_DIRS+=usr/share/i18n/esdb/EUC
OLD_DIRS+=usr/share/i18n/esdb/CP
OLD_DIRS+=usr/share/i18n/esdb/DEC
OLD_DIRS+=usr/share/i18n/esdb/UTF
OLD_DIRS+=usr/share/i18n/esdb/GB
OLD_DIRS+=usr/share/i18n/esdb/ISO646
OLD_DIRS+=usr/share/i18n/esdb/KOI
OLD_DIRS+=usr/share/i18n/csmapper
OLD_DIRS+=usr/share/i18n/csmapper/KAZAKH
OLD_DIRS+=usr/share/i18n/csmapper/CNS
OLD_DIRS+=usr/share/i18n/csmapper/BIG5
OLD_DIRS+=usr/share/i18n/csmapper/JIS
OLD_DIRS+=usr/share/i18n/csmapper/KOI
OLD_DIRS+=usr/share/i18n/csmapper/TCVN
OLD_DIRS+=usr/share/i18n/csmapper/MISC
OLD_DIRS+=usr/share/i18n/csmapper/EBCDIC
OLD_DIRS+=usr/share/i18n/csmapper/ISO646
OLD_DIRS+=usr/share/i18n/csmapper/CP
OLD_DIRS+=usr/share/i18n/csmapper/GEORGIAN
OLD_DIRS+=usr/share/i18n/csmapper/ISO-8859
OLD_DIRS+=usr/share/i18n/csmapper/AST
OLD_DIRS+=usr/share/i18n/csmapper/APPLE
OLD_DIRS+=usr/share/i18n/csmapper/KS
OLD_DIRS+=usr/share/i18n/csmapper/GB
.endif
.if ${MK_INET6} == no
OLD_FILES+=sbin/ping6
OLD_FILES+=sbin/rtsol
OLD_FILES+=usr/sbin/ip6addrctl
OLD_FILES+=usr/sbin/mld6query
OLD_FILES+=usr/sbin/ndp
OLD_FILES+=usr/sbin/rip6query
OLD_FILES+=usr/sbin/route6d
OLD_FILES+=usr/sbin/rrenumd
OLD_FILES+=usr/sbin/rtadvctl
OLD_FILES+=usr/sbin/rtadvd
OLD_FILES+=usr/sbin/rtsold
OLD_FILES+=usr/sbin/traceroute6
OLD_FILES+=usr/share/doc/IPv6/IMPLEMENTATION
OLD_FILES+=usr/share/man/man5/rrenumd.conf.5.gz
OLD_FILES+=usr/share/man/man5/rtadvd.conf.5.gz
OLD_FILES+=usr/share/man/man8/ip6addrctl.8.gz
OLD_FILES+=usr/share/man/man8/mld6query.8.gz
OLD_FILES+=usr/share/man/man8/ndp.8.gz
OLD_FILES+=usr/share/man/man8/ping6.8.gz
OLD_FILES+=usr/share/man/man8/rip6query.8.gz
OLD_FILES+=usr/share/man/man8/route6d.8.gz
OLD_FILES+=usr/share/man/man8/rrenumd.8.gz
OLD_FILES+=usr/share/man/man8/rtadvctl.8.gz
OLD_FILES+=usr/share/man/man8/rtadvd.8.gz
OLD_FILES+=usr/share/man/man8/rtsol.8.gz
OLD_FILES+=usr/share/man/man8/rtsold.8.gz
OLD_FILES+=usr/share/man/man8/traceroute6.8.gz
.endif
.if ${MK_INET6_SUPPORT} == no
OLD_FILES+=rescue/ping6
OLD_FILES+=rescue/rtsol
.endif
.if ${MK_INETD} == no
OLD_FILES+=etc/rc.d/inetd
OLD_FILES+=usr/sbin/inetd
OLD_FILES+=usr/share/man/man5/inetd.conf.5.gz
OLD_FILES+=usr/share/man/man8/inetd.8.gz
.endif
.if ${MK_IPFILTER} == no
OLD_FILES+=etc/periodic/security/510.ipfdenied
OLD_FILES+=etc/periodic/security/610.ipf6denied
OLD_FILES+=etc/rc.d/ipfilter
OLD_FILES+=etc/rc.d/ipfs
OLD_FILES+=etc/rc.d/ipmon
OLD_FILES+=etc/rc.d/ipnat
OLD_FILES+=etc/rc.d/ippool
OLD_FILES+=rescue/ipf
OLD_FILES+=sbin/ipf
OLD_FILES+=sbin/ipfs
OLD_FILES+=sbin/ipfstat
OLD_FILES+=sbin/ipftest
OLD_FILES+=sbin/ipmon
OLD_FILES+=sbin/ipnat
OLD_FILES+=sbin/ippool
OLD_FILES+=sbin/ipresend
OLD_FILES+=usr/include/netinet/ip_auth.h
OLD_FILES+=usr/include/netinet/ip_compat.h
OLD_FILES+=usr/include/netinet/ip_fil.h
OLD_FILES+=usr/include/netinet/ip_frag.h
OLD_FILES+=usr/include/netinet/ip_htable.h
OLD_FILES+=usr/include/netinet/ip_lookup.h
OLD_FILES+=usr/include/netinet/ip_nat.h
OLD_FILES+=usr/include/netinet/ip_pool.h
OLD_FILES+=usr/include/netinet/ip_proxy.h
OLD_FILES+=usr/include/netinet/ip_rules.h
OLD_FILES+=usr/include/netinet/ip_scan.h
OLD_FILES+=usr/include/netinet/ip_state.h
OLD_FILES+=usr/include/netinet/ip_sync.h
OLD_FILES+=usr/include/netinet/ipl.h
OLD_FILES+=usr/share/examples/ipfilter/README
OLD_FILES+=usr/share/examples/ipfilter/BASIC.NAT
OLD_FILES+=usr/share/examples/ipfilter/BASIC_1.FW
OLD_FILES+=usr/share/examples/ipfilter/BASIC_2.FW
OLD_FILES+=usr/share/examples/ipfilter/example.1
OLD_FILES+=usr/share/examples/ipfilter/example.2
OLD_FILES+=usr/share/examples/ipfilter/example.3
OLD_FILES+=usr/share/examples/ipfilter/example.4
OLD_FILES+=usr/share/examples/ipfilter/example.5
OLD_FILES+=usr/share/examples/ipfilter/example.6
OLD_FILES+=usr/share/examples/ipfilter/example.7
OLD_FILES+=usr/share/examples/ipfilter/example.8
OLD_FILES+=usr/share/examples/ipfilter/example.9
OLD_FILES+=usr/share/examples/ipfilter/example.10
OLD_FILES+=usr/share/examples/ipfilter/example.11
OLD_FILES+=usr/share/examples/ipfilter/example.12
OLD_FILES+=usr/share/examples/ipfilter/example.13
OLD_FILES+=usr/share/examples/ipfilter/example.sr
OLD_FILES+=usr/share/examples/ipfilter/firewall
OLD_FILES+=usr/share/examples/ipfilter/ftp-proxy
OLD_FILES+=usr/share/examples/ipfilter/ftppxy
OLD_FILES+=usr/share/examples/ipfilter/nat-setup
OLD_FILES+=usr/share/examples/ipfilter/nat.eg
OLD_FILES+=usr/share/examples/ipfilter/server
OLD_FILES+=usr/share/examples/ipfilter/tcpstate
OLD_FILES+=usr/share/examples/ipfilter/example.14
OLD_FILES+=usr/share/examples/ipfilter/firewall.1
OLD_FILES+=usr/share/examples/ipfilter/firewall.2
OLD_FILES+=usr/share/examples/ipfilter/ipf.conf.permissive
OLD_FILES+=usr/share/examples/ipfilter/ipf.conf.restrictive
OLD_FILES+=usr/share/examples/ipfilter/ipf.conf.sample
OLD_FILES+=usr/share/examples/ipfilter/ipnat.conf.sample
OLD_FILES+=usr/share/examples/ipfilter/ipf-howto.txt
OLD_FILES+=usr/share/examples/ipfilter/examples.txt
OLD_FILES+=usr/share/examples/ipfilter/rules.txt
OLD_FILES+=usr/share/examples/ipfilter/mkfilters
OLD_DIRS+=usr/share/examples/ipfilter
OLD_FILES+=usr/share/man/man1/ipftest.1.gz
OLD_FILES+=usr/share/man/man1/ipresend.1.gz
OLD_FILES+=usr/share/man/man4/ipf.4.gz
OLD_FILES+=usr/share/man/man4/ipl.4.gz
OLD_FILES+=usr/share/man/man4/ipfilter.4.gz
OLD_FILES+=usr/share/man/man4/ipnat.4.gz
OLD_FILES+=usr/share/man/man5/ipf.5.gz
OLD_FILES+=usr/share/man/man5/ipf.conf.5.gz
OLD_FILES+=usr/share/man/man5/ipf6.conf.5.gz
OLD_FILES+=usr/share/man/man5/ipfilter.5.gz
OLD_FILES+=usr/share/man/man5/ipmon.5.gz
OLD_FILES+=usr/share/man/man5/ipmon.conf.5.gz
OLD_FILES+=usr/share/man/man5/ipnat.5.gz
OLD_FILES+=usr/share/man/man5/ipnat.conf.5.gz
OLD_FILES+=usr/share/man/man5/ippool.5.gz
OLD_FILES+=usr/share/man/man8/ipf.8.gz
OLD_FILES+=usr/share/man/man8/ipfs.8.gz
OLD_FILES+=usr/share/man/man8/ipfstat.8.gz
OLD_FILES+=usr/share/man/man8/ipmon.8.gz
OLD_FILES+=usr/share/man/man8/ipnat.8.gz
OLD_FILES+=usr/share/man/man8/ippool.8.gz
.endif
.if ${MK_IPFW} == no
OLD_FILES+=etc/periodic/security/500.ipfwdenied
OLD_FILES+=etc/periodic/security/550.ipfwlimit
OLD_FILES+=sbin/ipfw
OLD_FILES+=sbin/natd
OLD_FILES+=usr/sbin/ipfwpcap
OLD_FILES+=usr/share/man/man8/ipfw.8.gz
OLD_FILES+=usr/share/man/man8/ipfwpcap.8.gz
OLD_FILES+=usr/share/man/man8/natd.8.gz
.endif
.if ${MK_ISCSI} == no
OLD_FILES+=etc/rc.d/iscsictl
OLD_FILES+=etc/rc.d/iscsid
OLD_FILES+=rescue/iscsictl
OLD_FILES+=rescue/iscsid
OLD_FILES+=sbin/iscontrol
OLD_FILES+=usr/bin/iscsictl
OLD_FILES+=usr/sbin/iscsid
OLD_FILES+=usr/share/man/man4/iscsi.4.gz
OLD_FILES+=usr/share/man/man4/iscsi_initiator.4.gz
OLD_FILES+=usr/share/man/man5/iscsi.conf.5.gz
OLD_FILES+=usr/share/man/man8/iscontrol.8.gz
OLD_FILES+=usr/share/man/man8/iscsictl.8.gz
OLD_FILES+=usr/share/man/man8/iscsid.8.gz
.endif
.if ${MK_JAIL} == no
OLD_FILES+=etc/rc.d/jail
OLD_FILES+=usr/sbin/jail
OLD_FILES+=usr/sbin/jexec
OLD_FILES+=usr/sbin/jls
OLD_FILES+=usr/share/man/man5/jail.conf.5.gz
OLD_FILES+=usr/share/man/man8/jail.8.gz
OLD_FILES+=usr/share/man/man8/jexec.8.gz
OLD_FILES+=usr/share/man/man8/jls.8.gz
.endif
.if ${MK_KDUMP} == no
OLD_FILES+=usr/bin/kdump
OLD_FILES+=usr/bin/truss
OLD_FILES+=usr/share/man/man1/kdump.1.gz
OLD_FILES+=usr/share/man/man1/truss.1.gz
.endif
.if ${MK_KERBEROS} == no
OLD_FILES+=etc/rc.d/ipropd_master
OLD_FILES+=etc/rc.d/ipropd_slave
OLD_FILES+=usr/bin/asn1_compile
OLD_FILES+=usr/bin/compile_et
OLD_FILES+=usr/bin/hxtool
OLD_FILES+=usr/bin/kadmin
OLD_FILES+=usr/bin/kcc
OLD_FILES+=usr/bin/kdestroy
OLD_FILES+=usr/bin/kf
OLD_FILES+=usr/bin/kgetcred
OLD_FILES+=usr/bin/kinit
OLD_FILES+=usr/bin/klist
OLD_FILES+=usr/bin/kpasswd
OLD_FILES+=usr/bin/krb5-config
OLD_FILES+=usr/bin/ksu
OLD_FILES+=usr/bin/kswitch
OLD_FILES+=usr/bin/make-roken
OLD_FILES+=usr/bin/slc
OLD_FILES+=usr/bin/string2key
OLD_FILES+=usr/bin/verify_krb5_conf
OLD_FILES+=usr/include/asn1-common.h
OLD_FILES+=usr/include/asn1_err.h
OLD_FILES+=usr/include/base64.h
OLD_FILES+=usr/include/cms_asn1.h
OLD_FILES+=usr/include/crmf_asn1.h
OLD_FILES+=usr/include/der-private.h
OLD_FILES+=usr/include/der-protos.h
OLD_FILES+=usr/include/der.h
OLD_FILES+=usr/include/digest_asn1.h
OLD_FILES+=usr/include/getarg.h
OLD_FILES+=usr/include/gssapi/gssapi_krb5.h
OLD_FILES+=usr/include/hdb-protos.h
OLD_FILES+=usr/include/hdb.h
OLD_FILES+=usr/include/hdb_asn1.h
OLD_FILES+=usr/include/hdb_err.h
OLD_FILES+=usr/include/heim_asn1.h
OLD_FILES+=usr/include/heim_err.h
OLD_FILES+=usr/include/heim_threads.h
OLD_FILES+=usr/include/heimbase.h
OLD_FILES+=usr/include/heimntlm-protos.h
OLD_FILES+=usr/include/heimntlm.h
OLD_FILES+=usr/include/hex.h
OLD_FILES+=usr/include/hx509-private.h
OLD_FILES+=usr/include/hx509-protos.h
OLD_FILES+=usr/include/hx509.h
OLD_FILES+=usr/include/hx509_err.h
OLD_FILES+=usr/include/k524_err.h
OLD_FILES+=usr/include/kadm5/admin.h
OLD_FILES+=usr/include/kadm5/kadm5-private.h
OLD_FILES+=usr/include/kadm5/kadm5-protos.h
OLD_FILES+=usr/include/kadm5/kadm5-pwcheck.h
OLD_FILES+=usr/include/kadm5/kadm5_err.h
OLD_FILES+=usr/include/kadm5/private.h
OLD_DIRS+=usr/include/kadm5
OLD_FILES+=usr/include/kafs.h
OLD_FILES+=usr/include/kdc-protos.h
OLD_FILES+=usr/include/kdc.h
OLD_FILES+=usr/include/krb5-private.h
OLD_FILES+=usr/include/krb5-protos.h
OLD_FILES+=usr/include/krb5-types.h
OLD_FILES+=usr/include/krb5.h
OLD_FILES+=usr/include/krb5/ccache_plugin.h
OLD_FILES+=usr/include/krb5/locate_plugin.h
OLD_FILES+=usr/include/krb5/send_to_kdc_plugin.h
OLD_FILES+=usr/include/krb5/windc_plugin.h
OLD_DIRS+=usr/include/krb5
OLD_FILES+=usr/include/krb5_asn1.h
OLD_FILES+=usr/include/krb5_ccapi.h
OLD_FILES+=usr/include/krb5_err.h
OLD_FILES+=usr/include/kx509_asn1.h
OLD_FILES+=usr/include/ntlm_err.h
OLD_FILES+=usr/include/ocsp_asn1.h
OLD_FILES+=usr/include/parse_bytes.h
OLD_FILES+=usr/include/parse_time.h
OLD_FILES+=usr/include/parse_units.h
OLD_FILES+=usr/include/pkcs10_asn1.h
OLD_FILES+=usr/include/pkcs12_asn1.h
OLD_FILES+=usr/include/pkcs8_asn1.h
OLD_FILES+=usr/include/pkcs9_asn1.h
OLD_FILES+=usr/include/pkinit_asn1.h
OLD_FILES+=usr/include/resolve.h
OLD_FILES+=usr/include/rfc2459_asn1.h
OLD_FILES+=usr/include/roken-common.h
OLD_FILES+=usr/include/rtbl.h
OLD_FILES+=usr/include/wind.h
OLD_FILES+=usr/include/wind_err.h
OLD_FILES+=usr/include/xdbm.h
OLD_FILES+=usr/lib/libasn1.a
OLD_FILES+=usr/lib/libasn1.so
OLD_LIBS+=usr/lib/libasn1.so.11
OLD_FILES+=usr/lib/libasn1_p.a
OLD_FILES+=usr/lib/libcom_err.a
OLD_FILES+=usr/lib/libcom_err.so
OLD_LIBS+=usr/lib/libcom_err.so.5
OLD_FILES+=usr/lib/libcom_err_p.a
OLD_FILES+=usr/lib/libgssapi_krb5.a
OLD_FILES+=usr/lib/libgssapi_krb5.so
OLD_LIBS+=usr/lib/libgssapi_krb5.so.10
OLD_FILES+=usr/lib/libgssapi_krb5_p.a
OLD_FILES+=usr/lib/libgssapi_ntlm.a
OLD_FILES+=usr/lib/libgssapi_ntlm.so
OLD_LIBS+=usr/lib/libgssapi_ntlm.so.10
OLD_FILES+=usr/lib/libgssapi_ntlm_p.a
OLD_FILES+=usr/lib/libgssapi_spnego.a
OLD_FILES+=usr/lib/libgssapi_spnego.so
OLD_LIBS+=usr/lib/libgssapi_spnego.so.10
OLD_FILES+=usr/lib/libgssapi_spnego_p.a
OLD_FILES+=usr/lib/libhdb.a
OLD_FILES+=usr/lib/libhdb.so
OLD_LIBS+=usr/lib/libhdb.so.11
OLD_FILES+=usr/lib/libhdb_p.a
OLD_FILES+=usr/lib/libheimbase.a
OLD_FILES+=usr/lib/libheimbase.so
OLD_LIBS+=usr/lib/libheimbase.so.11
OLD_FILES+=usr/lib/libheimbase_p.a
OLD_FILES+=usr/lib/libheimntlm.a
OLD_FILES+=usr/lib/libheimntlm.so
OLD_LIBS+=usr/lib/libheimntlm.so.11
OLD_FILES+=usr/lib/libheimntlm_p.a
OLD_FILES+=usr/lib/libheimsqlite.a
OLD_FILES+=usr/lib/libheimsqlite.so
OLD_LIBS+=usr/lib/libheimsqlite.so.11
OLD_FILES+=usr/lib/libheimsqlite_p.a
OLD_FILES+=usr/lib/libhx509.a
OLD_FILES+=usr/lib/libhx509.so
OLD_LIBS+=usr/lib/libhx509.so.11
OLD_FILES+=usr/lib/libhx509_p.a
OLD_FILES+=usr/lib/libkadm5clnt.a
OLD_FILES+=usr/lib/libkadm5clnt.so
OLD_LIBS+=usr/lib/libkadm5clnt.so.11
OLD_FILES+=usr/lib/libkadm5clnt_p.a
OLD_FILES+=usr/lib/libkadm5srv.a
OLD_FILES+=usr/lib/libkadm5srv.so
OLD_LIBS+=usr/lib/libkadm5srv.so.11
OLD_FILES+=usr/lib/libkadm5srv_p.a
OLD_FILES+=usr/lib/libkafs5.a
OLD_FILES+=usr/lib/libkafs5.so
OLD_LIBS+=usr/lib/libkafs5.so.11
OLD_FILES+=usr/lib/libkafs5_p.a
OLD_FILES+=usr/lib/libkdc.a
OLD_FILES+=usr/lib/libkdc.so
OLD_LIBS+=usr/lib/libkdc.so.11
OLD_FILES+=usr/lib/libkdc_p.a
OLD_FILES+=usr/lib/libkrb5.a
OLD_FILES+=usr/lib/libkrb5.so
OLD_LIBS+=usr/lib/libkrb5.so.11
OLD_FILES+=usr/lib/libkrb5_p.a
OLD_FILES+=usr/lib/libroken.a
OLD_FILES+=usr/lib/libroken.so
OLD_LIBS+=usr/lib/libroken.so.11
OLD_FILES+=usr/lib/libroken_p.a
OLD_FILES+=usr/lib/libwind.a
OLD_FILES+=usr/lib/libwind.so
OLD_LIBS+=usr/lib/libwind.so.11
OLD_FILES+=usr/lib/libwind_p.a
OLD_FILES+=usr/lib/pam_krb5.so
OLD_LIBS+=usr/lib/pam_krb5.so.6
OLD_FILES+=usr/lib/pam_ksu.so
OLD_LIBS+=usr/lib/pam_ksu.so.6
OLD_FILES+=usr/lib/private/libheimipcc.a
OLD_FILES+=usr/lib/private/libheimipcc.so
OLD_LIBS+=usr/lib/private/libheimipcc.so.11
OLD_FILES+=usr/lib/private/libheimipcc_p.a
OLD_FILES+=usr/lib/private/libheimipcs.a
OLD_FILES+=usr/lib/private/libheimipcs.so
OLD_LIBS+=usr/lib/private/libheimipcs.so.11
OLD_FILES+=usr/lib/private/libheimipcs_p.a
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64"
OLD_FILES+=usr/lib32/libasn1.a
OLD_FILES+=usr/lib32/libasn1.so
OLD_LIBS+=usr/lib32/libasn1.so.11
OLD_FILES+=usr/lib32/libasn1_p.a
OLD_FILES+=usr/lib32/libgssapi_krb5.a
OLD_FILES+=usr/lib32/libgssapi_krb5.so
OLD_LIBS+=usr/lib32/libgssapi_krb5.so.10
OLD_FILES+=usr/lib32/libgssapi_krb5_p.a
OLD_FILES+=usr/lib32/libgssapi_ntlm.a
OLD_FILES+=usr/lib32/libgssapi_ntlm.so
OLD_LIBS+=usr/lib32/libgssapi_ntlm.so.10
OLD_FILES+=usr/lib32/libgssapi_ntlm_p.a
OLD_FILES+=usr/lib32/libgssapi_spnego.a
OLD_FILES+=usr/lib32/libgssapi_spnego.so
OLD_LIBS+=usr/lib32/libgssapi_spnego.so.10
OLD_FILES+=usr/lib32/libgssapi_spnego_p.a
OLD_FILES+=usr/lib32/libhdb.a
OLD_FILES+=usr/lib32/libhdb.so
OLD_LIBS+=usr/lib32/libhdb.so.11
OLD_FILES+=usr/lib32/libhdb_p.a
OLD_FILES+=usr/lib32/libheimbase.a
OLD_FILES+=usr/lib32/libheimbase.so
OLD_LIBS+=usr/lib32/libheimbase.so.11
OLD_FILES+=usr/lib32/libheimbase_p.a
OLD_FILES+=usr/lib32/libheimntlm.a
OLD_FILES+=usr/lib32/libheimntlm.so
OLD_LIBS+=usr/lib32/libheimntlm.so.11
OLD_FILES+=usr/lib32/libheimntlm_p.a
OLD_FILES+=usr/lib32/libheimsqlite.a
OLD_FILES+=usr/lib32/libheimsqlite.so
OLD_LIBS+=usr/lib32/libheimsqlite.so.11
OLD_FILES+=usr/lib32/libheimsqlite_p.a
OLD_FILES+=usr/lib32/libhx509.a
OLD_FILES+=usr/lib32/libhx509.so
OLD_LIBS+=usr/lib32/libhx509.so.11
OLD_FILES+=usr/lib32/libhx509_p.a
OLD_FILES+=usr/lib32/libkadm5clnt.a
OLD_FILES+=usr/lib32/libkadm5clnt.so
OLD_LIBS+=usr/lib32/libkadm5clnt.so.11
OLD_FILES+=usr/lib32/libkadm5clnt_p.a
OLD_FILES+=usr/lib32/libkadm5srv.a
OLD_FILES+=usr/lib32/libkadm5srv.so
OLD_LIBS+=usr/lib32/libkadm5srv.so.11
OLD_FILES+=usr/lib32/libkadm5srv_p.a
OLD_FILES+=usr/lib32/libkafs5.a
OLD_FILES+=usr/lib32/libkafs5.so
OLD_LIBS+=usr/lib32/libkafs5.so.11
OLD_FILES+=usr/lib32/libkafs5_p.a
OLD_FILES+=usr/lib32/libkdc.a
OLD_FILES+=usr/lib32/libkdc.so
OLD_LIBS+=usr/lib32/libkdc.so.11
OLD_FILES+=usr/lib32/libkdc_p.a
OLD_FILES+=usr/lib32/libkrb5.a
OLD_FILES+=usr/lib32/libkrb5.so
OLD_LIBS+=usr/lib32/libkrb5.so.11
OLD_FILES+=usr/lib32/libkrb5_p.a
OLD_FILES+=usr/lib32/libroken.a
OLD_FILES+=usr/lib32/libroken.so
OLD_LIBS+=usr/lib32/libroken.so.11
OLD_FILES+=usr/lib32/libroken_p.a
OLD_FILES+=usr/lib32/libwind.a
OLD_FILES+=usr/lib32/libwind.so
OLD_LIBS+=usr/lib32/libwind.so.11
OLD_FILES+=usr/lib32/libwind_p.a
OLD_FILES+=usr/lib32/pam_krb5.so
OLD_LIBS+=usr/lib32/pam_krb5.so.6
OLD_FILES+=usr/lib32/pam_ksu.so
OLD_LIBS+=usr/lib32/pam_ksu.so.6
OLD_FILES+=usr/lib32/private/libheimipcc.a
OLD_FILES+=usr/lib32/private/libheimipcc.so
OLD_LIBS+=usr/lib32/private/libheimipcc.so.11
OLD_FILES+=usr/lib32/private/libheimipcc_p.a
OLD_FILES+=usr/lib32/private/libheimipcs.a
OLD_FILES+=usr/lib32/private/libheimipcs.so
OLD_LIBS+=usr/lib32/private/libheimipcs.so.11
OLD_FILES+=usr/lib32/private/libheimipcs_p.a
.endif
OLD_FILES+=usr/libexec/digest-service
OLD_FILES+=usr/libexec/hprop
OLD_FILES+=usr/libexec/hpropd
OLD_FILES+=usr/libexec/ipropd-master
OLD_FILES+=usr/libexec/ipropd-slave
OLD_FILES+=usr/libexec/kadmind
OLD_FILES+=usr/libexec/kcm
OLD_FILES+=usr/libexec/kdc
OLD_FILES+=usr/libexec/kdigest
OLD_FILES+=usr/libexec/kfd
OLD_FILES+=usr/libexec/kimpersonate
OLD_FILES+=usr/libexec/kpasswdd
OLD_FILES+=usr/sbin/kstash
OLD_FILES+=usr/sbin/ktutil
OLD_FILES+=usr/sbin/iprop-log
OLD_FILES+=usr/share/man/man1/kdestroy.1.gz
OLD_FILES+=usr/share/man/man1/kf.1.gz
OLD_FILES+=usr/share/man/man1/kinit.1.gz
OLD_FILES+=usr/share/man/man1/klist.1.gz
OLD_FILES+=usr/share/man/man1/kpasswd.1.gz
OLD_FILES+=usr/share/man/man1/krb5-config.1.gz
OLD_FILES+=usr/share/man/man1/kswitch.1.gz
OLD_FILES+=usr/share/man/man3/HDB.3.gz
OLD_FILES+=usr/share/man/man3/hdb__del.3.gz
OLD_FILES+=usr/share/man/man3/hdb__get.3.gz
OLD_FILES+=usr/share/man/man3/hdb__put.3.gz
OLD_FILES+=usr/share/man/man3/hdb_auth_status.3.gz
OLD_FILES+=usr/share/man/man3/hdb_check_constrained_delegation.3.gz
OLD_FILES+=usr/share/man/man3/hdb_check_pkinit_ms_upn_match.3.gz
OLD_FILES+=usr/share/man/man3/hdb_check_s4u2self.3.gz
OLD_FILES+=usr/share/man/man3/hdb_close.3.gz
OLD_FILES+=usr/share/man/man3/hdb_destroy.3.gz
OLD_FILES+=usr/share/man/man3/hdb_entry_ex.3.gz
OLD_FILES+=usr/share/man/man3/hdb_fetch_kvno.3.gz
OLD_FILES+=usr/share/man/man3/hdb_firstkey.3.gz
OLD_FILES+=usr/share/man/man3/hdb_free.3.gz
OLD_FILES+=usr/share/man/man3/hdb_get_realms.3.gz
OLD_FILES+=usr/share/man/man3/hdb_lock.3.gz
OLD_FILES+=usr/share/man/man3/hdb_name.3.gz
OLD_FILES+=usr/share/man/man3/hdb_nextkey.3.gz
OLD_FILES+=usr/share/man/man3/hdb_open.3.gz
OLD_FILES+=usr/share/man/man3/hdb_password.3.gz
OLD_FILES+=usr/share/man/man3/hdb_remove.3.gz
OLD_FILES+=usr/share/man/man3/hdb_rename.3.gz
OLD_FILES+=usr/share/man/man3/hdb_store.3.gz
OLD_FILES+=usr/share/man/man3/hdb_unlock.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_build_ntlm1_master.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_build_ntlm2_master.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_calculate_lm2.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_calculate_ntlm1.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_calculate_ntlm2.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_decode_targetinfo.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_encode_targetinfo.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_encode_type1.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_encode_type2.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_encode_type3.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_free_buf.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_free_targetinfo.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_free_type1.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_free_type2.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_free_type3.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_keyex_unwrap.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_nt_key.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_ntlmv2_key.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_verify_ntlm2.3.gz
OLD_FILES+=usr/share/man/man3/hx509.3.gz
OLD_FILES+=usr/share/man/man3/hx509_bitstring_print.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_sign.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_sign_self.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_add_crl_dp_uri.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_add_eku.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_add_san_hostname.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_add_san_jid.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_add_san_ms_upn.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_add_san_otherName.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_add_san_pkinit.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_add_san_rfc822name.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_free.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_init.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_ca.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_domaincontroller.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_notAfter.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_notAfter_lifetime.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_notBefore.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_proxy.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_serialnumber.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_spki.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_subject.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_template.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_unique.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_subject_expand.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_template_units.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_binary.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_check_eku.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_cmp.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_find_subjectAltName_otherName.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_free.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_get_SPKI.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_get_SPKI_AlgorithmIdentifier.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_get_attribute.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_get_base_subject.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_get_friendly_name.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_get_issuer.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_get_issuer_unique_id.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_get_notAfter.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_get_notBefore.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_get_serialnumber.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_get_subject.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_get_subject_unique_id.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_init.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_init_data.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_keyusage_print.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_ref.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_set_friendly_name.3.gz
OLD_FILES+=usr/share/man/man3/hx509_certs_add.3.gz
OLD_FILES+=usr/share/man/man3/hx509_certs_append.3.gz
OLD_FILES+=usr/share/man/man3/hx509_certs_end_seq.3.gz
OLD_FILES+=usr/share/man/man3/hx509_certs_filter.3.gz
OLD_FILES+=usr/share/man/man3/hx509_certs_find.3.gz
OLD_FILES+=usr/share/man/man3/hx509_certs_free.3.gz
OLD_FILES+=usr/share/man/man3/hx509_certs_info.3.gz
OLD_FILES+=usr/share/man/man3/hx509_certs_init.3.gz
OLD_FILES+=usr/share/man/man3/hx509_certs_iter_f.3.gz
OLD_FILES+=usr/share/man/man3/hx509_certs_merge.3.gz
OLD_FILES+=usr/share/man/man3/hx509_certs_next_cert.3.gz
OLD_FILES+=usr/share/man/man3/hx509_certs_start_seq.3.gz
OLD_FILES+=usr/share/man/man3/hx509_certs_store.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ci_print_names.3.gz
OLD_FILES+=usr/share/man/man3/hx509_clear_error_string.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cms.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cms_create_signed_1.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cms_envelope_1.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cms_unenvelope.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cms_unwrap_ContentInfo.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cms_verify_signed.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cms_wrap_ContentInfo.3.gz
OLD_FILES+=usr/share/man/man3/hx509_context_free.3.gz
OLD_FILES+=usr/share/man/man3/hx509_context_init.3.gz
OLD_FILES+=usr/share/man/man3/hx509_context_set_missing_revoke.3.gz
OLD_FILES+=usr/share/man/man3/hx509_crl_add_revoked_certs.3.gz
OLD_FILES+=usr/share/man/man3/hx509_crl_alloc.3.gz
OLD_FILES+=usr/share/man/man3/hx509_crl_free.3.gz
OLD_FILES+=usr/share/man/man3/hx509_crl_lifetime.3.gz
OLD_FILES+=usr/share/man/man3/hx509_crl_sign.3.gz
OLD_FILES+=usr/share/man/man3/hx509_crypto.3.gz
OLD_FILES+=usr/share/man/man3/hx509_env.3.gz
OLD_FILES+=usr/share/man/man3/hx509_env_add.3.gz
OLD_FILES+=usr/share/man/man3/hx509_env_add_binding.3.gz
OLD_FILES+=usr/share/man/man3/hx509_env_find.3.gz
OLD_FILES+=usr/share/man/man3/hx509_env_find_binding.3.gz
OLD_FILES+=usr/share/man/man3/hx509_env_free.3.gz
OLD_FILES+=usr/share/man/man3/hx509_env_lfind.3.gz
OLD_FILES+=usr/share/man/man3/hx509_err.3.gz
OLD_FILES+=usr/share/man/man3/hx509_error.3.gz
OLD_FILES+=usr/share/man/man3/hx509_free_error_string.3.gz
OLD_FILES+=usr/share/man/man3/hx509_free_octet_string_list.3.gz
OLD_FILES+=usr/share/man/man3/hx509_general_name_unparse.3.gz
OLD_FILES+=usr/share/man/man3/hx509_get_error_string.3.gz
OLD_FILES+=usr/share/man/man3/hx509_get_one_cert.3.gz
OLD_FILES+=usr/share/man/man3/hx509_keyset.3.gz
OLD_FILES+=usr/share/man/man3/hx509_lock.3.gz
OLD_FILES+=usr/share/man/man3/hx509_misc.3.gz
OLD_FILES+=usr/share/man/man3/hx509_name.3.gz
OLD_FILES+=usr/share/man/man3/hx509_name_binary.3.gz
OLD_FILES+=usr/share/man/man3/hx509_name_cmp.3.gz
OLD_FILES+=usr/share/man/man3/hx509_name_copy.3.gz
OLD_FILES+=usr/share/man/man3/hx509_name_expand.3.gz
OLD_FILES+=usr/share/man/man3/hx509_name_free.3.gz
OLD_FILES+=usr/share/man/man3/hx509_name_is_null_p.3.gz
OLD_FILES+=usr/share/man/man3/hx509_name_to_Name.3.gz
OLD_FILES+=usr/share/man/man3/hx509_name_to_string.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ocsp_request.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ocsp_verify.3.gz
OLD_FILES+=usr/share/man/man3/hx509_oid_print.3.gz
OLD_FILES+=usr/share/man/man3/hx509_oid_sprint.3.gz
OLD_FILES+=usr/share/man/man3/hx509_parse_name.3.gz
OLD_FILES+=usr/share/man/man3/hx509_peer.3.gz
OLD_FILES+=usr/share/man/man3/hx509_peer_info_add_cms_alg.3.gz
OLD_FILES+=usr/share/man/man3/hx509_peer_info_alloc.3.gz
OLD_FILES+=usr/share/man/man3/hx509_peer_info_free.3.gz
OLD_FILES+=usr/share/man/man3/hx509_peer_info_set_cert.3.gz
OLD_FILES+=usr/share/man/man3/hx509_peer_info_set_cms_algs.3.gz
OLD_FILES+=usr/share/man/man3/hx509_print.3.gz
OLD_FILES+=usr/share/man/man3/hx509_print_cert.3.gz
OLD_FILES+=usr/share/man/man3/hx509_print_stdout.3.gz
OLD_FILES+=usr/share/man/man3/hx509_query.3.gz
OLD_FILES+=usr/share/man/man3/hx509_query_alloc.3.gz
OLD_FILES+=usr/share/man/man3/hx509_query_free.3.gz
OLD_FILES+=usr/share/man/man3/hx509_query_match_cmp_func.3.gz
OLD_FILES+=usr/share/man/man3/hx509_query_match_eku.3.gz
OLD_FILES+=usr/share/man/man3/hx509_query_match_friendly_name.3.gz
OLD_FILES+=usr/share/man/man3/hx509_query_match_issuer_serial.3.gz
OLD_FILES+=usr/share/man/man3/hx509_query_match_option.3.gz
OLD_FILES+=usr/share/man/man3/hx509_query_statistic_file.3.gz
OLD_FILES+=usr/share/man/man3/hx509_query_unparse_stats.3.gz
OLD_FILES+=usr/share/man/man3/hx509_revoke.3.gz
OLD_FILES+=usr/share/man/man3/hx509_revoke_add_crl.3.gz
OLD_FILES+=usr/share/man/man3/hx509_revoke_add_ocsp.3.gz
OLD_FILES+=usr/share/man/man3/hx509_revoke_free.3.gz
OLD_FILES+=usr/share/man/man3/hx509_revoke_init.3.gz
OLD_FILES+=usr/share/man/man3/hx509_revoke_ocsp_print.3.gz
OLD_FILES+=usr/share/man/man3/hx509_revoke_verify.3.gz
OLD_FILES+=usr/share/man/man3/hx509_set_error_string.3.gz
OLD_FILES+=usr/share/man/man3/hx509_set_error_stringv.3.gz
OLD_FILES+=usr/share/man/man3/hx509_unparse_der_name.3.gz
OLD_FILES+=usr/share/man/man3/hx509_validate_cert.3.gz
OLD_FILES+=usr/share/man/man3/hx509_validate_ctx_add_flags.3.gz
OLD_FILES+=usr/share/man/man3/hx509_validate_ctx_free.3.gz
OLD_FILES+=usr/share/man/man3/hx509_validate_ctx_init.3.gz
OLD_FILES+=usr/share/man/man3/hx509_validate_ctx_set_print.3.gz
OLD_FILES+=usr/share/man/man3/hx509_verify.3.gz
OLD_FILES+=usr/share/man/man3/hx509_verify_attach_anchors.3.gz
OLD_FILES+=usr/share/man/man3/hx509_verify_attach_revoke.3.gz
OLD_FILES+=usr/share/man/man3/hx509_verify_ctx_f_allow_default_trustanchors.3.gz
OLD_FILES+=usr/share/man/man3/hx509_verify_destroy_ctx.3.gz
OLD_FILES+=usr/share/man/man3/hx509_verify_hostname.3.gz
OLD_FILES+=usr/share/man/man3/hx509_verify_init_ctx.3.gz
OLD_FILES+=usr/share/man/man3/hx509_verify_path.3.gz
OLD_FILES+=usr/share/man/man3/hx509_verify_set_max_depth.3.gz
OLD_FILES+=usr/share/man/man3/hx509_verify_set_proxy_certificate.3.gz
OLD_FILES+=usr/share/man/man3/hx509_verify_set_strict_rfc3280_verification.3.gz
OLD_FILES+=usr/share/man/man3/hx509_verify_set_time.3.gz
OLD_FILES+=usr/share/man/man3/hx509_verify_signature.3.gz
OLD_FILES+=usr/share/man/man3/hx509_xfree.3.gz
OLD_FILES+=usr/share/man/man3/k_afs_cell_of_file.3.gz
OLD_FILES+=usr/share/man/man3/k_hasafs.3.gz
OLD_FILES+=usr/share/man/man3/k_pioctl.3.gz
OLD_FILES+=usr/share/man/man3/k_setpag.3.gz
OLD_FILES+=usr/share/man/man3/k_unlog.3.gz
OLD_FILES+=usr/share/man/man3/kadm5_pwcheck.3.gz
OLD_FILES+=usr/share/man/man3/kafs.3.gz
OLD_FILES+=usr/share/man/man3/kafs5.3.gz
OLD_FILES+=usr/share/man/man3/kafs_set_verbose.3.gz
OLD_FILES+=usr/share/man/man3/kafs_settoken.3.gz
OLD_FILES+=usr/share/man/man3/kafs_settoken5.3.gz
OLD_FILES+=usr/share/man/man3/kafs_settoken_rxkad.3.gz
OLD_FILES+=usr/share/man/man3/krb5.3.gz
OLD_FILES+=usr/share/man/man3/krb524_convert_creds_kdc.3.gz
OLD_FILES+=usr/share/man/man3/krb524_convert_creds_kdc_ccache.3.gz
OLD_FILES+=usr/share/man/man3/krb5_425_conv_principal.3.gz
OLD_FILES+=usr/share/man/man3/krb5_425_conv_principal_ext.3.gz
OLD_FILES+=usr/share/man/man3/krb5_524_conv_principal.3.gz
OLD_FILES+=usr/share/man/man3/krb5_acc_ops.3.gz
OLD_FILES+=usr/share/man/man3/krb5_acl_match_file.3.gz
OLD_FILES+=usr/share/man/man3/krb5_acl_match_string.3.gz
OLD_FILES+=usr/share/man/man3/krb5_add_et_list.3.gz
OLD_FILES+=usr/share/man/man3/krb5_add_extra_addresses.3.gz
OLD_FILES+=usr/share/man/man3/krb5_add_ignore_addresses.3.gz
OLD_FILES+=usr/share/man/man3/krb5_addlog_dest.3.gz
OLD_FILES+=usr/share/man/man3/krb5_addlog_func.3.gz
OLD_FILES+=usr/share/man/man3/krb5_addr2sockaddr.3.gz
OLD_FILES+=usr/share/man/man3/krb5_address.3.gz
OLD_FILES+=usr/share/man/man3/krb5_address_compare.3.gz
OLD_FILES+=usr/share/man/man3/krb5_address_order.3.gz
OLD_FILES+=usr/share/man/man3/krb5_address_prefixlen_boundary.3.gz
OLD_FILES+=usr/share/man/man3/krb5_address_search.3.gz
OLD_FILES+=usr/share/man/man3/krb5_afslog.3.gz
OLD_FILES+=usr/share/man/man3/krb5_afslog_uid.3.gz
OLD_FILES+=usr/share/man/man3/krb5_allow_weak_crypto.3.gz
OLD_FILES+=usr/share/man/man3/krb5_aname_to_localname.3.gz
OLD_FILES+=usr/share/man/man3/krb5_anyaddr.3.gz
OLD_FILES+=usr/share/man/man3/krb5_appdefault.3.gz
OLD_FILES+=usr/share/man/man3/krb5_appdefault_boolean.3.gz
OLD_FILES+=usr/share/man/man3/krb5_appdefault_string.3.gz
OLD_FILES+=usr/share/man/man3/krb5_appdefault_time.3.gz
OLD_FILES+=usr/share/man/man3/krb5_append_addresses.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_free.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_genaddrs.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_getaddrs.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_getflags.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_getkey.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_getlocalsubkey.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_getrcache.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_getremotesubkey.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_getuserkey.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_init.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_initivector.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_setaddrs.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_setaddrs_from_fd.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_setflags.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_setivector.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_setkey.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_setlocalsubkey.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_setrcache.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_setremotesubkey.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_setuserkey.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_context.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_getauthenticator.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_getcksumtype.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_getkeytype.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_getlocalseqnumber.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_getremoteseqnumber.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_setcksumtype.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_setkeytype.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_setlocalseqnumber.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_setremoteseqnumber.3.gz
OLD_FILES+=usr/share/man/man3/krb5_build_principal.3.gz
OLD_FILES+=usr/share/man/man3/krb5_build_principal_ext.3.gz
OLD_FILES+=usr/share/man/man3/krb5_build_principal_va.3.gz
OLD_FILES+=usr/share/man/man3/krb5_build_principal_va_ext.3.gz
OLD_FILES+=usr/share/man/man3/krb5_c_enctype_compare.3.gz
OLD_FILES+=usr/share/man/man3/krb5_c_make_checksum.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_cache_end_seq_get.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_cache_get_first.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_cache_match.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_cache_next.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_clear_mcred.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_close.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_copy_cache.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_copy_creds.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_copy_match_f.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_default.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_default_name.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_destroy.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_end_seq_get.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_gen_new.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_get_config.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_get_flags.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_get_friendly_name.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_get_full_name.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_get_kdc_offset.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_get_lifetime.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_get_name.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_get_ops.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_get_prefix_ops.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_get_principal.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_get_type.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_get_version.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_initialize.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_last_change_time.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_move.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_new_unique.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_next_cred.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_register.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_remove_cred.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_resolve.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_retrieve_cred.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_set_config.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_set_default_name.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_set_flags.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_set_friendly_name.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_set_kdc_offset.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_start_seq_get.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_store_cred.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_support_switch.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_switch.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ccache.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ccache_intro.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cccol_cursor_free.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cccol_cursor_new.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cccol_cursor_next.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cccol_last_change_time.3.gz
OLD_FILES+=usr/share/man/man3/krb5_change_password.3.gz
OLD_FILES+=usr/share/man/man3/krb5_check_transited.3.gz
OLD_FILES+=usr/share/man/man3/krb5_checksum_is_collision_proof.3.gz
OLD_FILES+=usr/share/man/man3/krb5_checksum_is_keyed.3.gz
OLD_FILES+=usr/share/man/man3/krb5_checksumsize.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cksumtype_to_enctype.3.gz
OLD_FILES+=usr/share/man/man3/krb5_clear_error_message.3.gz
OLD_FILES+=usr/share/man/man3/krb5_clear_error_string.3.gz
OLD_FILES+=usr/share/man/man3/krb5_closelog.3.gz
OLD_FILES+=usr/share/man/man3/krb5_compare_creds.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_file_free.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_free_strings.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_get_bool.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_get_bool_default.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_get_list.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_get_string.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_get_string_default.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_get_strings.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_get_time.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_get_time_default.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_parse_file_multi.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_parse_string_multi.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_vget_bool.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_vget_bool_default.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_vget_list.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_vget_string.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_vget_string_default.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_vget_strings.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_vget_time.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_vget_time_default.3.gz
OLD_FILES+=usr/share/man/man3/krb5_copy_address.3.gz
OLD_FILES+=usr/share/man/man3/krb5_copy_addresses.3.gz
OLD_FILES+=usr/share/man/man3/krb5_copy_context.3.gz
OLD_FILES+=usr/share/man/man3/krb5_copy_creds.3.gz
OLD_FILES+=usr/share/man/man3/krb5_copy_creds_contents.3.gz
OLD_FILES+=usr/share/man/man3/krb5_copy_data.3.gz
OLD_FILES+=usr/share/man/man3/krb5_copy_host_realm.3.gz
OLD_FILES+=usr/share/man/man3/krb5_copy_keyblock.3.gz
OLD_FILES+=usr/share/man/man3/krb5_copy_keyblock_contents.3.gz
OLD_FILES+=usr/share/man/man3/krb5_copy_principal.3.gz
OLD_FILES+=usr/share/man/man3/krb5_copy_ticket.3.gz
OLD_FILES+=usr/share/man/man3/krb5_create_checksum.3.gz
OLD_FILES+=usr/share/man/man3/krb5_create_checksum_iov.3.gz
OLD_FILES+=usr/share/man/man3/krb5_credential.3.gz
OLD_FILES+=usr/share/man/man3/krb5_creds.3.gz
OLD_FILES+=usr/share/man/man3/krb5_creds_get_ticket_flags.3.gz
OLD_FILES+=usr/share/man/man3/krb5_crypto.3.gz
OLD_FILES+=usr/share/man/man3/krb5_crypto_destroy.3.gz
OLD_FILES+=usr/share/man/man3/krb5_crypto_fx_cf2.3.gz
OLD_FILES+=usr/share/man/man3/krb5_crypto_getblocksize.3.gz
OLD_FILES+=usr/share/man/man3/krb5_crypto_getconfoundersize.3.gz
OLD_FILES+=usr/share/man/man3/krb5_crypto_getenctype.3.gz
OLD_FILES+=usr/share/man/man3/krb5_crypto_getpadsize.3.gz
OLD_FILES+=usr/share/man/man3/krb5_crypto_init.3.gz
OLD_FILES+=usr/share/man/man3/krb5_crypto_iov.3.gz
OLD_FILES+=usr/share/man/man3/krb5_data_alloc.3.gz
OLD_FILES+=usr/share/man/man3/krb5_data_cmp.3.gz
OLD_FILES+=usr/share/man/man3/krb5_data_copy.3.gz
OLD_FILES+=usr/share/man/man3/krb5_data_ct_cmp.3.gz
OLD_FILES+=usr/share/man/man3/krb5_data_free.3.gz
OLD_FILES+=usr/share/man/man3/krb5_data_realloc.3.gz
OLD_FILES+=usr/share/man/man3/krb5_data_zero.3.gz
OLD_FILES+=usr/share/man/man3/krb5_decrypt.3.gz
OLD_FILES+=usr/share/man/man3/krb5_decrypt_EncryptedData.3.gz
OLD_FILES+=usr/share/man/man3/krb5_decrypt_iov_ivec.3.gz
OLD_FILES+=usr/share/man/man3/krb5_deprecated.3.gz
OLD_FILES+=usr/share/man/man3/krb5_digest.3.gz
OLD_FILES+=usr/share/man/man3/krb5_digest_probe.3.gz
OLD_FILES+=usr/share/man/man3/krb5_eai_to_heim_errno.3.gz
OLD_FILES+=usr/share/man/man3/krb5_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/krb5_encrypt_EncryptedData.3.gz
OLD_FILES+=usr/share/man/man3/krb5_encrypt_iov_ivec.3.gz
OLD_FILES+=usr/share/man/man3/krb5_enctype_disable.3.gz
OLD_FILES+=usr/share/man/man3/krb5_enctype_enable.3.gz
OLD_FILES+=usr/share/man/man3/krb5_enctype_valid.3.gz
OLD_FILES+=usr/share/man/man3/krb5_enctypes_compatible_keys.3.gz
OLD_FILES+=usr/share/man/man3/krb5_error.3.gz
OLD_FILES+=usr/share/man/man3/krb5_expand_hostname.3.gz
OLD_FILES+=usr/share/man/man3/krb5_expand_hostname_realms.3.gz
OLD_FILES+=usr/share/man/man3/krb5_fcc_ops.3.gz
OLD_FILES+=usr/share/man/man3/krb5_fileformats.3.gz
OLD_FILES+=usr/share/man/man3/krb5_find_padata.3.gz
OLD_FILES+=usr/share/man/man3/krb5_free_address.3.gz
OLD_FILES+=usr/share/man/man3/krb5_free_addresses.3.gz
OLD_FILES+=usr/share/man/man3/krb5_free_config_files.3.gz
OLD_FILES+=usr/share/man/man3/krb5_free_context.3.gz
OLD_FILES+=usr/share/man/man3/krb5_free_cred_contents.3.gz
OLD_FILES+=usr/share/man/man3/krb5_free_creds.3.gz
OLD_FILES+=usr/share/man/man3/krb5_free_creds_contents.3.gz
OLD_FILES+=usr/share/man/man3/krb5_free_data.3.gz
OLD_FILES+=usr/share/man/man3/krb5_free_data_contents.3.gz
OLD_FILES+=usr/share/man/man3/krb5_free_error_string.3.gz
OLD_FILES+=usr/share/man/man3/krb5_free_host_realm.3.gz
OLD_FILES+=usr/share/man/man3/krb5_free_keyblock.3.gz
OLD_FILES+=usr/share/man/man3/krb5_free_keyblock_contents.3.gz
OLD_FILES+=usr/share/man/man3/krb5_free_krbhst.3.gz
OLD_FILES+=usr/share/man/man3/krb5_free_principal.3.gz
OLD_FILES+=usr/share/man/man3/krb5_free_ticket.3.gz
OLD_FILES+=usr/share/man/man3/krb5_free_unparsed_name.3.gz
OLD_FILES+=usr/share/man/man3/krb5_fwd_tgt_creds.3.gz
OLD_FILES+=usr/share/man/man3/krb5_generate_random_block.3.gz
OLD_FILES+=usr/share/man/man3/krb5_generate_subkey.3.gz
OLD_FILES+=usr/share/man/man3/krb5_generate_subkey_extended.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_all_client_addrs.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_all_server_addrs.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_cred_from_kdc.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_cred_from_kdc_opt.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_credentials.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_creds.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_default_config_files.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_default_in_tkt_etypes.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_default_principal.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_default_realm.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_default_realms.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_dns_canonicalize_hostname.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_extra_addresses.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_fcache_version.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_forwarded_creds.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_host_realm.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_ignore_addresses.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_in_cred.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_in_tkt_with_keytab.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_in_tkt_with_password.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_in_tkt_with_skey.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_init_creds.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_init_creds_keyblock.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_init_creds_keytab.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_init_creds_opt_alloc.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_init_creds_opt_free.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_init_creds_opt_get_error.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_init_creds_opt_init.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_init_creds_password.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_kdc_sec_offset.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_krb524hst.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_krb_admin_hst.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_krb_changepw_hst.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_krbhst.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_max_time_skew.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_use_admin_kdc.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_validated_creds.3.gz
OLD_FILES+=usr/share/man/man3/krb5_getportbyname.3.gz
OLD_FILES+=usr/share/man/man3/krb5_h_addr2addr.3.gz
OLD_FILES+=usr/share/man/man3/krb5_h_addr2sockaddr.3.gz
OLD_FILES+=usr/share/man/man3/krb5_h_errno_to_heim_errno.3.gz
OLD_FILES+=usr/share/man/man3/krb5_init_context.3.gz
OLD_FILES+=usr/share/man/man3/krb5_init_creds_free.3.gz
OLD_FILES+=usr/share/man/man3/krb5_init_creds_get.3.gz
OLD_FILES+=usr/share/man/man3/krb5_init_creds_get_error.3.gz
OLD_FILES+=usr/share/man/man3/krb5_init_creds_init.3.gz
OLD_FILES+=usr/share/man/man3/krb5_init_creds_intro.3.gz
OLD_FILES+=usr/share/man/man3/krb5_init_creds_set_keytab.3.gz
OLD_FILES+=usr/share/man/man3/krb5_init_creds_set_password.3.gz
OLD_FILES+=usr/share/man/man3/krb5_init_creds_set_service.3.gz
OLD_FILES+=usr/share/man/man3/krb5_init_creds_step.3.gz
OLD_FILES+=usr/share/man/man3/krb5_init_ets.3.gz
OLD_FILES+=usr/share/man/man3/krb5_initlog.3.gz
OLD_FILES+=usr/share/man/man3/krb5_introduction.3.gz
OLD_FILES+=usr/share/man/man3/krb5_is_config_principal.3.gz
OLD_FILES+=usr/share/man/man3/krb5_is_thread_safe.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kerberos_enctypes.3.gz
OLD_FILES+=usr/share/man/man3/krb5_keyblock_get_enctype.3.gz
OLD_FILES+=usr/share/man/man3/krb5_keyblock_init.3.gz
OLD_FILES+=usr/share/man/man3/krb5_keyblock_zero.3.gz
OLD_FILES+=usr/share/man/man3/krb5_keytab.3.gz
OLD_FILES+=usr/share/man/man3/krb5_keytab_intro.3.gz
OLD_FILES+=usr/share/man/man3/krb5_keytab_key_proc.3.gz
OLD_FILES+=usr/share/man/man3/krb5_keytype_to_enctypes.3.gz
OLD_FILES+=usr/share/man/man3/krb5_keytype_to_enctypes_default.3.gz
OLD_FILES+=usr/share/man/man3/krb5_keytype_to_string.3.gz
OLD_FILES+=usr/share/man/man3/krb5_krbhst_format_string.3.gz
OLD_FILES+=usr/share/man/man3/krb5_krbhst_free.3.gz
OLD_FILES+=usr/share/man/man3/krb5_krbhst_get_addrinfo.3.gz
OLD_FILES+=usr/share/man/man3/krb5_krbhst_init.3.gz
OLD_FILES+=usr/share/man/man3/krb5_krbhst_next.3.gz
OLD_FILES+=usr/share/man/man3/krb5_krbhst_next_as_string.3.gz
OLD_FILES+=usr/share/man/man3/krb5_krbhst_reset.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_add_entry.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_close.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_compare.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_copy_entry_contents.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_default.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_default_modify_name.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_default_name.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_destroy.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_end_seq_get.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_free_entry.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_get_entry.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_get_full_name.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_get_name.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_get_type.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_have_content.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_next_entry.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_read_service_key.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_register.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_remove_entry.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_resolve.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_start_seq_get.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kuserok.3.gz
OLD_FILES+=usr/share/man/man3/krb5_log.3.gz
OLD_FILES+=usr/share/man/man3/krb5_log_msg.3.gz
OLD_FILES+=usr/share/man/man3/krb5_make_addrport.3.gz
OLD_FILES+=usr/share/man/man3/krb5_make_principal.3.gz
OLD_FILES+=usr/share/man/man3/krb5_max_sockaddr_size.3.gz
OLD_FILES+=usr/share/man/man3/krb5_mcc_ops.3.gz
OLD_FILES+=usr/share/man/man3/krb5_mk_req.3.gz
OLD_FILES+=usr/share/man/man3/krb5_mk_safe.3.gz
OLD_FILES+=usr/share/man/man3/krb5_openlog.3.gz
OLD_FILES+=usr/share/man/man3/krb5_pac.3.gz
OLD_FILES+=usr/share/man/man3/krb5_pac_get_buffer.3.gz
OLD_FILES+=usr/share/man/man3/krb5_pac_verify.3.gz
OLD_FILES+=usr/share/man/man3/krb5_parse_address.3.gz
OLD_FILES+=usr/share/man/man3/krb5_parse_name.3.gz
OLD_FILES+=usr/share/man/man3/krb5_parse_name_flags.3.gz
OLD_FILES+=usr/share/man/man3/krb5_parse_nametype.3.gz
OLD_FILES+=usr/share/man/man3/krb5_password_key_proc.3.gz
OLD_FILES+=usr/share/man/man3/krb5_plugin_register.3.gz
OLD_FILES+=usr/share/man/man3/krb5_prepend_config_files_default.3.gz
OLD_FILES+=usr/share/man/man3/krb5_princ_realm.3.gz
OLD_FILES+=usr/share/man/man3/krb5_princ_set_realm.3.gz
OLD_FILES+=usr/share/man/man3/krb5_principal.3.gz
OLD_FILES+=usr/share/man/man3/krb5_principal_compare.3.gz
OLD_FILES+=usr/share/man/man3/krb5_principal_compare_any_realm.3.gz
OLD_FILES+=usr/share/man/man3/krb5_principal_get_comp_string.3.gz
OLD_FILES+=usr/share/man/man3/krb5_principal_get_num_comp.3.gz
OLD_FILES+=usr/share/man/man3/krb5_principal_get_realm.3.gz
OLD_FILES+=usr/share/man/man3/krb5_principal_get_type.3.gz
OLD_FILES+=usr/share/man/man3/krb5_principal_intro.3.gz
OLD_FILES+=usr/share/man/man3/krb5_principal_is_krbtgt.3.gz
OLD_FILES+=usr/share/man/man3/krb5_principal_match.3.gz
OLD_FILES+=usr/share/man/man3/krb5_principal_set_realm.3.gz
OLD_FILES+=usr/share/man/man3/krb5_principal_set_type.3.gz
OLD_FILES+=usr/share/man/man3/krb5_print_address.3.gz
OLD_FILES+=usr/share/man/man3/krb5_random_to_key.3.gz
OLD_FILES+=usr/share/man/man3/krb5_rcache.3.gz
OLD_FILES+=usr/share/man/man3/krb5_rd_error.3.gz
OLD_FILES+=usr/share/man/man3/krb5_rd_req_ctx.3.gz
OLD_FILES+=usr/share/man/man3/krb5_rd_req_in_ctx_alloc.3.gz
OLD_FILES+=usr/share/man/man3/krb5_rd_req_in_set_keytab.3.gz
OLD_FILES+=usr/share/man/man3/krb5_rd_req_in_set_pac_check.3.gz
OLD_FILES+=usr/share/man/man3/krb5_rd_req_out_ctx_free.3.gz
OLD_FILES+=usr/share/man/man3/krb5_rd_req_out_get_server.3.gz
OLD_FILES+=usr/share/man/man3/krb5_rd_safe.3.gz
OLD_FILES+=usr/share/man/man3/krb5_realm_compare.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ret_address.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ret_addrs.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ret_authdata.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ret_creds.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ret_creds_tag.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ret_data.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ret_int16.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ret_int32.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ret_int8.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ret_keyblock.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ret_principal.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ret_string.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ret_stringz.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ret_times.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ret_uint16.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ret_uint32.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ret_uint8.3.gz
OLD_FILES+=usr/share/man/man3/krb5_set_config_files.3.gz
OLD_FILES+=usr/share/man/man3/krb5_set_default_in_tkt_etypes.3.gz
OLD_FILES+=usr/share/man/man3/krb5_set_default_realm.3.gz
OLD_FILES+=usr/share/man/man3/krb5_set_dns_canonicalize_hostname.3.gz
OLD_FILES+=usr/share/man/man3/krb5_set_error_message.3.gz
OLD_FILES+=usr/share/man/man3/krb5_set_error_string.3.gz
OLD_FILES+=usr/share/man/man3/krb5_set_extra_addresses.3.gz
OLD_FILES+=usr/share/man/man3/krb5_set_fcache_version.3.gz
OLD_FILES+=usr/share/man/man3/krb5_set_home_dir_access.3.gz
OLD_FILES+=usr/share/man/man3/krb5_set_ignore_addresses.3.gz
OLD_FILES+=usr/share/man/man3/krb5_set_kdc_sec_offset.3.gz
OLD_FILES+=usr/share/man/man3/krb5_set_max_time_skew.3.gz
OLD_FILES+=usr/share/man/man3/krb5_set_password.3.gz
OLD_FILES+=usr/share/man/man3/krb5_set_real_time.3.gz
OLD_FILES+=usr/share/man/man3/krb5_set_use_admin_kdc.3.gz
OLD_FILES+=usr/share/man/man3/krb5_sname_to_principal.3.gz
OLD_FILES+=usr/share/man/man3/krb5_sock_to_principal.3.gz
OLD_FILES+=usr/share/man/man3/krb5_sockaddr2address.3.gz
OLD_FILES+=usr/share/man/man3/krb5_sockaddr2port.3.gz
OLD_FILES+=usr/share/man/man3/krb5_sockaddr_uninteresting.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_clear_flags.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_emem.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_free.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_from_data.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_from_fd.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_from_mem.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_from_readonly_mem.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_get_byteorder.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_get_eof_code.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_is_flags.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_read.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_seek.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_set_byteorder.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_set_eof_code.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_set_flags.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_set_max_alloc.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_to_data.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_truncate.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_write.3.gz
OLD_FILES+=usr/share/man/man3/krb5_store_address.3.gz
OLD_FILES+=usr/share/man/man3/krb5_store_addrs.3.gz
OLD_FILES+=usr/share/man/man3/krb5_store_authdata.3.gz
OLD_FILES+=usr/share/man/man3/krb5_store_creds.3.gz
OLD_FILES+=usr/share/man/man3/krb5_store_creds_tag.3.gz
OLD_FILES+=usr/share/man/man3/krb5_store_data.3.gz
OLD_FILES+=usr/share/man/man3/krb5_store_int16.3.gz
OLD_FILES+=usr/share/man/man3/krb5_store_int32.3.gz
OLD_FILES+=usr/share/man/man3/krb5_store_int8.3.gz
OLD_FILES+=usr/share/man/man3/krb5_store_keyblock.3.gz
OLD_FILES+=usr/share/man/man3/krb5_store_principal.3.gz
OLD_FILES+=usr/share/man/man3/krb5_store_string.3.gz
OLD_FILES+=usr/share/man/man3/krb5_store_stringz.3.gz
OLD_FILES+=usr/share/man/man3/krb5_store_times.3.gz
OLD_FILES+=usr/share/man/man3/krb5_store_uint16.3.gz
OLD_FILES+=usr/share/man/man3/krb5_store_uint32.3.gz
OLD_FILES+=usr/share/man/man3/krb5_store_uint8.3.gz
OLD_FILES+=usr/share/man/man3/krb5_string_to_key.3.gz
OLD_FILES+=usr/share/man/man3/krb5_string_to_keytype.3.gz
OLD_FILES+=usr/share/man/man3/krb5_support.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ticket.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ticket_get_authorization_data_type.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ticket_get_client.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ticket_get_endtime.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ticket_get_flags.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ticket_get_server.3.gz
OLD_FILES+=usr/share/man/man3/krb5_timeofday.3.gz
OLD_FILES+=usr/share/man/man3/krb5_unparse_name.3.gz
OLD_FILES+=usr/share/man/man3/krb5_unparse_name_fixed.3.gz
OLD_FILES+=usr/share/man/man3/krb5_unparse_name_fixed_flags.3.gz
OLD_FILES+=usr/share/man/man3/krb5_unparse_name_fixed_short.3.gz
OLD_FILES+=usr/share/man/man3/krb5_unparse_name_flags.3.gz
OLD_FILES+=usr/share/man/man3/krb5_unparse_name_short.3.gz
OLD_FILES+=usr/share/man/man3/krb5_us_timeofday.3.gz
OLD_FILES+=usr/share/man/man3/krb5_v4compat.3.gz
OLD_FILES+=usr/share/man/man3/krb5_verify_checksum.3.gz
OLD_FILES+=usr/share/man/man3/krb5_verify_checksum_iov.3.gz
OLD_FILES+=usr/share/man/man3/krb5_verify_init_creds.3.gz
OLD_FILES+=usr/share/man/man3/krb5_verify_opt_init.3.gz
OLD_FILES+=usr/share/man/man3/krb5_verify_opt_set_flags.3.gz
OLD_FILES+=usr/share/man/man3/krb5_verify_opt_set_keytab.3.gz
OLD_FILES+=usr/share/man/man3/krb5_verify_opt_set_secure.3.gz
OLD_FILES+=usr/share/man/man3/krb5_verify_opt_set_service.3.gz
OLD_FILES+=usr/share/man/man3/krb5_verify_user.3.gz
OLD_FILES+=usr/share/man/man3/krb5_verify_user_lrealm.3.gz
OLD_FILES+=usr/share/man/man3/krb5_verify_user_opt.3.gz
OLD_FILES+=usr/share/man/man3/krb5_vlog.3.gz
OLD_FILES+=usr/share/man/man3/krb5_vlog_msg.3.gz
OLD_FILES+=usr/share/man/man3/krb5_vset_error_string.3.gz
OLD_FILES+=usr/share/man/man3/krb5_vwarn.3.gz
OLD_FILES+=usr/share/man/man3/krb_afslog.3.gz
OLD_FILES+=usr/share/man/man3/krb_afslog_uid.3.gz
OLD_FILES+=usr/share/man/man3/ntlm_buf.3.gz
OLD_FILES+=usr/share/man/man3/ntlm_core.3.gz
OLD_FILES+=usr/share/man/man3/ntlm_type1.3.gz
OLD_FILES+=usr/share/man/man3/ntlm_type2.3.gz
OLD_FILES+=usr/share/man/man3/ntlm_type3.3.gz
OLD_FILES+=usr/share/man/man5/krb5.conf.5.gz
OLD_FILES+=usr/share/man/man8/hprop.8.gz
OLD_FILES+=usr/share/man/man8/hpropd.8.gz
OLD_FILES+=usr/share/man/man8/iprop-log.8.gz
OLD_FILES+=usr/share/man/man8/iprop.8.gz
OLD_FILES+=usr/share/man/man8/kadmin.8.gz
OLD_FILES+=usr/share/man/man8/kadmind.8.gz
OLD_FILES+=usr/share/man/man8/kcm.8.gz
OLD_FILES+=usr/share/man/man8/kdc.8.gz
OLD_FILES+=usr/share/man/man8/kdigest.8.gz
OLD_FILES+=usr/share/man/man8/kerberos.8.gz
OLD_FILES+=usr/share/man/man8/kimpersonate.8.gz
OLD_FILES+=usr/share/man/man8/kpasswdd.8.gz
OLD_FILES+=usr/share/man/man8/kstash.8.gz
OLD_FILES+=usr/share/man/man8/ktutil.8.gz
OLD_FILES+=usr/share/man/man8/pam_krb5.8.gz
OLD_FILES+=usr/share/man/man8/pam_ksu.8.gz
OLD_FILES+=usr/share/man/man8/string2key.8.gz
OLD_FILES+=usr/share/man/man8/verify_krb5_conf.8.gz
.endif
.if ${MK_KERBEROS_SUPPORT} == no
OLD_FILES+=usr/bin/compile_et
OLD_FILES+=usr/include/com_err.h
OLD_FILES+=usr/include/com_right.h
OLD_FILES+=usr/lib/libcom_err.a
OLD_FILES+=usr/lib/libcom_err.so
OLD_LIBS+=usr/lib/libcom_err.so.5
OLD_FILES+=usr/lib/libcom_err_p.a
OLD_FILES+=usr/lib32/libcom_err.a
OLD_FILES+=usr/lib32/libcom_err.so
OLD_LIBS+=usr/lib32/libcom_err.so.5
OLD_FILES+=usr/lib32/libcom_err_p.a
OLD_FILES+=usr/share/man/man1/compile_et.1.gz
OLD_FILES+=usr/share/man/man3/com_err.3.gz
.endif
.if ${MK_LDNS} == no
OLD_FILES+=usr/lib/private/libldns.a
OLD_FILES+=usr/lib/private/libldns.so
OLD_LIBS+=usr/lib/private/libldns.so.5
OLD_FILES+=usr/lib/private/libldns_p.a
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64"
OLD_FILES+=usr/lib32/private/libldns.a
OLD_FILES+=usr/lib32/private/libldns.so
OLD_LIBS+=usr/lib32/private/libldns.so.5
OLD_FILES+=usr/lib32/private/libldns_p.a
.endif
.endif
.if ${MK_LDNS_UTILS} == no
OLD_FILES+=usr/bin/drill
OLD_FILES+=usr/share/man/man1/drill.1.gz
OLD_FILES+=usr/bin/host
OLD_FILES+=usr/share/man/man1/host.1.gz
.endif
.if ${MK_LEGACY_CONSOLE} == no
OLD_FILES+=usr/sbin/kbdcontrol
OLD_FILES+=usr/sbin/kbdmap
OLD_FILES+=usr/sbin/moused
OLD_FILES+=usr/sbin/vidcontrol
OLD_FILES+=usr/sbin/vidfont
OLD_FILES+=usr/share/man/man1/kbdcontrol.1.gz
OLD_FILES+=usr/share/man/man1/kbdmap.1.gz
OLD_FILES+=usr/share/man/man1/vidcontrol.1.gz
OLD_FILES+=usr/share/man/man1/vidfont.1.gz
OLD_FILES+=usr/share/man/man5/kbdmap.5.gz
OLD_FILES+=usr/share/man/man5/keymap.5.gz
OLD_FILES+=usr/share/man/man8/moused.8.gz
.endif
.if ${MK_LIB32} == no
OLD_FILES+=etc/mtree/BSD.lib32.dist
OLD_FILES+=libexec/ld-elf32.so.1
. if exists(${DESTDIR}/usr/lib32)
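# The remaining lib32 entries depend on what is actually installed, so they
# are collected by scanning the installed tree and stripping the ${DESTDIR}
# prefix.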
LIB32_DIRS!=find ${DESTDIR}/usr/lib32 -type d \
		| sed -e 's,^${DESTDIR}/,,'; echo
LIB32_FILES!=find ${DESTDIR}/usr/lib32 \! -type d \
		\! -name "lib*.so*" | sed -e 's,^${DESTDIR}/,,'; echo
LIB32_LIBS!=find ${DESTDIR}/usr/lib32 \! -type d \
		-name "lib*.so*" | sed -e 's,^${DESTDIR}/,,'; echo
OLD_DIRS+=${LIB32_DIRS}
OLD_FILES+=${LIB32_FILES}
OLD_LIBS+=${LIB32_LIBS}
. endif
. if ${MK_DEBUG_FILES} == no
. if exists(${DESTDIR}/usr/lib/debug/usr/lib32)
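# The same dynamic scan, repeated for the standalone debug files.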
DEBUG_LIB32_DIRS!=find ${DESTDIR}/usr/lib/debug/usr/lib32 -type d \
		| sed -e 's,^${DESTDIR}/,,'; echo
DEBUG_LIB32_FILES!=find ${DESTDIR}/usr/lib/debug/usr/lib32 \! -type d \
		\! -name "lib*.so*" | sed -e 's,^${DESTDIR}/,,'; echo
DEBUG_LIB32_LIBS!=find ${DESTDIR}/usr/lib/debug/usr/lib32 \! -type d \
		-name "lib*.so*" | sed -e 's,^${DESTDIR}/,,'; echo
OLD_DIRS+=${DEBUG_LIB32_DIRS}
OLD_FILES+=${DEBUG_LIB32_FILES}
OLD_LIBS+=${DEBUG_LIB32_LIBS}
. endif
. endif
.endif
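# A minimal sketch (assuming a built world and a populated ${DESTDIR}) of
# how the dynamically generated lib32 lists above can be previewed:
# __MAKE_CONF=/dev/null make -f Makefile.inc1 -DWITHOUT_LIB32 \
#	-V OLD_FILES -V OLD_LIBS -V OLD_DIRS | xargs -n1 | grep '^usr/lib32'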
.if ${MK_LIBCPLUSPLUS} == no
OLD_LIBS+=lib/libcxxrt.so.1
OLD_FILES+=usr/lib/libc++.a
OLD_FILES+=usr/lib/libc++_p.a
OLD_FILES+=usr/lib/libc++experimental.a
OLD_FILES+=usr/lib/libc++fs.a
OLD_FILES+=usr/lib/libc++.so
OLD_LIBS+=usr/lib/libc++.so.1
OLD_FILES+=usr/lib/libcxxrt.a
OLD_FILES+=usr/lib/libcxxrt.so
OLD_FILES+=usr/lib/libcxxrt_p.a
OLD_FILES+=usr/include/c++/v1/__bit_reference
OLD_FILES+=usr/include/c++/v1/__bsd_locale_defaults.h
OLD_FILES+=usr/include/c++/v1/__bsd_locale_fallbacks.h
OLD_FILES+=usr/include/c++/v1/__config
OLD_FILES+=usr/include/c++/v1/__debug
OLD_FILES+=usr/include/c++/v1/__errc
OLD_FILES+=usr/include/c++/v1/__functional_03
OLD_FILES+=usr/include/c++/v1/__functional_base
OLD_FILES+=usr/include/c++/v1/__functional_base_03
OLD_FILES+=usr/include/c++/v1/__hash_table
OLD_FILES+=usr/include/c++/v1/__libcpp_version
OLD_FILES+=usr/include/c++/v1/__locale
OLD_FILES+=usr/include/c++/v1/__mutex_base
OLD_FILES+=usr/include/c++/v1/__node_handle
OLD_FILES+=usr/include/c++/v1/__nullptr
OLD_FILES+=usr/include/c++/v1/__split_buffer
OLD_FILES+=usr/include/c++/v1/__sso_allocator
OLD_FILES+=usr/include/c++/v1/__std_stream
OLD_FILES+=usr/include/c++/v1/__string
OLD_FILES+=usr/include/c++/v1/__threading_support
OLD_FILES+=usr/include/c++/v1/__tree
OLD_FILES+=usr/include/c++/v1/__tuple
OLD_FILES+=usr/include/c++/v1/__undef_macros
OLD_FILES+=usr/include/c++/v1/algorithm
OLD_FILES+=usr/include/c++/v1/any
OLD_FILES+=usr/include/c++/v1/array
OLD_FILES+=usr/include/c++/v1/atomic
OLD_FILES+=usr/include/c++/v1/bit
OLD_FILES+=usr/include/c++/v1/bitset
OLD_FILES+=usr/include/c++/v1/cassert
OLD_FILES+=usr/include/c++/v1/ccomplex
OLD_FILES+=usr/include/c++/v1/cctype
OLD_FILES+=usr/include/c++/v1/cerrno
OLD_FILES+=usr/include/c++/v1/cfenv
OLD_FILES+=usr/include/c++/v1/cfloat
OLD_FILES+=usr/include/c++/v1/charconv
OLD_FILES+=usr/include/c++/v1/chrono
OLD_FILES+=usr/include/c++/v1/cinttypes
OLD_FILES+=usr/include/c++/v1/ciso646
OLD_FILES+=usr/include/c++/v1/climits
OLD_FILES+=usr/include/c++/v1/clocale
OLD_FILES+=usr/include/c++/v1/cmath
OLD_FILES+=usr/include/c++/v1/codecvt
OLD_FILES+=usr/include/c++/v1/compare
OLD_FILES+=usr/include/c++/v1/complex
OLD_FILES+=usr/include/c++/v1/complex.h
OLD_FILES+=usr/include/c++/v1/condition_variable
OLD_FILES+=usr/include/c++/v1/csetjmp
OLD_FILES+=usr/include/c++/v1/csignal
OLD_FILES+=usr/include/c++/v1/cstdarg
OLD_FILES+=usr/include/c++/v1/cstdbool
OLD_FILES+=usr/include/c++/v1/cstddef
OLD_FILES+=usr/include/c++/v1/cstdint
OLD_FILES+=usr/include/c++/v1/cstdio
OLD_FILES+=usr/include/c++/v1/cstdlib
OLD_FILES+=usr/include/c++/v1/cstring
OLD_FILES+=usr/include/c++/v1/ctgmath
OLD_FILES+=usr/include/c++/v1/ctime
OLD_FILES+=usr/include/c++/v1/ctype.h
OLD_FILES+=usr/include/c++/v1/cwchar
OLD_FILES+=usr/include/c++/v1/cwctype
OLD_FILES+=usr/include/c++/v1/cxxabi.h
OLD_FILES+=usr/include/c++/v1/deque
OLD_FILES+=usr/include/c++/v1/errno.h
OLD_FILES+=usr/include/c++/v1/exception
OLD_FILES+=usr/include/c++/v1/experimental/__config
OLD_FILES+=usr/include/c++/v1/experimental/__memory
OLD_FILES+=usr/include/c++/v1/experimental/algorithm
OLD_FILES+=usr/include/c++/v1/experimental/any
OLD_FILES+=usr/include/c++/v1/experimental/chrono
OLD_FILES+=usr/include/c++/v1/experimental/coroutine
OLD_FILES+=usr/include/c++/v1/experimental/deque
OLD_FILES+=usr/include/c++/v1/experimental/dynarray
OLD_FILES+=usr/include/c++/v1/experimental/filesystem
OLD_FILES+=usr/include/c++/v1/experimental/forward_list
OLD_FILES+=usr/include/c++/v1/experimental/functional
OLD_FILES+=usr/include/c++/v1/experimental/iterator
OLD_FILES+=usr/include/c++/v1/experimental/list
OLD_FILES+=usr/include/c++/v1/experimental/map
OLD_FILES+=usr/include/c++/v1/experimental/memory_resource
OLD_FILES+=usr/include/c++/v1/experimental/numeric
OLD_FILES+=usr/include/c++/v1/experimental/optional
OLD_FILES+=usr/include/c++/v1/experimental/propagate_const
OLD_FILES+=usr/include/c++/v1/experimental/ratio
OLD_FILES+=usr/include/c++/v1/experimental/regex
OLD_FILES+=usr/include/c++/v1/experimental/set
OLD_FILES+=usr/include/c++/v1/experimental/simd
OLD_FILES+=usr/include/c++/v1/experimental/string
OLD_FILES+=usr/include/c++/v1/experimental/string_view
OLD_FILES+=usr/include/c++/v1/experimental/system_error
OLD_FILES+=usr/include/c++/v1/experimental/tuple
OLD_FILES+=usr/include/c++/v1/experimental/type_traits
OLD_FILES+=usr/include/c++/v1/experimental/unordered_map
OLD_FILES+=usr/include/c++/v1/experimental/unordered_set
OLD_FILES+=usr/include/c++/v1/experimental/utility
OLD_FILES+=usr/include/c++/v1/experimental/vector
OLD_FILES+=usr/include/c++/v1/ext/__hash
OLD_FILES+=usr/include/c++/v1/ext/hash_map
OLD_FILES+=usr/include/c++/v1/ext/hash_set
OLD_FILES+=usr/include/c++/v1/filesystem
OLD_FILES+=usr/include/c++/v1/float.h
OLD_FILES+=usr/include/c++/v1/forward_list
OLD_FILES+=usr/include/c++/v1/fstream
OLD_FILES+=usr/include/c++/v1/functional
OLD_FILES+=usr/include/c++/v1/future
OLD_FILES+=usr/include/c++/v1/initializer_list
OLD_FILES+=usr/include/c++/v1/inttypes.h
OLD_FILES+=usr/include/c++/v1/iomanip
OLD_FILES+=usr/include/c++/v1/ios
OLD_FILES+=usr/include/c++/v1/iosfwd
OLD_FILES+=usr/include/c++/v1/iostream
OLD_FILES+=usr/include/c++/v1/istream
OLD_FILES+=usr/include/c++/v1/iterator
OLD_FILES+=usr/include/c++/v1/limits
OLD_FILES+=usr/include/c++/v1/limits.h
OLD_FILES+=usr/include/c++/v1/list
OLD_FILES+=usr/include/c++/v1/locale
OLD_FILES+=usr/include/c++/v1/locale.h
OLD_FILES+=usr/include/c++/v1/map
OLD_FILES+=usr/include/c++/v1/math.h
OLD_FILES+=usr/include/c++/v1/memory
OLD_FILES+=usr/include/c++/v1/mutex
OLD_FILES+=usr/include/c++/v1/new
OLD_FILES+=usr/include/c++/v1/numeric
OLD_FILES+=usr/include/c++/v1/optional
OLD_FILES+=usr/include/c++/v1/ostream
OLD_FILES+=usr/include/c++/v1/queue
OLD_FILES+=usr/include/c++/v1/random
OLD_FILES+=usr/include/c++/v1/ratio
OLD_FILES+=usr/include/c++/v1/regex
OLD_FILES+=usr/include/c++/v1/scoped_allocator
OLD_FILES+=usr/include/c++/v1/set
OLD_FILES+=usr/include/c++/v1/setjmp.h
OLD_FILES+=usr/include/c++/v1/shared_mutex
OLD_FILES+=usr/include/c++/v1/span
OLD_FILES+=usr/include/c++/v1/sstream
OLD_FILES+=usr/include/c++/v1/stack
OLD_FILES+=usr/include/c++/v1/stdbool.h
OLD_FILES+=usr/include/c++/v1/stddef.h
OLD_FILES+=usr/include/c++/v1/stdexcept
OLD_FILES+=usr/include/c++/v1/stdint.h
OLD_FILES+=usr/include/c++/v1/stdio.h
OLD_FILES+=usr/include/c++/v1/stdlib.h
OLD_FILES+=usr/include/c++/v1/streambuf
OLD_FILES+=usr/include/c++/v1/string
OLD_FILES+=usr/include/c++/v1/string.h
OLD_FILES+=usr/include/c++/v1/string_view
OLD_FILES+=usr/include/c++/v1/strstream
OLD_FILES+=usr/include/c++/v1/system_error
OLD_FILES+=usr/include/c++/v1/tgmath.h
OLD_FILES+=usr/include/c++/v1/thread
OLD_FILES+=usr/include/c++/v1/version
OLD_FILES+=usr/include/c++/v1/tr1/__bit_reference
OLD_FILES+=usr/include/c++/v1/tr1/__bsd_locale_defaults.h
OLD_FILES+=usr/include/c++/v1/tr1/__bsd_locale_fallbacks.h
OLD_FILES+=usr/include/c++/v1/tr1/__config
OLD_FILES+=usr/include/c++/v1/tr1/__debug
OLD_FILES+=usr/include/c++/v1/tr1/__functional_03
OLD_FILES+=usr/include/c++/v1/tr1/__functional_base
OLD_FILES+=usr/include/c++/v1/tr1/__functional_base_03
OLD_FILES+=usr/include/c++/v1/tr1/__hash_table
OLD_FILES+=usr/include/c++/v1/tr1/__libcpp_version
OLD_FILES+=usr/include/c++/v1/tr1/__locale
OLD_FILES+=usr/include/c++/v1/tr1/__mutex_base
OLD_FILES+=usr/include/c++/v1/tr1/__nullptr
OLD_FILES+=usr/include/c++/v1/tr1/__split_buffer
OLD_FILES+=usr/include/c++/v1/tr1/__sso_allocator
OLD_FILES+=usr/include/c++/v1/tr1/__std_stream
OLD_FILES+=usr/include/c++/v1/tr1/__string
OLD_FILES+=usr/include/c++/v1/tr1/__threading_support
OLD_FILES+=usr/include/c++/v1/tr1/__tree
OLD_FILES+=usr/include/c++/v1/tr1/__tuple
OLD_FILES+=usr/include/c++/v1/tr1/__undef_macros
OLD_FILES+=usr/include/c++/v1/tr1/algorithm
OLD_FILES+=usr/include/c++/v1/tr1/any
OLD_FILES+=usr/include/c++/v1/tr1/array
OLD_FILES+=usr/include/c++/v1/tr1/atomic
OLD_FILES+=usr/include/c++/v1/tr1/bitset
OLD_FILES+=usr/include/c++/v1/tr1/cassert
OLD_FILES+=usr/include/c++/v1/tr1/ccomplex
OLD_FILES+=usr/include/c++/v1/tr1/cctype
OLD_FILES+=usr/include/c++/v1/tr1/cerrno
OLD_FILES+=usr/include/c++/v1/tr1/cfenv
OLD_FILES+=usr/include/c++/v1/tr1/cfloat
OLD_FILES+=usr/include/c++/v1/tr1/chrono
OLD_FILES+=usr/include/c++/v1/tr1/cinttypes
OLD_FILES+=usr/include/c++/v1/tr1/ciso646
OLD_FILES+=usr/include/c++/v1/tr1/climits
OLD_FILES+=usr/include/c++/v1/tr1/clocale
OLD_FILES+=usr/include/c++/v1/tr1/cmath
OLD_FILES+=usr/include/c++/v1/tr1/codecvt
OLD_FILES+=usr/include/c++/v1/tr1/complex
OLD_FILES+=usr/include/c++/v1/tr1/complex.h
OLD_FILES+=usr/include/c++/v1/tr1/condition_variable
OLD_FILES+=usr/include/c++/v1/tr1/csetjmp
OLD_FILES+=usr/include/c++/v1/tr1/csignal
OLD_FILES+=usr/include/c++/v1/tr1/cstdarg
OLD_FILES+=usr/include/c++/v1/tr1/cstdbool
OLD_FILES+=usr/include/c++/v1/tr1/cstddef
OLD_FILES+=usr/include/c++/v1/tr1/cstdint
OLD_FILES+=usr/include/c++/v1/tr1/cstdio
OLD_FILES+=usr/include/c++/v1/tr1/cstdlib
OLD_FILES+=usr/include/c++/v1/tr1/cstring
OLD_FILES+=usr/include/c++/v1/tr1/ctgmath
OLD_FILES+=usr/include/c++/v1/tr1/ctime
OLD_FILES+=usr/include/c++/v1/tr1/ctype.h
OLD_FILES+=usr/include/c++/v1/tr1/cwchar
OLD_FILES+=usr/include/c++/v1/tr1/cwctype
OLD_FILES+=usr/include/c++/v1/tr1/deque
OLD_FILES+=usr/include/c++/v1/tr1/errno.h
OLD_FILES+=usr/include/c++/v1/tr1/exception
OLD_FILES+=usr/include/c++/v1/tr1/float.h
OLD_FILES+=usr/include/c++/v1/tr1/forward_list
OLD_FILES+=usr/include/c++/v1/tr1/fstream
OLD_FILES+=usr/include/c++/v1/tr1/functional
OLD_FILES+=usr/include/c++/v1/tr1/future
OLD_FILES+=usr/include/c++/v1/tr1/initializer_list
OLD_FILES+=usr/include/c++/v1/tr1/inttypes.h
OLD_FILES+=usr/include/c++/v1/tr1/iomanip
OLD_FILES+=usr/include/c++/v1/tr1/ios
OLD_FILES+=usr/include/c++/v1/tr1/iosfwd
OLD_FILES+=usr/include/c++/v1/tr1/iostream
OLD_FILES+=usr/include/c++/v1/tr1/istream
OLD_FILES+=usr/include/c++/v1/tr1/iterator
OLD_FILES+=usr/include/c++/v1/tr1/limits
OLD_FILES+=usr/include/c++/v1/tr1/limits.h
OLD_FILES+=usr/include/c++/v1/tr1/list
OLD_FILES+=usr/include/c++/v1/tr1/locale
OLD_FILES+=usr/include/c++/v1/tr1/locale.h
OLD_FILES+=usr/include/c++/v1/tr1/map
OLD_FILES+=usr/include/c++/v1/tr1/math.h
OLD_FILES+=usr/include/c++/v1/tr1/memory
OLD_FILES+=usr/include/c++/v1/tr1/mutex
OLD_FILES+=usr/include/c++/v1/tr1/new
OLD_FILES+=usr/include/c++/v1/tr1/numeric
OLD_FILES+=usr/include/c++/v1/tr1/optional
OLD_FILES+=usr/include/c++/v1/tr1/ostream
OLD_FILES+=usr/include/c++/v1/tr1/queue
OLD_FILES+=usr/include/c++/v1/tr1/random
OLD_FILES+=usr/include/c++/v1/tr1/ratio
OLD_FILES+=usr/include/c++/v1/tr1/regex
OLD_FILES+=usr/include/c++/v1/tr1/scoped_allocator
OLD_FILES+=usr/include/c++/v1/tr1/set
OLD_FILES+=usr/include/c++/v1/tr1/setjmp.h
OLD_FILES+=usr/include/c++/v1/tr1/shared_mutex
OLD_FILES+=usr/include/c++/v1/tr1/sstream
OLD_FILES+=usr/include/c++/v1/tr1/stack
OLD_FILES+=usr/include/c++/v1/tr1/stdbool.h
OLD_FILES+=usr/include/c++/v1/tr1/stddef.h
OLD_FILES+=usr/include/c++/v1/tr1/stdexcept
OLD_FILES+=usr/include/c++/v1/tr1/stdint.h
OLD_FILES+=usr/include/c++/v1/tr1/stdio.h
OLD_FILES+=usr/include/c++/v1/tr1/stdlib.h
OLD_FILES+=usr/include/c++/v1/tr1/streambuf
OLD_FILES+=usr/include/c++/v1/tr1/string
OLD_FILES+=usr/include/c++/v1/tr1/string.h
OLD_FILES+=usr/include/c++/v1/tr1/string_view
OLD_FILES+=usr/include/c++/v1/tr1/strstream
OLD_FILES+=usr/include/c++/v1/tr1/system_error
OLD_FILES+=usr/include/c++/v1/tr1/tgmath.h
OLD_FILES+=usr/include/c++/v1/tr1/thread
OLD_FILES+=usr/include/c++/v1/tr1/tuple
OLD_FILES+=usr/include/c++/v1/tr1/type_traits
OLD_FILES+=usr/include/c++/v1/tr1/typeindex
OLD_FILES+=usr/include/c++/v1/tr1/typeinfo
OLD_FILES+=usr/include/c++/v1/tr1/unordered_map
OLD_FILES+=usr/include/c++/v1/tr1/unordered_set
OLD_FILES+=usr/include/c++/v1/tr1/utility
OLD_FILES+=usr/include/c++/v1/tr1/valarray
OLD_FILES+=usr/include/c++/v1/tr1/variant
OLD_FILES+=usr/include/c++/v1/tr1/vector
OLD_FILES+=usr/include/c++/v1/tr1/wchar.h
OLD_FILES+=usr/include/c++/v1/tr1/wctype.h
OLD_FILES+=usr/include/c++/v1/tuple
OLD_FILES+=usr/include/c++/v1/type_traits
OLD_FILES+=usr/include/c++/v1/typeindex
OLD_FILES+=usr/include/c++/v1/typeinfo
OLD_FILES+=usr/include/c++/v1/unordered_map
OLD_FILES+=usr/include/c++/v1/unordered_set
OLD_FILES+=usr/include/c++/v1/unwind-arm.h
OLD_FILES+=usr/include/c++/v1/unwind-itanium.h
OLD_FILES+=usr/include/c++/v1/unwind.h
OLD_FILES+=usr/include/c++/v1/utility
OLD_FILES+=usr/include/c++/v1/valarray
OLD_FILES+=usr/include/c++/v1/variant
OLD_FILES+=usr/include/c++/v1/vector
OLD_FILES+=usr/include/c++/v1/wchar.h
OLD_FILES+=usr/include/c++/v1/wctype.h
OLD_FILES+=usr/lib32/libc++.a
OLD_FILES+=usr/lib32/libc++.so
OLD_LIBS+=usr/lib32/libc++.so.1
OLD_FILES+=usr/lib32/libc++_p.a
OLD_FILES+=usr/lib32/libc++experimental.a
OLD_FILES+=usr/lib32/libc++fs.a
OLD_FILES+=usr/lib32/libcxxrt.a
OLD_FILES+=usr/lib32/libcxxrt.so
OLD_LIBS+=usr/lib32/libcxxrt.so.1
OLD_FILES+=usr/lib32/libcxxrt_p.a
OLD_DIRS+=usr/include/c++/v1/tr1
OLD_DIRS+=usr/include/c++/v1/experimental
OLD_DIRS+=usr/include/c++/v1/ext
OLD_DIRS+=usr/include/c++/v1
.endif
.if ${MK_LIBTHR} == no
OLD_LIBS+=lib/libthr.so.3
OLD_FILES+=usr/lib/libthr.a
OLD_FILES+=usr/lib/libthr_p.a
OLD_FILES+=usr/share/man/man3/libthr.3.gz
.endif
.if ${MK_LLD} == no
OLD_FILES+=usr/bin/ld.lld
.endif
.if ${MK_LLDB} == no
OLD_FILES+=usr/bin/lldb
OLD_FILES+=usr/share/man/man1/lldb.1.gz
.endif
.if ${MK_LOCALES} == no
OLD_DIRS+=usr/share/locale
OLD_DIRS+=usr/share/locale/af_ZA.ISO8859-15
OLD_FILES+=usr/share/locale/af_ZA.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/af_ZA.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/af_ZA.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/af_ZA.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/af_ZA.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/af_ZA.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/af_ZA.ISO8859-1
OLD_FILES+=usr/share/locale/af_ZA.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/af_ZA.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/af_ZA.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/af_ZA.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/af_ZA.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/af_ZA.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/af_ZA.UTF-8
OLD_FILES+=usr/share/locale/af_ZA.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/af_ZA.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/af_ZA.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/af_ZA.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/af_ZA.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/af_ZA.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/am_ET.UTF-8
OLD_FILES+=usr/share/locale/am_ET.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/am_ET.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/am_ET.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/am_ET.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/am_ET.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/am_ET.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/ar_AE.UTF-8
OLD_FILES+=usr/share/locale/ar_AE.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/ar_AE.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/ar_AE.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/ar_AE.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/ar_AE.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/ar_AE.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/ar_EG.UTF-8
OLD_FILES+=usr/share/locale/ar_EG.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/ar_EG.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/ar_EG.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/ar_EG.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/ar_EG.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/ar_EG.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/ar_JO.UTF-8
OLD_FILES+=usr/share/locale/ar_JO.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/ar_JO.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/ar_JO.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/ar_JO.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/ar_JO.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/ar_JO.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/ar_MA.UTF-8
OLD_FILES+=usr/share/locale/ar_MA.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/ar_MA.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/ar_MA.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/ar_MA.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/ar_MA.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/ar_MA.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/ar_QA.UTF-8
OLD_FILES+=usr/share/locale/ar_QA.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/ar_QA.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/ar_QA.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/ar_QA.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/ar_QA.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/ar_QA.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/ar_SA.UTF-8
OLD_FILES+=usr/share/locale/ar_SA.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/ar_SA.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/ar_SA.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/ar_SA.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/ar_SA.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/ar_SA.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/be_BY.CP1131
OLD_FILES+=usr/share/locale/be_BY.CP1131/LC_COLLATE
OLD_FILES+=usr/share/locale/be_BY.CP1131/LC_CTYPE
OLD_FILES+=usr/share/locale/be_BY.CP1131/LC_MESSAGES
OLD_FILES+=usr/share/locale/be_BY.CP1131/LC_MONETARY
OLD_FILES+=usr/share/locale/be_BY.CP1131/LC_NUMERIC
OLD_FILES+=usr/share/locale/be_BY.CP1131/LC_TIME
OLD_DIRS+=usr/share/locale/be_BY.CP1251
OLD_FILES+=usr/share/locale/be_BY.CP1251/LC_COLLATE
OLD_FILES+=usr/share/locale/be_BY.CP1251/LC_CTYPE
OLD_FILES+=usr/share/locale/be_BY.CP1251/LC_MESSAGES
OLD_FILES+=usr/share/locale/be_BY.CP1251/LC_MONETARY
OLD_FILES+=usr/share/locale/be_BY.CP1251/LC_NUMERIC
OLD_FILES+=usr/share/locale/be_BY.CP1251/LC_TIME
OLD_DIRS+=usr/share/locale/be_BY.ISO8859-5
OLD_FILES+=usr/share/locale/be_BY.ISO8859-5/LC_COLLATE
OLD_FILES+=usr/share/locale/be_BY.ISO8859-5/LC_CTYPE
OLD_FILES+=usr/share/locale/be_BY.ISO8859-5/LC_MESSAGES
OLD_FILES+=usr/share/locale/be_BY.ISO8859-5/LC_MONETARY
OLD_FILES+=usr/share/locale/be_BY.ISO8859-5/LC_NUMERIC
OLD_FILES+=usr/share/locale/be_BY.ISO8859-5/LC_TIME
OLD_DIRS+=usr/share/locale/be_BY.UTF-8
OLD_FILES+=usr/share/locale/be_BY.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/be_BY.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/be_BY.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/be_BY.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/be_BY.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/be_BY.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/bg_BG.CP1251
OLD_FILES+=usr/share/locale/bg_BG.CP1251/LC_COLLATE
OLD_FILES+=usr/share/locale/bg_BG.CP1251/LC_CTYPE
OLD_FILES+=usr/share/locale/bg_BG.CP1251/LC_MESSAGES
OLD_FILES+=usr/share/locale/bg_BG.CP1251/LC_MONETARY
OLD_FILES+=usr/share/locale/bg_BG.CP1251/LC_NUMERIC
OLD_FILES+=usr/share/locale/bg_BG.CP1251/LC_TIME
OLD_DIRS+=usr/share/locale/bg_BG.UTF-8
OLD_FILES+=usr/share/locale/bg_BG.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/bg_BG.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/bg_BG.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/bg_BG.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/bg_BG.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/bg_BG.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/ca_AD.ISO8859-1
OLD_FILES+=usr/share/locale/ca_AD.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/ca_AD.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/ca_AD.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/ca_AD.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/ca_AD.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/ca_AD.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/ca_AD.ISO8859-15
OLD_FILES+=usr/share/locale/ca_AD.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/ca_AD.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/ca_AD.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/ca_AD.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/ca_AD.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/ca_AD.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/ca_AD.UTF-8
OLD_FILES+=usr/share/locale/ca_AD.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/ca_AD.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/ca_AD.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/ca_AD.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/ca_AD.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/ca_AD.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/ca_ES.ISO8859-1
OLD_FILES+=usr/share/locale/ca_ES.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/ca_ES.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/ca_ES.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/ca_ES.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/ca_ES.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/ca_ES.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/ca_ES.ISO8859-15
OLD_FILES+=usr/share/locale/ca_ES.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/ca_ES.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/ca_ES.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/ca_ES.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/ca_ES.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/ca_ES.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/ca_ES.UTF-8
OLD_FILES+=usr/share/locale/ca_ES.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/ca_ES.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/ca_ES.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/ca_ES.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/ca_ES.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/ca_ES.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/ca_FR.ISO8859-1
OLD_FILES+=usr/share/locale/ca_FR.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/ca_FR.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/ca_FR.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/ca_FR.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/ca_FR.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/ca_FR.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/ca_FR.ISO8859-15
OLD_FILES+=usr/share/locale/ca_FR.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/ca_FR.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/ca_FR.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/ca_FR.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/ca_FR.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/ca_FR.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/ca_FR.UTF-8
OLD_FILES+=usr/share/locale/ca_FR.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/ca_FR.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/ca_FR.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/ca_FR.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/ca_FR.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/ca_FR.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/ca_IT.ISO8859-1
OLD_FILES+=usr/share/locale/ca_IT.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/ca_IT.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/ca_IT.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/ca_IT.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/ca_IT.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/ca_IT.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/ca_IT.ISO8859-15
OLD_FILES+=usr/share/locale/ca_IT.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/ca_IT.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/ca_IT.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/ca_IT.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/ca_IT.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/ca_IT.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/ca_IT.UTF-8
OLD_FILES+=usr/share/locale/ca_IT.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/ca_IT.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/ca_IT.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/ca_IT.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/ca_IT.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/ca_IT.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/cs_CZ.ISO8859-2
OLD_FILES+=usr/share/locale/cs_CZ.ISO8859-2/LC_COLLATE
OLD_FILES+=usr/share/locale/cs_CZ.ISO8859-2/LC_CTYPE
OLD_FILES+=usr/share/locale/cs_CZ.ISO8859-2/LC_MESSAGES
OLD_FILES+=usr/share/locale/cs_CZ.ISO8859-2/LC_MONETARY
OLD_FILES+=usr/share/locale/cs_CZ.ISO8859-2/LC_NUMERIC
OLD_FILES+=usr/share/locale/cs_CZ.ISO8859-2/LC_TIME
OLD_DIRS+=usr/share/locale/cs_CZ.UTF-8
OLD_FILES+=usr/share/locale/cs_CZ.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/cs_CZ.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/cs_CZ.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/cs_CZ.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/cs_CZ.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/cs_CZ.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/da_DK.ISO8859-1
OLD_FILES+=usr/share/locale/da_DK.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/da_DK.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/da_DK.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/da_DK.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/da_DK.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/da_DK.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/da_DK.ISO8859-15
OLD_FILES+=usr/share/locale/da_DK.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/da_DK.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/da_DK.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/da_DK.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/da_DK.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/da_DK.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/da_DK.UTF-8
OLD_FILES+=usr/share/locale/da_DK.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/da_DK.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/da_DK.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/da_DK.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/da_DK.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/da_DK.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/de_AT.ISO8859-1
OLD_FILES+=usr/share/locale/de_AT.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/de_AT.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/de_AT.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/de_AT.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/de_AT.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/de_AT.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/de_AT.ISO8859-15
OLD_FILES+=usr/share/locale/de_AT.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/de_AT.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/de_AT.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/de_AT.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/de_AT.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/de_AT.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/de_AT.UTF-8
OLD_FILES+=usr/share/locale/de_AT.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/de_AT.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/de_AT.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/de_AT.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/de_AT.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/de_AT.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/de_CH.ISO8859-1
OLD_FILES+=usr/share/locale/de_CH.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/de_CH.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/de_CH.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/de_CH.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/de_CH.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/de_CH.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/de_CH.ISO8859-15
OLD_FILES+=usr/share/locale/de_CH.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/de_CH.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/de_CH.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/de_CH.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/de_CH.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/de_CH.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/de_CH.UTF-8
OLD_FILES+=usr/share/locale/de_CH.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/de_CH.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/de_CH.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/de_CH.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/de_CH.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/de_CH.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/de_DE.ISO8859-1
OLD_FILES+=usr/share/locale/de_DE.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/de_DE.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/de_DE.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/de_DE.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/de_DE.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/de_DE.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/de_DE.ISO8859-15
OLD_FILES+=usr/share/locale/de_DE.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/de_DE.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/de_DE.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/de_DE.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/de_DE.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/de_DE.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/de_DE.UTF-8
OLD_FILES+=usr/share/locale/de_DE.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/de_DE.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/de_DE.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/de_DE.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/de_DE.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/de_DE.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/el_GR.ISO8859-7
OLD_FILES+=usr/share/locale/el_GR.ISO8859-7/LC_COLLATE
OLD_FILES+=usr/share/locale/el_GR.ISO8859-7/LC_CTYPE
OLD_FILES+=usr/share/locale/el_GR.ISO8859-7/LC_MESSAGES
OLD_FILES+=usr/share/locale/el_GR.ISO8859-7/LC_MONETARY
OLD_FILES+=usr/share/locale/el_GR.ISO8859-7/LC_NUMERIC
OLD_FILES+=usr/share/locale/el_GR.ISO8859-7/LC_TIME
OLD_DIRS+=usr/share/locale/el_GR.UTF-8
OLD_FILES+=usr/share/locale/el_GR.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/el_GR.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/el_GR.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/el_GR.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/el_GR.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/el_GR.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/en_AU.ISO8859-1
OLD_FILES+=usr/share/locale/en_AU.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/en_AU.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/en_AU.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_AU.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/en_AU.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_AU.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/en_AU.ISO8859-15
OLD_FILES+=usr/share/locale/en_AU.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/en_AU.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/en_AU.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_AU.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/en_AU.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_AU.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/en_AU.US-ASCII
OLD_FILES+=usr/share/locale/en_AU.US-ASCII/LC_COLLATE
OLD_FILES+=usr/share/locale/en_AU.US-ASCII/LC_CTYPE
OLD_FILES+=usr/share/locale/en_AU.US-ASCII/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_AU.US-ASCII/LC_MONETARY
OLD_FILES+=usr/share/locale/en_AU.US-ASCII/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_AU.US-ASCII/LC_TIME
OLD_DIRS+=usr/share/locale/en_AU.UTF-8
OLD_FILES+=usr/share/locale/en_AU.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/en_AU.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/en_AU.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_AU.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/en_AU.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_AU.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/en_CA.ISO8859-1
OLD_FILES+=usr/share/locale/en_CA.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/en_CA.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/en_CA.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_CA.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/en_CA.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_CA.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/en_CA.ISO8859-15
OLD_FILES+=usr/share/locale/en_CA.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/en_CA.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/en_CA.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_CA.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/en_CA.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_CA.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/en_CA.US-ASCII
OLD_FILES+=usr/share/locale/en_CA.US-ASCII/LC_COLLATE
OLD_FILES+=usr/share/locale/en_CA.US-ASCII/LC_CTYPE
OLD_FILES+=usr/share/locale/en_CA.US-ASCII/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_CA.US-ASCII/LC_MONETARY
OLD_FILES+=usr/share/locale/en_CA.US-ASCII/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_CA.US-ASCII/LC_TIME
OLD_DIRS+=usr/share/locale/en_CA.UTF-8
OLD_FILES+=usr/share/locale/en_CA.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/en_CA.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/en_CA.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_CA.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/en_CA.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_CA.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/en_GB.ISO8859-1
OLD_FILES+=usr/share/locale/en_GB.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/en_GB.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/en_GB.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_GB.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/en_GB.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_GB.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/en_GB.ISO8859-15
OLD_FILES+=usr/share/locale/en_GB.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/en_GB.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/en_GB.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_GB.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/en_GB.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_GB.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/en_GB.US-ASCII
OLD_FILES+=usr/share/locale/en_GB.US-ASCII/LC_COLLATE
OLD_FILES+=usr/share/locale/en_GB.US-ASCII/LC_CTYPE
OLD_FILES+=usr/share/locale/en_GB.US-ASCII/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_GB.US-ASCII/LC_MONETARY
OLD_FILES+=usr/share/locale/en_GB.US-ASCII/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_GB.US-ASCII/LC_TIME
OLD_DIRS+=usr/share/locale/en_GB.UTF-8
OLD_FILES+=usr/share/locale/en_GB.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/en_GB.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/en_GB.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_GB.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/en_GB.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_GB.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/en_HK.ISO8859-1
OLD_FILES+=usr/share/locale/en_HK.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/en_HK.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/en_HK.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_HK.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/en_HK.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_HK.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/en_HK.UTF-8
OLD_FILES+=usr/share/locale/en_HK.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/en_HK.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/en_HK.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_HK.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/en_HK.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_HK.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/en_IE.ISO8859-1
OLD_FILES+=usr/share/locale/en_IE.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/en_IE.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/en_IE.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_IE.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/en_IE.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_IE.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/en_IE.ISO8859-15
OLD_FILES+=usr/share/locale/en_IE.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/en_IE.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/en_IE.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_IE.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/en_IE.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_IE.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/en_IE.UTF-8
OLD_FILES+=usr/share/locale/en_IE.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/en_IE.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/en_IE.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_IE.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/en_IE.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_IE.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/en_NZ.ISO8859-1
OLD_FILES+=usr/share/locale/en_NZ.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/en_NZ.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/en_NZ.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_NZ.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/en_NZ.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_NZ.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/en_NZ.ISO8859-15
OLD_FILES+=usr/share/locale/en_NZ.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/en_NZ.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/en_NZ.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_NZ.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/en_NZ.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_NZ.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/en_NZ.US-ASCII
OLD_FILES+=usr/share/locale/en_NZ.US-ASCII/LC_COLLATE
OLD_FILES+=usr/share/locale/en_NZ.US-ASCII/LC_CTYPE
OLD_FILES+=usr/share/locale/en_NZ.US-ASCII/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_NZ.US-ASCII/LC_MONETARY
OLD_FILES+=usr/share/locale/en_NZ.US-ASCII/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_NZ.US-ASCII/LC_TIME
OLD_DIRS+=usr/share/locale/en_NZ.UTF-8
OLD_FILES+=usr/share/locale/en_NZ.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/en_NZ.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/en_NZ.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_NZ.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/en_NZ.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_NZ.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/en_PH.UTF-8
OLD_FILES+=usr/share/locale/en_PH.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/en_PH.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/en_PH.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_PH.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/en_PH.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_PH.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/en_SG.ISO8859-1
OLD_FILES+=usr/share/locale/en_SG.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/en_SG.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/en_SG.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_SG.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/en_SG.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_SG.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/en_SG.UTF-8
OLD_FILES+=usr/share/locale/en_SG.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/en_SG.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/en_SG.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_SG.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/en_SG.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_SG.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/en_US.ISO8859-1
OLD_FILES+=usr/share/locale/en_US.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/en_US.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/en_US.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_US.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/en_US.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_US.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/en_US.ISO8859-15
OLD_FILES+=usr/share/locale/en_US.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/en_US.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/en_US.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_US.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/en_US.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_US.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/en_US.US-ASCII
OLD_FILES+=usr/share/locale/en_US.US-ASCII/LC_COLLATE
OLD_FILES+=usr/share/locale/en_US.US-ASCII/LC_CTYPE
OLD_FILES+=usr/share/locale/en_US.US-ASCII/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_US.US-ASCII/LC_MONETARY
OLD_FILES+=usr/share/locale/en_US.US-ASCII/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_US.US-ASCII/LC_TIME
OLD_DIRS+=usr/share/locale/en_US.UTF-8
OLD_FILES+=usr/share/locale/en_US.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/en_US.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/en_US.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_US.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/en_US.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_US.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/en_ZA.ISO8859-1
OLD_FILES+=usr/share/locale/en_ZA.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/en_ZA.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/en_ZA.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_ZA.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/en_ZA.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_ZA.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/en_ZA.ISO8859-15
OLD_FILES+=usr/share/locale/en_ZA.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/en_ZA.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/en_ZA.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_ZA.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/en_ZA.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_ZA.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/en_ZA.US-ASCII
OLD_FILES+=usr/share/locale/en_ZA.US-ASCII/LC_COLLATE
OLD_FILES+=usr/share/locale/en_ZA.US-ASCII/LC_CTYPE
OLD_FILES+=usr/share/locale/en_ZA.US-ASCII/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_ZA.US-ASCII/LC_MONETARY
OLD_FILES+=usr/share/locale/en_ZA.US-ASCII/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_ZA.US-ASCII/LC_TIME
OLD_DIRS+=usr/share/locale/en_ZA.UTF-8
OLD_FILES+=usr/share/locale/en_ZA.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/en_ZA.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/en_ZA.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_ZA.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/en_ZA.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_ZA.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/es_AR.ISO8859-1
OLD_FILES+=usr/share/locale/es_AR.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/es_AR.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/es_AR.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/es_AR.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/es_AR.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/es_AR.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/es_AR.UTF-8
OLD_FILES+=usr/share/locale/es_AR.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/es_AR.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/es_AR.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/es_AR.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/es_AR.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/es_AR.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/es_CR.UTF-8
OLD_FILES+=usr/share/locale/es_CR.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/es_CR.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/es_CR.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/es_CR.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/es_CR.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/es_CR.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/es_ES.ISO8859-1
OLD_FILES+=usr/share/locale/es_ES.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/es_ES.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/es_ES.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/es_ES.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/es_ES.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/es_ES.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/es_ES.ISO8859-15
OLD_FILES+=usr/share/locale/es_ES.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/es_ES.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/es_ES.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/es_ES.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/es_ES.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/es_ES.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/es_ES.UTF-8
OLD_FILES+=usr/share/locale/es_ES.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/es_ES.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/es_ES.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/es_ES.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/es_ES.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/es_ES.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/es_MX.ISO8859-1
OLD_FILES+=usr/share/locale/es_MX.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/es_MX.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/es_MX.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/es_MX.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/es_MX.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/es_MX.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/es_MX.UTF-8
OLD_FILES+=usr/share/locale/es_MX.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/es_MX.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/es_MX.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/es_MX.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/es_MX.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/es_MX.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/et_EE.ISO8859-1
OLD_FILES+=usr/share/locale/et_EE.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/et_EE.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/et_EE.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/et_EE.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/et_EE.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/et_EE.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/et_EE.ISO8859-15
OLD_FILES+=usr/share/locale/et_EE.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/et_EE.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/et_EE.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/et_EE.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/et_EE.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/et_EE.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/et_EE.UTF-8
OLD_FILES+=usr/share/locale/et_EE.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/et_EE.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/et_EE.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/et_EE.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/et_EE.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/et_EE.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/eu_ES.ISO8859-1
OLD_FILES+=usr/share/locale/eu_ES.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/eu_ES.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/eu_ES.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/eu_ES.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/eu_ES.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/eu_ES.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/eu_ES.ISO8859-15
OLD_FILES+=usr/share/locale/eu_ES.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/eu_ES.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/eu_ES.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/eu_ES.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/eu_ES.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/eu_ES.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/eu_ES.UTF-8
OLD_FILES+=usr/share/locale/eu_ES.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/eu_ES.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/eu_ES.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/eu_ES.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/eu_ES.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/eu_ES.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/fi_FI.ISO8859-1
OLD_FILES+=usr/share/locale/fi_FI.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/fi_FI.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/fi_FI.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/fi_FI.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/fi_FI.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/fi_FI.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/fi_FI.ISO8859-15
OLD_FILES+=usr/share/locale/fi_FI.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/fi_FI.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/fi_FI.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/fi_FI.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/fi_FI.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/fi_FI.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/fi_FI.UTF-8
OLD_FILES+=usr/share/locale/fi_FI.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/fi_FI.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/fi_FI.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/fi_FI.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/fi_FI.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/fi_FI.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/fr_BE.ISO8859-1
OLD_FILES+=usr/share/locale/fr_BE.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/fr_BE.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/fr_BE.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/fr_BE.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/fr_BE.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/fr_BE.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/fr_BE.ISO8859-15
OLD_FILES+=usr/share/locale/fr_BE.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/fr_BE.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/fr_BE.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/fr_BE.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/fr_BE.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/fr_BE.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/fr_BE.UTF-8
OLD_FILES+=usr/share/locale/fr_BE.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/fr_BE.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/fr_BE.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/fr_BE.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/fr_BE.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/fr_BE.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/fr_CA.ISO8859-1
OLD_FILES+=usr/share/locale/fr_CA.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/fr_CA.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/fr_CA.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/fr_CA.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/fr_CA.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/fr_CA.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/fr_CA.ISO8859-15
OLD_FILES+=usr/share/locale/fr_CA.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/fr_CA.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/fr_CA.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/fr_CA.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/fr_CA.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/fr_CA.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/fr_CA.UTF-8
OLD_FILES+=usr/share/locale/fr_CA.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/fr_CA.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/fr_CA.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/fr_CA.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/fr_CA.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/fr_CA.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/fr_CH.ISO8859-1
OLD_FILES+=usr/share/locale/fr_CH.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/fr_CH.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/fr_CH.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/fr_CH.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/fr_CH.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/fr_CH.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/fr_CH.ISO8859-15
OLD_FILES+=usr/share/locale/fr_CH.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/fr_CH.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/fr_CH.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/fr_CH.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/fr_CH.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/fr_CH.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/fr_CH.UTF-8
OLD_FILES+=usr/share/locale/fr_CH.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/fr_CH.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/fr_CH.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/fr_CH.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/fr_CH.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/fr_CH.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/fr_FR.ISO8859-1
OLD_FILES+=usr/share/locale/fr_FR.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/fr_FR.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/fr_FR.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/fr_FR.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/fr_FR.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/fr_FR.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/fr_FR.ISO8859-15
OLD_FILES+=usr/share/locale/fr_FR.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/fr_FR.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/fr_FR.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/fr_FR.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/fr_FR.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/fr_FR.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/fr_FR.UTF-8
OLD_FILES+=usr/share/locale/fr_FR.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/fr_FR.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/fr_FR.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/fr_FR.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/fr_FR.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/fr_FR.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/he_IL.UTF-8
OLD_FILES+=usr/share/locale/he_IL.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/he_IL.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/he_IL.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/he_IL.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/he_IL.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/he_IL.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/hi_IN.ISCII-DEV
OLD_FILES+=usr/share/locale/hi_IN.ISCII-DEV/LC_COLLATE
OLD_FILES+=usr/share/locale/hi_IN.ISCII-DEV/LC_CTYPE
OLD_FILES+=usr/share/locale/hi_IN.ISCII-DEV/LC_MESSAGES
OLD_FILES+=usr/share/locale/hi_IN.ISCII-DEV/LC_MONETARY
OLD_FILES+=usr/share/locale/hi_IN.ISCII-DEV/LC_NUMERIC
OLD_FILES+=usr/share/locale/hi_IN.ISCII-DEV/LC_TIME
OLD_DIRS+=usr/share/locale/hi_IN.UTF-8
OLD_FILES+=usr/share/locale/hi_IN.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/hi_IN.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/hi_IN.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/hi_IN.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/hi_IN.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/hi_IN.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/hr_HR.ISO8859-2
OLD_FILES+=usr/share/locale/hr_HR.ISO8859-2/LC_COLLATE
OLD_FILES+=usr/share/locale/hr_HR.ISO8859-2/LC_CTYPE
OLD_FILES+=usr/share/locale/hr_HR.ISO8859-2/LC_MESSAGES
OLD_FILES+=usr/share/locale/hr_HR.ISO8859-2/LC_MONETARY
OLD_FILES+=usr/share/locale/hr_HR.ISO8859-2/LC_NUMERIC
OLD_FILES+=usr/share/locale/hr_HR.ISO8859-2/LC_TIME
OLD_DIRS+=usr/share/locale/hr_HR.UTF-8
OLD_FILES+=usr/share/locale/hr_HR.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/hr_HR.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/hr_HR.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/hr_HR.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/hr_HR.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/hr_HR.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/hu_HU.ISO8859-2
OLD_FILES+=usr/share/locale/hu_HU.ISO8859-2/LC_COLLATE
OLD_FILES+=usr/share/locale/hu_HU.ISO8859-2/LC_CTYPE
OLD_FILES+=usr/share/locale/hu_HU.ISO8859-2/LC_MESSAGES
OLD_FILES+=usr/share/locale/hu_HU.ISO8859-2/LC_MONETARY
OLD_FILES+=usr/share/locale/hu_HU.ISO8859-2/LC_NUMERIC
OLD_FILES+=usr/share/locale/hu_HU.ISO8859-2/LC_TIME
OLD_DIRS+=usr/share/locale/hu_HU.UTF-8
OLD_FILES+=usr/share/locale/hu_HU.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/hu_HU.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/hu_HU.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/hu_HU.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/hu_HU.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/hu_HU.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/hy_AM.ARMSCII-8
OLD_FILES+=usr/share/locale/hy_AM.ARMSCII-8/LC_COLLATE
OLD_FILES+=usr/share/locale/hy_AM.ARMSCII-8/LC_CTYPE
OLD_FILES+=usr/share/locale/hy_AM.ARMSCII-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/hy_AM.ARMSCII-8/LC_MONETARY
OLD_FILES+=usr/share/locale/hy_AM.ARMSCII-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/hy_AM.ARMSCII-8/LC_TIME
OLD_DIRS+=usr/share/locale/hy_AM.UTF-8
OLD_FILES+=usr/share/locale/hy_AM.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/hy_AM.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/hy_AM.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/hy_AM.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/hy_AM.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/hy_AM.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/is_IS.ISO8859-1
OLD_FILES+=usr/share/locale/is_IS.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/is_IS.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/is_IS.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/is_IS.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/is_IS.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/is_IS.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/is_IS.ISO8859-15
OLD_FILES+=usr/share/locale/is_IS.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/is_IS.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/is_IS.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/is_IS.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/is_IS.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/is_IS.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/is_IS.UTF-8
OLD_FILES+=usr/share/locale/is_IS.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/is_IS.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/is_IS.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/is_IS.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/is_IS.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/is_IS.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/it_CH.ISO8859-1
OLD_FILES+=usr/share/locale/it_CH.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/it_CH.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/it_CH.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/it_CH.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/it_CH.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/it_CH.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/it_CH.ISO8859-15
OLD_FILES+=usr/share/locale/it_CH.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/it_CH.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/it_CH.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/it_CH.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/it_CH.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/it_CH.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/it_CH.UTF-8
OLD_FILES+=usr/share/locale/it_CH.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/it_CH.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/it_CH.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/it_CH.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/it_CH.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/it_CH.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/it_IT.ISO8859-1
OLD_FILES+=usr/share/locale/it_IT.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/it_IT.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/it_IT.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/it_IT.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/it_IT.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/it_IT.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/it_IT.ISO8859-15
OLD_FILES+=usr/share/locale/it_IT.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/it_IT.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/it_IT.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/it_IT.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/it_IT.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/it_IT.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/it_IT.UTF-8
OLD_FILES+=usr/share/locale/it_IT.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/it_IT.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/it_IT.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/it_IT.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/it_IT.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/it_IT.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/ja_JP.eucJP
OLD_FILES+=usr/share/locale/ja_JP.eucJP/LC_COLLATE
OLD_FILES+=usr/share/locale/ja_JP.eucJP/LC_CTYPE
OLD_FILES+=usr/share/locale/ja_JP.eucJP/LC_MESSAGES
OLD_FILES+=usr/share/locale/ja_JP.eucJP/LC_MONETARY
OLD_FILES+=usr/share/locale/ja_JP.eucJP/LC_NUMERIC
OLD_FILES+=usr/share/locale/ja_JP.eucJP/LC_TIME
OLD_DIRS+=usr/share/locale/ja_JP.SJIS
OLD_FILES+=usr/share/locale/ja_JP.SJIS/LC_COLLATE
OLD_FILES+=usr/share/locale/ja_JP.SJIS/LC_CTYPE
OLD_FILES+=usr/share/locale/ja_JP.SJIS/LC_MESSAGES
OLD_FILES+=usr/share/locale/ja_JP.SJIS/LC_MONETARY
OLD_FILES+=usr/share/locale/ja_JP.SJIS/LC_NUMERIC
OLD_FILES+=usr/share/locale/ja_JP.SJIS/LC_TIME
OLD_DIRS+=usr/share/locale/ja_JP.UTF-8
OLD_FILES+=usr/share/locale/ja_JP.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/ja_JP.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/ja_JP.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/ja_JP.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/ja_JP.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/ja_JP.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/kk_KZ.UTF-8
OLD_FILES+=usr/share/locale/kk_KZ.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/kk_KZ.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/kk_KZ.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/kk_KZ.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/kk_KZ.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/kk_KZ.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/ko_KR.CP949
OLD_FILES+=usr/share/locale/ko_KR.CP949/LC_COLLATE
OLD_FILES+=usr/share/locale/ko_KR.CP949/LC_CTYPE
OLD_FILES+=usr/share/locale/ko_KR.CP949/LC_MESSAGES
OLD_FILES+=usr/share/locale/ko_KR.CP949/LC_MONETARY
OLD_FILES+=usr/share/locale/ko_KR.CP949/LC_NUMERIC
OLD_FILES+=usr/share/locale/ko_KR.CP949/LC_TIME
OLD_DIRS+=usr/share/locale/ko_KR.eucKR
OLD_FILES+=usr/share/locale/ko_KR.eucKR/LC_COLLATE
OLD_FILES+=usr/share/locale/ko_KR.eucKR/LC_CTYPE
OLD_FILES+=usr/share/locale/ko_KR.eucKR/LC_MESSAGES
OLD_FILES+=usr/share/locale/ko_KR.eucKR/LC_MONETARY
OLD_FILES+=usr/share/locale/ko_KR.eucKR/LC_NUMERIC
OLD_FILES+=usr/share/locale/ko_KR.eucKR/LC_TIME
OLD_DIRS+=usr/share/locale/ko_KR.UTF-8
OLD_FILES+=usr/share/locale/ko_KR.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/ko_KR.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/ko_KR.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/ko_KR.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/ko_KR.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/ko_KR.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/lt_LT.ISO8859-13
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-13/LC_COLLATE
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-13/LC_CTYPE
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-13/LC_MESSAGES
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-13/LC_MONETARY
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-13/LC_NUMERIC
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-13/LC_TIME
OLD_DIRS+=usr/share/locale/lt_LT.UTF-8
OLD_FILES+=usr/share/locale/lt_LT.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/lt_LT.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/lt_LT.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/lt_LT.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/lt_LT.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/lt_LT.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/lv_LV.ISO8859-13
OLD_FILES+=usr/share/locale/lv_LV.ISO8859-13/LC_COLLATE
OLD_FILES+=usr/share/locale/lv_LV.ISO8859-13/LC_CTYPE
OLD_FILES+=usr/share/locale/lv_LV.ISO8859-13/LC_MESSAGES
OLD_FILES+=usr/share/locale/lv_LV.ISO8859-13/LC_MONETARY
OLD_FILES+=usr/share/locale/lv_LV.ISO8859-13/LC_NUMERIC
OLD_FILES+=usr/share/locale/lv_LV.ISO8859-13/LC_TIME
OLD_DIRS+=usr/share/locale/lv_LV.UTF-8
OLD_FILES+=usr/share/locale/lv_LV.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/lv_LV.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/lv_LV.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/lv_LV.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/lv_LV.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/lv_LV.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/mn_MN.UTF-8
OLD_FILES+=usr/share/locale/mn_MN.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/mn_MN.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/mn_MN.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/mn_MN.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/mn_MN.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/mn_MN.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/nb_NO.ISO8859-1
OLD_FILES+=usr/share/locale/nb_NO.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/nb_NO.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/nb_NO.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/nb_NO.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/nb_NO.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/nb_NO.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/nb_NO.ISO8859-15
OLD_FILES+=usr/share/locale/nb_NO.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/nb_NO.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/nb_NO.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/nb_NO.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/nb_NO.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/nb_NO.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/nb_NO.UTF-8
OLD_FILES+=usr/share/locale/nb_NO.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/nb_NO.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/nb_NO.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/nb_NO.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/nb_NO.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/nb_NO.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/nl_BE.ISO8859-1
OLD_FILES+=usr/share/locale/nl_BE.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/nl_BE.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/nl_BE.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/nl_BE.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/nl_BE.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/nl_BE.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/nl_BE.ISO8859-15
OLD_FILES+=usr/share/locale/nl_BE.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/nl_BE.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/nl_BE.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/nl_BE.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/nl_BE.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/nl_BE.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/nl_BE.UTF-8
OLD_FILES+=usr/share/locale/nl_BE.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/nl_BE.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/nl_BE.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/nl_BE.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/nl_BE.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/nl_BE.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/nl_NL.ISO8859-1
OLD_FILES+=usr/share/locale/nl_NL.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/nl_NL.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/nl_NL.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/nl_NL.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/nl_NL.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/nl_NL.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/nl_NL.ISO8859-15
OLD_FILES+=usr/share/locale/nl_NL.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/nl_NL.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/nl_NL.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/nl_NL.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/nl_NL.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/nl_NL.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/nl_NL.UTF-8
OLD_FILES+=usr/share/locale/nl_NL.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/nl_NL.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/nl_NL.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/nl_NL.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/nl_NL.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/nl_NL.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/nn_NO.ISO8859-1
OLD_FILES+=usr/share/locale/nn_NO.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/nn_NO.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/nn_NO.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/nn_NO.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/nn_NO.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/nn_NO.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/nn_NO.ISO8859-15
OLD_FILES+=usr/share/locale/nn_NO.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/nn_NO.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/nn_NO.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/nn_NO.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/nn_NO.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/nn_NO.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/nn_NO.UTF-8
OLD_FILES+=usr/share/locale/nn_NO.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/nn_NO.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/nn_NO.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/nn_NO.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/nn_NO.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/nn_NO.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/pl_PL.ISO8859-2
OLD_FILES+=usr/share/locale/pl_PL.ISO8859-2/LC_COLLATE
OLD_FILES+=usr/share/locale/pl_PL.ISO8859-2/LC_CTYPE
OLD_FILES+=usr/share/locale/pl_PL.ISO8859-2/LC_MESSAGES
OLD_FILES+=usr/share/locale/pl_PL.ISO8859-2/LC_MONETARY
OLD_FILES+=usr/share/locale/pl_PL.ISO8859-2/LC_NUMERIC
OLD_FILES+=usr/share/locale/pl_PL.ISO8859-2/LC_TIME
OLD_DIRS+=usr/share/locale/pl_PL.UTF-8
OLD_FILES+=usr/share/locale/pl_PL.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/pl_PL.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/pl_PL.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/pl_PL.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/pl_PL.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/pl_PL.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/pt_BR.ISO8859-1
OLD_FILES+=usr/share/locale/pt_BR.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/pt_BR.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/pt_BR.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/pt_BR.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/pt_BR.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/pt_BR.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/pt_BR.UTF-8
OLD_FILES+=usr/share/locale/pt_BR.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/pt_BR.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/pt_BR.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/pt_BR.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/pt_BR.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/pt_BR.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/pt_PT.ISO8859-1
OLD_FILES+=usr/share/locale/pt_PT.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/pt_PT.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/pt_PT.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/pt_PT.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/pt_PT.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/pt_PT.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/pt_PT.ISO8859-15
OLD_FILES+=usr/share/locale/pt_PT.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/pt_PT.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/pt_PT.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/pt_PT.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/pt_PT.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/pt_PT.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/pt_PT.UTF-8
OLD_FILES+=usr/share/locale/pt_PT.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/pt_PT.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/pt_PT.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/pt_PT.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/pt_PT.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/pt_PT.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/ro_RO.ISO8859-2
OLD_FILES+=usr/share/locale/ro_RO.ISO8859-2/LC_COLLATE
OLD_FILES+=usr/share/locale/ro_RO.ISO8859-2/LC_CTYPE
OLD_FILES+=usr/share/locale/ro_RO.ISO8859-2/LC_MESSAGES
OLD_FILES+=usr/share/locale/ro_RO.ISO8859-2/LC_MONETARY
OLD_FILES+=usr/share/locale/ro_RO.ISO8859-2/LC_NUMERIC
OLD_FILES+=usr/share/locale/ro_RO.ISO8859-2/LC_TIME
OLD_DIRS+=usr/share/locale/ro_RO.UTF-8
OLD_FILES+=usr/share/locale/ro_RO.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/ro_RO.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/ro_RO.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/ro_RO.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/ro_RO.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/ro_RO.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/ru_RU.CP1251
OLD_FILES+=usr/share/locale/ru_RU.CP1251/LC_COLLATE
OLD_FILES+=usr/share/locale/ru_RU.CP1251/LC_CTYPE
OLD_FILES+=usr/share/locale/ru_RU.CP1251/LC_MESSAGES
OLD_FILES+=usr/share/locale/ru_RU.CP1251/LC_MONETARY
OLD_FILES+=usr/share/locale/ru_RU.CP1251/LC_NUMERIC
OLD_FILES+=usr/share/locale/ru_RU.CP1251/LC_TIME
OLD_DIRS+=usr/share/locale/ru_RU.CP866
OLD_FILES+=usr/share/locale/ru_RU.CP866/LC_COLLATE
OLD_FILES+=usr/share/locale/ru_RU.CP866/LC_CTYPE
OLD_FILES+=usr/share/locale/ru_RU.CP866/LC_MESSAGES
OLD_FILES+=usr/share/locale/ru_RU.CP866/LC_MONETARY
OLD_FILES+=usr/share/locale/ru_RU.CP866/LC_NUMERIC
OLD_FILES+=usr/share/locale/ru_RU.CP866/LC_TIME
OLD_DIRS+=usr/share/locale/ru_RU.ISO8859-5
OLD_FILES+=usr/share/locale/ru_RU.ISO8859-5/LC_COLLATE
OLD_FILES+=usr/share/locale/ru_RU.ISO8859-5/LC_CTYPE
OLD_FILES+=usr/share/locale/ru_RU.ISO8859-5/LC_MESSAGES
OLD_FILES+=usr/share/locale/ru_RU.ISO8859-5/LC_MONETARY
OLD_FILES+=usr/share/locale/ru_RU.ISO8859-5/LC_NUMERIC
OLD_FILES+=usr/share/locale/ru_RU.ISO8859-5/LC_TIME
OLD_DIRS+=usr/share/locale/ru_RU.KOI8-R
OLD_FILES+=usr/share/locale/ru_RU.KOI8-R/LC_COLLATE
OLD_FILES+=usr/share/locale/ru_RU.KOI8-R/LC_CTYPE
OLD_FILES+=usr/share/locale/ru_RU.KOI8-R/LC_MESSAGES
OLD_FILES+=usr/share/locale/ru_RU.KOI8-R/LC_MONETARY
OLD_FILES+=usr/share/locale/ru_RU.KOI8-R/LC_NUMERIC
OLD_FILES+=usr/share/locale/ru_RU.KOI8-R/LC_TIME
OLD_DIRS+=usr/share/locale/ru_RU.UTF-8
OLD_FILES+=usr/share/locale/ru_RU.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/ru_RU.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/ru_RU.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/ru_RU.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/ru_RU.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/ru_RU.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/se_FI.UTF-8
OLD_FILES+=usr/share/locale/se_FI.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/se_FI.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/se_FI.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/se_FI.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/se_FI.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/se_FI.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/se_NO.UTF-8
OLD_FILES+=usr/share/locale/se_NO.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/se_NO.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/se_NO.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/se_NO.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/se_NO.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/se_NO.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/sk_SK.ISO8859-2
OLD_FILES+=usr/share/locale/sk_SK.ISO8859-2/LC_COLLATE
OLD_FILES+=usr/share/locale/sk_SK.ISO8859-2/LC_CTYPE
OLD_FILES+=usr/share/locale/sk_SK.ISO8859-2/LC_MESSAGES
OLD_FILES+=usr/share/locale/sk_SK.ISO8859-2/LC_MONETARY
OLD_FILES+=usr/share/locale/sk_SK.ISO8859-2/LC_NUMERIC
OLD_FILES+=usr/share/locale/sk_SK.ISO8859-2/LC_TIME
OLD_DIRS+=usr/share/locale/sk_SK.UTF-8
OLD_FILES+=usr/share/locale/sk_SK.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/sk_SK.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/sk_SK.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/sk_SK.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/sk_SK.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/sk_SK.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/sl_SI.ISO8859-2
OLD_FILES+=usr/share/locale/sl_SI.ISO8859-2/LC_COLLATE
OLD_FILES+=usr/share/locale/sl_SI.ISO8859-2/LC_CTYPE
OLD_FILES+=usr/share/locale/sl_SI.ISO8859-2/LC_MESSAGES
OLD_FILES+=usr/share/locale/sl_SI.ISO8859-2/LC_MONETARY
OLD_FILES+=usr/share/locale/sl_SI.ISO8859-2/LC_NUMERIC
OLD_FILES+=usr/share/locale/sl_SI.ISO8859-2/LC_TIME
OLD_DIRS+=usr/share/locale/sl_SI.UTF-8
OLD_FILES+=usr/share/locale/sl_SI.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/sl_SI.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/sl_SI.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/sl_SI.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/sl_SI.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/sl_SI.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/sr_RS.ISO8859-2
OLD_FILES+=usr/share/locale/sr_RS.ISO8859-2/LC_COLLATE
OLD_FILES+=usr/share/locale/sr_RS.ISO8859-2/LC_CTYPE
OLD_FILES+=usr/share/locale/sr_RS.ISO8859-2/LC_MESSAGES
OLD_FILES+=usr/share/locale/sr_RS.ISO8859-2/LC_MONETARY
OLD_FILES+=usr/share/locale/sr_RS.ISO8859-2/LC_NUMERIC
OLD_FILES+=usr/share/locale/sr_RS.ISO8859-2/LC_TIME
OLD_DIRS+=usr/share/locale/sr_RS.ISO8859-5
OLD_FILES+=usr/share/locale/sr_RS.ISO8859-5/LC_COLLATE
OLD_FILES+=usr/share/locale/sr_RS.ISO8859-5/LC_CTYPE
OLD_FILES+=usr/share/locale/sr_RS.ISO8859-5/LC_MESSAGES
OLD_FILES+=usr/share/locale/sr_RS.ISO8859-5/LC_MONETARY
OLD_FILES+=usr/share/locale/sr_RS.ISO8859-5/LC_NUMERIC
OLD_FILES+=usr/share/locale/sr_RS.ISO8859-5/LC_TIME
OLD_DIRS+=usr/share/locale/sr_RS.UTF-8
OLD_FILES+=usr/share/locale/sr_RS.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/sr_RS.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/sr_RS.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/sr_RS.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/sr_RS.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/sr_RS.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/sr_RS.UTF-8@latin
OLD_FILES+=usr/share/locale/sr_RS.UTF-8@latin/LC_COLLATE
OLD_FILES+=usr/share/locale/sr_RS.UTF-8@latin/LC_CTYPE
OLD_FILES+=usr/share/locale/sr_RS.UTF-8@latin/LC_MESSAGES
OLD_FILES+=usr/share/locale/sr_RS.UTF-8@latin/LC_MONETARY
OLD_FILES+=usr/share/locale/sr_RS.UTF-8@latin/LC_NUMERIC
OLD_FILES+=usr/share/locale/sr_RS.UTF-8@latin/LC_TIME
OLD_DIRS+=usr/share/locale/sv_FI.ISO8859-1
OLD_FILES+=usr/share/locale/sv_FI.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/sv_FI.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/sv_FI.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/sv_FI.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/sv_FI.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/sv_FI.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/sv_FI.ISO8859-15
OLD_FILES+=usr/share/locale/sv_FI.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/sv_FI.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/sv_FI.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/sv_FI.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/sv_FI.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/sv_FI.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/sv_FI.UTF-8
OLD_FILES+=usr/share/locale/sv_FI.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/sv_FI.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/sv_FI.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/sv_FI.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/sv_FI.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/sv_FI.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/sv_SE.ISO8859-1
OLD_FILES+=usr/share/locale/sv_SE.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/sv_SE.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/sv_SE.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/sv_SE.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/sv_SE.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/sv_SE.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/sv_SE.ISO8859-15
OLD_FILES+=usr/share/locale/sv_SE.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/sv_SE.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/sv_SE.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/sv_SE.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/sv_SE.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/sv_SE.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/sv_SE.UTF-8
OLD_FILES+=usr/share/locale/sv_SE.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/sv_SE.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/sv_SE.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/sv_SE.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/sv_SE.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/sv_SE.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/tr_TR.ISO8859-9
OLD_FILES+=usr/share/locale/tr_TR.ISO8859-9/LC_COLLATE
OLD_FILES+=usr/share/locale/tr_TR.ISO8859-9/LC_CTYPE
OLD_FILES+=usr/share/locale/tr_TR.ISO8859-9/LC_MESSAGES
OLD_FILES+=usr/share/locale/tr_TR.ISO8859-9/LC_MONETARY
OLD_FILES+=usr/share/locale/tr_TR.ISO8859-9/LC_NUMERIC
OLD_FILES+=usr/share/locale/tr_TR.ISO8859-9/LC_TIME
OLD_DIRS+=usr/share/locale/tr_TR.UTF-8
OLD_FILES+=usr/share/locale/tr_TR.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/tr_TR.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/tr_TR.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/tr_TR.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/tr_TR.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/tr_TR.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/uk_UA.CP1251
OLD_FILES+=usr/share/locale/uk_UA.CP1251/LC_COLLATE
OLD_FILES+=usr/share/locale/uk_UA.CP1251/LC_CTYPE
OLD_FILES+=usr/share/locale/uk_UA.CP1251/LC_MESSAGES
OLD_FILES+=usr/share/locale/uk_UA.CP1251/LC_MONETARY
OLD_FILES+=usr/share/locale/uk_UA.CP1251/LC_NUMERIC
OLD_FILES+=usr/share/locale/uk_UA.CP1251/LC_TIME
OLD_DIRS+=usr/share/locale/uk_UA.ISO8859-5
OLD_FILES+=usr/share/locale/uk_UA.ISO8859-5/LC_COLLATE
OLD_FILES+=usr/share/locale/uk_UA.ISO8859-5/LC_CTYPE
OLD_FILES+=usr/share/locale/uk_UA.ISO8859-5/LC_MESSAGES
OLD_FILES+=usr/share/locale/uk_UA.ISO8859-5/LC_MONETARY
OLD_FILES+=usr/share/locale/uk_UA.ISO8859-5/LC_NUMERIC
OLD_FILES+=usr/share/locale/uk_UA.ISO8859-5/LC_TIME
OLD_DIRS+=usr/share/locale/uk_UA.KOI8-U
OLD_FILES+=usr/share/locale/uk_UA.KOI8-U/LC_COLLATE
OLD_FILES+=usr/share/locale/uk_UA.KOI8-U/LC_CTYPE
OLD_FILES+=usr/share/locale/uk_UA.KOI8-U/LC_MESSAGES
OLD_FILES+=usr/share/locale/uk_UA.KOI8-U/LC_MONETARY
OLD_FILES+=usr/share/locale/uk_UA.KOI8-U/LC_NUMERIC
OLD_FILES+=usr/share/locale/uk_UA.KOI8-U/LC_TIME
OLD_DIRS+=usr/share/locale/uk_UA.UTF-8
OLD_FILES+=usr/share/locale/uk_UA.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/uk_UA.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/uk_UA.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/uk_UA.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/uk_UA.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/uk_UA.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/zh_CN.eucCN
OLD_FILES+=usr/share/locale/zh_CN.eucCN/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_CN.eucCN/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_CN.eucCN/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_CN.eucCN/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_CN.eucCN/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_CN.eucCN/LC_TIME
OLD_DIRS+=usr/share/locale/zh_CN.GB18030
OLD_FILES+=usr/share/locale/zh_CN.GB18030/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_CN.GB18030/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_CN.GB18030/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_CN.GB18030/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_CN.GB18030/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_CN.GB18030/LC_TIME
OLD_DIRS+=usr/share/locale/zh_CN.GB2312
OLD_FILES+=usr/share/locale/zh_CN.GB2312/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_CN.GB2312/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_CN.GB2312/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_CN.GB2312/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_CN.GB2312/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_CN.GB2312/LC_TIME
OLD_DIRS+=usr/share/locale/zh_CN.GBK
OLD_FILES+=usr/share/locale/zh_CN.GBK/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_CN.GBK/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_CN.GBK/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_CN.GBK/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_CN.GBK/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_CN.GBK/LC_TIME
OLD_DIRS+=usr/share/locale/zh_CN.UTF-8
OLD_FILES+=usr/share/locale/zh_CN.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_CN.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_CN.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_CN.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_CN.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_CN.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/zh_HK.UTF-8
OLD_FILES+=usr/share/locale/zh_HK.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_HK.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_HK.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_HK.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_HK.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_HK.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/zh_TW.Big5
OLD_FILES+=usr/share/locale/zh_TW.Big5/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_TW.Big5/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_TW.Big5/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_TW.Big5/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_TW.Big5/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_TW.Big5/LC_TIME
OLD_DIRS+=usr/share/locale/zh_TW.UTF-8
OLD_FILES+=usr/share/locale/zh_TW.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_TW.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_TW.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_TW.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_TW.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_TW.UTF-8/LC_TIME
.endif
.if ${MK_LOCATE} == no
OLD_FILES+=etc/locate.rc
OLD_FILES+=etc/periodic/weekly/310.locate
OLD_FILES+=usr/bin/locate
OLD_FILES+=usr/libexec/locate.bigram
OLD_FILES+=usr/libexec/locate.code
OLD_FILES+=usr/libexec/locate.concatdb
OLD_FILES+=usr/libexec/locate.mklocatedb
OLD_FILES+=usr/libexec/locate.updatedb
OLD_FILES+=usr/share/man/man1/locate.1.gz
OLD_FILES+=usr/share/man/man8/locate.updatedb.8.gz
OLD_FILES+=usr/share/man/man8/updatedb.8.gz
.endif
.if ${MK_LPR} == no
OLD_FILES+=etc/hosts.lpd
OLD_FILES+=etc/printcap
OLD_FILES+=etc/newsyslog.conf.d/lpr.conf
OLD_FILES+=etc/rc.d/lpd
OLD_FILES+=etc/syslog.d/lpr.conf
OLD_FILES+=usr/bin/lp
OLD_FILES+=usr/bin/lpq
OLD_FILES+=usr/bin/lpr
OLD_FILES+=usr/bin/lprm
OLD_FILES+=usr/libexec/lpr/ru/bjc-240.sh.sample
OLD_FILES+=usr/libexec/lpr/ru/koi2alt
OLD_FILES+=usr/libexec/lpr/ru/koi2855
OLD_DIRS+=usr/libexec/lpr/ru
OLD_FILES+=usr/libexec/lpr/lpf
OLD_DIRS+=usr/libexec/lpr
OLD_FILES+=usr/sbin/chkprintcap
OLD_FILES+=usr/sbin/lpc
OLD_FILES+=usr/sbin/lpd
OLD_FILES+=usr/sbin/lptest
OLD_FILES+=usr/sbin/pac
OLD_FILES+=usr/share/doc/smm/07.lpd/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/07.lpd
OLD_FILES+=usr/share/examples/etc/hosts.lpd
OLD_FILES+=usr/share/examples/etc/printcap
OLD_FILES+=usr/share/man/man1/lp.1.gz
OLD_FILES+=usr/share/man/man1/lpq.1.gz
OLD_FILES+=usr/share/man/man1/lpr.1.gz
OLD_FILES+=usr/share/man/man1/lprm.1.gz
OLD_FILES+=usr/share/man/man1/lptest.1.gz
OLD_FILES+=usr/share/man/man5/printcap.5.gz
OLD_FILES+=usr/share/man/man8/chkprintcap.8.gz
OLD_FILES+=usr/share/man/man8/lpc.8.gz
OLD_FILES+=usr/share/man/man8/lpd.8.gz
OLD_FILES+=usr/share/man/man8/pac.8.gz
.endif
.if ${MK_MAIL} == no
OLD_FILES+=etc/aliases
OLD_FILES+=etc/mail.rc
OLD_FILES+=etc/mail/aliases
OLD_FILES+=etc/mail/mailer.conf
OLD_FILES+=etc/periodic/daily/130.clean-msgs
OLD_FILES+=usr/bin/Mail
OLD_FILES+=usr/bin/biff
OLD_FILES+=usr/bin/from
OLD_FILES+=usr/bin/mail
OLD_FILES+=usr/bin/mailx
OLD_FILES+=usr/bin/msgs
OLD_FILES+=usr/libexec/comsat
OLD_FILES+=usr/share/examples/etc/mail.rc
OLD_FILES+=usr/share/man/man1/Mail.1.gz
OLD_FILES+=usr/share/man/man1/biff.1.gz
OLD_FILES+=usr/share/man/man1/from.1.gz
OLD_FILES+=usr/share/man/man1/mail.1.gz
OLD_FILES+=usr/share/man/man1/mailx.1.gz
OLD_FILES+=usr/share/man/man1/msgs.1.gz
OLD_FILES+=usr/share/man/man8/comsat.8.gz
OLD_FILES+=usr/share/misc/mail.help
OLD_FILES+=usr/share/misc/mail.tildehelp
.endif
.if ${MK_MAILWRAPPER} == no
OLD_FILES+=etc/mail/mailer.conf
# Don't remove these; in the no-mailwrapper case the installed links are:
# /usr/sbin/sendmail -> /usr/sbin/mailwrapper
# /usr/sbin/mailwrapper -> /usr/libexec/sendmail/sendmail
#OLD_FILES+=usr/sbin/mailwrapper
OLD_FILES+=usr/share/man/man8/mailwrapper.8.gz
.endif
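# For illustration only (a sketch of the stock format, not an exhaustive
# listing): mailer.conf(5), removed in the MK_MAILWRAPPER block above, is
# what mailwrapper reads to map the traditional program names onto the
# real MTA binary, e.g.:
#
#	sendmail	/usr/libexec/sendmail/sendmail
#	mailq		/usr/libexec/sendmail/sendmail
#	newaliases	/usr/libexec/sendmail/sendmail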
.if ${MK_MAKE} == no
OLD_FILES+=usr/bin/make
OLD_FILES+=usr/share/man/man1/make.1.gz
OLD_FILES+=usr/share/mk/atf.test.mk
OLD_FILES+=usr/share/mk/bsd.README
OLD_FILES+=usr/share/mk/bsd.arch.inc.mk
OLD_FILES+=usr/share/mk/bsd.compiler.mk
OLD_FILES+=usr/share/mk/bsd.cpu.mk
OLD_FILES+=usr/share/mk/bsd.crunchgen.mk
OLD_FILES+=usr/share/mk/bsd.dep.mk
OLD_FILES+=usr/share/mk/bsd.doc.mk
OLD_FILES+=usr/share/mk/bsd.dtb.mk
OLD_FILES+=usr/share/mk/bsd.endian.mk
OLD_FILES+=usr/share/mk/bsd.files.mk
OLD_FILES+=usr/share/mk/bsd.incs.mk
OLD_FILES+=usr/share/mk/bsd.info.mk
OLD_FILES+=usr/share/mk/bsd.init.mk
OLD_FILES+=usr/share/mk/bsd.kmod.mk
OLD_FILES+=usr/share/mk/bsd.lib.mk
OLD_FILES+=usr/share/mk/bsd.libnames.mk
OLD_FILES+=usr/share/mk/bsd.links.mk
OLD_FILES+=usr/share/mk/bsd.man.mk
OLD_FILES+=usr/share/mk/bsd.mkopt.mk
OLD_FILES+=usr/share/mk/bsd.nls.mk
OLD_FILES+=usr/share/mk/bsd.obj.mk
OLD_FILES+=usr/share/mk/bsd.opts.mk
OLD_FILES+=usr/share/mk/bsd.own.mk
OLD_FILES+=usr/share/mk/bsd.port.mk
OLD_FILES+=usr/share/mk/bsd.port.options.mk
OLD_FILES+=usr/share/mk/bsd.port.post.mk
OLD_FILES+=usr/share/mk/bsd.port.pre.mk
OLD_FILES+=usr/share/mk/bsd.port.subdir.mk
OLD_FILES+=usr/share/mk/bsd.prog.mk
OLD_FILES+=usr/share/mk/bsd.progs.mk
OLD_FILES+=usr/share/mk/bsd.snmpmod.mk
OLD_FILES+=usr/share/mk/bsd.subdir.mk
OLD_FILES+=usr/share/mk/bsd.symver.mk
OLD_FILES+=usr/share/mk/bsd.sys.mk
OLD_FILES+=usr/share/mk/bsd.test.mk
OLD_FILES+=usr/share/mk/plain.test.mk
OLD_FILES+=usr/share/mk/suite.test.mk
OLD_FILES+=usr/share/mk/sys.mk
OLD_FILES+=usr/share/mk/tap.test.mk
OLD_FILES+=usr/share/mk/version_gen.awk
OLD_FILES+=usr/tests/usr.bin/bmake/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/archives/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.status.2
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.status.3
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.status.4
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.status.5
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.status.6
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.status.7
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.stderr.3
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.stderr.4
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.stderr.5
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.stderr.6
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.stderr.7
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.stdout.3
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.stdout.4
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.stdout.5
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.stdout.6
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.stdout.7
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/libtest.a
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.status.2
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.status.3
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.status.4
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.status.5
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.status.6
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.status.7
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.stderr.3
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.stderr.4
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.stderr.5
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.stderr.6
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.stderr.7
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.stdout.3
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.stdout.4
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.stdout.5
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.stdout.6
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.stdout.7
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/libtest.a
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.status.2
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.status.3
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.status.4
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.status.5
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.status.6
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.status.7
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.stderr.3
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.stderr.4
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.stderr.5
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.stderr.6
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.stderr.7
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.stdout.3
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.stdout.4
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.stdout.5
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.stdout.6
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.stdout.7
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/libtest.a
OLD_FILES+=usr/tests/usr.bin/bmake/basic/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t0/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t0/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t0/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t0/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t0/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t1/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t1/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t1/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t1/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t1/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t1/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t2/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t2/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t2/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t2/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t2/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t2/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t3/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t3/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t3/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t3/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t3/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/common.sh
OLD_FILES+=usr/tests/usr.bin/bmake/execution/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/execution/ellipsis/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/execution/ellipsis/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/execution/ellipsis/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/execution/ellipsis/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/execution/ellipsis/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/execution/ellipsis/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/execution/empty/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/execution/empty/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/execution/empty/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/execution/empty/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/execution/empty/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/execution/empty/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/execution/joberr/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/execution/joberr/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/execution/joberr/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/execution/joberr/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/execution/joberr/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/execution/joberr/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/execution/plus/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/execution/plus/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/execution/plus/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/execution/plus/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/execution/plus/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/execution/plus/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/shell/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/shell/builtin/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/shell/builtin/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/shell/builtin/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/shell/builtin/expected.status.2
OLD_FILES+=usr/tests/usr.bin/bmake/shell/builtin/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/shell/builtin/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/bmake/shell/builtin/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/shell/builtin/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/bmake/shell/builtin/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/shell/builtin/sh
OLD_FILES+=usr/tests/usr.bin/bmake/shell/meta/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/shell/meta/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/shell/meta/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/shell/meta/expected.status.2
OLD_FILES+=usr/tests/usr.bin/bmake/shell/meta/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/shell/meta/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/bmake/shell/meta/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/shell/meta/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/bmake/shell/meta/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/shell/meta/sh
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path/expected.status.2
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path/sh
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path_select/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path_select/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path_select/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path_select/expected.status.2
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path_select/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path_select/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path_select/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path_select/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path_select/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path_select/shell
OLD_FILES+=usr/tests/usr.bin/bmake/shell/replace/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/shell/replace/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/shell/replace/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/shell/replace/expected.status.2
OLD_FILES+=usr/tests/usr.bin/bmake/shell/replace/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/shell/replace/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/bmake/shell/replace/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/shell/replace/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/bmake/shell/replace/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/shell/replace/shell
OLD_FILES+=usr/tests/usr.bin/bmake/shell/select/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/shell/select/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/shell/select/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/shell/select/expected.status.2
OLD_FILES+=usr/tests/usr.bin/bmake/shell/select/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/shell/select/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/bmake/shell/select/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/shell/select/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/bmake/shell/select/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/basic/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/basic/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/basic/TEST1.a
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/basic/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/basic/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/basic/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/basic/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/src_wild1/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/src_wild1/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/src_wild1/TEST1.a
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/src_wild1/TEST2.a
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/src_wild1/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/src_wild1/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/src_wild1/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/src_wild1/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/src_wild2/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/src_wild2/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/src_wild2/TEST1.a
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/src_wild2/TEST2.a
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/src_wild2/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/src_wild2/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/src_wild2/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/src_wild2/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/directive-t0/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/directive-t0/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/directive-t0/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/directive-t0/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/directive-t0/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/directive-t0/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/enl/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/enl/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/enl/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/enl/expected.status.2
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/enl/expected.status.3
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/enl/expected.status.4
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/enl/expected.status.5
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/enl/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/enl/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/enl/expected.stderr.3
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/enl/expected.stderr.4
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/enl/expected.stderr.5
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/enl/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/enl/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/enl/expected.stdout.3
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/enl/expected.stdout.4
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/enl/expected.stdout.5
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/enl/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/funny-targets/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/funny-targets/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/funny-targets/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/funny-targets/expected.status.2
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/funny-targets/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/funny-targets/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/funny-targets/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/funny-targets/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/funny-targets/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/semi/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/semi/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/semi/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/semi/expected.status.2
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/semi/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/semi/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/semi/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/semi/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/semi/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t0/2/1/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t0/2/1/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t0/2/1/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t0/2/1/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t0/2/1/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t0/2/1/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t0/2/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t0/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t0/mk/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t0/mk/sys.mk
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t1/2/1/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t1/2/1/cleanup
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t1/2/1/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t1/2/1/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t1/2/1/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t1/2/1/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t1/2/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t1/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t1/mk/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t1/mk/sys.mk
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t2/2/1/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t2/2/1/cleanup
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t2/2/1/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t2/2/1/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t2/2/1/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t2/2/1/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t2/2/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t2/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t2/mk/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t2/mk/sys.mk
OLD_FILES+=usr/tests/usr.bin/bmake/test-new.mk
OLD_FILES+=usr/tests/usr.bin/bmake/variables/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/variables/modifier_M/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/variables/modifier_M/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/variables/modifier_M/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/variables/modifier_M/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/variables/modifier_M/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/variables/modifier_M/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/variables/modifier_t/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/variables/modifier_t/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/variables/modifier_t/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/variables/modifier_t/expected.status.2
OLD_FILES+=usr/tests/usr.bin/bmake/variables/modifier_t/expected.status.3
OLD_FILES+=usr/tests/usr.bin/bmake/variables/modifier_t/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/variables/modifier_t/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/bmake/variables/modifier_t/expected.stderr.3
OLD_FILES+=usr/tests/usr.bin/bmake/variables/modifier_t/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/variables/modifier_t/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/bmake/variables/modifier_t/expected.stdout.3
OLD_FILES+=usr/tests/usr.bin/bmake/variables/modifier_t/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/variables/opt_V/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/variables/opt_V/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/variables/opt_V/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/variables/opt_V/expected.status.2
OLD_FILES+=usr/tests/usr.bin/bmake/variables/opt_V/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/variables/opt_V/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/bmake/variables/opt_V/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/variables/opt_V/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/bmake/variables/opt_V/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/variables/t0/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/variables/t0/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/variables/t0/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/variables/t0/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/variables/t0/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/variables/t0/legacy_test
.endif
.if ${MK_MAN} == no
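# The `!=' assignments below run find(1) at parse time and capture its
# output; the trailing `; echo' apparently keeps make from complaining
# when find exits non-zero (e.g. when the manual directories are missing
# from ${DESTDIR}) and guarantees the assignment yields some output.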
MAN_FILES!=find ${DESTDIR}/usr/share/man ${DESTDIR}/usr/share/openssl/man -type f | sed -e 's,^${DESTDIR}/,,'; echo
OLD_FILES+=${MAN_FILES}
MAN_DIRS!=find ${DESTDIR}/usr/share/man ${DESTDIR}/usr/share/openssl/man -type d | sed -e 's,^${DESTDIR}/,,'; echo
OLD_DIRS+=${MAN_DIRS}
.endif
.if ${MK_MAN_UTILS} == no
OLD_FILES+=etc/periodic/weekly/320.whatis
OLD_FILES+=usr/bin/apropos
OLD_FILES+=usr/bin/makewhatis
OLD_FILES+=usr/bin/man
OLD_FILES+=usr/bin/manpath
OLD_FILES+=usr/bin/whatis
OLD_FILES+=usr/libexec/makewhatis.local
OLD_FILES+=usr/sbin/manctl
OLD_FILES+=usr/share/man/man1/apropos.1.gz
OLD_FILES+=usr/share/man/man1/makewhatis.1.gz
OLD_FILES+=usr/share/man/man1/man.1.gz
OLD_FILES+=usr/share/man/man1/manpath.1.gz
OLD_FILES+=usr/share/man/man1/whatis.1.gz
OLD_FILES+=usr/share/man/man5/man.conf.5.gz
OLD_FILES+=usr/share/man/man8/makewhatis.local.8.gz
OLD_FILES+=usr/share/man/man8/manctl.8.gz
OLD_FILES+=usr/share/man/whatis
OLD_FILES+=usr/share/openssl/man/whatis
.endif
.if ${MK_NDIS} == no
OLD_FILES+=usr/sbin/ndiscvt
OLD_FILES+=usr/sbin/ndisgen
OLD_FILES+=usr/share/man/man8/ndiscvt.8.gz
OLD_FILES+=usr/share/man/man8/ndisgen.8.gz
OLD_FILES+=usr/share/misc/windrv_stub.c
.endif
.if ${MK_NETCAT} == no
OLD_FILES+=rescue/nc
OLD_FILES+=usr/bin/nc
OLD_FILES+=usr/share/man/man1/nc.1.gz
.endif
.if ${MK_NETGRAPH} == no
OLD_FILES+=usr/include/netgraph.h
OLD_FILES+=usr/lib/libnetgraph.a
OLD_FILES+=usr/lib/libnetgraph.so
OLD_LIBS+=usr/lib/libnetgraph.so.4
OLD_FILES+=usr/lib/libnetgraph_p.a
OLD_FILES+=usr/lib32/libnetgraph.a
OLD_FILES+=usr/lib32/libnetgraph.so
OLD_LIBS+=usr/lib32/libnetgraph.so.4
OLD_FILES+=usr/lib32/libnetgraph_p.a
OLD_FILES+=usr/libexec/pppoed
OLD_FILES+=usr/sbin/flowctl
OLD_FILES+=usr/sbin/lmcconfig
OLD_FILES+=usr/sbin/ngctl
OLD_FILES+=usr/sbin/nghook
OLD_FILES+=usr/share/man/man3/NgAllocRecvAsciiMsg.3.gz
OLD_FILES+=usr/share/man/man3/NgAllocRecvData.3.gz
OLD_FILES+=usr/share/man/man3/NgAllocRecvMsg.3.gz
OLD_FILES+=usr/share/man/man3/NgMkSockNode.3.gz
OLD_FILES+=usr/share/man/man3/NgNameNode.3.gz
OLD_FILES+=usr/share/man/man3/NgRecvAsciiMsg.3.gz
OLD_FILES+=usr/share/man/man3/NgRecvData.3.gz
OLD_FILES+=usr/share/man/man3/NgRecvMsg.3.gz
OLD_FILES+=usr/share/man/man3/NgSendAsciiMsg.3.gz
OLD_FILES+=usr/share/man/man3/NgSendData.3.gz
OLD_FILES+=usr/share/man/man3/NgSendMsg.3.gz
OLD_FILES+=usr/share/man/man3/NgSendReplyMsg.3.gz
OLD_FILES+=usr/share/man/man3/NgSetDebug.3.gz
OLD_FILES+=usr/share/man/man3/NgSetErrLog.3.gz
OLD_FILES+=usr/share/man/man3/netgraph.3.gz
OLD_FILES+=usr/share/man/man8/flowctl.8.gz
OLD_FILES+=usr/share/man/man8/lmcconfig.8.gz
OLD_FILES+=usr/share/man/man8/ngctl.8.gz
OLD_FILES+=usr/share/man/man8/nghook.8.gz
OLD_FILES+=usr/share/man/man8/pppoed.8.gz
.endif
.if ${MK_NETGRAPH_SUPPORT} == no
OLD_FILES+=usr/include/bsnmp/snmp_netgraph.h
OLD_FILES+=usr/lib/snmp_netgraph.so
OLD_LIBS+=usr/lib/snmp_netgraph.so.6
OLD_FILES+=usr/share/man/man3/snmp_netgraph.3.gz
OLD_FILES+=usr/share/snmp/defs/netgraph_tree.def
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-NETGRAPH.txt
.endif
.if ${MK_NIS} == no
OLD_FILES+=etc/rc.d/ypbind
OLD_FILES+=etc/rc.d/ypldap
OLD_FILES+=etc/rc.d/yppasswdd
OLD_FILES+=etc/rc.d/ypserv
OLD_FILES+=etc/rc.d/ypset
OLD_FILES+=etc/rc.d/ypupdated
OLD_FILES+=etc/rc.d/ypxfrd
OLD_FILES+=usr/bin/ypcat
OLD_FILES+=usr/bin/ypchfn
OLD_FILES+=usr/bin/ypchpass
OLD_FILES+=usr/bin/ypchsh
OLD_FILES+=usr/bin/ypmatch
OLD_FILES+=usr/bin/yppasswd
OLD_FILES+=usr/bin/ypwhich
OLD_FILES+=usr/include/ypclnt.h
OLD_FILES+=usr/lib/libypclnt.a
OLD_FILES+=usr/lib/libypclnt.so
OLD_LIBS+=usr/lib/libypclnt.so.4
OLD_FILES+=usr/lib/libypclnt_p.a
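# The lib32 compatibility libraries only exist on 64-bit targets that
# provide a 32-bit ABI, hence the TARGET_ARCH guard below.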
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64"
OLD_FILES+=usr/lib32/libypclnt.a
OLD_FILES+=usr/lib32/libypclnt.so
OLD_LIBS+=usr/lib32/libypclnt.so.4
OLD_FILES+=usr/lib32/libypclnt_p.a
.endif
OLD_FILES+=usr/libexec/mknetid
OLD_FILES+=usr/libexec/yppwupdate
OLD_FILES+=usr/libexec/ypxfr
OLD_FILES+=usr/sbin/rpc.yppasswdd
OLD_FILES+=usr/sbin/rpc.ypupdated
OLD_FILES+=usr/sbin/rpc.ypxfrd
OLD_FILES+=usr/sbin/yp_mkdb
OLD_FILES+=usr/sbin/ypbind
OLD_FILES+=usr/sbin/ypinit
OLD_FILES+=usr/sbin/ypldap
OLD_FILES+=usr/sbin/yppoll
OLD_FILES+=usr/sbin/yppush
OLD_FILES+=usr/sbin/ypserv
OLD_FILES+=usr/sbin/ypset
OLD_FILES+=usr/share/man/man1/ypcat.1.gz
OLD_FILES+=usr/share/man/man1/ypchfn.1.gz
OLD_FILES+=usr/share/man/man1/ypchpass.1.gz
OLD_FILES+=usr/share/man/man1/ypchsh.1.gz
OLD_FILES+=usr/share/man/man1/ypmatch.1.gz
OLD_FILES+=usr/share/man/man1/yppasswd.1.gz
OLD_FILES+=usr/share/man/man1/ypwhich.1.gz
OLD_FILES+=usr/share/man/man5/netid.5.gz
OLD_FILES+=usr/share/man/man5/ypldap.conf.5.gz
OLD_FILES+=usr/share/man/man8/mknetid.8.gz
OLD_FILES+=usr/share/man/man8/rpc.yppasswdd.8.gz
OLD_FILES+=usr/share/man/man8/rpc.ypxfrd.8.gz
OLD_FILES+=usr/share/man/man8/NIS.8.gz
OLD_FILES+=usr/share/man/man8/YP.8.gz
OLD_FILES+=usr/share/man/man8/yp.8.gz
OLD_FILES+=usr/share/man/man8/nis.8.gz
OLD_FILES+=usr/share/man/man8/yp_mkdb.8.gz
OLD_FILES+=usr/share/man/man8/ypbind.8.gz
OLD_FILES+=usr/share/man/man8/ypinit.8.gz
OLD_FILES+=usr/share/man/man8/ypldap.8.gz
OLD_FILES+=usr/share/man/man8/yppoll.8.gz
OLD_FILES+=usr/share/man/man8/yppush.8.gz
OLD_FILES+=usr/share/man/man8/ypserv.8.gz
OLD_FILES+=usr/share/man/man8/ypset.8.gz
OLD_FILES+=usr/share/man/man8/ypxfr.8.gz
OLD_FILES+=var/yp/Makefile
OLD_FILES+=var/yp/Makefile.dist
OLD_DIRS+=var/yp
.endif
.if ${MK_NLS} == no
OLD_DIRS+=usr/share/nls
OLD_DIRS+=usr/share/nls/C
OLD_FILES+=usr/share/nls/C/ee.cat
OLD_DIRS+=usr/share/nls/af_ZA.ISO8859-1
OLD_DIRS+=usr/share/nls/af_ZA.ISO8859-15
OLD_DIRS+=usr/share/nls/af_ZA.UTF-8
OLD_DIRS+=usr/share/nls/am_ET.UTF-8
OLD_DIRS+=usr/share/nls/be_BY.CP1131
OLD_DIRS+=usr/share/nls/be_BY.CP1251
OLD_DIRS+=usr/share/nls/be_BY.ISO8859-5
OLD_DIRS+=usr/share/nls/be_BY.UTF-8
OLD_FILES+=usr/share/nls/be_BY.UTF-8/libc.cat
OLD_DIRS+=usr/share/nls/bg_BG.CP1251
OLD_DIRS+=usr/share/nls/bg_BG.UTF-8
OLD_DIRS+=usr/share/nls/ca_ES.ISO8859-1
OLD_FILES+=usr/share/nls/ca_ES.ISO8859-1/libc.cat
OLD_DIRS+=usr/share/nls/ca_ES.ISO8859-15
OLD_DIRS+=usr/share/nls/ca_ES.UTF-8
OLD_DIRS+=usr/share/nls/cs_CZ.ISO8859-2
OLD_DIRS+=usr/share/nls/cs_CZ.UTF-8
OLD_DIRS+=usr/share/nls/da_DK.ISO8859-1
OLD_DIRS+=usr/share/nls/da_DK.ISO8859-15
OLD_DIRS+=usr/share/nls/da_DK.UTF-8
OLD_DIRS+=usr/share/nls/de_AT.ISO8859-1
OLD_FILES+=usr/share/nls/de_AT.ISO8859-1/ee.cat
OLD_FILES+=usr/share/nls/de_AT.ISO8859-1/tcsh.cat
OLD_DIRS+=usr/share/nls/de_AT.ISO8859-15
OLD_FILES+=usr/share/nls/de_AT.ISO8859-15/ee.cat
OLD_FILES+=usr/share/nls/de_AT.ISO8859-15/tcsh.cat
OLD_DIRS+=usr/share/nls/de_AT.UTF-8
OLD_FILES+=usr/share/nls/de_AT.UTF-8/tcsh.cat
OLD_DIRS+=usr/share/nls/de_CH.ISO8859-1
OLD_FILES+=usr/share/nls/de_CH.ISO8859-1/ee.cat
OLD_FILES+=usr/share/nls/de_CH.ISO8859-1/tcsh.cat
OLD_DIRS+=usr/share/nls/de_CH.ISO8859-15
OLD_FILES+=usr/share/nls/de_CH.ISO8859-15/ee.cat
OLD_FILES+=usr/share/nls/de_CH.ISO8859-15/tcsh.cat
OLD_DIRS+=usr/share/nls/de_CH.UTF-8
OLD_FILES+=usr/share/nls/de_CH.UTF-8/tcsh.cat
OLD_DIRS+=usr/share/nls/de_DE.ISO8859-1
OLD_FILES+=usr/share/nls/de_DE.ISO8859-1/ee.cat
OLD_FILES+=usr/share/nls/de_DE.ISO8859-1/libc.cat
OLD_FILES+=usr/share/nls/de_DE.ISO8859-1/tcsh.cat
OLD_DIRS+=usr/share/nls/de_DE.ISO8859-15
OLD_FILES+=usr/share/nls/de_DE.ISO8859-15/ee.cat
OLD_FILES+=usr/share/nls/de_DE.ISO8859-15/tcsh.cat
OLD_DIRS+=usr/share/nls/de_DE.UTF-8
OLD_FILES+=usr/share/nls/de_DE.UTF-8/tcsh.cat
OLD_DIRS+=usr/share/nls/el_GR.ISO8859-7
OLD_FILES+=usr/share/nls/el_GR.ISO8859-7/libc.cat
OLD_FILES+=usr/share/nls/el_GR.ISO8859-7/tcsh.cat
OLD_DIRS+=usr/share/nls/el_GR.UTF-8
OLD_FILES+=usr/share/nls/el_GR.UTF-8/tcsh.cat
OLD_DIRS+=usr/share/nls/en_AU.ISO8859-1
OLD_DIRS+=usr/share/nls/en_AU.ISO8859-15
OLD_DIRS+=usr/share/nls/en_AU.US-ASCII
OLD_DIRS+=usr/share/nls/en_AU.UTF-8
OLD_DIRS+=usr/share/nls/en_CA.ISO8859-1
OLD_DIRS+=usr/share/nls/en_CA.ISO8859-15
OLD_DIRS+=usr/share/nls/en_CA.US-ASCII
OLD_DIRS+=usr/share/nls/en_CA.UTF-8
OLD_DIRS+=usr/share/nls/en_GB.ISO8859-1
OLD_DIRS+=usr/share/nls/en_GB.ISO8859-15
OLD_DIRS+=usr/share/nls/en_GB.US-ASCII
OLD_DIRS+=usr/share/nls/en_GB.UTF-8
OLD_DIRS+=usr/share/nls/en_IE.UTF-8
OLD_DIRS+=usr/share/nls/en_NZ.ISO8859-1
OLD_DIRS+=usr/share/nls/en_NZ.ISO8859-15
OLD_DIRS+=usr/share/nls/en_NZ.US-ASCII
OLD_DIRS+=usr/share/nls/en_NZ.UTF-8
OLD_DIRS+=usr/share/nls/en_US.ISO8859-1
OLD_FILES+=usr/share/nls/en_US.ISO8859-1/ee.cat
OLD_DIRS+=usr/share/nls/en_US.ISO8859-15
OLD_FILES+=usr/share/nls/en_US.ISO8859-15/ee.cat
OLD_DIRS+=usr/share/nls/en_US.UTF-8
OLD_DIRS+=usr/share/nls/es_ES.ISO8859-1
OLD_FILES+=usr/share/nls/es_ES.ISO8859-1/grep.cat
OLD_FILES+=usr/share/nls/es_ES.ISO8859-1/libc.cat
OLD_FILES+=usr/share/nls/es_ES.ISO8859-1/tcsh.cat
OLD_DIRS+=usr/share/nls/es_ES.ISO8859-15
OLD_FILES+=usr/share/nls/es_ES.ISO8859-15/tcsh.cat
OLD_DIRS+=usr/share/nls/es_ES.UTF-8
OLD_FILES+=usr/share/nls/es_ES.UTF-8/tcsh.cat
OLD_DIRS+=usr/share/nls/et_EE.ISO8859-15
OLD_FILES+=usr/share/nls/et_EE.ISO8859-15/tcsh.cat
OLD_DIRS+=usr/share/nls/et_EE.UTF-8
OLD_FILES+=usr/share/nls/et_EE.UTF-8/tcsh.cat
OLD_DIRS+=usr/share/nls/fi_FI.ISO8859-1
OLD_FILES+=usr/share/nls/fi_FI.ISO8859-1/libc.cat
OLD_FILES+=usr/share/nls/fi_FI.ISO8859-1/tcsh.cat
OLD_DIRS+=usr/share/nls/fi_FI.ISO8859-15
OLD_FILES+=usr/share/nls/fi_FI.ISO8859-15/tcsh.cat
OLD_DIRS+=usr/share/nls/fi_FI.UTF-8
OLD_FILES+=usr/share/nls/fi_FI.UTF-8/tcsh.cat
OLD_DIRS+=usr/share/nls/fr_BE.ISO8859-1
OLD_FILES+=usr/share/nls/fr_BE.ISO8859-1/ee.cat
OLD_FILES+=usr/share/nls/fr_BE.ISO8859-1/tcsh.cat
OLD_DIRS+=usr/share/nls/fr_BE.ISO8859-15
OLD_FILES+=usr/share/nls/fr_BE.ISO8859-15/ee.cat
OLD_FILES+=usr/share/nls/fr_BE.ISO8859-15/tcsh.cat
OLD_DIRS+=usr/share/nls/fr_BE.UTF-8
OLD_FILES+=usr/share/nls/fr_BE.UTF-8/tcsh.cat
OLD_DIRS+=usr/share/nls/fr_CA.ISO8859-1
OLD_FILES+=usr/share/nls/fr_CA.ISO8859-1/ee.cat
OLD_FILES+=usr/share/nls/fr_CA.ISO8859-1/tcsh.cat
OLD_DIRS+=usr/share/nls/fr_CA.ISO8859-15
OLD_FILES+=usr/share/nls/fr_CA.ISO8859-15/ee.cat
OLD_FILES+=usr/share/nls/fr_CA.ISO8859-15/tcsh.cat
OLD_DIRS+=usr/share/nls/fr_CA.UTF-8
OLD_FILES+=usr/share/nls/fr_CA.UTF-8/tcsh.cat
OLD_DIRS+=usr/share/nls/fr_CH.ISO8859-1
OLD_FILES+=usr/share/nls/fr_CH.ISO8859-1/ee.cat
OLD_FILES+=usr/share/nls/fr_CH.ISO8859-1/tcsh.cat
OLD_DIRS+=usr/share/nls/fr_CH.ISO8859-15
OLD_FILES+=usr/share/nls/fr_CH.ISO8859-15/ee.cat
OLD_FILES+=usr/share/nls/fr_CH.ISO8859-15/tcsh.cat
OLD_DIRS+=usr/share/nls/fr_CH.UTF-8
OLD_FILES+=usr/share/nls/fr_CH.UTF-8/tcsh.cat
OLD_DIRS+=usr/share/nls/fr_FR.ISO8859-1
OLD_FILES+=usr/share/nls/fr_FR.ISO8859-1/ee.cat
OLD_FILES+=usr/share/nls/fr_FR.ISO8859-1/libc.cat
OLD_FILES+=usr/share/nls/fr_FR.ISO8859-1/tcsh.cat
OLD_DIRS+=usr/share/nls/fr_FR.ISO8859-15
OLD_FILES+=usr/share/nls/fr_FR.ISO8859-15/ee.cat
OLD_FILES+=usr/share/nls/fr_FR.ISO8859-15/tcsh.cat
OLD_DIRS+=usr/share/nls/fr_FR.UTF-8
OLD_FILES+=usr/share/nls/fr_FR.UTF-8/tcsh.cat
OLD_DIRS+=usr/share/nls/gl_ES.ISO8859-1
OLD_FILES+=usr/share/nls/gl_ES.ISO8859-1/grep.cat
OLD_FILES+=usr/share/nls/gl_ES.ISO8859-1/libc.cat
OLD_DIRS+=usr/share/nls/he_IL.UTF-8
OLD_DIRS+=usr/share/nls/hi_IN.ISCII-DEV
OLD_DIRS+=usr/share/nls/hr_HR.ISO8859-2
OLD_DIRS+=usr/share/nls/hr_HR.UTF-8
OLD_DIRS+=usr/share/nls/hu_HU.ISO8859-2
OLD_FILES+=usr/share/nls/hu_HU.ISO8859-2/ee.cat
OLD_FILES+=usr/share/nls/hu_HU.ISO8859-2/grep.cat
OLD_FILES+=usr/share/nls/hu_HU.ISO8859-2/libc.cat
OLD_FILES+=usr/share/nls/hu_HU.ISO8859-2/sort.cat
OLD_DIRS+=usr/share/nls/hu_HU.UTF-8
OLD_DIRS+=usr/share/nls/hy_AM.ARMSCII-8
OLD_DIRS+=usr/share/nls/hy_AM.UTF-8
OLD_DIRS+=usr/share/nls/is_IS.ISO8859-1
OLD_DIRS+=usr/share/nls/is_IS.ISO8859-15
OLD_DIRS+=usr/share/nls/is_IS.UTF-8
OLD_DIRS+=usr/share/nls/it_CH.ISO8859-1
OLD_FILES+=usr/share/nls/it_CH.ISO8859-1/tcsh.cat
OLD_DIRS+=usr/share/nls/it_CH.ISO8859-15
OLD_FILES+=usr/share/nls/it_CH.ISO8859-15/tcsh.cat
OLD_DIRS+=usr/share/nls/it_CH.UTF-8
OLD_FILES+=usr/share/nls/it_CH.UTF-8/tcsh.cat
OLD_DIRS+=usr/share/nls/it_IT.ISO8859-1
OLD_FILES+=usr/share/nls/it_IT.ISO8859-1/tcsh.cat
OLD_DIRS+=usr/share/nls/it_IT.ISO8859-15
OLD_FILES+=usr/share/nls/it_IT.ISO8859-15/libc.cat
OLD_FILES+=usr/share/nls/it_IT.ISO8859-15/tcsh.cat
OLD_DIRS+=usr/share/nls/it_IT.UTF-8
OLD_FILES+=usr/share/nls/it_IT.UTF-8/tcsh.cat
OLD_DIRS+=usr/share/nls/ja_JP.SJIS
OLD_FILES+=usr/share/nls/ja_JP.SJIS/grep.cat
OLD_FILES+=usr/share/nls/ja_JP.SJIS/tcsh.cat
OLD_DIRS+=usr/share/nls/ja_JP.UTF-8
OLD_FILES+=usr/share/nls/ja_JP.UTF-8/grep.cat
OLD_FILES+=usr/share/nls/ja_JP.UTF-8/libc.cat
OLD_FILES+=usr/share/nls/ja_JP.UTF-8/tcsh.cat
OLD_DIRS+=usr/share/nls/ja_JP.eucJP
OLD_FILES+=usr/share/nls/ja_JP.eucJP/grep.cat
OLD_FILES+=usr/share/nls/ja_JP.eucJP/libc.cat
OLD_FILES+=usr/share/nls/ja_JP.eucJP/tcsh.cat
OLD_DIRS+=usr/share/nls/kk_KZ.PT154
OLD_DIRS+=usr/share/nls/kk_KZ.UTF-8
OLD_DIRS+=usr/share/nls/ko_KR.CP949
OLD_DIRS+=usr/share/nls/ko_KR.UTF-8
OLD_FILES+=usr/share/nls/ko_KR.UTF-8/libc.cat
OLD_DIRS+=usr/share/nls/ko_KR.eucKR
OLD_FILES+=usr/share/nls/ko_KR.eucKR/libc.cat
OLD_DIRS+=usr/share/nls/lt_LT.ISO8859-13
OLD_DIRS+=usr/share/nls/lt_LT.UTF-8
OLD_DIRS+=usr/share/nls/lv_LV.ISO8859-13
OLD_DIRS+=usr/share/nls/lv_LV.UTF-8
OLD_DIRS+=usr/share/nls/mn_MN.UTF-8
OLD_FILES+=usr/share/nls/mn_MN.UTF-8/libc.cat
OLD_DIRS+=usr/share/nls/nl_BE.ISO8859-1
OLD_DIRS+=usr/share/nls/nl_BE.ISO8859-15
OLD_DIRS+=usr/share/nls/nl_BE.UTF-8
OLD_DIRS+=usr/share/nls/nl_NL.ISO8859-1
OLD_FILES+=usr/share/nls/nl_NL.ISO8859-1/libc.cat
OLD_DIRS+=usr/share/nls/nl_NL.ISO8859-15
OLD_DIRS+=usr/share/nls/nl_NL.UTF-8
OLD_DIRS+=usr/share/nls/no_NO.ISO8859-1
OLD_FILES+=usr/share/nls/no_NO.ISO8859-1/libc.cat
OLD_DIRS+=usr/share/nls/no_NO.ISO8859-15
OLD_DIRS+=usr/share/nls/no_NO.UTF-8
OLD_DIRS+=usr/share/nls/pl_PL.ISO8859-2
OLD_FILES+=usr/share/nls/pl_PL.ISO8859-2/ee.cat
OLD_FILES+=usr/share/nls/pl_PL.ISO8859-2/libc.cat
OLD_DIRS+=usr/share/nls/pl_PL.UTF-8
OLD_DIRS+=usr/share/nls/pt_BR.ISO8859-1
OLD_FILES+=usr/share/nls/pt_BR.ISO8859-1/ee.cat
OLD_FILES+=usr/share/nls/pt_BR.ISO8859-1/grep.cat
OLD_FILES+=usr/share/nls/pt_BR.ISO8859-1/libc.cat
OLD_DIRS+=usr/share/nls/pt_BR.UTF-8
OLD_DIRS+=usr/share/nls/pt_PT.ISO8859-1
OLD_FILES+=usr/share/nls/pt_PT.ISO8859-1/ee.cat
OLD_DIRS+=usr/share/nls/pt_PT.ISO8859-15
OLD_DIRS+=usr/share/nls/pt_PT.UTF-8
OLD_DIRS+=usr/share/nls/ro_RO.ISO8859-2
OLD_DIRS+=usr/share/nls/ro_RO.UTF-8
OLD_DIRS+=usr/share/nls/ru_RU.CP1251
OLD_FILES+=usr/share/nls/ru_RU.CP1251/tcsh.cat
OLD_DIRS+=usr/share/nls/ru_RU.CP866
OLD_FILES+=usr/share/nls/ru_RU.CP866/tcsh.cat
OLD_DIRS+=usr/share/nls/ru_RU.ISO8859-5
OLD_FILES+=usr/share/nls/ru_RU.ISO8859-5/tcsh.cat
OLD_DIRS+=usr/share/nls/ru_RU.KOI8-R
OLD_FILES+=usr/share/nls/ru_RU.KOI8-R/ee.cat
OLD_FILES+=usr/share/nls/ru_RU.KOI8-R/grep.cat
OLD_FILES+=usr/share/nls/ru_RU.KOI8-R/libc.cat
OLD_FILES+=usr/share/nls/ru_RU.KOI8-R/tcsh.cat
OLD_DIRS+=usr/share/nls/ru_RU.UTF-8
OLD_FILES+=usr/share/nls/ru_RU.UTF-8/tcsh.cat
OLD_DIRS+=usr/share/nls/sk_SK.ISO8859-2
OLD_FILES+=usr/share/nls/sk_SK.ISO8859-2/libc.cat
OLD_DIRS+=usr/share/nls/sk_SK.UTF-8
OLD_DIRS+=usr/share/nls/sl_SI.ISO8859-2
OLD_DIRS+=usr/share/nls/sl_SI.UTF-8
OLD_DIRS+=usr/share/nls/sr_YU.ISO8859-2
OLD_DIRS+=usr/share/nls/sr_YU.ISO8859-5
OLD_DIRS+=usr/share/nls/sr_YU.UTF-8
OLD_DIRS+=usr/share/nls/sv_SE.ISO8859-1
OLD_FILES+=usr/share/nls/sv_SE.ISO8859-1/libc.cat
OLD_DIRS+=usr/share/nls/sv_SE.ISO8859-15
OLD_DIRS+=usr/share/nls/sv_SE.UTF-8
OLD_DIRS+=usr/share/nls/tr_TR.ISO8859-9
OLD_DIRS+=usr/share/nls/tr_TR.UTF-8
OLD_DIRS+=usr/share/nls/uk_UA.ISO8859-5
OLD_FILES+=usr/share/nls/uk_UA.ISO8859-5/tcsh.cat
OLD_DIRS+=usr/share/nls/uk_UA.KOI8-U
OLD_FILES+=usr/share/nls/uk_UA.KOI8-U/ee.cat
OLD_FILES+=usr/share/nls/uk_UA.KOI8-U/tcsh.cat
OLD_DIRS+=usr/share/nls/uk_UA.UTF-8
OLD_FILES+=usr/share/nls/uk_UA.UTF-8/grep.cat
OLD_FILES+=usr/share/nls/uk_UA.UTF-8/libc.cat
OLD_FILES+=usr/share/nls/uk_UA.UTF-8/tcsh.cat
OLD_DIRS+=usr/share/nls/zh_CN.GB18030
OLD_FILES+=usr/share/nls/zh_CN.GB18030/libc.cat
OLD_DIRS+=usr/share/nls/zh_CN.GB2312
OLD_FILES+=usr/share/nls/zh_CN.GB2312/libc.cat
OLD_DIRS+=usr/share/nls/zh_CN.GBK
OLD_DIRS+=usr/share/nls/zh_CN.UTF-8
OLD_FILES+=usr/share/nls/zh_CN.UTF-8/grep.cat
OLD_FILES+=usr/share/nls/zh_CN.UTF-8/libc.cat
OLD_DIRS+=usr/share/nls/zh_CN.eucCN
OLD_DIRS+=usr/share/nls/zh_HK.UTF-8
OLD_DIRS+=usr/share/nls/zh_TW.UTF-8
OLD_FILES+=usr/tests/bin/sh/builtins/locale1.0
.endif
.if ${MK_NLS_CATALOGS} == no
OLD_FILES+=usr/share/nls/de_AT.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/de_CH.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/de_DE.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/el_GR.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/es_ES.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/et_EE.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/fi_FI.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/fr_BE.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/fr_CA.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/fr_CH.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/fr_FR.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/it_CH.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/it_IT.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/ja_JP.SJIS/tcsh.cat
OLD_FILES+=usr/share/nls/ja_JP.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/ru_RU.CP1251/tcsh.cat
OLD_FILES+=usr/share/nls/ru_RU.CP866/tcsh.cat
OLD_FILES+=usr/share/nls/ru_RU.ISO8859-5/tcsh.cat
OLD_FILES+=usr/share/nls/ru_RU.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/uk_UA.ISO8859-5/tcsh.cat
OLD_FILES+=usr/share/nls/uk_UA.UTF-8/tcsh.cat
.endif
.if ${MK_NS_CACHING} == no
OLD_FILES+=etc/nscd.conf
OLD_FILES+=etc/rc.d/nscd
OLD_FILES+=usr/sbin/nscd
OLD_FILES+=usr/share/examples/etc/nscd.conf
OLD_FILES+=usr/share/man/man5/nscd.conf.5.gz
OLD_FILES+=usr/share/man/man8/nscd.8.gz
.endif
.if ${MK_NTP} == no
OLD_FILES+=etc/ntp.conf
OLD_FILES+=etc/periodic/daily/480.status-ntpd
OLD_FILES+=usr/bin/ntpq
OLD_FILES+=usr/sbin/ntp-keygen
OLD_FILES+=usr/sbin/ntpd
OLD_FILES+=usr/sbin/ntpdate
OLD_FILES+=usr/sbin/ntpdc
OLD_FILES+=usr/sbin/ntptime
OLD_FILES+=usr/sbin/sntp
OLD_FILES+=usr/share/doc/ntp/access.html
OLD_FILES+=usr/share/doc/ntp/accopt.html
OLD_FILES+=usr/share/doc/ntp/assoc.html
OLD_FILES+=usr/share/doc/ntp/audio.html
OLD_FILES+=usr/share/doc/ntp/authentic.html
OLD_FILES+=usr/share/doc/ntp/authopt.html
OLD_FILES+=usr/share/doc/ntp/autokey.html
OLD_FILES+=usr/share/doc/ntp/bugs.html
OLD_FILES+=usr/share/doc/ntp/build.html
OLD_FILES+=usr/share/doc/ntp/clock.html
OLD_FILES+=usr/share/doc/ntp/clockopt.html
OLD_FILES+=usr/share/doc/ntp/cluster.html
OLD_FILES+=usr/share/doc/ntp/comdex.html
OLD_FILES+=usr/share/doc/ntp/config.html
OLD_FILES+=usr/share/doc/ntp/confopt.html
OLD_FILES+=usr/share/doc/ntp/copyright.html
OLD_FILES+=usr/share/doc/ntp/debug.html
OLD_FILES+=usr/share/doc/ntp/decode.html
OLD_FILES+=usr/share/doc/ntp/discipline.html
OLD_FILES+=usr/share/doc/ntp/discover.html
OLD_FILES+=usr/share/doc/ntp/driver1.html
OLD_FILES+=usr/share/doc/ntp/driver10.html
OLD_FILES+=usr/share/doc/ntp/driver11.html
OLD_FILES+=usr/share/doc/ntp/driver12.html
OLD_FILES+=usr/share/doc/ntp/driver16.html
OLD_FILES+=usr/share/doc/ntp/driver18.html
OLD_FILES+=usr/share/doc/ntp/driver19.html
OLD_FILES+=usr/share/doc/ntp/driver2.html
OLD_FILES+=usr/share/doc/ntp/driver20.html
OLD_FILES+=usr/share/doc/ntp/driver22.html
OLD_FILES+=usr/share/doc/ntp/driver26.html
OLD_FILES+=usr/share/doc/ntp/driver27.html
OLD_FILES+=usr/share/doc/ntp/driver28.html
OLD_FILES+=usr/share/doc/ntp/driver29.html
OLD_FILES+=usr/share/doc/ntp/driver3.html
OLD_FILES+=usr/share/doc/ntp/driver30.html
OLD_FILES+=usr/share/doc/ntp/driver32.html
OLD_FILES+=usr/share/doc/ntp/driver33.html
OLD_FILES+=usr/share/doc/ntp/driver34.html
OLD_FILES+=usr/share/doc/ntp/driver35.html
OLD_FILES+=usr/share/doc/ntp/driver36.html
OLD_FILES+=usr/share/doc/ntp/driver37.html
OLD_FILES+=usr/share/doc/ntp/driver4.html
OLD_FILES+=usr/share/doc/ntp/driver5.html
OLD_FILES+=usr/share/doc/ntp/driver6.html
OLD_FILES+=usr/share/doc/ntp/driver7.html
OLD_FILES+=usr/share/doc/ntp/driver8.html
OLD_FILES+=usr/share/doc/ntp/driver9.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver1.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver10.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver11.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver12.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver16.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver18.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver19.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver20.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver22.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver26.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver27.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver28.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver29.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver3.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver30.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver31.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver32.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver33.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver34.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver35.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver36.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver37.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver38.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver39.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver4.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver40.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver42.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver43.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver44.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver45.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver46.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver5.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver6.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver7.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver8.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver9.html
OLD_FILES+=usr/share/doc/ntp/drivers/icons/home.gif
OLD_FILES+=usr/share/doc/ntp/drivers/icons/mail2.gif
OLD_FILES+=usr/share/doc/ntp/drivers/mx4200data.html
OLD_FILES+=usr/share/doc/ntp/drivers/oncore-shmem.html
OLD_FILES+=usr/share/doc/ntp/drivers/scripts/footer.txt
OLD_FILES+=usr/share/doc/ntp/drivers/scripts/style.css
OLD_FILES+=usr/share/doc/ntp/drivers/tf582_4.html
OLD_FILES+=usr/share/doc/ntp/extern.html
OLD_FILES+=usr/share/doc/ntp/filter.html
OLD_FILES+=usr/share/doc/ntp/hints.html
OLD_FILES+=usr/share/doc/ntp/hints/a-ux
OLD_FILES+=usr/share/doc/ntp/hints/aix
OLD_FILES+=usr/share/doc/ntp/hints/bsdi
OLD_FILES+=usr/share/doc/ntp/hints/changes
OLD_FILES+=usr/share/doc/ntp/hints/decosf1
OLD_FILES+=usr/share/doc/ntp/hints/decosf2
OLD_FILES+=usr/share/doc/ntp/hints/freebsd
OLD_FILES+=usr/share/doc/ntp/hints/hpux
OLD_FILES+=usr/share/doc/ntp/hints/linux
OLD_FILES+=usr/share/doc/ntp/hints/mpeix
OLD_FILES+=usr/share/doc/ntp/hints/notes-xntp-v3
OLD_FILES+=usr/share/doc/ntp/hints/parse
OLD_FILES+=usr/share/doc/ntp/hints/refclocks
OLD_FILES+=usr/share/doc/ntp/hints/rs6000
OLD_FILES+=usr/share/doc/ntp/hints/sco.html
OLD_FILES+=usr/share/doc/ntp/hints/sgi
OLD_FILES+=usr/share/doc/ntp/hints/solaris-dosynctodr.html
OLD_FILES+=usr/share/doc/ntp/hints/solaris.html
OLD_FILES+=usr/share/doc/ntp/hints/solaris.xtra.4023118
OLD_FILES+=usr/share/doc/ntp/hints/solaris.xtra.4095849
OLD_FILES+=usr/share/doc/ntp/hints/solaris.xtra.S99ntpd
OLD_FILES+=usr/share/doc/ntp/hints/solaris.xtra.patchfreq
OLD_FILES+=usr/share/doc/ntp/hints/sun4
OLD_FILES+=usr/share/doc/ntp/hints/svr4-dell
OLD_FILES+=usr/share/doc/ntp/hints/svr4_package
OLD_FILES+=usr/share/doc/ntp/hints/todo
OLD_FILES+=usr/share/doc/ntp/hints/vxworks.html
OLD_FILES+=usr/share/doc/ntp/hints/winnt.html
OLD_FILES+=usr/share/doc/ntp/history.html
OLD_FILES+=usr/share/doc/ntp/howto.html
OLD_FILES+=usr/share/doc/ntp/huffpuff.html
OLD_FILES+=usr/share/doc/ntp/icons/home.gif
OLD_FILES+=usr/share/doc/ntp/icons/mail2.gif
OLD_FILES+=usr/share/doc/ntp/icons/sitemap.png
OLD_FILES+=usr/share/doc/ntp/index.html
OLD_FILES+=usr/share/doc/ntp/kern.html
OLD_FILES+=usr/share/doc/ntp/kernpps.html
OLD_FILES+=usr/share/doc/ntp/keygen.html
OLD_FILES+=usr/share/doc/ntp/ldisc.html
OLD_FILES+=usr/share/doc/ntp/leap.html
OLD_FILES+=usr/share/doc/ntp/measure.html
OLD_FILES+=usr/share/doc/ntp/miscopt.html
OLD_FILES+=usr/share/doc/ntp/monopt.html
OLD_FILES+=usr/share/doc/ntp/msyslog.html
OLD_FILES+=usr/share/doc/ntp/mx4200data.html
OLD_FILES+=usr/share/doc/ntp/notes.html
OLD_FILES+=usr/share/doc/ntp/ntp-keygen.html
OLD_FILES+=usr/share/doc/ntp/ntp-wait.html
OLD_FILES+=usr/share/doc/ntp/ntp.conf.html
OLD_FILES+=usr/share/doc/ntp/ntp.keys.html
OLD_FILES+=usr/share/doc/ntp/ntp_conf.html
OLD_FILES+=usr/share/doc/ntp/ntpd.html
OLD_FILES+=usr/share/doc/ntp/ntpdate.html
OLD_FILES+=usr/share/doc/ntp/ntpdc.html
OLD_FILES+=usr/share/doc/ntp/ntpdsim.html
OLD_FILES+=usr/share/doc/ntp/ntpdsim_new.html
OLD_FILES+=usr/share/doc/ntp/ntpq.html
OLD_FILES+=usr/share/doc/ntp/ntpsnmpd.html
OLD_FILES+=usr/share/doc/ntp/ntptime.html
OLD_FILES+=usr/share/doc/ntp/ntptrace.html
OLD_FILES+=usr/share/doc/ntp/orphan.html
OLD_FILES+=usr/share/doc/ntp/parsedata.html
OLD_FILES+=usr/share/doc/ntp/parsenew.html
OLD_FILES+=usr/share/doc/ntp/patches.html
OLD_FILES+=usr/share/doc/ntp/pic/9400n.jpg
OLD_FILES+=usr/share/doc/ntp/pic/alice11.gif
OLD_FILES+=usr/share/doc/ntp/pic/alice13.gif
OLD_FILES+=usr/share/doc/ntp/pic/alice15.gif
OLD_FILES+=usr/share/doc/ntp/pic/alice23.gif
OLD_FILES+=usr/share/doc/ntp/pic/alice31.gif
OLD_FILES+=usr/share/doc/ntp/pic/alice32.gif
OLD_FILES+=usr/share/doc/ntp/pic/alice35.gif
OLD_FILES+=usr/share/doc/ntp/pic/alice38.gif
OLD_FILES+=usr/share/doc/ntp/pic/alice44.gif
OLD_FILES+=usr/share/doc/ntp/pic/alice47.gif
OLD_FILES+=usr/share/doc/ntp/pic/alice51.gif
OLD_FILES+=usr/share/doc/ntp/pic/alice61.gif
OLD_FILES+=usr/share/doc/ntp/pic/barnstable.gif
OLD_FILES+=usr/share/doc/ntp/pic/beaver.gif
OLD_FILES+=usr/share/doc/ntp/pic/boom3.gif
OLD_FILES+=usr/share/doc/ntp/pic/boom3a.gif
OLD_FILES+=usr/share/doc/ntp/pic/boom4.gif
OLD_FILES+=usr/share/doc/ntp/pic/broad.gif
OLD_FILES+=usr/share/doc/ntp/pic/bustardfly.gif
OLD_FILES+=usr/share/doc/ntp/pic/c51.jpg
OLD_FILES+=usr/share/doc/ntp/pic/description.jpg
OLD_FILES+=usr/share/doc/ntp/pic/discipline.gif
OLD_FILES+=usr/share/doc/ntp/pic/dogsnake.gif
OLD_FILES+=usr/share/doc/ntp/pic/driver29.gif
OLD_FILES+=usr/share/doc/ntp/pic/driver43_1.gif
OLD_FILES+=usr/share/doc/ntp/pic/driver43_2.jpg
OLD_FILES+=usr/share/doc/ntp/pic/fg6021.gif
OLD_FILES+=usr/share/doc/ntp/pic/fg6039.jpg
OLD_FILES+=usr/share/doc/ntp/pic/fig_3_1.gif
OLD_FILES+=usr/share/doc/ntp/pic/flatheads.gif
OLD_FILES+=usr/share/doc/ntp/pic/flt1.gif
OLD_FILES+=usr/share/doc/ntp/pic/flt2.gif
OLD_FILES+=usr/share/doc/ntp/pic/flt3.gif
OLD_FILES+=usr/share/doc/ntp/pic/flt4.gif
OLD_FILES+=usr/share/doc/ntp/pic/flt5.gif
OLD_FILES+=usr/share/doc/ntp/pic/flt6.gif
OLD_FILES+=usr/share/doc/ntp/pic/flt7.gif
OLD_FILES+=usr/share/doc/ntp/pic/flt8.gif
OLD_FILES+=usr/share/doc/ntp/pic/flt9.gif
OLD_FILES+=usr/share/doc/ntp/pic/freq1211.gif
OLD_FILES+=usr/share/doc/ntp/pic/gadget.jpg
OLD_FILES+=usr/share/doc/ntp/pic/gps167.jpg
OLD_FILES+=usr/share/doc/ntp/pic/group.gif
OLD_FILES+=usr/share/doc/ntp/pic/hornraba.gif
OLD_FILES+=usr/share/doc/ntp/pic/igclock.gif
OLD_FILES+=usr/share/doc/ntp/pic/neoclock4x.gif
OLD_FILES+=usr/share/doc/ntp/pic/offset1211.gif
OLD_FILES+=usr/share/doc/ntp/pic/oncore_evalbig.gif
OLD_FILES+=usr/share/doc/ntp/pic/oncore_remoteant.jpg
OLD_FILES+=usr/share/doc/ntp/pic/oncore_utplusbig.gif
OLD_FILES+=usr/share/doc/ntp/pic/oz2.gif
OLD_FILES+=usr/share/doc/ntp/pic/panda.gif
OLD_FILES+=usr/share/doc/ntp/pic/pd_om006.gif
OLD_FILES+=usr/share/doc/ntp/pic/pd_om011.gif
OLD_FILES+=usr/share/doc/ntp/pic/peer.gif
OLD_FILES+=usr/share/doc/ntp/pic/pogo.gif
OLD_FILES+=usr/share/doc/ntp/pic/pogo1a.gif
OLD_FILES+=usr/share/doc/ntp/pic/pogo3a.gif
OLD_FILES+=usr/share/doc/ntp/pic/pogo4.gif
OLD_FILES+=usr/share/doc/ntp/pic/pogo5.gif
OLD_FILES+=usr/share/doc/ntp/pic/pogo6.gif
OLD_FILES+=usr/share/doc/ntp/pic/pogo7.gif
OLD_FILES+=usr/share/doc/ntp/pic/pogo8.gif
OLD_FILES+=usr/share/doc/ntp/pic/pzf509.jpg
OLD_FILES+=usr/share/doc/ntp/pic/pzf511.jpg
OLD_FILES+=usr/share/doc/ntp/pic/rabbit.gif
OLD_FILES+=usr/share/doc/ntp/pic/radio2.jpg
OLD_FILES+=usr/share/doc/ntp/pic/sheepb.jpg
OLD_FILES+=usr/share/doc/ntp/pic/stack1a.jpg
OLD_FILES+=usr/share/doc/ntp/pic/stats.gif
OLD_FILES+=usr/share/doc/ntp/pic/sx5.gif
OLD_FILES+=usr/share/doc/ntp/pic/thunderbolt.jpg
OLD_FILES+=usr/share/doc/ntp/pic/time1.gif
OLD_FILES+=usr/share/doc/ntp/pic/tonea.gif
OLD_FILES+=usr/share/doc/ntp/pic/tribeb.gif
OLD_FILES+=usr/share/doc/ntp/pic/wingdorothy.gif
OLD_FILES+=usr/share/doc/ntp/poll.html
OLD_FILES+=usr/share/doc/ntp/porting.html
OLD_FILES+=usr/share/doc/ntp/pps.html
OLD_FILES+=usr/share/doc/ntp/prefer.html
OLD_FILES+=usr/share/doc/ntp/quick.html
OLD_FILES+=usr/share/doc/ntp/rate.html
OLD_FILES+=usr/share/doc/ntp/rdebug.html
OLD_FILES+=usr/share/doc/ntp/refclock.html
OLD_FILES+=usr/share/doc/ntp/release.html
OLD_FILES+=usr/share/doc/ntp/scripts/accopt.txt
OLD_FILES+=usr/share/doc/ntp/scripts/audio.txt
OLD_FILES+=usr/share/doc/ntp/scripts/authopt.txt
OLD_FILES+=usr/share/doc/ntp/scripts/clockopt.txt
OLD_FILES+=usr/share/doc/ntp/scripts/command.txt
OLD_FILES+=usr/share/doc/ntp/scripts/config.txt
OLD_FILES+=usr/share/doc/ntp/scripts/confopt.txt
OLD_FILES+=usr/share/doc/ntp/scripts/external.txt
OLD_FILES+=usr/share/doc/ntp/scripts/footer.txt
OLD_FILES+=usr/share/doc/ntp/scripts/hand.txt
OLD_FILES+=usr/share/doc/ntp/scripts/install.txt
OLD_FILES+=usr/share/doc/ntp/scripts/manual.txt
OLD_FILES+=usr/share/doc/ntp/scripts/misc.txt
OLD_FILES+=usr/share/doc/ntp/scripts/miscopt.txt
OLD_FILES+=usr/share/doc/ntp/scripts/monopt.txt
OLD_FILES+=usr/share/doc/ntp/scripts/refclock.txt
OLD_FILES+=usr/share/doc/ntp/scripts/special.txt
OLD_FILES+=usr/share/doc/ntp/scripts/style.css
OLD_FILES+=usr/share/doc/ntp/select.html
OLD_FILES+=usr/share/doc/ntp/sitemap.html
OLD_FILES+=usr/share/doc/ntp/sntp.html
OLD_FILES+=usr/share/doc/ntp/stats.html
OLD_FILES+=usr/share/doc/ntp/tickadj.html
OLD_FILES+=usr/share/doc/ntp/warp.html
OLD_FILES+=usr/share/doc/ntp/xleave.html
OLD_DIRS+=usr/share/doc/ntp/drivers/scripts
OLD_DIRS+=usr/share/doc/ntp/drivers/icons
OLD_DIRS+=usr/share/doc/ntp/drivers
OLD_DIRS+=usr/share/doc/ntp/hints
OLD_DIRS+=usr/share/doc/ntp/icons
OLD_DIRS+=usr/share/doc/ntp/pic
OLD_DIRS+=usr/share/doc/ntp/scripts
OLD_DIRS+=usr/share/doc/ntp
OLD_FILES+=usr/share/examples/etc/ntp.conf
OLD_FILES+=usr/share/man/man1/sntp.1.gz
OLD_FILES+=usr/share/man/man5/ntp.conf.5.gz
OLD_FILES+=usr/share/man/man5/ntp.keys.5.gz
OLD_FILES+=usr/share/man/man8/ntp-keygen.8.gz
OLD_FILES+=usr/share/man/man8/ntpd.8.gz
OLD_FILES+=usr/share/man/man8/ntpdate.8.gz
OLD_FILES+=usr/share/man/man8/ntpdc.8.gz
OLD_FILES+=usr/share/man/man8/ntpq.8.gz
OLD_FILES+=usr/share/man/man8/ntptime.8.gz
.endif
.if ${MK_OPENMP} == no
.if ${MK_GCC} == no
OLD_FILES+=usr/include/omp.h
OLD_LIBS+=usr/lib/libgomp.so
OLD_LIBS+=usr/lib32/libgomp.so
.endif
OLD_LIBS+=usr/lib/libomp.so
OLD_LIBS+=usr/lib32/libomp.so
.endif
.if ${MK_OPENSSH} == no
OLD_FILES+=etc/rc.d/sshd
OLD_FILES+=etc/ssh/moduli
OLD_FILES+=etc/ssh/ssh_config
OLD_FILES+=etc/ssh/sshd_config
OLD_FILES+=usr/bin/scp
OLD_FILES+=usr/bin/sftp
OLD_FILES+=usr/bin/slogin
OLD_FILES+=usr/bin/ssh
OLD_FILES+=usr/bin/ssh-add
OLD_FILES+=usr/bin/ssh-agent
OLD_FILES+=usr/bin/ssh-copy-id
OLD_FILES+=usr/bin/ssh-keygen
OLD_FILES+=usr/bin/ssh-keyscan
OLD_FILES+=usr/lib/pam_ssh.so
OLD_LIBS+=usr/lib/pam_ssh.so.6
OLD_FILES+=usr/lib/private/libssh.a
OLD_FILES+=usr/lib/private/libssh.so
OLD_LIBS+=usr/lib/private/libssh.so.5
OLD_FILES+=usr/lib/private/libssh_p.a
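# The usr/lib32 compatibility libraries are only built for 64-bit targets
# with lib32 support, hence the TARGET_ARCH guard below.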
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64"
OLD_FILES+=usr/lib32/pam_ssh.so
OLD_LIBS+=usr/lib32/pam_ssh.so.6
OLD_FILES+=usr/lib32/private/libssh.a
OLD_FILES+=usr/lib32/private/libssh.so
OLD_LIBS+=usr/lib32/private/libssh.so.5
OLD_FILES+=usr/lib32/private/libssh_p.a
.endif
OLD_FILES+=usr/libexec/sftp-server
OLD_FILES+=usr/libexec/ssh-keysign
OLD_FILES+=usr/libexec/ssh-pkcs11-helper
OLD_FILES+=usr/sbin/sshd
OLD_FILES+=usr/share/man/man1/scp.1.gz
OLD_FILES+=usr/share/man/man1/sftp.1.gz
OLD_FILES+=usr/share/man/man1/slogin.1.gz
OLD_FILES+=usr/share/man/man1/ssh-add.1.gz
OLD_FILES+=usr/share/man/man1/ssh-agent.1.gz
OLD_FILES+=usr/share/man/man1/ssh-copy-id.1.gz
OLD_FILES+=usr/share/man/man1/ssh-keygen.1.gz
OLD_FILES+=usr/share/man/man1/ssh-keyscan.1.gz
OLD_FILES+=usr/share/man/man1/ssh.1.gz
OLD_FILES+=usr/share/man/man5/ssh_config.5.gz
OLD_FILES+=usr/share/man/man5/sshd_config.5.gz
OLD_FILES+=usr/share/man/man8/pam_ssh.8.gz
OLD_FILES+=usr/share/man/man8/sftp-server.8.gz
OLD_FILES+=usr/share/man/man8/ssh-keysign.8.gz
OLD_FILES+=usr/share/man/man8/ssh-pkcs11-helper.8.gz
OLD_FILES+=usr/share/man/man8/sshd.8.gz
.endif
.if ${MK_OPENSSL} == no
OLD_FILES+=etc/rc.d/keyserv
.endif
.if ${MK_PC_SYSINSTALL} == no
# backend-partmanager
OLD_FILES+=usr/share/pc-sysinstall/backend-partmanager/create-part.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-partmanager/delete-part.sh
# backend-query
OLD_FILES+=usr/share/pc-sysinstall/backend-query/detect-emulation.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/detect-laptop.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/detect-nics.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/disk-info.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/disk-list.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/disk-part.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/enable-net.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/get-packages.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/list-components.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/list-config.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/list-mirrors.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/list-packages.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/list-rsync-backups.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/list-tzones.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/query-langs.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/send-logs.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/setup-ssh-keys.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/set-mirror.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/sys-mem.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/test-live.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/test-netup.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/update-part-list.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/xkeyboard-layouts.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/xkeyboard-models.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/xkeyboard-variants.sh
# backend
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-bsdlabel.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-cleanup.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-disk.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-extractimage.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-ftp.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-installcomponents.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-installpackages.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-localize.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-mountdisk.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-mountoptical.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-networking.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-newfs.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-parse.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-packages.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-runcommands.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-unmount.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-upgrade.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-users.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/installimage.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/parseconfig.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/startautoinstall.sh
# conf
OLD_FILES+=usr/share/pc-sysinstall/conf/avail-langs
OLD_FILES+=usr/share/pc-sysinstall/conf/exclude-from-upgrade
OLD_FILES+=usr/share/pc-sysinstall/conf/license/bsd-en.txt
OLD_FILES+=usr/share/pc-sysinstall/conf/license/intel-en.txt
OLD_FILES+=usr/share/pc-sysinstall/conf/license/nvidia-en.txt
OLD_FILES+=usr/share/pc-sysinstall/conf/pc-sysinstall.conf
# doc
OLD_FILES+=usr/share/pc-sysinstall/doc/help-disk-list
OLD_FILES+=usr/share/pc-sysinstall/doc/help-disk-size
OLD_FILES+=usr/share/pc-sysinstall/doc/help-index
OLD_FILES+=usr/share/pc-sysinstall/doc/help-start-autoinstall
# examples
OLD_FILES+=usr/share/examples/pc-sysinstall/README
OLD_FILES+=usr/share/examples/pc-sysinstall/pc-autoinstall.conf
OLD_FILES+=usr/share/examples/pc-sysinstall/pcinstall.cfg.fbsd-netinstall
OLD_FILES+=usr/share/examples/pc-sysinstall/pcinstall.cfg.geli
OLD_FILES+=usr/share/examples/pc-sysinstall/pcinstall.cfg.gmirror
OLD_FILES+=usr/share/examples/pc-sysinstall/pcinstall.cfg.netinstall
OLD_FILES+=usr/share/examples/pc-sysinstall/pcinstall.cfg.restore
OLD_FILES+=usr/share/examples/pc-sysinstall/pcinstall.cfg.rsync
OLD_FILES+=usr/share/examples/pc-sysinstall/pcinstall.cfg.upgrade
OLD_FILES+=usr/share/examples/pc-sysinstall/pcinstall.cfg.zfs
# pc-sysinstall
OLD_FILES+=usr/sbin/pc-sysinstall
OLD_FILES+=usr/share/man/man8/pc-sysinstall.8.gz
OLD_DIRS+=usr/share/pc-sysinstall/backend
OLD_DIRS+=usr/share/pc-sysinstall/backend-partmanager
OLD_DIRS+=usr/share/pc-sysinstall/backend-query
OLD_DIRS+=usr/share/pc-sysinstall/conf/license
OLD_DIRS+=usr/share/pc-sysinstall/conf
OLD_DIRS+=usr/share/pc-sysinstall/doc
OLD_DIRS+=usr/share/pc-sysinstall
OLD_DIRS+=usr/share/examples/pc-sysinstall
.endif
.if ${MK_PF} == no
OLD_FILES+=etc/newsyslog.conf.d/pf.conf
OLD_FILES+=etc/periodic/security/520.pfdenied
OLD_FILES+=etc/pf.os
OLD_FILES+=etc/rc.d/ftp-proxy
OLD_FILES+=sbin/pfctl
OLD_FILES+=sbin/pflogd
OLD_FILES+=usr/include/netpfil/pf/pf.h
OLD_FILES+=usr/include/netpfil/pf/pf_altq.h
OLD_FILES+=usr/include/netpfil/pf/pf_mtag.h
OLD_FILES+=usr/lib/snmp_pf.so
OLD_LIBS+=usr/lib/snmp_pf.so.6
OLD_FILES+=usr/libexec/tftp-proxy
OLD_FILES+=usr/sbin/ftp-proxy
OLD_FILES+=usr/share/examples/etc/pf.os
OLD_FILES+=usr/share/examples/pf/ackpri
OLD_FILES+=usr/share/examples/pf/faq-example1
OLD_FILES+=usr/share/examples/pf/faq-example2
OLD_FILES+=usr/share/examples/pf/faq-example3
OLD_FILES+=usr/share/examples/pf/pf.conf
OLD_FILES+=usr/share/examples/pf/queue1
OLD_FILES+=usr/share/examples/pf/queue2
OLD_FILES+=usr/share/examples/pf/queue3
OLD_FILES+=usr/share/examples/pf/queue4
OLD_FILES+=usr/share/examples/pf/spamd
OLD_DIRS+=usr/share/examples/pf
OLD_FILES+=usr/share/man/man4/pf.4.gz
OLD_FILES+=usr/share/man/man4/pflog.4.gz
OLD_FILES+=usr/share/man/man4/pfsync.4.gz
OLD_FILES+=usr/share/man/man5/pf.conf.5.gz
OLD_FILES+=usr/share/man/man5/pf.os.5.gz
OLD_FILES+=usr/share/man/man8/ftp-proxy.8.gz
OLD_FILES+=usr/share/man/man8/pfctl.8.gz
OLD_FILES+=usr/share/man/man8/pflogd.8.gz
OLD_FILES+=usr/share/man/man8/tftp-proxy.8.gz
OLD_FILES+=usr/share/snmp/defs/pf_tree.def
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-PF-MIB.txt
.endif
.if ${MK_PKGBOOTSTRAP} == no
OLD_FILES+=usr/sbin/pkg
OLD_FILES+=usr/share/man/man7/pkg.7.gz
.endif
.if ${MK_PMC} == no
OLD_FILES+=usr/bin/pmcstudy
.if ${TARGET_ARCH} == "amd64"
OLD_FILES+=usr/include/libipt/pt_last_ip.h
OLD_FILES+=usr/include/libipt/intel-pt.h
OLD_FILES+=usr/include/libipt/pt_time.h
OLD_FILES+=usr/include/libipt/pt_cpu.h
OLD_FILES+=usr/include/libipt/pt_compiler.h
OLD_DIRS+=usr/include/libipt
.endif
.if ${TARGET_ARCH} == "aarch64"
OLD_FILES+=usr/include/opencsd/c_api/opencsd_c_api.h
OLD_FILES+=usr/include/opencsd/c_api/ocsd_c_api_cust_impl.h
OLD_FILES+=usr/include/opencsd/c_api/ocsd_c_api_types.h
OLD_FILES+=usr/include/opencsd/c_api/ocsd_c_api_cust_fact.h
OLD_FILES+=usr/include/opencsd/c_api/ocsd_c_api_custom.h
OLD_DIRS+=usr/include/opencsd/c_api
OLD_FILES+=usr/include/opencsd/ocsd_if_types.h
OLD_FILES+=usr/include/opencsd/ptm/trc_dcd_mngr_ptm.h
OLD_FILES+=usr/include/opencsd/ptm/trc_pkt_proc_ptm.h
OLD_FILES+=usr/include/opencsd/ptm/trc_cmp_cfg_ptm.h
OLD_FILES+=usr/include/opencsd/ptm/ptm_decoder.h
OLD_FILES+=usr/include/opencsd/ptm/trc_pkt_elem_ptm.h
OLD_FILES+=usr/include/opencsd/ptm/trc_pkt_decode_ptm.h
OLD_FILES+=usr/include/opencsd/ptm/trc_pkt_types_ptm.h
OLD_DIRS+=usr/include/opencsd/ptm
OLD_FILES+=usr/include/opencsd/trc_gen_elem_types.h
OLD_FILES+=usr/include/opencsd/etmv4/trc_pkt_proc_etmv4.h
OLD_FILES+=usr/include/opencsd/etmv4/trc_etmv4_stack_elem.h
OLD_FILES+=usr/include/opencsd/etmv4/etmv4_decoder.h
OLD_FILES+=usr/include/opencsd/etmv4/trc_pkt_elem_etmv4i.h
OLD_FILES+=usr/include/opencsd/etmv4/trc_dcd_mngr_etmv4i.h
OLD_FILES+=usr/include/opencsd/etmv4/trc_pkt_types_etmv4.h
OLD_FILES+=usr/include/opencsd/etmv4/trc_pkt_elem_etmv4d.h
OLD_FILES+=usr/include/opencsd/etmv4/trc_pkt_decode_etmv4i.h
OLD_FILES+=usr/include/opencsd/etmv4/trc_cmp_cfg_etmv4.h
OLD_DIRS+=usr/include/opencsd/etmv4
OLD_FILES+=usr/include/opencsd/etmv3/trc_pkt_decode_etmv3.h
OLD_FILES+=usr/include/opencsd/etmv3/trc_cmp_cfg_etmv3.h
OLD_FILES+=usr/include/opencsd/etmv3/etmv3_decoder.h
OLD_FILES+=usr/include/opencsd/etmv3/trc_pkt_proc_etmv3.h
OLD_FILES+=usr/include/opencsd/etmv3/trc_pkt_elem_etmv3.h
OLD_FILES+=usr/include/opencsd/etmv3/trc_pkt_types_etmv3.h
OLD_FILES+=usr/include/opencsd/etmv3/trc_dcd_mngr_etmv3.h
OLD_DIRS+=usr/include/opencsd/etmv3
OLD_FILES+=usr/include/opencsd/trc_pkt_types.h
OLD_FILES+=usr/include/opencsd/stm/trc_pkt_proc_stm.h
OLD_FILES+=usr/include/opencsd/stm/trc_pkt_types_stm.h
OLD_FILES+=usr/include/opencsd/stm/stm_decoder.h
OLD_FILES+=usr/include/opencsd/stm/trc_dcd_mngr_stm.h
OLD_FILES+=usr/include/opencsd/stm/trc_cmp_cfg_stm.h
OLD_FILES+=usr/include/opencsd/stm/trc_pkt_elem_stm.h
OLD_FILES+=usr/include/opencsd/stm/trc_pkt_decode_stm.h
OLD_DIRS+=usr/include/opencsd/stm
OLD_DIRS+=usr/include/opencsd
.endif
OLD_FILES+=usr/include/pmc.h
OLD_FILES+=usr/include/pmclog.h
OLD_FILES+=usr/include/pmcformat.h
OLD_FILES+=usr/include/libpmcstat.h
.if ${TARGET_ARCH} == "amd64"
OLD_FILES+=usr/lib/libipt.a
OLD_FILES+=usr/lib/libipt.so
OLD_LIBS+=lib/libipt.so.0
OLD_FILES+=usr/lib/libipt_p.a
.endif
.if ${TARGET_ARCH} == "aarch64"
OLD_FILES+=usr/lib/libopencsd.a
OLD_FILES+=usr/lib/libopencsd.so
OLD_LIBS+=lib/libopencsd.so.0
OLD_FILES+=usr/lib/libopencsd_p.a
.endif
OLD_FILES+=usr/lib/libpmc.a
OLD_FILES+=usr/lib/libpmc.so
OLD_LIBS+=usr/lib/libpmc.so.5
OLD_FILES+=usr/lib/libpmc_p.a
OLD_FILES+=usr/lib32/libpmc.a
OLD_FILES+=usr/lib32/libpmc.so
OLD_LIBS+=usr/lib32/libpmc.so.5
OLD_FILES+=usr/lib32/libpmc_p.a
OLD_FILES+=usr/sbin/pmc
OLD_FILES+=usr/sbin/pmcannotate
OLD_FILES+=usr/sbin/pmccontrol
OLD_FILES+=usr/sbin/pmcstat
OLD_FILES+=usr/share/man/man3/pmc.3.gz
OLD_FILES+=usr/share/man/man3/pmc.atom.3.gz
OLD_FILES+=usr/share/man/man3/pmc.atomsilvermont.3.gz
OLD_FILES+=usr/share/man/man3/pmc.core.3.gz
OLD_FILES+=usr/share/man/man3/pmc.core2.3.gz
OLD_FILES+=usr/share/man/man3/pmc.corei7.3.gz
OLD_FILES+=usr/share/man/man3/pmc.corei7uc.3.gz
OLD_FILES+=usr/share/man/man3/pmc.haswell.3.gz
OLD_FILES+=usr/share/man/man3/pmc.haswelluc.3.gz
OLD_FILES+=usr/share/man/man3/pmc.haswellxeon.3.gz
OLD_FILES+=usr/share/man/man3/pmc.iaf.3.gz
OLD_FILES+=usr/share/man/man3/pmc.ivybridge.3.gz
OLD_FILES+=usr/share/man/man3/pmc.ivybridgexeon.3.gz
OLD_FILES+=usr/share/man/man3/pmc.k7.3.gz
OLD_FILES+=usr/share/man/man3/pmc.k8.3.gz
OLD_FILES+=usr/share/man/man3/pmc.mips24k.3.gz
OLD_FILES+=usr/share/man/man3/pmc.octeon.3.gz
OLD_FILES+=usr/share/man/man3/pmc.p4.3.gz
OLD_FILES+=usr/share/man/man3/pmc.p5.3.gz
OLD_FILES+=usr/share/man/man3/pmc.p6.3.gz
OLD_FILES+=usr/share/man/man3/pmc.sandybridge.3.gz
OLD_FILES+=usr/share/man/man3/pmc.sandybridgeuc.3.gz
OLD_FILES+=usr/share/man/man3/pmc.sandybridgexeon.3.gz
OLD_FILES+=usr/share/man/man3/pmc.soft.3.gz
OLD_FILES+=usr/share/man/man3/pmc.tsc.3.gz
OLD_FILES+=usr/share/man/man3/pmc.ucf.3.gz
OLD_FILES+=usr/share/man/man3/pmc.westmere.3.gz
OLD_FILES+=usr/share/man/man3/pmc.westmereuc.3.gz
OLD_FILES+=usr/share/man/man3/pmc.xscale.3.gz
OLD_FILES+=usr/share/man/man3/pmc_allocate.3.gz
OLD_FILES+=usr/share/man/man3/pmc_attach.3.gz
OLD_FILES+=usr/share/man/man3/pmc_capabilities.3.gz
OLD_FILES+=usr/share/man/man3/pmc_configure_logfile.3.gz
OLD_FILES+=usr/share/man/man3/pmc_cpuinfo.3.gz
OLD_FILES+=usr/share/man/man3/pmc_detach.3.gz
OLD_FILES+=usr/share/man/man3/pmc_disable.3.gz
OLD_FILES+=usr/share/man/man3/pmc_enable.3.gz
OLD_FILES+=usr/share/man/man3/pmc_event_names_of_class.3.gz
OLD_FILES+=usr/share/man/man3/pmc_flush_logfile.3.gz
OLD_FILES+=usr/share/man/man3/pmc_get_driver_stats.3.gz
OLD_FILES+=usr/share/man/man3/pmc_get_msr.3.gz
OLD_FILES+=usr/share/man/man3/pmc_init.3.gz
OLD_FILES+=usr/share/man/man3/pmc_name_of_capability.3.gz
OLD_FILES+=usr/share/man/man3/pmc_name_of_class.3.gz
OLD_FILES+=usr/share/man/man3/pmc_name_of_cputype.3.gz
OLD_FILES+=usr/share/man/man3/pmc_name_of_disposition.3.gz
OLD_FILES+=usr/share/man/man3/pmc_name_of_event.3.gz
OLD_FILES+=usr/share/man/man3/pmc_name_of_mode.3.gz
OLD_FILES+=usr/share/man/man3/pmc_name_of_state.3.gz
OLD_FILES+=usr/share/man/man3/pmc_ncpu.3.gz
OLD_FILES+=usr/share/man/man3/pmc_npmc.3.gz
OLD_FILES+=usr/share/man/man3/pmc_pmcinfo.3.gz
OLD_FILES+=usr/share/man/man3/pmc_read.3.gz
OLD_FILES+=usr/share/man/man3/pmc_release.3.gz
OLD_FILES+=usr/share/man/man3/pmc_rw.3.gz
OLD_FILES+=usr/share/man/man3/pmc_set.3.gz
OLD_FILES+=usr/share/man/man3/pmc_start.3.gz
OLD_FILES+=usr/share/man/man3/pmc_stop.3.gz
OLD_FILES+=usr/share/man/man3/pmc_width.3.gz
OLD_FILES+=usr/share/man/man3/pmc_write.3.gz
OLD_FILES+=usr/share/man/man3/pmc_writelog.3.gz
OLD_FILES+=usr/share/man/man3/pmclog.3.gz
OLD_FILES+=usr/share/man/man3/pmclog_close.3.gz
OLD_FILES+=usr/share/man/man3/pmclog_feed.3.gz
OLD_FILES+=usr/share/man/man3/pmclog_open.3.gz
OLD_FILES+=usr/share/man/man3/pmclog_read.3.gz
OLD_FILES+=usr/share/man/man8/pmcannotate.8.gz
OLD_FILES+=usr/share/man/man8/pmccontrol.8.gz
OLD_FILES+=usr/share/man/man8/pmcstat.8.gz
OLD_FILES+=usr/share/man/man8/pmcstudy.8.gz
.endif
.if ${MK_PORTSNAP} == no
OLD_FILES+=etc/portsnap.conf
OLD_FILES+=usr/libexec/make_index
OLD_FILES+=usr/libexec/phttpget
OLD_FILES+=usr/sbin/portsnap
OLD_FILES+=usr/share/examples/etc/portsnap.conf
OLD_FILES+=usr/share/man/man8/phttpget.8.gz
OLD_FILES+=usr/share/man/man8/portsnap.8.gz
.endif
.if ${MK_PPP} == no
OLD_FILES+=etc/newsyslog.conf.d/ppp.conf
OLD_FILES+=etc/ppp/ppp.conf
OLD_FILES+=etc/syslog.d/ppp.conf
OLD_DIRS+=etc/ppp
OLD_FILES+=usr/sbin/ppp
OLD_FILES+=usr/sbin/pppctl
OLD_FILES+=usr/share/man/man8/ppp.8.gz
OLD_FILES+=usr/share/man/man8/pppctl.8.gz
.endif
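# The lib*_p.a archives below are the profiled (-pg) static libraries, which
# are only installed when the PROFILE knob is enabled.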
.if ${MK_PROFILE} == no
OLD_FILES+=usr/lib/lib80211_p.a
OLD_FILES+=usr/lib/libBlocksRuntime_p.a
OLD_FILES+=usr/lib/libalias_cuseeme_p.a
OLD_FILES+=usr/lib/libalias_dummy_p.a
OLD_FILES+=usr/lib/libalias_ftp_p.a
OLD_FILES+=usr/lib/libalias_irc_p.a
OLD_FILES+=usr/lib/libalias_nbt_p.a
OLD_FILES+=usr/lib/libalias_p.a
OLD_FILES+=usr/lib/libalias_pptp_p.a
OLD_FILES+=usr/lib/libalias_skinny_p.a
OLD_FILES+=usr/lib/libalias_smedia_p.a
OLD_FILES+=usr/lib/libarchive_p.a
OLD_FILES+=usr/lib/libasn1_p.a
OLD_FILES+=usr/lib/libauditd_p.a
OLD_FILES+=usr/lib/libavl_p.a
OLD_FILES+=usr/lib/libbe_p.a
OLD_FILES+=usr/lib/libbegemot_p.a
OLD_FILES+=usr/lib/libblacklist_p.a
OLD_FILES+=usr/lib/libbluetooth_p.a
OLD_FILES+=usr/lib/libbsdxml_p.a
OLD_FILES+=usr/lib/libbsm_p.a
OLD_FILES+=usr/lib/libbsnmp_p.a
OLD_FILES+=usr/lib/libbz2_p.a
OLD_FILES+=usr/lib/libc++_p.a
OLD_FILES+=usr/lib/libc_p.a
OLD_FILES+=usr/lib/libcalendar_p.a
OLD_FILES+=usr/lib/libcam_p.a
OLD_FILES+=usr/lib/libcom_err_p.a
OLD_FILES+=usr/lib/libcompat_p.a
OLD_FILES+=usr/lib/libcompiler_rt_p.a
OLD_FILES+=usr/lib/libcrypt_p.a
OLD_FILES+=usr/lib/libcrypto_p.a
OLD_FILES+=usr/lib/libctf_p.a
OLD_FILES+=usr/lib/libcurses_p.a
OLD_FILES+=usr/lib/libcursesw_p.a
OLD_FILES+=usr/lib/libcuse_p.a
OLD_FILES+=usr/lib/libcxxrt_p.a
OLD_FILES+=usr/lib/libdevctl_p.a
OLD_FILES+=usr/lib/libdevinfo_p.a
OLD_FILES+=usr/lib/libdevstat_p.a
OLD_FILES+=usr/lib/libdialog_p.a
OLD_FILES+=usr/lib/libdl_p.a
OLD_FILES+=usr/lib/libdpv_p.a
OLD_FILES+=usr/lib/libdtrace_p.a
OLD_FILES+=usr/lib/libdwarf_p.a
OLD_FILES+=usr/lib/libedit_p.a
OLD_FILES+=usr/lib/libefivar_p.a
OLD_FILES+=usr/lib/libelf_p.a
OLD_FILES+=usr/lib/libexecinfo_p.a
OLD_FILES+=usr/lib/libfetch_p.a
OLD_FILES+=usr/lib/libfigpar_p.a
OLD_FILES+=usr/lib/libfl_p.a
OLD_FILES+=usr/lib/libform_p.a
OLD_FILES+=usr/lib/libformw_p.a
OLD_FILES+=usr/lib/libgcc_eh_p.a
OLD_FILES+=usr/lib/libgcc_p.a
OLD_FILES+=usr/lib/libgeom_p.a
OLD_FILES+=usr/lib/libgnuregex_p.a
OLD_FILES+=usr/lib/libgpio_p.a
OLD_FILES+=usr/lib/libgssapi_krb5_p.a
OLD_FILES+=usr/lib/libgssapi_ntlm_p.a
OLD_FILES+=usr/lib/libgssapi_p.a
OLD_FILES+=usr/lib/libgssapi_spnego_p.a
OLD_FILES+=usr/lib/libhdb_p.a
OLD_FILES+=usr/lib/libheimbase_p.a
OLD_FILES+=usr/lib/libheimntlm_p.a
OLD_FILES+=usr/lib/libheimsqlite_p.a
OLD_FILES+=usr/lib/libhistory_p.a
OLD_FILES+=usr/lib/libhx509_p.a
OLD_FILES+=usr/lib/libipsec_p.a
OLD_FILES+=usr/lib/libipt_p.a
OLD_FILES+=usr/lib/libjail_p.a
OLD_FILES+=usr/lib/libkadm5clnt_p.a
OLD_FILES+=usr/lib/libkadm5srv_p.a
OLD_FILES+=usr/lib/libkafs5_p.a
OLD_FILES+=usr/lib/libkdc_p.a
OLD_FILES+=usr/lib/libkiconv_p.a
OLD_FILES+=usr/lib/libkrb5_p.a
OLD_FILES+=usr/lib/libkvm_p.a
OLD_FILES+=usr/lib/libl_p.a
OLD_FILES+=usr/lib/libln_p.a
OLD_FILES+=usr/lib/liblzma_p.a
OLD_FILES+=usr/lib/libm_p.a
OLD_FILES+=usr/lib/libmagic_p.a
OLD_FILES+=usr/lib/libmd_p.a
OLD_FILES+=usr/lib/libmemstat_p.a
OLD_FILES+=usr/lib/libmenu_p.a
OLD_FILES+=usr/lib/libmenuw_p.a
OLD_FILES+=usr/lib/libmilter_p.a
OLD_FILES+=usr/lib/libmp_p.a
OLD_FILES+=usr/lib/libmt_p.a
OLD_FILES+=usr/lib/libncurses_p.a
OLD_FILES+=usr/lib/libncursesw_p.a
OLD_FILES+=usr/lib/libnetgraph_p.a
OLD_FILES+=usr/lib/libngatm_p.a
OLD_FILES+=usr/lib/libnv_p.a
OLD_FILES+=usr/lib/libnvpair_p.a
OLD_FILES+=usr/lib/libopencsd_p.a
OLD_FILES+=usr/lib/libopie_p.a
OLD_FILES+=usr/lib/libpanel_p.a
OLD_FILES+=usr/lib/libpanelw_p.a
OLD_FILES+=usr/lib/libpathconv_p.a
OLD_FILES+=usr/lib/libpcap_p.a
OLD_FILES+=usr/lib/libpjdlog_p.a
OLD_FILES+=usr/lib/libpmc_p.a
OLD_FILES+=usr/lib/libprivatebsdstat_p.a
OLD_FILES+=usr/lib/libprivatedevdctl_p.a
OLD_FILES+=usr/lib/libprivateevent_p.a
OLD_FILES+=usr/lib/libprivateheimipcc_p.a
OLD_FILES+=usr/lib/libprivateheimipcs_p.a
OLD_FILES+=usr/lib/libprivateifconfig_p.a
OLD_FILES+=usr/lib/libprivateldns_p.a
OLD_FILES+=usr/lib/libprivatesqlite3_p.a
OLD_FILES+=usr/lib/libprivatessh_p.a
OLD_FILES+=usr/lib/libprivateucl_p.a
OLD_FILES+=usr/lib/libprivateunbound_p.a
OLD_FILES+=usr/lib/libprivatezstd_p.a
OLD_FILES+=usr/lib/libproc_p.a
OLD_FILES+=usr/lib/libprocstat_p.a
OLD_FILES+=usr/lib/libpthread_p.a
OLD_FILES+=usr/lib/libradius_p.a
OLD_FILES+=usr/lib/libregex_p.a
OLD_FILES+=usr/lib/libroken_p.a
OLD_FILES+=usr/lib/librpcsvc_p.a
OLD_FILES+=usr/lib/librss_p.a
OLD_FILES+=usr/lib/librt_p.a
OLD_FILES+=usr/lib/librtld_db_p.a
OLD_FILES+=usr/lib/libsbuf_p.a
OLD_FILES+=usr/lib/libsdp_p.a
OLD_FILES+=usr/lib/libsmb_p.a
OLD_FILES+=usr/lib/libssl_p.a
OLD_FILES+=usr/lib/libstdbuf_p.a
OLD_FILES+=usr/lib/libstdc++_p.a
OLD_FILES+=usr/lib/libstdthreads_p.a
OLD_FILES+=usr/lib/libsupc++_p.a
OLD_FILES+=usr/lib/libsysdecode_p.a
OLD_FILES+=usr/lib/libtacplus_p.a
OLD_FILES+=usr/lib/libtermcap_p.a
OLD_FILES+=usr/lib/libtermcapw_p.a
OLD_FILES+=usr/lib/libtermlib_p.a
OLD_FILES+=usr/lib/libtermlibw_p.a
OLD_FILES+=usr/lib/libthr_p.a
OLD_FILES+=usr/lib/libthread_db_p.a
OLD_FILES+=usr/lib/libtinfo_p.a
OLD_FILES+=usr/lib/libtinfow_p.a
OLD_FILES+=usr/lib/libufs_p.a
OLD_FILES+=usr/lib/libugidfw_p.a
OLD_FILES+=usr/lib/libulog_p.a
OLD_FILES+=usr/lib/libumem_p.a
OLD_FILES+=usr/lib/libusb_p.a
OLD_FILES+=usr/lib/libusbhid_p.a
OLD_FILES+=usr/lib/libutempter_p.a
OLD_FILES+=usr/lib/libutil_p.a
OLD_FILES+=usr/lib/libuutil_p.a
OLD_FILES+=usr/lib/libvgl_p.a
OLD_FILES+=usr/lib/libvmmapi_p.a
OLD_FILES+=usr/lib/libwind_p.a
OLD_FILES+=usr/lib/libwrap_p.a
OLD_FILES+=usr/lib/libxo_p.a
OLD_FILES+=usr/lib/liby_p.a
OLD_FILES+=usr/lib/libypclnt_p.a
OLD_FILES+=usr/lib/libz_p.a
OLD_FILES+=usr/lib/libzfs_core_p.a
OLD_FILES+=usr/lib/libzfs_p.a
OLD_FILES+=usr/lib/private/libldns_p.a
OLD_FILES+=usr/lib/private/libssh_p.a
.endif
.if ${MK_QUOTAS} == no
OLD_FILES+=sbin/quotacheck
OLD_FILES+=usr/bin/quota
OLD_FILES+=usr/sbin/edquota
OLD_FILES+=usr/sbin/quotaoff
OLD_FILES+=usr/sbin/quotaon
OLD_FILES+=usr/sbin/repquota
OLD_FILES+=usr/share/man/man1/quota.1.gz
OLD_FILES+=usr/share/man/man8/edquota.8.gz
OLD_FILES+=usr/share/man/man8/quotacheck.8.gz
OLD_FILES+=usr/share/man/man8/quotaoff.8.gz
OLD_FILES+=usr/share/man/man8/quotaon.8.gz
OLD_FILES+=usr/share/man/man8/repquota.8.gz
.endif
.if ${MK_RADIUS_SUPPORT} == no
OLD_FILES+=usr/lib/libradius.a
OLD_FILES+=usr/lib/libradius.so
OLD_LIBS+=usr/lib/libradius.so.4
OLD_FILES+=usr/lib/libradius_p.a
OLD_FILES+=usr/lib/pam_radius.so
OLD_LIBS+=usr/lib/pam_radius.so.6
OLD_FILES+=usr/lib32/libradius.a
OLD_FILES+=usr/lib32/libradius.so
OLD_LIBS+=usr/lib32/libradius.so.4
OLD_FILES+=usr/lib32/libradius_p.a
OLD_FILES+=usr/lib32/pam_radius.so
OLD_LIBS+=usr/lib32/pam_radius.so.6
OLD_FILES+=usr/include/radlib.h
OLD_FILES+=usr/include/radlib_vs.h
OLD_FILES+=usr/share/man/man3/libradius.3.gz
OLD_FILES+=usr/share/man/man3/rad_acct_open.3.gz
OLD_FILES+=usr/share/man/man3/rad_add_server.3.gz
OLD_FILES+=usr/share/man/man3/rad_add_server_ex.3.gz
OLD_FILES+=usr/share/man/man3/rad_auth_open.3.gz
OLD_FILES+=usr/share/man/man3/rad_bind_to.3.gz
OLD_FILES+=usr/share/man/man3/rad_close.3.gz
OLD_FILES+=usr/share/man/man3/rad_config.3.gz
OLD_FILES+=usr/share/man/man3/rad_continue_send_request.3.gz
OLD_FILES+=usr/share/man/man3/rad_create_request.3.gz
OLD_FILES+=usr/share/man/man3/rad_create_response.3.gz
OLD_FILES+=usr/share/man/man3/rad_cvt_addr.3.gz
OLD_FILES+=usr/share/man/man3/rad_cvt_int.3.gz
OLD_FILES+=usr/share/man/man3/rad_cvt_string.3.gz
OLD_FILES+=usr/share/man/man3/rad_demangle.3.gz
OLD_FILES+=usr/share/man/man3/rad_demangle_mppe_key.3.gz
OLD_FILES+=usr/share/man/man3/rad_get_attr.3.gz
OLD_FILES+=usr/share/man/man3/rad_get_vendor_attr.3.gz
OLD_FILES+=usr/share/man/man3/rad_init_send_request.3.gz
OLD_FILES+=usr/share/man/man3/rad_put_addr.3.gz
OLD_FILES+=usr/share/man/man3/rad_put_attr.3.gz
OLD_FILES+=usr/share/man/man3/rad_put_int.3.gz
OLD_FILES+=usr/share/man/man3/rad_put_message_authentic.3.gz
OLD_FILES+=usr/share/man/man3/rad_put_string.3.gz
OLD_FILES+=usr/share/man/man3/rad_put_vendor_addr.3.gz
OLD_FILES+=usr/share/man/man3/rad_put_vendor_attr.3.gz
OLD_FILES+=usr/share/man/man3/rad_put_vendor_int.3.gz
OLD_FILES+=usr/share/man/man3/rad_put_vendor_string.3.gz
OLD_FILES+=usr/share/man/man3/rad_receive_request.3.gz
OLD_FILES+=usr/share/man/man3/rad_request_authenticator.3.gz
OLD_FILES+=usr/share/man/man3/rad_send_request.3.gz
OLD_FILES+=usr/share/man/man3/rad_send_response.3.gz
OLD_FILES+=usr/share/man/man3/rad_server_open.3.gz
OLD_FILES+=usr/share/man/man3/rad_server_secret.3.gz
OLD_FILES+=usr/share/man/man3/rad_strerror.3.gz
OLD_FILES+=usr/share/man/man5/radius.conf.5.gz
OLD_FILES+=usr/share/man/man8/pam_radius.8.gz
.endif
.if ${MK_RBOOTD} == no
OLD_FILES+=usr/libexec/rbootd
OLD_FILES+=usr/share/man/man8/rbootd.8.gz
.endif
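# Unlike the static lists in the rest of this file, the rescue(8) entries are
# computed at parse time: the BSD make "!=" operator captures the output of
# the find(1) pipeline, sed strips the ${DESTDIR} prefix so the results match
# the relative-path convention used here, and the trailing "; echo" keeps the
# command's exit status zero (yielding an empty list) when /rescue is absent.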
.if ${MK_RESCUE} == no
. if exists(${DESTDIR}/rescue)
RESCUE_DIRS!=find ${DESTDIR}/rescue -type d 2>/dev/null | sed -e 's,^${DESTDIR}/,,'; echo
OLD_DIRS+=${RESCUE_DIRS}
RESCUE_FILES!=find ${DESTDIR}/rescue \! -type d 2>/dev/null | sed -e 's,^${DESTDIR}/,,'; echo
OLD_FILES+=${RESCUE_FILES}
. endif
.endif
.if ${MK_ROUTED} == no
OLD_FILES+=rescue/routed
OLD_FILES+=rescue/rtquery
OLD_FILES+=sbin/routed
OLD_FILES+=sbin/rtquery
OLD_FILES+=usr/share/man/man8/routed.8.gz
OLD_FILES+=usr/share/man/man8/rtquery.8.gz
.endif
.if ${MK_SENDMAIL} == no
OLD_FILES+=etc/mtree/BSD.sendmail.dist
OLD_FILES+=etc/newsyslog.conf.d/sendmail.conf
OLD_FILES+=etc/periodic/daily/150.clean-hoststat
OLD_FILES+=etc/periodic/daily/440.status-mailq
OLD_FILES+=etc/periodic/daily/460.status-mail-rejects
OLD_FILES+=etc/periodic/daily/500.queuerun
OLD_FILES+=bin/rmail
OLD_FILES+=usr/bin/vacation
OLD_FILES+=usr/include/libmilter/mfapi.h
OLD_FILES+=usr/include/libmilter/mfdef.h
OLD_DIRS+=usr/include/libmilter
OLD_FILES+=usr/lib/libmilter.a
OLD_FILES+=usr/lib/libmilter.so
OLD_LIBS+=usr/lib/libmilter.so.5
OLD_FILES+=usr/lib/libmilter_p.a
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64"
OLD_FILES+=usr/lib32/libmilter.a
OLD_FILES+=usr/lib32/libmilter.so
OLD_LIBS+=usr/lib32/libmilter.so.5
OLD_FILES+=usr/lib32/libmilter_p.a
.endif
OLD_FILES+=usr/libexec/mail.local
OLD_FILES+=usr/libexec/sendmail/sendmail
OLD_FILES+=usr/libexec/smrsh
OLD_FILES+=usr/sbin/editmap
OLD_FILES+=usr/sbin/mailstats
OLD_FILES+=usr/sbin/makemap
OLD_FILES+=usr/sbin/praliases
OLD_FILES+=usr/share/doc/smm/08.sendmailop/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/08.sendmailop
OLD_FILES+=usr/share/man/man1/mailq.1.gz
OLD_FILES+=usr/share/man/man1/newaliases.1.gz
OLD_FILES+=usr/share/man/man1/vacation.1.gz
OLD_FILES+=usr/share/man/man5/aliases.5.gz
OLD_FILES+=usr/share/man/man8/editmap.8.gz
OLD_FILES+=usr/share/man/man8/hoststat.8.gz
OLD_FILES+=usr/share/man/man8/mail.local.8.gz
OLD_FILES+=usr/share/man/man8/mailstats.8.gz
OLD_FILES+=usr/share/man/man8/makemap.8.gz
OLD_FILES+=usr/share/man/man8/praliases.8.gz
OLD_FILES+=usr/share/man/man8/purgestat.8.gz
OLD_FILES+=usr/share/man/man8/rmail.8.gz
OLD_FILES+=usr/share/man/man8/sendmail.8.gz
OLD_FILES+=usr/share/man/man8/smrsh.8.gz
OLD_FILES+=usr/share/sendmail/cf/README
OLD_FILES+=usr/share/sendmail/cf/cf/Makefile
OLD_FILES+=usr/share/sendmail/cf/cf/README
OLD_FILES+=usr/share/sendmail/cf/cf/chez.cs.mc
OLD_FILES+=usr/share/sendmail/cf/cf/clientproto.mc
OLD_FILES+=usr/share/sendmail/cf/cf/cs-hpux10.mc
OLD_FILES+=usr/share/sendmail/cf/cf/cs-hpux9.mc
OLD_FILES+=usr/share/sendmail/cf/cf/cs-osf1.mc
OLD_FILES+=usr/share/sendmail/cf/cf/cs-solaris2.mc
OLD_FILES+=usr/share/sendmail/cf/cf/cs-sunos4.1.mc
OLD_FILES+=usr/share/sendmail/cf/cf/cs-ultrix4.mc
OLD_FILES+=usr/share/sendmail/cf/cf/cyrusproto.mc
OLD_FILES+=usr/share/sendmail/cf/cf/generic-bsd4.4.mc
OLD_FILES+=usr/share/sendmail/cf/cf/generic-hpux10.mc
OLD_FILES+=usr/share/sendmail/cf/cf/generic-hpux9.mc
OLD_FILES+=usr/share/sendmail/cf/cf/generic-linux.mc
OLD_FILES+=usr/share/sendmail/cf/cf/generic-mpeix.mc
OLD_FILES+=usr/share/sendmail/cf/cf/generic-nextstep3.3.mc
OLD_FILES+=usr/share/sendmail/cf/cf/generic-osf1.mc
OLD_FILES+=usr/share/sendmail/cf/cf/generic-solaris.mc
OLD_FILES+=usr/share/sendmail/cf/cf/generic-sunos4.1.mc
OLD_FILES+=usr/share/sendmail/cf/cf/generic-ultrix4.mc
OLD_FILES+=usr/share/sendmail/cf/cf/huginn.cs.mc
OLD_FILES+=usr/share/sendmail/cf/cf/knecht.mc
OLD_FILES+=usr/share/sendmail/cf/cf/mail.cs.mc
OLD_FILES+=usr/share/sendmail/cf/cf/mail.eecs.mc
OLD_FILES+=usr/share/sendmail/cf/cf/mailspool.cs.mc
OLD_FILES+=usr/share/sendmail/cf/cf/python.cs.mc
OLD_FILES+=usr/share/sendmail/cf/cf/s2k-osf1.mc
OLD_FILES+=usr/share/sendmail/cf/cf/s2k-ultrix4.mc
OLD_FILES+=usr/share/sendmail/cf/cf/submit.cf
OLD_FILES+=usr/share/sendmail/cf/cf/submit.mc
OLD_FILES+=usr/share/sendmail/cf/cf/tcpproto.mc
OLD_FILES+=usr/share/sendmail/cf/cf/ucbarpa.mc
OLD_FILES+=usr/share/sendmail/cf/cf/ucbvax.mc
OLD_FILES+=usr/share/sendmail/cf/cf/uucpproto.mc
OLD_FILES+=usr/share/sendmail/cf/cf/vangogh.cs.mc
OLD_DIRS+=usr/share/sendmail/cf/cf
OLD_FILES+=usr/share/sendmail/cf/domain/Berkeley.EDU.m4
OLD_FILES+=usr/share/sendmail/cf/domain/CS.Berkeley.EDU.m4
OLD_FILES+=usr/share/sendmail/cf/domain/EECS.Berkeley.EDU.m4
OLD_FILES+=usr/share/sendmail/cf/domain/S2K.Berkeley.EDU.m4
OLD_FILES+=usr/share/sendmail/cf/domain/berkeley-only.m4
OLD_FILES+=usr/share/sendmail/cf/domain/generic.m4
OLD_DIRS+=usr/share/sendmail/cf/domain
OLD_FILES+=usr/share/sendmail/cf/feature/accept_unqualified_senders.m4
OLD_FILES+=usr/share/sendmail/cf/feature/accept_unresolvable_domains.m4
OLD_FILES+=usr/share/sendmail/cf/feature/access_db.m4
OLD_FILES+=usr/share/sendmail/cf/feature/allmasquerade.m4
OLD_FILES+=usr/share/sendmail/cf/feature/always_add_domain.m4
OLD_FILES+=usr/share/sendmail/cf/feature/authinfo.m4
OLD_FILES+=usr/share/sendmail/cf/feature/badmx.m4
OLD_FILES+=usr/share/sendmail/cf/feature/bcc.m4
OLD_FILES+=usr/share/sendmail/cf/feature/bestmx_is_local.m4
OLD_FILES+=usr/share/sendmail/cf/feature/bitdomain.m4
OLD_FILES+=usr/share/sendmail/cf/feature/blacklist_recipients.m4
OLD_FILES+=usr/share/sendmail/cf/feature/block_bad_helo.m4
OLD_FILES+=usr/share/sendmail/cf/feature/compat_check.m4
OLD_FILES+=usr/share/sendmail/cf/feature/conncontrol.m4
OLD_FILES+=usr/share/sendmail/cf/feature/delay_checks.m4
OLD_FILES+=usr/share/sendmail/cf/feature/dnsbl.m4
OLD_FILES+=usr/share/sendmail/cf/feature/domaintable.m4
OLD_FILES+=usr/share/sendmail/cf/feature/enhdnsbl.m4
OLD_FILES+=usr/share/sendmail/cf/feature/generics_entire_domain.m4
OLD_FILES+=usr/share/sendmail/cf/feature/genericstable.m4
OLD_FILES+=usr/share/sendmail/cf/feature/greet_pause.m4
OLD_FILES+=usr/share/sendmail/cf/feature/ldap_routing.m4
OLD_FILES+=usr/share/sendmail/cf/feature/limited_masquerade.m4
OLD_FILES+=usr/share/sendmail/cf/feature/local_lmtp.m4
OLD_FILES+=usr/share/sendmail/cf/feature/local_no_masquerade.m4
OLD_FILES+=usr/share/sendmail/cf/feature/local_procmail.m4
OLD_FILES+=usr/share/sendmail/cf/feature/lookupdotdomain.m4
OLD_FILES+=usr/share/sendmail/cf/feature/loose_relay_check.m4
OLD_FILES+=usr/share/sendmail/cf/feature/mailertable.m4
OLD_FILES+=usr/share/sendmail/cf/feature/masquerade_entire_domain.m4
OLD_FILES+=usr/share/sendmail/cf/feature/masquerade_envelope.m4
OLD_FILES+=usr/share/sendmail/cf/feature/msp.m4
OLD_FILES+=usr/share/sendmail/cf/feature/mtamark.m4
OLD_FILES+=usr/share/sendmail/cf/feature/no_default_msa.m4
OLD_FILES+=usr/share/sendmail/cf/feature/nocanonify.m4
OLD_FILES+=usr/share/sendmail/cf/feature/nopercenthack.m4
OLD_FILES+=usr/share/sendmail/cf/feature/notsticky.m4
OLD_FILES+=usr/share/sendmail/cf/feature/nouucp.m4
OLD_FILES+=usr/share/sendmail/cf/feature/nullclient.m4
OLD_FILES+=usr/share/sendmail/cf/feature/prefixmod.m4
OLD_FILES+=usr/share/sendmail/cf/feature/preserve_local_plus_detail.m4
OLD_FILES+=usr/share/sendmail/cf/feature/preserve_luser_host.m4
OLD_FILES+=usr/share/sendmail/cf/feature/promiscuous_relay.m4
OLD_FILES+=usr/share/sendmail/cf/feature/queuegroup.m4
OLD_FILES+=usr/share/sendmail/cf/feature/ratecontrol.m4
OLD_FILES+=usr/share/sendmail/cf/feature/redirect.m4
OLD_FILES+=usr/share/sendmail/cf/feature/relay_based_on_MX.m4
OLD_FILES+=usr/share/sendmail/cf/feature/relay_entire_domain.m4
OLD_FILES+=usr/share/sendmail/cf/feature/relay_hosts_only.m4
OLD_FILES+=usr/share/sendmail/cf/feature/relay_local_from.m4
OLD_FILES+=usr/share/sendmail/cf/feature/relay_mail_from.m4
OLD_FILES+=usr/share/sendmail/cf/feature/require_rdns.m4
OLD_FILES+=usr/share/sendmail/cf/feature/smrsh.m4
OLD_FILES+=usr/share/sendmail/cf/feature/stickyhost.m4
OLD_FILES+=usr/share/sendmail/cf/feature/tls_session_features.m4
OLD_FILES+=usr/share/sendmail/cf/feature/use_client_ptr.m4
OLD_FILES+=usr/share/sendmail/cf/feature/use_ct_file.m4
OLD_FILES+=usr/share/sendmail/cf/feature/use_cw_file.m4
OLD_FILES+=usr/share/sendmail/cf/feature/uucpdomain.m4
OLD_FILES+=usr/share/sendmail/cf/feature/virtuser_entire_domain.m4
OLD_FILES+=usr/share/sendmail/cf/feature/virtusertable.m4
OLD_DIRS+=usr/share/sendmail/cf/feature
OLD_FILES+=usr/share/sendmail/cf/hack/cssubdomain.m4
OLD_FILES+=usr/share/sendmail/cf/hack/xconnect.m4
OLD_DIRS+=usr/share/sendmail/cf/hack
OLD_FILES+=usr/share/sendmail/cf/m4/cf.m4
OLD_FILES+=usr/share/sendmail/cf/m4/cfhead.m4
OLD_FILES+=usr/share/sendmail/cf/m4/proto.m4
OLD_FILES+=usr/share/sendmail/cf/m4/version.m4
OLD_DIRS+=usr/share/sendmail/cf/m4
OLD_FILES+=usr/share/sendmail/cf/mailer/cyrus.m4
OLD_FILES+=usr/share/sendmail/cf/mailer/cyrusv2.m4
OLD_FILES+=usr/share/sendmail/cf/mailer/fax.m4
OLD_FILES+=usr/share/sendmail/cf/mailer/local.m4
OLD_FILES+=usr/share/sendmail/cf/mailer/mail11.m4
OLD_FILES+=usr/share/sendmail/cf/mailer/phquery.m4
OLD_FILES+=usr/share/sendmail/cf/mailer/pop.m4
OLD_FILES+=usr/share/sendmail/cf/mailer/procmail.m4
OLD_FILES+=usr/share/sendmail/cf/mailer/qpage.m4
OLD_FILES+=usr/share/sendmail/cf/mailer/smtp.m4
OLD_FILES+=usr/share/sendmail/cf/mailer/usenet.m4
OLD_FILES+=usr/share/sendmail/cf/mailer/uucp.m4
OLD_DIRS+=usr/share/sendmail/cf/mailer
OLD_FILES+=usr/share/sendmail/cf/ostype/a-ux.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/aix3.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/aix4.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/aix5.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/altos.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/amdahl-uts.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/bsd4.3.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/bsd4.4.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/bsdi.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/bsdi1.0.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/bsdi2.0.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/darwin.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/dgux.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/domainos.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/dragonfly.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/dynix3.2.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/freebsd4.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/freebsd5.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/freebsd6.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/gnu.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/hpux10.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/hpux11.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/hpux9.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/irix4.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/irix5.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/irix6.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/isc4.1.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/linux.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/maxion.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/mklinux.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/mpeix.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/nextstep.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/openbsd.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/osf1.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/powerux.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/ptx2.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/qnx.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/riscos4.5.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/sco-uw-2.1.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/sco3.2.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/sinix.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/solaris11.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/solaris2.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/solaris2.ml.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/solaris2.pre5.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/solaris8.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/sunos3.5.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/sunos4.1.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/svr4.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/ultrix4.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/unicos.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/unicosmk.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/unicosmp.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/unixware7.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/unknown.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/uxpds.m4
OLD_DIRS+=usr/share/sendmail/cf/ostype
OLD_FILES+=usr/share/sendmail/cf/sendmail.schema
OLD_FILES+=usr/share/sendmail/cf/sh/makeinfo.sh
OLD_DIRS+=usr/share/sendmail/cf/sh
OLD_FILES+=usr/share/sendmail/cf/siteconfig/uucp.cogsci.m4
OLD_FILES+=usr/share/sendmail/cf/siteconfig/uucp.old.arpa.m4
OLD_FILES+=usr/share/sendmail/cf/siteconfig/uucp.ucbarpa.m4
OLD_FILES+=usr/share/sendmail/cf/siteconfig/uucp.ucbvax.m4
OLD_DIRS+=usr/share/sendmail/cf/siteconfig
OLD_DIRS+=usr/share/sendmail/cf
OLD_DIRS+=usr/share/sendmail
OLD_DIRS+=var/spool/clientmqueue
.endif
.if ${MK_SERVICESDB} == no
OLD_FILES+=var/db/services.db
.endif
.if ${MK_SHAREDOCS} == no
OLD_FILES+=usr/share/doc/pjdfstest/README
OLD_DIRS+=usr/share/doc/pjdfstest
.endif
.if ${MK_SSP} == no
OLD_LIBS+=lib/libssp.so.0
OLD_FILES+=usr/include/ssp/ssp.h
OLD_FILES+=usr/include/ssp/stdio.h
OLD_FILES+=usr/include/ssp/string.h
OLD_FILES+=usr/include/ssp/unistd.h
OLD_FILES+=usr/lib/libssp.a
OLD_FILES+=usr/lib/libssp.so
OLD_FILES+=usr/lib/libssp_nonshared.a
OLD_FILES+=usr/lib32/libssp.a
OLD_FILES+=usr/lib32/libssp.so
OLD_LIBS+=usr/lib32/libssp.so.0
OLD_FILES+=usr/lib32/libssp_nonshared.a
OLD_FILES+=usr/tests/lib/libc/ssp/Kyuafile
OLD_FILES+=usr/tests/lib/libc/ssp/h_fgets
OLD_FILES+=usr/tests/lib/libc/ssp/h_getcwd
OLD_FILES+=usr/tests/lib/libc/ssp/h_gets
OLD_FILES+=usr/tests/lib/libc/ssp/h_memcpy
OLD_FILES+=usr/tests/lib/libc/ssp/h_memmove
OLD_FILES+=usr/tests/lib/libc/ssp/h_memset
OLD_FILES+=usr/tests/lib/libc/ssp/h_read
OLD_FILES+=usr/tests/lib/libc/ssp/h_readlink
OLD_FILES+=usr/tests/lib/libc/ssp/h_snprintf
OLD_FILES+=usr/tests/lib/libc/ssp/h_sprintf
OLD_FILES+=usr/tests/lib/libc/ssp/h_stpcpy
OLD_FILES+=usr/tests/lib/libc/ssp/h_stpncpy
OLD_FILES+=usr/tests/lib/libc/ssp/h_strcat
OLD_FILES+=usr/tests/lib/libc/ssp/h_strcpy
OLD_FILES+=usr/tests/lib/libc/ssp/h_strncat
OLD_FILES+=usr/tests/lib/libc/ssp/h_strncpy
OLD_FILES+=usr/tests/lib/libc/ssp/h_vsnprintf
OLD_FILES+=usr/tests/lib/libc/ssp/h_vsprintf
OLD_FILES+=usr/tests/lib/libc/ssp/ssp_test
.endif
.if ${MK_SYSCONS} == no
OLD_FILES+=usr/share/syscons/fonts/INDEX.fonts
OLD_FILES+=usr/share/syscons/fonts/armscii8-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/armscii8-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/armscii8-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/cp1251-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/cp1251-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/cp1251-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/cp437-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/cp437-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/cp437-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/cp437-thin-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/cp437-thin-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/cp850-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/cp850-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/cp850-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/cp850-thin-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/cp850-thin-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/cp865-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/cp865-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/cp865-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/cp865-thin-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/cp865-thin-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/cp866-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/cp866-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/cp866-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/cp866b-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/cp866c-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/cp866u-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/cp866u-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/cp866u-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/haik8-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/haik8-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/haik8-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/iso-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/iso-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/iso-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/iso-thin-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/iso02-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/iso02-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/iso02-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/iso04-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/iso04-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/iso04-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/iso04-vga9-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/iso04-vga9-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/iso04-vga9-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/iso04-vga9-wide-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/iso04-wide-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/iso05-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/iso05-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/iso05-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/iso07-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/iso07-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/iso07-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/iso08-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/iso08-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/iso08-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/iso09-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/iso15-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/iso15-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/iso15-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/iso15-thin-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/koi8-r-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/koi8-r-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/koi8-r-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/koi8-rb-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/koi8-rc-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/koi8-u-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/koi8-u-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/koi8-u-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/swiss-1131-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/swiss-1251-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/swiss-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/swiss-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/swiss-8x8.fnt
OLD_FILES+=usr/share/syscons/keymaps/INDEX.keymaps
OLD_FILES+=usr/share/syscons/keymaps/be.iso.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/be.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/bg.bds.ctrlcaps.kbd
OLD_FILES+=usr/share/syscons/keymaps/bg.phonetic.ctrlcaps.kbd
OLD_FILES+=usr/share/syscons/keymaps/br275.cp850.kbd
OLD_FILES+=usr/share/syscons/keymaps/br275.iso.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/br275.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/by.cp1131.kbd
OLD_FILES+=usr/share/syscons/keymaps/by.cp1251.kbd
OLD_FILES+=usr/share/syscons/keymaps/by.iso5.kbd
OLD_FILES+=usr/share/syscons/keymaps/ce.iso2.kbd
OLD_FILES+=usr/share/syscons/keymaps/colemak.iso15.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/cs.latin2.qwertz.kbd
OLD_FILES+=usr/share/syscons/keymaps/cz.iso2.kbd
OLD_FILES+=usr/share/syscons/keymaps/danish.cp865.kbd
OLD_FILES+=usr/share/syscons/keymaps/danish.iso.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/danish.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/danish.iso.macbook.kbd
OLD_FILES+=usr/share/syscons/keymaps/dutch.iso.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/eee_nordic.kbd
OLD_FILES+=usr/share/syscons/keymaps/el.iso07.kbd
OLD_FILES+=usr/share/syscons/keymaps/estonian.cp850.kbd
OLD_FILES+=usr/share/syscons/keymaps/estonian.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/estonian.iso15.kbd
OLD_FILES+=usr/share/syscons/keymaps/finnish.cp850.kbd
OLD_FILES+=usr/share/syscons/keymaps/finnish.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/fr.dvorak.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/fr.dvorak.kbd
OLD_FILES+=usr/share/syscons/keymaps/fr.iso.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/fr.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/fr.macbook.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/fr_CA.iso.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/german.cp850.kbd
OLD_FILES+=usr/share/syscons/keymaps/german.iso.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/german.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/gr.elot.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/gr.us101.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/hr.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/hu.iso2.101keys.kbd
OLD_FILES+=usr/share/syscons/keymaps/hu.iso2.102keys.kbd
OLD_FILES+=usr/share/syscons/keymaps/hy.armscii-8.kbd
OLD_FILES+=usr/share/syscons/keymaps/icelandic.iso.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/icelandic.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/it.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/iw.iso8.kbd
OLD_FILES+=usr/share/syscons/keymaps/jp.106.kbd
OLD_FILES+=usr/share/syscons/keymaps/jp.106x.kbd
OLD_FILES+=usr/share/syscons/keymaps/kk.pt154.io.kbd
OLD_FILES+=usr/share/syscons/keymaps/kk.pt154.kst.kbd
OLD_FILES+=usr/share/syscons/keymaps/latinamerican.iso.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/latinamerican.kbd
OLD_FILES+=usr/share/syscons/keymaps/lt.iso4.kbd
OLD_FILES+=usr/share/syscons/keymaps/norwegian.dvorak.kbd
OLD_FILES+=usr/share/syscons/keymaps/norwegian.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/pl_PL.ISO8859-2.kbd
OLD_FILES+=usr/share/syscons/keymaps/pl_PL.dvorak.kbd
OLD_FILES+=usr/share/syscons/keymaps/pt.iso.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/pt.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/ru.cp866.kbd
OLD_FILES+=usr/share/syscons/keymaps/ru.iso5.kbd
OLD_FILES+=usr/share/syscons/keymaps/ru.koi8-r.kbd
OLD_FILES+=usr/share/syscons/keymaps/ru.koi8-r.shift.kbd
OLD_FILES+=usr/share/syscons/keymaps/ru.koi8-r.win.kbd
OLD_FILES+=usr/share/syscons/keymaps/si.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/sk.iso2.kbd
OLD_FILES+=usr/share/syscons/keymaps/spanish.dvorak.kbd
OLD_FILES+=usr/share/syscons/keymaps/spanish.iso.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/spanish.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/spanish.iso15.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/swedish.cp850.kbd
OLD_FILES+=usr/share/syscons/keymaps/swedish.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/swissfrench.cp850.kbd
OLD_FILES+=usr/share/syscons/keymaps/swissfrench.iso.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/swissfrench.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/swissgerman.cp850.kbd
OLD_FILES+=usr/share/syscons/keymaps/swissgerman.iso.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/swissgerman.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/swissgerman.macbook.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/tr.iso9.q.kbd
OLD_FILES+=usr/share/syscons/keymaps/ua.iso5.kbd
OLD_FILES+=usr/share/syscons/keymaps/ua.koi8-u.kbd
OLD_FILES+=usr/share/syscons/keymaps/ua.koi8-u.shift.alt.kbd
OLD_FILES+=usr/share/syscons/keymaps/uk.cp850-ctrl.kbd
OLD_FILES+=usr/share/syscons/keymaps/uk.cp850.kbd
OLD_FILES+=usr/share/syscons/keymaps/uk.dvorak.kbd
OLD_FILES+=usr/share/syscons/keymaps/uk.iso-ctrl.kbd
OLD_FILES+=usr/share/syscons/keymaps/uk.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/us.dvorak.kbd
OLD_FILES+=usr/share/syscons/keymaps/us.dvorakl.kbd
OLD_FILES+=usr/share/syscons/keymaps/us.dvorakp.kbd
OLD_FILES+=usr/share/syscons/keymaps/us.dvorakr.kbd
OLD_FILES+=usr/share/syscons/keymaps/us.dvorakx.kbd
OLD_FILES+=usr/share/syscons/keymaps/us.emacs.kbd
OLD_FILES+=usr/share/syscons/keymaps/us.iso.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/us.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/us.pc-ctrl.kbd
OLD_FILES+=usr/share/syscons/keymaps/us.unix.kbd
OLD_FILES+=usr/share/syscons/scrnmaps/armscii8-2haik8.scm
OLD_FILES+=usr/share/syscons/scrnmaps/iso-8859-1_to_cp437.scm
OLD_FILES+=usr/share/syscons/scrnmaps/iso-8859-4_for_vga9.scm
OLD_FILES+=usr/share/syscons/scrnmaps/iso-8859-7_to_cp437.scm
OLD_FILES+=usr/share/syscons/scrnmaps/koi8-r2cp866.scm
OLD_FILES+=usr/share/syscons/scrnmaps/koi8-u2cp866u.scm
OLD_FILES+=usr/share/syscons/scrnmaps/us-ascii_to_cp437.scm
OLD_DIRS+=usr/share/syscons/fonts
OLD_DIRS+=usr/share/syscons/scrnmaps
OLD_DIRS+=usr/share/syscons/keymaps
OLD_DIRS+=usr/share/syscons
.endif
.if ${MK_TALK} == no
OLD_FILES+=usr/bin/talk
OLD_FILES+=usr/libexec/ntalkd
OLD_FILES+=usr/share/man/man1/talk.1.gz
OLD_FILES+=usr/share/man/man8/talkd.8.gz
.endif
.if ${MK_TCSH} == no
OLD_FILES+=.cshrc
OLD_FILES+=etc/csh.cshrc
OLD_FILES+=etc/csh.login
OLD_FILES+=etc/csh.logout
OLD_FILES+=bin/csh
OLD_FILES+=bin/tcsh
OLD_FILES+=rescue/csh
OLD_FILES+=rescue/tcsh
OLD_FILES+=root/.cshrc
OLD_FILES+=root/.login
OLD_FILES+=usr/share/examples/etc/csh.cshrc
OLD_FILES+=usr/share/examples/etc/csh.login
OLD_FILES+=usr/share/examples/etc/csh.logout
OLD_FILES+=usr/share/examples/tcsh/complete.tcsh
OLD_FILES+=usr/share/examples/tcsh/csh-mode.el
OLD_DIRS+=usr/share/examples/tcsh
OLD_FILES+=usr/share/man/man1/csh.1.gz
OLD_FILES+=usr/share/man/man1/tcsh.1.gz
OLD_FILES+=usr/share/nls/de_AT.ISO8859-1/tcsh.cat
OLD_FILES+=usr/share/nls/de_AT.ISO8859-15/tcsh.cat
OLD_FILES+=usr/share/nls/de_AT.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/de_CH.ISO8859-1/tcsh.cat
OLD_FILES+=usr/share/nls/de_CH.ISO8859-15/tcsh.cat
OLD_FILES+=usr/share/nls/de_CH.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/de_DE.ISO8859-1/tcsh.cat
OLD_FILES+=usr/share/nls/de_DE.ISO8859-15/tcsh.cat
OLD_FILES+=usr/share/nls/de_DE.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/el_GR.ISO8859-7/tcsh.cat
OLD_FILES+=usr/share/nls/el_GR.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/es_ES.ISO8859-1/tcsh.cat
OLD_FILES+=usr/share/nls/es_ES.ISO8859-15/tcsh.cat
OLD_FILES+=usr/share/nls/es_ES.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/et_EE.ISO8859-15/tcsh.cat
OLD_FILES+=usr/share/nls/et_EE.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/fi_FI.ISO8859-1/tcsh.cat
OLD_FILES+=usr/share/nls/fi_FI.ISO8859-15/tcsh.cat
OLD_FILES+=usr/share/nls/fi_FI.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/fr_BE.ISO8859-1/tcsh.cat
OLD_FILES+=usr/share/nls/fr_BE.ISO8859-15/tcsh.cat
OLD_FILES+=usr/share/nls/fr_BE.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/fr_CA.ISO8859-1/tcsh.cat
OLD_FILES+=usr/share/nls/fr_CA.ISO8859-15/tcsh.cat
OLD_FILES+=usr/share/nls/fr_CA.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/fr_CH.ISO8859-1/tcsh.cat
OLD_FILES+=usr/share/nls/fr_CH.ISO8859-15/tcsh.cat
OLD_FILES+=usr/share/nls/fr_CH.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/fr_FR.ISO8859-1/tcsh.cat
OLD_FILES+=usr/share/nls/fr_FR.ISO8859-15/tcsh.cat
OLD_FILES+=usr/share/nls/fr_FR.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/it_CH.ISO8859-1/tcsh.cat
OLD_FILES+=usr/share/nls/it_CH.ISO8859-15/tcsh.cat
OLD_FILES+=usr/share/nls/it_CH.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/it_IT.ISO8859-1/tcsh.cat
OLD_FILES+=usr/share/nls/it_IT.ISO8859-15/tcsh.cat
OLD_FILES+=usr/share/nls/it_IT.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/ja_JP.SJIS/tcsh.cat
OLD_FILES+=usr/share/nls/ja_JP.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/ja_JP.eucJP/tcsh.cat
OLD_FILES+=usr/share/nls/ru_RU.CP1251/tcsh.cat
OLD_FILES+=usr/share/nls/ru_RU.CP866/tcsh.cat
OLD_FILES+=usr/share/nls/ru_RU.ISO8859-5/tcsh.cat
OLD_FILES+=usr/share/nls/ru_RU.KOI8-R/tcsh.cat
OLD_FILES+=usr/share/nls/ru_RU.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/uk_UA.ISO8859-5/tcsh.cat
OLD_FILES+=usr/share/nls/uk_UA.KOI8-U/tcsh.cat
OLD_FILES+=usr/share/nls/uk_UA.UTF-8/tcsh.cat
.endif
.if ${MK_TELNET} == no
OLD_FILES+=etc/pam.d/telnetd
OLD_FILES+=usr/bin/telnet
OLD_FILES+=usr/libexec/telnetd
OLD_FILES+=usr/share/man/man1/telnet.1.gz
OLD_FILES+=usr/share/man/man8/telnetd.8.gz
.endif
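# Stale test-suite components from older layouts; by this file's own
# convention they are obsolete even when the test suite itself remains
# installed.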
.if ${MK_TESTS} == yes
OLD_FILES+=usr/bin/atf-sh
OLD_FILES+=usr/include/atf-c++/config.hpp
OLD_FILES+=usr/include/atf-c/config.h
OLD_LIBS+=usr/lib/libatf-c++.a
OLD_LIBS+=usr/lib/libatf-c++.so
OLD_LIBS+=usr/lib/libatf-c++.so.1
OLD_LIBS+=usr/lib/libatf-c++.so.2
OLD_LIBS+=usr/lib/libatf-c++_p.a
OLD_LIBS+=usr/lib/libatf-c.a
OLD_LIBS+=usr/lib/libatf-c.so
OLD_LIBS+=usr/lib/libatf-c.so.1
OLD_LIBS+=usr/lib/libatf-c_p.a
OLD_LIBS+=usr/lib/private/libatf-c.so.0
OLD_LIBS+=usr/lib/private/libatf-c++.so.1
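# 32-bit compatibility libraries are only present on 64-bit targets that
# build lib32.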
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64"
OLD_LIBS+=usr/lib32/libatf-c++.a
OLD_LIBS+=usr/lib32/libatf-c++.so
OLD_LIBS+=usr/lib32/libatf-c++.so.1
OLD_LIBS+=usr/lib32/libatf-c++.so.2
OLD_LIBS+=usr/lib32/libatf-c++_p.a
OLD_LIBS+=usr/lib32/libatf-c.a
OLD_LIBS+=usr/lib32/libatf-c.so
OLD_LIBS+=usr/lib32/libatf-c.so.1
OLD_LIBS+=usr/lib32/libatf-c_p.a
OLD_LIBS+=usr/lib32/private/libatf-c.so.0
OLD_LIBS+=usr/lib32/private/libatf-c++.so.1
.endif
OLD_FILES+=usr/libdata/pkgconfig/atf-c++.pc
OLD_FILES+=usr/libdata/pkgconfig/atf-c.pc
OLD_FILES+=usr/libdata/pkgconfig/atf-sh.pc
OLD_FILES+=usr/share/aclocal/atf-c++.m4
OLD_FILES+=usr/share/aclocal/atf-c.m4
OLD_FILES+=usr/share/aclocal/atf-common.m4
OLD_FILES+=usr/share/aclocal/atf-sh.m4
OLD_DIRS+=usr/share/aclocal
OLD_DIRS+=usr/tests/bin/chown
OLD_FILES+=usr/tests/bin/chown/Kyuafile
OLD_FILES+=usr/tests/bin/chown/chown-f_test
OLD_FILES+=usr/tests/bin/chown/units_basics
OLD_FILES+=usr/tests/bin/date/legacy_test
OLD_FILES+=usr/tests/bin/sh/legacy_test
OLD_FILES+=usr/tests/usr.bin/atf/Kyuafile
OLD_FILES+=usr/tests/usr.bin/atf/atf-sh/Kyuafile
OLD_FILES+=usr/tests/usr.bin/atf/atf-sh/atf_check_test
OLD_FILES+=usr/tests/usr.bin/atf/atf-sh/config_test
OLD_FILES+=usr/tests/usr.bin/atf/atf-sh/integration_test
OLD_FILES+=usr/tests/usr.bin/atf/atf-sh/misc_helpers
OLD_FILES+=usr/tests/usr.bin/atf/atf-sh/normalize_test
OLD_FILES+=usr/tests/usr.bin/atf/atf-sh/tc_test
OLD_FILES+=usr/tests/usr.bin/atf/atf-sh/tp_test
OLD_DIRS+=usr/tests/usr.bin/atf/atf-sh
OLD_DIRS+=usr/tests/usr.bin/atf
OLD_FILES+=usr/tests/lib/atf/libatf-c/test_helpers_test
OLD_FILES+=usr/tests/lib/atf/test-programs/fork_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/application_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/config_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/expand_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/parser_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/sanity_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/ui_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/env_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/exceptions_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/expand_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/fs_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/parser_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/process_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/sanity_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/pkg_config_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/text_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/ui_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/config_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/dynstr_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/env_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/fs_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/list_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/map_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/pkg_config_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/process_helpers
OLD_FILES+=usr/tests/lib/atf/libatf-c/process_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/sanity_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/text_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/user_test
.if ${MK_MAKE} == yes
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.status.2
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.status.3
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.status.4
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.status.5
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.status.6
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.status.7
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.stderr.3
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.stderr.4
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.stderr.5
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.stderr.6
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.stderr.7
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.stdout.3
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.stdout.4
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.stdout.5
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.stdout.6
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.stdout.7
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/libtest.a
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.status.2
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.status.3
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.status.4
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.status.5
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.status.6
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.status.7
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.stderr.3
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.stderr.4
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.stderr.5
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.stderr.6
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.stderr.7
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.stdout.3
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.stdout.4
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.stdout.5
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.stdout.6
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.stdout.7
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/libtest.a
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.status.2
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.status.3
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.status.4
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.status.5
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.status.6
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.status.7
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.stderr.3
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.stderr.4
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.stderr.5
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.stderr.6
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.stderr.7
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.stdout.3
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.stdout.4
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.stdout.5
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.stdout.6
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.stdout.7
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/libtest.a
OLD_FILES+=usr/tests/usr.bin/make/archives/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/basic/t0/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/basic/t0/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/basic/t0/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/basic/t0/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/basic/t0/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/basic/t1/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/basic/t1/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/basic/t1/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/basic/t1/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/basic/t1/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/basic/t1/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/basic/t2/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/basic/t2/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/basic/t2/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/basic/t2/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/basic/t2/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/basic/t2/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/basic/t3/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/basic/t3/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/basic/t3/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/basic/t3/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/basic/t3/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/basic/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/execution/ellipsis/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/execution/ellipsis/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/execution/ellipsis/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/execution/ellipsis/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/execution/ellipsis/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/execution/ellipsis/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/execution/empty/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/execution/empty/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/execution/empty/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/execution/empty/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/execution/empty/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/execution/empty/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/execution/joberr/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/execution/joberr/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/execution/joberr/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/execution/joberr/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/execution/joberr/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/execution/joberr/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/execution/plus/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/execution/plus/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/execution/plus/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/execution/plus/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/execution/plus/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/execution/plus/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/execution/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/shell/builtin/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/shell/builtin/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/shell/builtin/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/shell/builtin/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/shell/builtin/expected.status.2
OLD_FILES+=usr/tests/usr.bin/make/shell/builtin/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/shell/builtin/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/make/shell/builtin/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/shell/builtin/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/make/shell/builtin/sh
OLD_FILES+=usr/tests/usr.bin/make/shell/meta/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/shell/meta/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/shell/meta/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/shell/meta/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/shell/meta/expected.status.2
OLD_FILES+=usr/tests/usr.bin/make/shell/meta/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/shell/meta/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/make/shell/meta/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/shell/meta/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/make/shell/meta/sh
OLD_FILES+=usr/tests/usr.bin/make/shell/path/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/shell/path/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/shell/path/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/shell/path/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/shell/path/expected.status.2
OLD_FILES+=usr/tests/usr.bin/make/shell/path/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/shell/path/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/make/shell/path/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/shell/path/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/make/shell/path/sh
OLD_FILES+=usr/tests/usr.bin/make/shell/path_select/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/shell/path_select/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/shell/path_select/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/shell/path_select/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/shell/path_select/expected.status.2
OLD_FILES+=usr/tests/usr.bin/make/shell/path_select/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/shell/path_select/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/make/shell/path_select/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/shell/path_select/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/make/shell/path_select/shell
OLD_FILES+=usr/tests/usr.bin/make/shell/replace/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/shell/replace/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/shell/replace/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/shell/replace/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/shell/replace/expected.status.2
OLD_FILES+=usr/tests/usr.bin/make/shell/replace/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/shell/replace/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/make/shell/replace/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/shell/replace/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/make/shell/replace/shell
OLD_FILES+=usr/tests/usr.bin/make/shell/select/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/shell/select/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/shell/select/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/shell/select/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/shell/select/expected.status.2
OLD_FILES+=usr/tests/usr.bin/make/shell/select/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/shell/select/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/make/shell/select/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/shell/select/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/make/shell/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/suffixes/basic/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/suffixes/basic/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/suffixes/basic/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/suffixes/basic/TEST1.a
OLD_FILES+=usr/tests/usr.bin/make/suffixes/basic/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/suffixes/basic/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/suffixes/basic/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/suffixes/src_wild1/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/suffixes/src_wild1/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/suffixes/src_wild1/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/suffixes/src_wild1/TEST1.a
OLD_FILES+=usr/tests/usr.bin/make/suffixes/src_wild1/TEST2.a
OLD_FILES+=usr/tests/usr.bin/make/suffixes/src_wild1/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/suffixes/src_wild1/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/suffixes/src_wild1/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/suffixes/src_wild2/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/suffixes/src_wild2/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/suffixes/src_wild2/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/suffixes/src_wild2/TEST1.a
OLD_FILES+=usr/tests/usr.bin/make/suffixes/src_wild2/TEST2.a
OLD_FILES+=usr/tests/usr.bin/make/suffixes/src_wild2/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/suffixes/src_wild2/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/suffixes/src_wild2/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/suffixes/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/syntax/directive-t0/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/syntax/directive-t0/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/syntax/directive-t0/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/syntax/directive-t0/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/syntax/directive-t0/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/syntax/directive-t0/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/syntax/enl/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/syntax/enl/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/syntax/enl/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/syntax/enl/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/syntax/enl/expected.status.2
OLD_FILES+=usr/tests/usr.bin/make/syntax/enl/expected.status.3
OLD_FILES+=usr/tests/usr.bin/make/syntax/enl/expected.status.4
OLD_FILES+=usr/tests/usr.bin/make/syntax/enl/expected.status.5
OLD_FILES+=usr/tests/usr.bin/make/syntax/enl/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/syntax/enl/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/make/syntax/enl/expected.stderr.3
OLD_FILES+=usr/tests/usr.bin/make/syntax/enl/expected.stderr.4
OLD_FILES+=usr/tests/usr.bin/make/syntax/enl/expected.stderr.5
OLD_FILES+=usr/tests/usr.bin/make/syntax/enl/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/syntax/enl/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/make/syntax/enl/expected.stdout.3
OLD_FILES+=usr/tests/usr.bin/make/syntax/enl/expected.stdout.4
OLD_FILES+=usr/tests/usr.bin/make/syntax/enl/expected.stdout.5
OLD_FILES+=usr/tests/usr.bin/make/syntax/funny-targets/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/syntax/funny-targets/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/syntax/funny-targets/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/syntax/funny-targets/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/syntax/funny-targets/expected.status.2
OLD_FILES+=usr/tests/usr.bin/make/syntax/funny-targets/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/syntax/funny-targets/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/make/syntax/funny-targets/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/syntax/funny-targets/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/make/syntax/semi/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/syntax/semi/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/syntax/semi/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/syntax/semi/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/syntax/semi/expected.status.2
OLD_FILES+=usr/tests/usr.bin/make/syntax/semi/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/syntax/semi/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/make/syntax/semi/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/syntax/semi/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/make/syntax/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t0/2/1/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t0/2/1/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t0/2/1/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t0/2/1/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t0/2/1/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t0/2/1/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t0/2/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t0/mk/sys.mk
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t0/mk/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t0/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t1/2/1/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t1/2/1/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t1/2/1/cleanup
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t1/2/1/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t1/2/1/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t1/2/1/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t1/2/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t1/mk/sys.mk
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t1/mk/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t1/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t2/2/1/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t2/2/1/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t2/2/1/cleanup
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t2/2/1/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t2/2/1/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t2/2/1/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t2/2/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t2/mk/sys.mk
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t2/mk/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t2/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/sysmk/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/variables/modifier_M/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/variables/modifier_M/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/variables/modifier_M/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/variables/modifier_M/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/variables/modifier_M/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/variables/modifier_M/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/variables/modifier_t/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/variables/modifier_t/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/variables/modifier_t/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/variables/modifier_t/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/variables/modifier_t/expected.status.2
OLD_FILES+=usr/tests/usr.bin/make/variables/modifier_t/expected.status.3
OLD_FILES+=usr/tests/usr.bin/make/variables/modifier_t/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/variables/modifier_t/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/make/variables/modifier_t/expected.stderr.3
OLD_FILES+=usr/tests/usr.bin/make/variables/modifier_t/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/variables/modifier_t/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/make/variables/modifier_t/expected.stdout.3
OLD_FILES+=usr/tests/usr.bin/make/variables/opt_V/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/variables/opt_V/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/variables/opt_V/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/variables/opt_V/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/variables/opt_V/expected.status.2
OLD_FILES+=usr/tests/usr.bin/make/variables/opt_V/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/variables/opt_V/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/make/variables/opt_V/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/variables/opt_V/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/make/variables/t0/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/variables/t0/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/variables/t0/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/variables/t0/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/variables/t0/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/variables/t0/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/variables/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/common.sh
OLD_FILES+=usr/tests/usr.bin/make/test-new.mk
OLD_DIRS+=usr/tests/usr.bin/make/variables/t0
OLD_DIRS+=usr/tests/usr.bin/make/variables/opt_V
OLD_DIRS+=usr/tests/usr.bin/make/variables/modifier_t
OLD_DIRS+=usr/tests/usr.bin/make/variables/modifier_M
OLD_DIRS+=usr/tests/usr.bin/make/variables
OLD_DIRS+=usr/tests/usr.bin/make/sysmk/t2/mk
OLD_DIRS+=usr/tests/usr.bin/make/sysmk/t2/2/1
OLD_DIRS+=usr/tests/usr.bin/make/sysmk/t2/2
OLD_DIRS+=usr/tests/usr.bin/make/sysmk/t2
OLD_DIRS+=usr/tests/usr.bin/make/sysmk/t1/mk
OLD_DIRS+=usr/tests/usr.bin/make/sysmk/t1/2/1
OLD_DIRS+=usr/tests/usr.bin/make/sysmk/t1/2
OLD_DIRS+=usr/tests/usr.bin/make/sysmk/t1
OLD_DIRS+=usr/tests/usr.bin/make/sysmk/t0/mk
OLD_DIRS+=usr/tests/usr.bin/make/sysmk/t0/2/1
OLD_DIRS+=usr/tests/usr.bin/make/sysmk/t0/2
OLD_DIRS+=usr/tests/usr.bin/make/sysmk/t0
OLD_DIRS+=usr/tests/usr.bin/make/sysmk
OLD_DIRS+=usr/tests/usr.bin/make/syntax/semi
OLD_DIRS+=usr/tests/usr.bin/make/syntax/funny-targets
OLD_DIRS+=usr/tests/usr.bin/make/syntax/enl
OLD_DIRS+=usr/tests/usr.bin/make/syntax/directive-t0
OLD_DIRS+=usr/tests/usr.bin/make/syntax
OLD_DIRS+=usr/tests/usr.bin/make/suffixes/src_wild2
OLD_DIRS+=usr/tests/usr.bin/make/suffixes/src_wild1
OLD_DIRS+=usr/tests/usr.bin/make/suffixes/basic
OLD_DIRS+=usr/tests/usr.bin/make/suffixes
OLD_DIRS+=usr/tests/usr.bin/make/shell/select
OLD_DIRS+=usr/tests/usr.bin/make/shell/replace
OLD_DIRS+=usr/tests/usr.bin/make/shell/path_select
OLD_DIRS+=usr/tests/usr.bin/make/shell/path
OLD_DIRS+=usr/tests/usr.bin/make/shell/meta
OLD_DIRS+=usr/tests/usr.bin/make/shell/builtin
OLD_DIRS+=usr/tests/usr.bin/make/shell
OLD_DIRS+=usr/tests/usr.bin/make/execution/plus
OLD_DIRS+=usr/tests/usr.bin/make/execution/joberr
OLD_DIRS+=usr/tests/usr.bin/make/execution/empty
OLD_DIRS+=usr/tests/usr.bin/make/execution/ellipsis
OLD_DIRS+=usr/tests/usr.bin/make/execution
OLD_DIRS+=usr/tests/usr.bin/make/basic/t3
OLD_DIRS+=usr/tests/usr.bin/make/basic/t2
OLD_DIRS+=usr/tests/usr.bin/make/basic/t1
OLD_DIRS+=usr/tests/usr.bin/make/basic/t0
OLD_DIRS+=usr/tests/usr.bin/make/basic
OLD_DIRS+=usr/tests/usr.bin/make/archives/fmt_oldbsd
OLD_DIRS+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod
OLD_DIRS+=usr/tests/usr.bin/make/archives/fmt_44bsd
OLD_DIRS+=usr/tests/usr.bin/make/archives
OLD_DIRS+=usr/tests/usr.bin/make
OLD_FILES+=usr/tests/usr.bin/yacc/legacy_test
OLD_FILES+=usr/tests/usr.bin/yacc/regress.00.out
OLD_FILES+=usr/tests/usr.bin/yacc/regress.01.out
OLD_FILES+=usr/tests/usr.bin/yacc/regress.02.out
OLD_FILES+=usr/tests/usr.bin/yacc/regress.03.out
OLD_FILES+=usr/tests/usr.bin/yacc/regress.04.out
OLD_FILES+=usr/tests/usr.bin/yacc/regress.05.out
OLD_FILES+=usr/tests/usr.bin/yacc/regress.06.out
OLD_FILES+=usr/tests/usr.bin/yacc/regress.07.out
OLD_FILES+=usr/tests/usr.bin/yacc/regress.08.out
OLD_FILES+=usr/tests/usr.bin/yacc/regress.09.out
OLD_FILES+=usr/tests/usr.bin/yacc/regress.10.out
OLD_FILES+=usr/tests/usr.bin/yacc/regress.11.out
OLD_FILES+=usr/tests/usr.bin/yacc/regress.12.out
OLD_FILES+=usr/tests/usr.bin/yacc/regress.13.out
OLD_FILES+=usr/tests/usr.bin/yacc/regress.14.out
OLD_FILES+=usr/tests/usr.bin/yacc/regress.sh
OLD_FILES+=usr/tests/usr.bin/yacc/undefined.y
.endif # ${MK_MAKE} == yes
.else
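# With tests disabled, remove the ATF toolchain and the installed test tree.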
# ATF libraries.
OLD_FILES+=etc/mtree/BSD.tests.dist
OLD_FILES+=usr/bin/atf-sh
OLD_DIRS+=usr/include/atf-c
OLD_FILES+=usr/include/atf-c/build.h
OLD_FILES+=usr/include/atf-c/check.h
OLD_FILES+=usr/include/atf-c/config.h
OLD_FILES+=usr/include/atf-c/defs.h
OLD_FILES+=usr/include/atf-c/error.h
OLD_FILES+=usr/include/atf-c/error_fwd.h
OLD_FILES+=usr/include/atf-c/macros.h
OLD_FILES+=usr/include/atf-c/tc.h
OLD_FILES+=usr/include/atf-c/tp.h
OLD_FILES+=usr/include/atf-c/utils.h
OLD_FILES+=usr/include/atf-c.h
OLD_DIRS+=usr/include/atf-c++
OLD_FILES+=usr/include/atf-c++/build.hpp
OLD_FILES+=usr/include/atf-c++/check.hpp
OLD_FILES+=usr/include/atf-c++/config.hpp
OLD_FILES+=usr/include/atf-c++/macros.hpp
OLD_FILES+=usr/include/atf-c++/tests.hpp
OLD_FILES+=usr/include/atf-c++/utils.hpp
OLD_FILES+=usr/include/atf-c++.hpp
OLD_FILES+=usr/lib/libatf-c_p.a
OLD_FILES+=usr/lib/libatf-c.so.1
OLD_FILES+=usr/lib/libatf-c.so
OLD_FILES+=usr/lib/libatf-c++.a
OLD_FILES+=usr/lib/libatf-c++_p.a
OLD_FILES+=usr/lib/libatf-c++.so.1
OLD_FILES+=usr/lib/libatf-c++.so
OLD_FILES+=usr/lib/libatf-c.a
OLD_FILES+=usr/libexec/atf-check
OLD_FILES+=usr/libexec/atf-sh
OLD_DIRS+=usr/share/atf
OLD_FILES+=usr/share/atf/libatf-sh.subr
OLD_DIRS+=usr/share/doc/atf
OLD_FILES+=usr/share/doc/atf/AUTHORS
OLD_FILES+=usr/share/doc/atf/COPYING
OLD_FILES+=usr/share/doc/atf/NEWS
OLD_FILES+=usr/share/doc/atf/README
OLD_FILES+=usr/share/doc/pjdfstest/README
OLD_FILES+=usr/share/man/man1/atf-check.1.gz
OLD_FILES+=usr/share/man/man1/atf-sh.1.gz
OLD_FILES+=usr/share/man/man1/atf-test-program.1.gz
OLD_FILES+=usr/share/man/man3/atf-c-api.3.gz
OLD_FILES+=usr/share/man/man3/atf-c++-api.3.gz
OLD_FILES+=usr/share/man/man3/atf-sh-api.3.gz
OLD_FILES+=usr/share/man/man3/atf-sh.3.gz
OLD_FILES+=usr/share/man/man4/atf-test-case.4.gz
OLD_FILES+=usr/share/man/man7/atf.7.gz
OLD_FILES+=usr/share/mk/atf.test.mk
OLD_FILES+=usr/share/mk/plain.test.mk
OLD_FILES+=usr/share/mk/suite.test.mk
OLD_FILES+=usr/share/mk/tap.test.mk
# Test suite.
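# Sweep everything installed under ${TESTSBASE}: the != assignments run
# find(1) at parse time, sed strips the ${DESTDIR} prefix so the paths
# match the usual OLD_FILES/OLD_DIRS form, and the trailing echo keeps
# the command's exit status zero when nothing is found.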
. if exists(${DESTDIR}${TESTSBASE})
TESTS_DIRS!=find ${DESTDIR}${TESTSBASE} -type d | sed -e 's,^${DESTDIR}/,,'; echo
OLD_DIRS+=${TESTS_DIRS}
TESTS_FILES!=find ${DESTDIR}${TESTSBASE} \! -type d | sed -e 's,^${DESTDIR}/,,'; echo
OLD_FILES+=${TESTS_FILES}
. endif
.endif # ${MK_TESTS} == yes
.if ${MK_TESTS_SUPPORT} == no
OLD_FILES+=usr/include/atf-c++.hpp
OLD_FILES+=usr/include/atf-c++/build.hpp
OLD_FILES+=usr/include/atf-c++/check.hpp
OLD_FILES+=usr/include/atf-c++/macros.hpp
OLD_FILES+=usr/include/atf-c++/tests.hpp
OLD_FILES+=usr/include/atf-c++/utils.hpp
OLD_FILES+=usr/include/atf-c.h
OLD_FILES+=usr/include/atf-c/build.h
OLD_FILES+=usr/include/atf-c/check.h
OLD_FILES+=usr/include/atf-c/defs.h
OLD_FILES+=usr/include/atf-c/error.h
OLD_FILES+=usr/include/atf-c/error_fwd.h
OLD_FILES+=usr/include/atf-c/macros.h
OLD_FILES+=usr/include/atf-c/tc.h
OLD_FILES+=usr/include/atf-c/tp.h
OLD_FILES+=usr/include/atf-c/utils.h
OLD_LIBS+=usr/lib/private/libatf-c++.so.2
OLD_LIBS+=usr/lib/private/libatf-c.so.1
OLD_FILES+=usr/share/man/man3/atf-c++.3.gz
OLD_FILES+=usr/share/man/man3/atf-c-api++.3.gz
OLD_FILES+=usr/share/man/man3/atf-c-api.3.gz
OLD_FILES+=usr/share/man/man3/atf-c.3.gz
OLD_FILES+=usr/tests/lib/atf/Kyuafile
OLD_FILES+=usr/tests/lib/atf/libatf-c++/Kyuafile
OLD_FILES+=usr/tests/lib/atf/libatf-c++/atf_c++_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/build_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/check_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/Kyuafile
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/application_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/env_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/exceptions_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/fs_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/process_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/text_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/version_helper
OLD_FILES+=usr/tests/lib/atf/libatf-c++/macros_hpp_test.cpp
OLD_FILES+=usr/tests/lib/atf/libatf-c++/macros_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/tests_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/unused_test.cpp
OLD_FILES+=usr/tests/lib/atf/libatf-c++/utils_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/Kyuafile
OLD_FILES+=usr/tests/lib/atf/libatf-c/atf_c_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/build_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/check_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/Kyuafile
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/dynstr_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/env_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/fs_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/list_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/map_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/process_helpers
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/process_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/sanity_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/text_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/user_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/version_helper
OLD_FILES+=usr/tests/lib/atf/libatf-c/error_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/macros_h_test.c
OLD_FILES+=usr/tests/lib/atf/libatf-c/macros_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/tc_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/tp_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/unused_test.c
OLD_FILES+=usr/tests/lib/atf/libatf-c/utils_test
OLD_FILES+=usr/tests/lib/atf/test-programs/Kyuafile
OLD_FILES+=usr/tests/lib/atf/test-programs/c_helpers
OLD_FILES+=usr/tests/lib/atf/test-programs/config_test
OLD_FILES+=usr/tests/lib/atf/test-programs/cpp_helpers
OLD_FILES+=usr/tests/lib/atf/test-programs/expect_test
OLD_FILES+=usr/tests/lib/atf/test-programs/meta_data_test
OLD_FILES+=usr/tests/lib/atf/test-programs/result_test
OLD_FILES+=usr/tests/lib/atf/test-programs/sh_helpers
OLD_FILES+=usr/tests/lib/atf/test-programs/srcdir_test
.endif
.if ${MK_TEXTPROC} == no
OLD_FILES+=usr/bin/checknr
OLD_FILES+=usr/bin/colcrt
OLD_FILES+=usr/bin/ul
OLD_FILES+=usr/share/man/man1/checknr.1.gz
OLD_FILES+=usr/share/man/man1/colcrt.1.gz
OLD_FILES+=usr/share/man/man1/ul.1.gz
.endif
.if ${MK_TFTP} == no
OLD_FILES+=usr/bin/tftp
OLD_FILES+=usr/libexec/tftpd
OLD_FILES+=usr/share/man/man1/tftp.1.gz
OLD_FILES+=usr/share/man/man8/tftpd.8.gz
.endif
.if ${MK_TIMED} == no
OLD_FILES+=usr/sbin/timed
OLD_FILES+=usr/sbin/timedc
OLD_FILES+=usr/share/man/man8/timed.8.gz
OLD_FILES+=usr/share/man/man8/timedc.8.gz
.endif
.if ${MK_TOOLCHAIN} == no
OLD_FILES+=usr/bin/addr2line
OLD_FILES+=usr/bin/as
OLD_FILES+=usr/bin/byacc
OLD_FILES+=usr/bin/cc
OLD_FILES+=usr/bin/c89
OLD_FILES+=usr/bin/c++
OLD_FILES+=usr/bin/c++filt
OLD_FILES+=usr/bin/ld
OLD_FILES+=usr/bin/ld.bfd
OLD_FILES+=usr/bin/nm
OLD_FILES+=usr/bin/objcopy
OLD_FILES+=usr/bin/readelf
OLD_FILES+=usr/bin/size
OLD_FILES+=usr/bin/strip
OLD_FILES+=usr/bin/yacc
OLD_FILES+=usr/share/man/man1/addr2line.1.gz
OLD_FILES+=usr/share/man/man1/c++filt.1.gz
OLD_FILES+=usr/share/man/man1/nm.1.gz
OLD_FILES+=usr/share/man/man1/readelf.1.gz
OLD_FILES+=usr/share/man/man1/size.1.gz
OLD_FILES+=usr/share/man/man1/strip.1.gz
OLD_FILES+=usr/share/man/man1/objcopy.1.gz
# lib/libelf
OLD_FILES+=usr/share/man/man3/elf.3.gz
OLD_FILES+=usr/share/man/man3/elf_begin.3.gz
OLD_FILES+=usr/share/man/man3/elf_cntl.3.gz
OLD_FILES+=usr/share/man/man3/elf_end.3.gz
OLD_FILES+=usr/share/man/man3/elf_errmsg.3.gz
OLD_FILES+=usr/share/man/man3/elf_fill.3.gz
OLD_FILES+=usr/share/man/man3/elf_flagdata.3.gz
OLD_FILES+=usr/share/man/man3/elf_getarhdr.3.gz
OLD_FILES+=usr/share/man/man3/elf_getarsym.3.gz
OLD_FILES+=usr/share/man/man3/elf_getbase.3.gz
OLD_FILES+=usr/share/man/man3/elf_getdata.3.gz
OLD_FILES+=usr/share/man/man3/elf_getident.3.gz
OLD_FILES+=usr/share/man/man3/elf_getscn.3.gz
OLD_FILES+=usr/share/man/man3/elf_getphdrnum.3.gz
OLD_FILES+=usr/share/man/man3/elf_getphnum.3.gz
OLD_FILES+=usr/share/man/man3/elf_getshdrnum.3.gz
OLD_FILES+=usr/share/man/man3/elf_getshnum.3.gz
OLD_FILES+=usr/share/man/man3/elf_getshdrstrndx.3.gz
OLD_FILES+=usr/share/man/man3/elf_getshstrndx.3.gz
OLD_FILES+=usr/share/man/man3/elf_hash.3.gz
OLD_FILES+=usr/share/man/man3/elf_kind.3.gz
OLD_FILES+=usr/share/man/man3/elf_memory.3.gz
OLD_FILES+=usr/share/man/man3/elf_next.3.gz
OLD_FILES+=usr/share/man/man3/elf_open.3.gz
OLD_FILES+=usr/share/man/man3/elf_rawfile.3.gz
OLD_FILES+=usr/share/man/man3/elf_rand.3.gz
OLD_FILES+=usr/share/man/man3/elf_strptr.3.gz
OLD_FILES+=usr/share/man/man3/elf_update.3.gz
OLD_FILES+=usr/share/man/man3/elf_version.3.gz
OLD_FILES+=usr/share/man/man3/gelf.3.gz
OLD_FILES+=usr/share/man/man3/gelf_checksum.3.gz
OLD_FILES+=usr/share/man/man3/gelf_fsize.3.gz
OLD_FILES+=usr/share/man/man3/gelf_getcap.3.gz
OLD_FILES+=usr/share/man/man3/gelf_getclass.3.gz
OLD_FILES+=usr/share/man/man3/gelf_getdyn.3.gz
OLD_FILES+=usr/share/man/man3/gelf_getehdr.3.gz
OLD_FILES+=usr/share/man/man3/gelf_getmove.3.gz
OLD_FILES+=usr/share/man/man3/gelf_getphdr.3.gz
OLD_FILES+=usr/share/man/man3/gelf_getrel.3.gz
OLD_FILES+=usr/share/man/man3/gelf_getrela.3.gz
OLD_FILES+=usr/share/man/man3/gelf_getshdr.3.gz
OLD_FILES+=usr/share/man/man3/gelf_getsym.3.gz
OLD_FILES+=usr/share/man/man3/gelf_getsyminfo.3.gz
OLD_FILES+=usr/share/man/man3/gelf_getsymshndx.3.gz
OLD_FILES+=usr/share/man/man3/gelf_newehdr.3.gz
OLD_FILES+=usr/share/man/man3/gelf_newphdr.3.gz
OLD_FILES+=usr/share/man/man3/gelf_update_ehdr.3.gz
OLD_FILES+=usr/share/man/man3/gelf_xlatetof.3.gz
# lib/libelftc
OLD_FILES+=usr/share/man/man3/elftc.3.gz
OLD_FILES+=usr/share/man/man3/elftc_bfd_find_target.3.gz
OLD_FILES+=usr/share/man/man3/elftc_copyfile.3.gz
OLD_FILES+=usr/share/man/man3/elftc_demangle.3.gz
OLD_FILES+=usr/share/man/man3/elftc_reloc_type_str.3.gz
OLD_FILES+=usr/share/man/man3/elftc_set_timestamps.3.gz
OLD_FILES+=usr/share/man/man3/elftc_timestamp.3.gz
OLD_FILES+=usr/share/man/man3/elftc_string_table_create.3.gz
OLD_FILES+=usr/share/man/man3/elftc_version.3.gz
OLD_FILES+=usr/tests/usr.bin/yacc/Kyuafile
OLD_FILES+=usr/tests/usr.bin/yacc/btyacc_calc1.y
OLD_FILES+=usr/tests/usr.bin/yacc/btyacc_demo.y
OLD_FILES+=usr/tests/usr.bin/yacc/calc.y
OLD_FILES+=usr/tests/usr.bin/yacc/calc1.y
OLD_FILES+=usr/tests/usr.bin/yacc/calc2.y
OLD_FILES+=usr/tests/usr.bin/yacc/calc3.y
OLD_FILES+=usr/tests/usr.bin/yacc/code_calc.y
OLD_FILES+=usr/tests/usr.bin/yacc/code_debug.y
OLD_FILES+=usr/tests/usr.bin/yacc/code_error.y
OLD_FILES+=usr/tests/usr.bin/yacc/empty.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_inherit1.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_inherit2.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_inherit3.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_inherit4.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_inherit5.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax1.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax10.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax11.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax12.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax13.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax14.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax15.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax16.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax17.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax18.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax19.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax2.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax20.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax21.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax22.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax23.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax24.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax25.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax26.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax27.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax3.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax4.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax5.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax6.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax7.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax7a.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax7b.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax8.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax8a.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax9.y
OLD_FILES+=usr/tests/usr.bin/yacc/error.y
OLD_FILES+=usr/tests/usr.bin/yacc/grammar.y
OLD_FILES+=usr/tests/usr.bin/yacc/inherit0.y
OLD_FILES+=usr/tests/usr.bin/yacc/inherit1.y
OLD_FILES+=usr/tests/usr.bin/yacc/inherit2.y
OLD_FILES+=usr/tests/usr.bin/yacc/ok_syntax1.y
OLD_FILES+=usr/tests/usr.bin/yacc/pure_calc.y
OLD_FILES+=usr/tests/usr.bin/yacc/pure_error.y
OLD_FILES+=usr/tests/usr.bin/yacc/quote_calc.y
OLD_FILES+=usr/tests/usr.bin/yacc/quote_calc2.y
OLD_FILES+=usr/tests/usr.bin/yacc/quote_calc3.y
OLD_FILES+=usr/tests/usr.bin/yacc/quote_calc4.y
OLD_FILES+=usr/tests/usr.bin/yacc/run_test
OLD_FILES+=usr/tests/usr.bin/yacc/varsyntax_calc1.y
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/big_b.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/big_b.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/big_l.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/big_l.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/calc.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/calc.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/calc.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/calc.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/calc1.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/calc1.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/calc1.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/calc1.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/calc2.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/calc2.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/calc2.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/calc2.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/calc3.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/calc3.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/calc3.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/calc3.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/code_calc.code.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/code_calc.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/code_calc.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/code_calc.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/code_calc.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/code_error.code.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/code_error.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/code_error.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/code_error.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/code_error.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/empty.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/empty.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/empty.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/empty.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax1.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax1.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax1.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax1.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax10.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax10.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax10.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax10.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax11.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax11.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax11.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax11.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax12.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax12.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax12.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax12.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax13.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax13.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax13.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax13.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax14.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax14.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax14.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax14.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax15.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax15.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax15.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax15.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax16.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax16.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax16.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax16.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax17.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax17.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax17.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax17.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax18.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax18.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax18.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax18.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax19.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax19.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax19.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax19.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax2.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax2.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax2.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax2.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax20.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax20.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax20.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax20.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax21.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax21.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax21.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax21.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax22.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax22.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax22.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax22.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax23.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax23.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax23.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax23.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax24.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax24.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax24.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax24.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax25.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax25.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax25.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax25.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax26.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax26.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax26.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax26.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax27.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax27.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax27.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax27.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax3.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax3.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax3.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax3.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax4.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax4.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax4.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax4.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax5.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax5.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax5.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax5.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax6.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax6.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax6.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax6.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax7.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax7.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax7.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax7.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax7a.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax7a.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax7a.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax7a.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax7b.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax7b.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax7b.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax7b.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax8.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax8.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax8.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax8.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax8a.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax8a.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax8a.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax8a.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax9.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax9.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax9.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax9.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/error.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/error.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/error.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/error.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/grammar.dot
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/grammar.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/grammar.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/grammar.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/grammar.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/help.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/help.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_b_opt.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_b_opt.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_b_opt1.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_b_opt1.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_code_c.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_code_c.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_defines.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_defines.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_graph.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_graph.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_include.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_include.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_opts.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_opts.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_output.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_output.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_output1.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_output1.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_output2.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_output2.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_p_opt.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_p_opt.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_p_opt1.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_p_opt1.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_verbose.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_verbose.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/nostdin.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/nostdin.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/ok_syntax1.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/ok_syntax1.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/ok_syntax1.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/ok_syntax1.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/pure_calc.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/pure_calc.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/pure_calc.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/pure_calc.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/pure_error.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/pure_error.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/pure_error.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/pure_error.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc-s.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc-s.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc-s.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc-s.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc2-s.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc2-s.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc2-s.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc2-s.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc2.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc2.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc2.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc2.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc3-s.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc3-s.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc3-s.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc3-s.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc3.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc3.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc3.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc3.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc4-s.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc4-s.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc4-s.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc4-s.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc4.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc4.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc4.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc4.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/rename_debug.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/rename_debug.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/rename_debug.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/rename_debug.i
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/rename_debug.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/varsyntax_calc1.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/varsyntax_calc1.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/varsyntax_calc1.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/varsyntax_calc1.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc_tests
OLD_DIRS+=usr/tests/usr.bin/yacc
.endif # ${MK_TOOLCHAIN} == no
.if ${MK_UNBOUND} == no
OLD_FILES+=etc/rc.d/local_unbound
OLD_FILES+=etc/unbound
OLD_FILES+=usr/lib/private/libunbound.a
OLD_FILES+=usr/lib/private/libunbound.so
OLD_LIBS+=usr/lib/private/libunbound.so.5
OLD_FILES+=usr/lib/private/libunbound_p.a
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64"
OLD_FILES+=usr/lib32/private/libunbound.a
OLD_FILES+=usr/lib32/private/libunbound.so
OLD_LIBS+=usr/lib32/private/libunbound.so.5
OLD_FILES+=usr/lib32/private/libunbound_p.a
.endif
OLD_FILES+=usr/sbin/local-unbound-setup
OLD_FILES+=usr/sbin/local-unbound
OLD_FILES+=usr/sbin/local-unbound-anchor
OLD_FILES+=usr/sbin/local-unbound-checkconf
OLD_FILES+=usr/sbin/local-unbound-control
OLD_FILES+=usr/share/man/man5/local-unbound.conf.5.gz
OLD_FILES+=usr/share/man/man8/local-unbound-anchor.8.gz
OLD_FILES+=usr/share/man/man8/local-unbound-checkconf.8.gz
OLD_FILES+=usr/share/man/man8/local-unbound-control.8.gz
OLD_FILES+=usr/share/man/man8/local-unbound.8.gz
.endif
.if ${MK_USB} == no
OLD_FILES+=etc/devd/uath.conf
OLD_FILES+=etc/devd/uauth.conf
OLD_FILES+=etc/devd/ulpt.conf
OLD_FILES+=etc/devd/usb.conf
OLD_FILES+=usr/bin/usbhidaction
OLD_FILES+=usr/bin/usbhidctl
OLD_FILES+=usr/include/libusb.h
OLD_FILES+=usr/include/libusb20.h
OLD_FILES+=usr/include/libusb20_desc.h
OLD_FILES+=usr/include/usb.h
OLD_FILES+=usr/include/usbhid.h
OLD_FILES+=usr/lib/libusb.a
OLD_FILES+=usr/lib/libusb.so
OLD_LIBS+=usr/lib/libusb.so.3
OLD_FILES+=usr/lib/libusb_p.a
OLD_FILES+=usr/lib/libusbhid.a
OLD_FILES+=usr/lib/libusbhid.so
OLD_LIBS+=usr/lib/libusbhid.so.4
OLD_FILES+=usr/lib/libusbhid_p.a
OLD_FILES+=usr/lib32/libusb.a
OLD_FILES+=usr/lib32/libusb.so
OLD_LIBS+=usr/lib32/libusb.so.3
OLD_FILES+=usr/lib32/libusb_p.a
OLD_FILES+=usr/lib32/libusbhid.a
OLD_FILES+=usr/lib32/libusbhid.so
OLD_LIBS+=usr/lib32/libusbhid.so.4
OLD_FILES+=usr/lib32/libusbhid_p.a
OLD_FILES+=usr/libdata/pkgconfig/libusb-0.1.pc
OLD_FILES+=usr/libdata/pkgconfig/libusb-1.0.pc
OLD_FILES+=usr/libdata/pkgconfig/libusb-2.0.pc
OLD_FILES+=usr/sbin/uathload
OLD_FILES+=usr/sbin/uhsoctl
OLD_FILES+=usr/sbin/usbconfig
OLD_FILES+=usr/sbin/usbdump
OLD_FILES+=usr/share/examples/libusb20/Makefile
OLD_FILES+=usr/share/examples/libusb20/README
OLD_FILES+=usr/share/examples/libusb20/bulk.c
OLD_FILES+=usr/share/examples/libusb20/control.c
OLD_FILES+=usr/share/examples/libusb20/util.c
OLD_FILES+=usr/share/examples/libusb20/util.h
OLD_DIRS+=usr/share/examples/libusb20
OLD_FILES+=usr/share/firmware/ar5523.bin
OLD_FILES+=usr/share/man/man1/uhsoctl.1.gz
OLD_FILES+=usr/share/man/man1/usbhidaction.1.gz
OLD_FILES+=usr/share/man/man1/usbhidctl.1.gz
OLD_FILES+=usr/share/man/man3/hid_dispose_report_desc.3.gz
OLD_FILES+=usr/share/man/man3/hid_end_parse.3.gz
OLD_FILES+=usr/share/man/man3/hid_get_data.3.gz
OLD_FILES+=usr/share/man/man3/hid_get_item.3.gz
OLD_FILES+=usr/share/man/man3/hid_get_report_desc.3.gz
OLD_FILES+=usr/share/man/man3/hid_init.3.gz
OLD_FILES+=usr/share/man/man3/hid_locate.3.gz
OLD_FILES+=usr/share/man/man3/hid_report_size.3.gz
OLD_FILES+=usr/share/man/man3/hid_set_data.3.gz
OLD_FILES+=usr/share/man/man3/hid_start_parse.3.gz
OLD_FILES+=usr/share/man/man3/hid_usage_in_page.3.gz
OLD_FILES+=usr/share/man/man3/hid_usage_page.3.gz
OLD_FILES+=usr/share/man/man3/libusb.3.gz
OLD_FILES+=usr/share/man/man3/libusb20.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_be_add_dev_quirk.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_be_alloc_default.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_be_dequeue_device.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_be_device_foreach.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_be_enqueue_device.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_be_free.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_be_get_dev_quirk.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_be_get_quirk_name.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_be_get_template.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_be_remove_dev_quirk.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_be_set_template.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_desc_foreach.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_alloc.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_alloc_config.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_check_connected.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_close.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_detach_kernel_driver.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_free.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_get_address.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_get_backend_name.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_get_bus_number.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_get_config_index.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_get_debug.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_get_desc.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_get_device_desc.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_get_fd.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_get_iface_desc.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_get_info.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_get_mode.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_get_parent_address.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_get_parent_port.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_get_port_path.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_get_power_mode.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_get_power_usage.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_get_speed.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_kernel_driver_active.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_open.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_process.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_req_string_simple_sync.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_req_string_sync.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_request_sync.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_reset.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_set_alt_index.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_set_config_index.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_set_debug.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_set_power_mode.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_wait_process.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_error_name.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_me_decode.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_me_encode.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_me_get_1.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_me_get_2.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_strerror.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_bulk_intr_sync.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_callback_wrapper.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_clear_stall_sync.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_close.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_drain.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_get_actual_frames.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_get_actual_length.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_get_length.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_get_max_frames.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_get_max_packet_length.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_get_max_total_length.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_get_pointer.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_get_priv_sc0.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_get_priv_sc1.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_get_status.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_get_time_complete.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_open.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_pending.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_set_buffer.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_set_callback.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_set_flags.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_set_length.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_set_priv_sc0.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_set_priv_sc1.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_set_timeout.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_set_total_frames.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_setup_bulk.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_setup_control.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_setup_intr.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_setup_isoc.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_start.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_stop.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_submit.3.gz
OLD_FILES+=usr/share/man/man3/libusb_alloc_transfer.3.gz
OLD_FILES+=usr/share/man/man3/libusb_attach_kernel_driver.3.gz
OLD_FILES+=usr/share/man/man3/libusb_bulk_transfer.3.gz
OLD_FILES+=usr/share/man/man3/libusb_cancel_transfer.3.gz
OLD_FILES+=usr/share/man/man3/libusb_check_connected.3.gz
OLD_FILES+=usr/share/man/man3/libusb_claim_interface.3.gz
OLD_FILES+=usr/share/man/man3/libusb_clear_halt.3.gz
OLD_FILES+=usr/share/man/man3/libusb_close.3.gz
OLD_FILES+=usr/share/man/man3/libusb_control_transfer.3.gz
OLD_FILES+=usr/share/man/man3/libusb_detach_kernel_driver.3.gz
OLD_FILES+=usr/share/man/man3/libusb_detach_kernel_driver_np.3.gz
OLD_FILES+=usr/share/man/man3/libusb_error_name.3.gz
OLD_FILES+=usr/share/man/man3/libusb_event_handler_active.3.gz
OLD_FILES+=usr/share/man/man3/libusb_event_handling_ok.3.gz
OLD_FILES+=usr/share/man/man3/libusb_exit.3.gz
OLD_FILES+=usr/share/man/man3/libusb_free_bos_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/libusb_free_config_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/libusb_free_device_list.3.gz
OLD_FILES+=usr/share/man/man3/libusb_free_ss_endpoint_comp.3.gz
OLD_FILES+=usr/share/man/man3/libusb_free_transfer.3.gz
OLD_FILES+=usr/share/man/man3/libusb_get_active_config_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/libusb_get_bus_number.3.gz
OLD_FILES+=usr/share/man/man3/libusb_get_config_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/libusb_get_config_descriptor_by_value.3.gz
OLD_FILES+=usr/share/man/man3/libusb_get_configuration.3.gz
OLD_FILES+=usr/share/man/man3/libusb_get_device.3.gz
OLD_FILES+=usr/share/man/man3/libusb_get_device_address.3.gz
OLD_FILES+=usr/share/man/man3/libusb_get_device_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/libusb_get_device_list.3.gz
OLD_FILES+=usr/share/man/man3/libusb_get_device_speed.3.gz
OLD_FILES+=usr/share/man/man3/libusb_get_driver.3.gz
OLD_FILES+=usr/share/man/man3/libusb_get_driver_np.3.gz
OLD_FILES+=usr/share/man/man3/libusb_get_max_iso_packet_size.3.gz
OLD_FILES+=usr/share/man/man3/libusb_get_max_packet_size.3.gz
OLD_FILES+=usr/share/man/man3/libusb_get_next_timeout.3.gz
OLD_FILES+=usr/share/man/man3/libusb_get_pollfds.3.gz
OLD_FILES+=usr/share/man/man3/libusb_get_string_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/libusb_get_string_descriptor_ascii.3.gz
OLD_FILES+=usr/share/man/man3/libusb_handle_events.3.gz
OLD_FILES+=usr/share/man/man3/libusb_handle_events_completed.3.gz
OLD_FILES+=usr/share/man/man3/libusb_handle_events_locked.3.gz
OLD_FILES+=usr/share/man/man3/libusb_handle_events_timeout.3.gz
OLD_FILES+=usr/share/man/man3/libusb_handle_events_timeout_completed.3.gz
OLD_FILES+=usr/share/man/man3/libusb_init.3.gz
OLD_FILES+=usr/share/man/man3/libusb_interrupt_transfer.3.gz
OLD_FILES+=usr/share/man/man3/libusb_kernel_driver_active.3.gz
OLD_FILES+=usr/share/man/man3/libusb_lock_event_waiters.3.gz
OLD_FILES+=usr/share/man/man3/libusb_lock_events.3.gz
OLD_FILES+=usr/share/man/man3/libusb_open.3.gz
OLD_FILES+=usr/share/man/man3/libusb_open_device_with_vid_pid.3.gz
OLD_FILES+=usr/share/man/man3/libusb_parse_bos_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/libusb_parse_ss_endpoint_comp.3.gz
OLD_FILES+=usr/share/man/man3/libusb_ref_device.3.gz
OLD_FILES+=usr/share/man/man3/libusb_release_interface.3.gz
OLD_FILES+=usr/share/man/man3/libusb_reset_device.3.gz
OLD_FILES+=usr/share/man/man3/libusb_set_configuration.3.gz
OLD_FILES+=usr/share/man/man3/libusb_set_debug.3.gz
OLD_FILES+=usr/share/man/man3/libusb_set_interface_alt_setting.3.gz
OLD_FILES+=usr/share/man/man3/libusb_set_pollfd_notifiers.3.gz
OLD_FILES+=usr/share/man/man3/libusb_strerror.3.gz
OLD_FILES+=usr/share/man/man3/libusb_submit_transfer.3.gz
OLD_FILES+=usr/share/man/man3/libusb_try_lock_events.3.gz
OLD_FILES+=usr/share/man/man3/libusb_unlock_event_waiters.3.gz
OLD_FILES+=usr/share/man/man3/libusb_unlock_events.3.gz
OLD_FILES+=usr/share/man/man3/libusb_unref_device.3.gz
OLD_FILES+=usr/share/man/man3/libusb_wait_for_event.3.gz
OLD_FILES+=usr/share/man/man3/libusbhid.3.gz
OLD_FILES+=usr/share/man/man3/usb.3.gz
OLD_FILES+=usr/share/man/man3/usb_bulk_read.3.gz
OLD_FILES+=usr/share/man/man3/usb_bulk_write.3.gz
OLD_FILES+=usr/share/man/man3/usb_check_connected.3.gz
OLD_FILES+=usr/share/man/man3/usb_claim_interface.3.gz
OLD_FILES+=usr/share/man/man3/usb_clear_halt.3.gz
OLD_FILES+=usr/share/man/man3/usb_close.3.gz
OLD_FILES+=usr/share/man/man3/usb_control_msg.3.gz
OLD_FILES+=usr/share/man/man3/usb_destroy_configuration.3.gz
OLD_FILES+=usr/share/man/man3/usb_device.3.gz
OLD_FILES+=usr/share/man/man3/usb_fetch_and_parse_descriptors.3.gz
OLD_FILES+=usr/share/man/man3/usb_find_busses.3.gz
OLD_FILES+=usr/share/man/man3/usb_find_devices.3.gz
OLD_FILES+=usr/share/man/man3/usb_get_busses.3.gz
OLD_FILES+=usr/share/man/man3/usb_get_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/usb_get_descriptor_by_endpoint.3.gz
OLD_FILES+=usr/share/man/man3/usb_get_string.3.gz
OLD_FILES+=usr/share/man/man3/usb_get_string_simple.3.gz
OLD_FILES+=usr/share/man/man3/usb_init.3.gz
OLD_FILES+=usr/share/man/man3/usb_interrupt_read.3.gz
OLD_FILES+=usr/share/man/man3/usb_interrupt_write.3.gz
OLD_FILES+=usr/share/man/man3/usb_open.3.gz
OLD_FILES+=usr/share/man/man3/usb_parse_configuration.3.gz
OLD_FILES+=usr/share/man/man3/usb_parse_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/usb_release_interface.3.gz
OLD_FILES+=usr/share/man/man3/usb_reset.3.gz
OLD_FILES+=usr/share/man/man3/usb_resetep.3.gz
OLD_FILES+=usr/share/man/man3/usb_set_altinterface.3.gz
OLD_FILES+=usr/share/man/man3/usb_set_configuration.3.gz
OLD_FILES+=usr/share/man/man3/usb_set_debug.3.gz
OLD_FILES+=usr/share/man/man3/usb_strerror.3.gz
OLD_FILES+=usr/share/man/man3/usbhid.3.gz
OLD_FILES+=usr/share/man/man4/if_otus.4.gz
OLD_FILES+=usr/share/man/man4/if_rsu.4.gz
OLD_FILES+=usr/share/man/man4/if_rtwn_usb.4.gz
OLD_FILES+=usr/share/man/man4/if_rum.4.gz
OLD_FILES+=usr/share/man/man4/if_run.4.gz
OLD_FILES+=usr/share/man/man4/if_zyd.4.gz
OLD_FILES+=usr/share/man/man4/otus.4.gz
OLD_FILES+=usr/share/man/man4/otusfw.4.gz
OLD_FILES+=usr/share/man/man4/rsu.4.gz
OLD_FILES+=usr/share/man/man4/rsufw.4.gz
OLD_FILES+=usr/share/man/man4/rtwn_usb.4.gz
OLD_FILES+=usr/share/man/man4/rum.4.gz
OLD_FILES+=usr/share/man/man4/run.4.gz
OLD_FILES+=usr/share/man/man4/runfw.4.gz
OLD_FILES+=usr/share/man/man4/u3g.4.gz
OLD_FILES+=usr/share/man/man4/u3gstub.4.gz
OLD_FILES+=usr/share/man/man4/uark.4.gz
OLD_FILES+=usr/share/man/man4/uart.4.gz
OLD_FILES+=usr/share/man/man4/uath.4.gz
OLD_FILES+=usr/share/man/man4/ubsa.4.gz
OLD_FILES+=usr/share/man/man4/ubsec.4.gz
OLD_FILES+=usr/share/man/man4/ubser.4.gz
OLD_FILES+=usr/share/man/man4/ubtbcmfw.4.gz
OLD_FILES+=usr/share/man/man4/uchcom.4.gz
OLD_FILES+=usr/share/man/man4/ucom.4.gz
OLD_FILES+=usr/share/man/man4/ucycom.4.gz
OLD_FILES+=usr/share/man/man4/udav.4.gz
OLD_FILES+=usr/share/man/man4/udbp.4.gz
OLD_FILES+=usr/share/man/man4/uep.4.gz
OLD_FILES+=usr/share/man/man4/ufm.4.gz
OLD_FILES+=usr/share/man/man4/ufoma.4.gz
OLD_FILES+=usr/share/man/man4/uftdi.4.gz
OLD_FILES+=usr/share/man/man4/ugen.4.gz
OLD_FILES+=usr/share/man/man4/uhci.4.gz
OLD_FILES+=usr/share/man/man4/uhid.4.gz
OLD_FILES+=usr/share/man/man4/uhso.4.gz
OLD_FILES+=usr/share/man/man4/uipaq.4.gz
OLD_FILES+=usr/share/man/man4/ukbd.4.gz
OLD_FILES+=usr/share/man/man4/uled.4.gz
OLD_FILES+=usr/share/man/man4/ulpt.4.gz
OLD_FILES+=usr/share/man/man4/umass.4.gz
OLD_FILES+=usr/share/man/man4/umcs.4.gz
OLD_FILES+=usr/share/man/man4/umct.4.gz
OLD_FILES+=usr/share/man/man4/umodem.4.gz
OLD_FILES+=usr/share/man/man4/umoscom.4.gz
OLD_FILES+=usr/share/man/man4/ums.4.gz
OLD_FILES+=usr/share/man/man4/unix.4.gz
OLD_FILES+=usr/share/man/man4/upgt.4.gz
OLD_FILES+=usr/share/man/man4/uplcom.4.gz
OLD_FILES+=usr/share/man/man4/ural.4.gz
OLD_FILES+=usr/share/man/man4/urio.4.gz
OLD_FILES+=usr/share/man/man4/urndis.4.gz
OLD_FILES+=usr/share/man/man4/urtw.4.gz
OLD_FILES+=usr/share/man/man4/usb.4.gz
OLD_FILES+=usr/share/man/man4/usb_quirk.4.gz
OLD_FILES+=usr/share/man/man4/usb_template.4.gz
OLD_FILES+=usr/share/man/man4/usfs.4.gz
OLD_FILES+=usr/share/man/man4/uslcom.4.gz
OLD_FILES+=usr/share/man/man4/uvisor.4.gz
OLD_FILES+=usr/share/man/man4/uvscom.4.gz
OLD_FILES+=usr/share/man/man4/zyd.4.gz
OLD_FILES+=usr/share/man/man8/uathload.8.gz
OLD_FILES+=usr/share/man/man8/usbconfig.8.gz
OLD_FILES+=usr/share/man/man8/usbdump.8.gz
OLD_FILES+=usr/share/man/man9/usb_fifo_alloc_buffer.9.gz
OLD_FILES+=usr/share/man/man9/usb_fifo_attach.9.gz
OLD_FILES+=usr/share/man/man9/usb_fifo_detach.9.gz
OLD_FILES+=usr/share/man/man9/usb_fifo_free_buffer.9.gz
OLD_FILES+=usr/share/man/man9/usb_fifo_get_data.9.gz
OLD_FILES+=usr/share/man/man9/usb_fifo_get_data_buffer.9.gz
OLD_FILES+=usr/share/man/man9/usb_fifo_get_data_error.9.gz
OLD_FILES+=usr/share/man/man9/usb_fifo_get_data_linear.9.gz
OLD_FILES+=usr/share/man/man9/usb_fifo_put_bytes_max.9.gz
OLD_FILES+=usr/share/man/man9/usb_fifo_put_data.9.gz
OLD_FILES+=usr/share/man/man9/usb_fifo_put_data_buffer.9.gz
OLD_FILES+=usr/share/man/man9/usb_fifo_put_data_error.9.gz
OLD_FILES+=usr/share/man/man9/usb_fifo_put_data_linear.9.gz
OLD_FILES+=usr/share/man/man9/usb_fifo_reset.9.gz
OLD_FILES+=usr/share/man/man9/usb_fifo_softc.9.gz
OLD_FILES+=usr/share/man/man9/usb_fifo_wakeup.9.gz
OLD_FILES+=usr/share/man/man9/usbd_do_request.9.gz
OLD_FILES+=usr/share/man/man9/usbd_do_request_flags.9.gz
OLD_FILES+=usr/share/man/man9/usbd_errstr.9.gz
OLD_FILES+=usr/share/man/man9/usbd_lookup_id_by_info.9.gz
OLD_FILES+=usr/share/man/man9/usbd_lookup_id_by_uaa.9.gz
OLD_FILES+=usr/share/man/man9/usbd_transfer_clear_stall.9.gz
OLD_FILES+=usr/share/man/man9/usbd_transfer_drain.9.gz
OLD_FILES+=usr/share/man/man9/usbd_transfer_pending.9.gz
OLD_FILES+=usr/share/man/man9/usbd_transfer_poll.9.gz
OLD_FILES+=usr/share/man/man9/usbd_transfer_setup.9.gz
OLD_FILES+=usr/share/man/man9/usbd_transfer_start.9.gz
OLD_FILES+=usr/share/man/man9/usbd_transfer_stop.9.gz
OLD_FILES+=usr/share/man/man9/usbd_transfer_submit.9.gz
OLD_FILES+=usr/share/man/man9/usbd_transfer_unsetup.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_clr_flag.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_frame_data.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_frame_len.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_get_frame.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_get_priv.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_is_stalled.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_max_framelen.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_max_frames.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_max_len.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_set_flag.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_set_frame_data.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_set_frame_len.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_set_frame_offset.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_set_frames.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_set_interval.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_set_priv.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_set_stall.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_set_timeout.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_softc.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_state.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_status.9.gz
OLD_FILES+=usr/share/man/man9/usbdi.9.gz
OLD_FILES+=usr/share/misc/usb_hid_usages
OLD_FILES+=usr/share/misc/usbdevs
.endif
.if ${MK_UTMPX} == no
OLD_FILES+=etc/periodic/monthly/200.accounting
OLD_FILES+=usr/bin/last
OLD_FILES+=usr/bin/users
OLD_FILES+=usr/bin/who
OLD_FILES+=usr/sbin/ac
OLD_FILES+=usr/sbin/lastlogin
OLD_FILES+=usr/sbin/utx
OLD_FILES+=usr/share/man/man1/last.1.gz
OLD_FILES+=usr/share/man/man1/users.1.gz
OLD_FILES+=usr/share/man/man1/who.1.gz
OLD_FILES+=usr/share/man/man8/ac.8.gz
OLD_FILES+=usr/share/man/man8/lastlogin.8.gz
OLD_FILES+=usr/share/man/man8/utx.8.gz
.endif
.if ${MK_VI} == no
OLD_FILES+=etc/rc.d/virecover
OLD_FILES+=rescue/ex
OLD_FILES+=rescue/vi
OLD_FILES+=usr/bin/ex
OLD_FILES+=usr/bin/nex
OLD_FILES+=usr/bin/nvi
OLD_FILES+=usr/bin/nview
OLD_FILES+=usr/bin/vi
OLD_FILES+=usr/bin/view
OLD_FILES+=usr/share/man/man1/ex.1.gz
OLD_FILES+=usr/share/man/man1/nex.1.gz
OLD_FILES+=usr/share/man/man1/nvi.1.gz
OLD_FILES+=usr/share/man/man1/nview.1.gz
OLD_FILES+=usr/share/man/man1/vi.1.gz
OLD_FILES+=usr/share/man/man1/view.1.gz
. if exists(${DESTDIR}/usr/share/vi)
VI_DIRS!=find ${DESTDIR}/usr/share/vi -type d \
    | sed -e 's,^${DESTDIR}/,,'; echo
VI_FILES!=find ${DESTDIR}/usr/share/vi \! -type d \
    | sed -e 's,^${DESTDIR}/,,'; echo
OLD_DIRS+=${VI_DIRS}
OLD_FILES+=${VI_FILES}
. endif
.endif
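# Note: the MK_VI block above shows the pattern this file uses when a tree's
# contents cannot be listed statically: a "!=" assignment runs a shell
# pipeline at makefile-parse time and feeds the result into OLD_DIRS and
# OLD_FILES. A minimal commented-out sketch of the same pattern follows;
# usr/share/example is a hypothetical path used purely for illustration.
# Stripping ${DESTDIR} keeps the entries relative, matching the rest of this
# file, and the trailing "echo" makes the pipeline exit zero (and produce
# output) even when find matches nothing.
#
# . if exists(${DESTDIR}/usr/share/example)
# EXAMPLE_DIRS!=find ${DESTDIR}/usr/share/example -type d \
#     | sed -e 's,^${DESTDIR}/,,'; echo
# EXAMPLE_FILES!=find ${DESTDIR}/usr/share/example \! -type d \
#     | sed -e 's,^${DESTDIR}/,,'; echo
# OLD_DIRS+=${EXAMPLE_DIRS}
# OLD_FILES+=${EXAMPLE_FILES}
# . endif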
.if ${MK_WIRELESS} == no
OLD_FILES+=etc/regdomain.xml
OLD_FILES+=etc/rc.d/hostapd
OLD_FILES+=etc/rc.d/wpa_supplicant
OLD_FILES+=usr/sbin/ancontrol
OLD_FILES+=usr/sbin/hostapd
OLD_FILES+=usr/sbin/hostapd_cli
OLD_FILES+=usr/sbin/ndis_events
OLD_FILES+=usr/sbin/wlandebug
OLD_FILES+=usr/sbin/wpa_cli
OLD_FILES+=usr/sbin/wpa_passphrase
OLD_FILES+=usr/sbin/wpa_supplicant
OLD_FILES+=usr/share/examples/etc/regdomain.xml
OLD_FILES+=usr/share/examples/etc/wpa_supplicant.conf
OLD_FILES+=usr/share/examples/hostapd/hostapd.conf
OLD_FILES+=usr/share/examples/hostapd/hostapd.eap_user
OLD_FILES+=usr/share/examples/hostapd/hostapd.wpa_psk
OLD_DIRS+=usr/share/examples/hostapd
OLD_FILES+=usr/share/man/man5/hostapd.conf.5.gz
OLD_FILES+=usr/share/man/man5/wpa_supplicant.conf.5.gz
OLD_FILES+=usr/share/man/man8/ancontrol.8.gz
OLD_FILES+=usr/share/man/man8/hostapd.8.gz
OLD_FILES+=usr/share/man/man8/hostapd_cli.8.gz
OLD_FILES+=usr/share/man/man8/ndis_events.8.gz
OLD_FILES+=usr/share/man/man8/wlandebug.8.gz
OLD_FILES+=usr/share/man/man8/wpa_cli.8.gz
OLD_FILES+=usr/share/man/man8/wpa_passphrase.8.gz
OLD_FILES+=usr/share/man/man8/wpa_supplicant.8.gz
# bsnmp module
OLD_FILES+=usr/lib/snmp_wlan.so
OLD_LIBS+=usr/lib/snmp_wlan.so.6
OLD_FILES+=usr/share/man/man3/snmp_wlan.3.gz
OLD_FILES+=usr/share/snmp/defs/wlan_tree.def
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-WIRELESS-MIB.txt
.endif
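# svnlite is also obsolete when the full svn build is enabled, since WITH_SVN
# installs the subversion utilities under their svn names rather than the
# svnlite ones.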
.if ${MK_SVNLITE} == no || ${MK_SVN} == yes
OLD_FILES+=usr/bin/svnlite
OLD_FILES+=usr/bin/svnliteadmin
OLD_FILES+=usr/bin/svnlitebench
OLD_FILES+=usr/bin/svnlitedumpfilter
OLD_FILES+=usr/bin/svnlitefsfs
OLD_FILES+=usr/bin/svnlitelook
OLD_FILES+=usr/bin/svnlitemucc
OLD_FILES+=usr/bin/svnliterdump
OLD_FILES+=usr/bin/svnliteserve
OLD_FILES+=usr/bin/svnlitesync
OLD_FILES+=usr/bin/svnliteversion
OLD_FILES+=usr/share/man/man1/svnlite.1.gz
.endif
.if ${MK_SVN} == no
OLD_FILES+=usr/bin/svn
OLD_FILES+=usr/bin/svnadmin
OLD_FILES+=usr/bin/svnbench
OLD_FILES+=usr/bin/svndumpfilter
OLD_FILES+=usr/bin/svnfsfs
OLD_FILES+=usr/bin/svnlook
OLD_FILES+=usr/bin/svnmucc
OLD_FILES+=usr/bin/svnrdump
OLD_FILES+=usr/bin/svnserve
OLD_FILES+=usr/bin/svnsync
OLD_FILES+=usr/bin/svnversion
.endif
.if ${MK_HYPERV} == no
OLD_FILES+=etc/devd/hyperv.conf
OLD_FILES+=usr/libexec/hyperv/hv_set_ifconfig
OLD_FILES+=usr/libexec/hyperv/hv_get_dns_info
OLD_FILES+=usr/libexec/hyperv/hv_get_dhcp_info
OLD_FILES+=usr/sbin/hv_kvp_daemon
OLD_FILES+=usr/sbin/hv_vss_daemon
OLD_FILES+=usr/share/man/man8/hv_kvp_daemon.8.gz
.endif
.if ${MK_ZONEINFO} == no
OLD_FILES+=usr/share/zoneinfo/Africa/Abidjan
OLD_FILES+=usr/share/zoneinfo/Africa/Accra
OLD_FILES+=usr/share/zoneinfo/Africa/Addis_Ababa
OLD_FILES+=usr/share/zoneinfo/Africa/Algiers
OLD_FILES+=usr/share/zoneinfo/Africa/Asmara
OLD_FILES+=usr/share/zoneinfo/Africa/Bamako
OLD_FILES+=usr/share/zoneinfo/Africa/Bangui
OLD_FILES+=usr/share/zoneinfo/Africa/Banjul
OLD_FILES+=usr/share/zoneinfo/Africa/Bissau
OLD_FILES+=usr/share/zoneinfo/Africa/Blantyre
OLD_FILES+=usr/share/zoneinfo/Africa/Brazzaville
OLD_FILES+=usr/share/zoneinfo/Africa/Bujumbura
OLD_FILES+=usr/share/zoneinfo/Africa/Cairo
OLD_FILES+=usr/share/zoneinfo/Africa/Casablanca
OLD_FILES+=usr/share/zoneinfo/Africa/Ceuta
OLD_FILES+=usr/share/zoneinfo/Africa/Conakry
OLD_FILES+=usr/share/zoneinfo/Africa/Dakar
OLD_FILES+=usr/share/zoneinfo/Africa/Dar_es_Salaam
OLD_FILES+=usr/share/zoneinfo/Africa/Djibouti
OLD_FILES+=usr/share/zoneinfo/Africa/Douala
OLD_FILES+=usr/share/zoneinfo/Africa/El_Aaiun
OLD_FILES+=usr/share/zoneinfo/Africa/Freetown
OLD_FILES+=usr/share/zoneinfo/Africa/Gaborone
OLD_FILES+=usr/share/zoneinfo/Africa/Harare
OLD_FILES+=usr/share/zoneinfo/Africa/Johannesburg
OLD_FILES+=usr/share/zoneinfo/Africa/Juba
OLD_FILES+=usr/share/zoneinfo/Africa/Kampala
OLD_FILES+=usr/share/zoneinfo/Africa/Khartoum
OLD_FILES+=usr/share/zoneinfo/Africa/Kigali
OLD_FILES+=usr/share/zoneinfo/Africa/Kinshasa
OLD_FILES+=usr/share/zoneinfo/Africa/Lagos
OLD_FILES+=usr/share/zoneinfo/Africa/Libreville
OLD_FILES+=usr/share/zoneinfo/Africa/Lome
OLD_FILES+=usr/share/zoneinfo/Africa/Luanda
OLD_FILES+=usr/share/zoneinfo/Africa/Lubumbashi
OLD_FILES+=usr/share/zoneinfo/Africa/Lusaka
OLD_FILES+=usr/share/zoneinfo/Africa/Malabo
OLD_FILES+=usr/share/zoneinfo/Africa/Maputo
OLD_FILES+=usr/share/zoneinfo/Africa/Maseru
OLD_FILES+=usr/share/zoneinfo/Africa/Mbabane
OLD_FILES+=usr/share/zoneinfo/Africa/Mogadishu
OLD_FILES+=usr/share/zoneinfo/Africa/Monrovia
OLD_FILES+=usr/share/zoneinfo/Africa/Nairobi
OLD_FILES+=usr/share/zoneinfo/Africa/Ndjamena
OLD_FILES+=usr/share/zoneinfo/Africa/Niamey
OLD_FILES+=usr/share/zoneinfo/Africa/Nouakchott
OLD_FILES+=usr/share/zoneinfo/Africa/Ouagadougou
OLD_FILES+=usr/share/zoneinfo/Africa/Porto-Novo
OLD_FILES+=usr/share/zoneinfo/Africa/Sao_Tome
OLD_FILES+=usr/share/zoneinfo/Africa/Tripoli
OLD_FILES+=usr/share/zoneinfo/Africa/Tunis
OLD_FILES+=usr/share/zoneinfo/Africa/Windhoek
OLD_FILES+=usr/share/zoneinfo/America/Adak
OLD_FILES+=usr/share/zoneinfo/America/Anchorage
OLD_FILES+=usr/share/zoneinfo/America/Anguilla
OLD_FILES+=usr/share/zoneinfo/America/Antigua
OLD_FILES+=usr/share/zoneinfo/America/Araguaina
OLD_FILES+=usr/share/zoneinfo/America/Argentina/Buenos_Aires
OLD_FILES+=usr/share/zoneinfo/America/Argentina/Catamarca
OLD_FILES+=usr/share/zoneinfo/America/Argentina/Cordoba
OLD_FILES+=usr/share/zoneinfo/America/Argentina/Jujuy
OLD_FILES+=usr/share/zoneinfo/America/Argentina/La_Rioja
OLD_FILES+=usr/share/zoneinfo/America/Argentina/Mendoza
OLD_FILES+=usr/share/zoneinfo/America/Argentina/Rio_Gallegos
OLD_FILES+=usr/share/zoneinfo/America/Argentina/Salta
OLD_FILES+=usr/share/zoneinfo/America/Argentina/San_Juan
OLD_FILES+=usr/share/zoneinfo/America/Argentina/San_Luis
OLD_FILES+=usr/share/zoneinfo/America/Argentina/Tucuman
OLD_FILES+=usr/share/zoneinfo/America/Argentina/Ushuaia
OLD_FILES+=usr/share/zoneinfo/America/Aruba
OLD_FILES+=usr/share/zoneinfo/America/Asuncion
OLD_FILES+=usr/share/zoneinfo/America/Atikokan
OLD_FILES+=usr/share/zoneinfo/America/Bahia
OLD_FILES+=usr/share/zoneinfo/America/Bahia_Banderas
OLD_FILES+=usr/share/zoneinfo/America/Barbados
OLD_FILES+=usr/share/zoneinfo/America/Belem
OLD_FILES+=usr/share/zoneinfo/America/Belize
OLD_FILES+=usr/share/zoneinfo/America/Blanc-Sablon
OLD_FILES+=usr/share/zoneinfo/America/Boa_Vista
OLD_FILES+=usr/share/zoneinfo/America/Bogota
OLD_FILES+=usr/share/zoneinfo/America/Boise
OLD_FILES+=usr/share/zoneinfo/America/Cambridge_Bay
OLD_FILES+=usr/share/zoneinfo/America/Campo_Grande
OLD_FILES+=usr/share/zoneinfo/America/Cancun
OLD_FILES+=usr/share/zoneinfo/America/Caracas
OLD_FILES+=usr/share/zoneinfo/America/Cayenne
OLD_FILES+=usr/share/zoneinfo/America/Cayman
OLD_FILES+=usr/share/zoneinfo/America/Chicago
OLD_FILES+=usr/share/zoneinfo/America/Chihuahua
OLD_FILES+=usr/share/zoneinfo/America/Costa_Rica
OLD_FILES+=usr/share/zoneinfo/America/Creston
OLD_FILES+=usr/share/zoneinfo/America/Cuiaba
OLD_FILES+=usr/share/zoneinfo/America/Curacao
OLD_FILES+=usr/share/zoneinfo/America/Danmarkshavn
OLD_FILES+=usr/share/zoneinfo/America/Dawson
OLD_FILES+=usr/share/zoneinfo/America/Dawson_Creek
OLD_FILES+=usr/share/zoneinfo/America/Denver
OLD_FILES+=usr/share/zoneinfo/America/Detroit
OLD_FILES+=usr/share/zoneinfo/America/Dominica
OLD_FILES+=usr/share/zoneinfo/America/Edmonton
OLD_FILES+=usr/share/zoneinfo/America/Eirunepe
OLD_FILES+=usr/share/zoneinfo/America/El_Salvador
OLD_FILES+=usr/share/zoneinfo/America/Fortaleza
OLD_FILES+=usr/share/zoneinfo/America/Glace_Bay
OLD_FILES+=usr/share/zoneinfo/America/Godthab
OLD_FILES+=usr/share/zoneinfo/America/Goose_Bay
OLD_FILES+=usr/share/zoneinfo/America/Grand_Turk
OLD_FILES+=usr/share/zoneinfo/America/Grenada
OLD_FILES+=usr/share/zoneinfo/America/Guadeloupe
OLD_FILES+=usr/share/zoneinfo/America/Guatemala
OLD_FILES+=usr/share/zoneinfo/America/Guayaquil
OLD_FILES+=usr/share/zoneinfo/America/Guyana
OLD_FILES+=usr/share/zoneinfo/America/Halifax
OLD_FILES+=usr/share/zoneinfo/America/Havana
OLD_FILES+=usr/share/zoneinfo/America/Hermosillo
OLD_FILES+=usr/share/zoneinfo/America/Indiana/Indianapolis
OLD_FILES+=usr/share/zoneinfo/America/Indiana/Knox
OLD_FILES+=usr/share/zoneinfo/America/Indiana/Marengo
OLD_FILES+=usr/share/zoneinfo/America/Indiana/Petersburg
OLD_FILES+=usr/share/zoneinfo/America/Indiana/Tell_City
OLD_FILES+=usr/share/zoneinfo/America/Indiana/Vevay
OLD_FILES+=usr/share/zoneinfo/America/Indiana/Vincennes
OLD_FILES+=usr/share/zoneinfo/America/Indiana/Winamac
OLD_FILES+=usr/share/zoneinfo/America/Inuvik
OLD_FILES+=usr/share/zoneinfo/America/Iqaluit
OLD_FILES+=usr/share/zoneinfo/America/Jamaica
OLD_FILES+=usr/share/zoneinfo/America/Juneau
OLD_FILES+=usr/share/zoneinfo/America/Kentucky/Louisville
OLD_FILES+=usr/share/zoneinfo/America/Kentucky/Monticello
OLD_FILES+=usr/share/zoneinfo/America/Kralendijk
OLD_FILES+=usr/share/zoneinfo/America/La_Paz
OLD_FILES+=usr/share/zoneinfo/America/Lima
OLD_FILES+=usr/share/zoneinfo/America/Los_Angeles
OLD_FILES+=usr/share/zoneinfo/America/Lower_Princes
OLD_FILES+=usr/share/zoneinfo/America/Maceio
OLD_FILES+=usr/share/zoneinfo/America/Managua
OLD_FILES+=usr/share/zoneinfo/America/Manaus
OLD_FILES+=usr/share/zoneinfo/America/Marigot
OLD_FILES+=usr/share/zoneinfo/America/Martinique
OLD_FILES+=usr/share/zoneinfo/America/Matamoros
OLD_FILES+=usr/share/zoneinfo/America/Mazatlan
OLD_FILES+=usr/share/zoneinfo/America/Menominee
OLD_FILES+=usr/share/zoneinfo/America/Merida
OLD_FILES+=usr/share/zoneinfo/America/Metlakatla
OLD_FILES+=usr/share/zoneinfo/America/Mexico_City
OLD_FILES+=usr/share/zoneinfo/America/Miquelon
OLD_FILES+=usr/share/zoneinfo/America/Moncton
OLD_FILES+=usr/share/zoneinfo/America/Monterrey
OLD_FILES+=usr/share/zoneinfo/America/Montevideo
OLD_FILES+=usr/share/zoneinfo/America/Montreal
OLD_FILES+=usr/share/zoneinfo/America/Montserrat
OLD_FILES+=usr/share/zoneinfo/America/Nassau
OLD_FILES+=usr/share/zoneinfo/America/New_York
OLD_FILES+=usr/share/zoneinfo/America/Nipigon
OLD_FILES+=usr/share/zoneinfo/America/Nome
OLD_FILES+=usr/share/zoneinfo/America/Noronha
OLD_FILES+=usr/share/zoneinfo/America/North_Dakota/Beulah
OLD_FILES+=usr/share/zoneinfo/America/North_Dakota/Center
OLD_FILES+=usr/share/zoneinfo/America/North_Dakota/New_Salem
OLD_FILES+=usr/share/zoneinfo/America/Ojinaga
OLD_FILES+=usr/share/zoneinfo/America/Panama
OLD_FILES+=usr/share/zoneinfo/America/Pangnirtung
OLD_FILES+=usr/share/zoneinfo/America/Paramaribo
OLD_FILES+=usr/share/zoneinfo/America/Phoenix
OLD_FILES+=usr/share/zoneinfo/America/Port-au-Prince
OLD_FILES+=usr/share/zoneinfo/America/Port_of_Spain
OLD_FILES+=usr/share/zoneinfo/America/Porto_Velho
OLD_FILES+=usr/share/zoneinfo/America/Puerto_Rico
OLD_FILES+=usr/share/zoneinfo/America/Rainy_River
OLD_FILES+=usr/share/zoneinfo/America/Rankin_Inlet
OLD_FILES+=usr/share/zoneinfo/America/Recife
OLD_FILES+=usr/share/zoneinfo/America/Regina
OLD_FILES+=usr/share/zoneinfo/America/Resolute
OLD_FILES+=usr/share/zoneinfo/America/Rio_Branco
OLD_FILES+=usr/share/zoneinfo/America/Santa_Isabel
OLD_FILES+=usr/share/zoneinfo/America/Santarem
OLD_FILES+=usr/share/zoneinfo/America/Santiago
OLD_FILES+=usr/share/zoneinfo/America/Santo_Domingo
OLD_FILES+=usr/share/zoneinfo/America/Sao_Paulo
OLD_FILES+=usr/share/zoneinfo/America/Scoresbysund
OLD_FILES+=usr/share/zoneinfo/America/Sitka
OLD_FILES+=usr/share/zoneinfo/America/St_Barthelemy
OLD_FILES+=usr/share/zoneinfo/America/St_Johns
OLD_FILES+=usr/share/zoneinfo/America/St_Kitts
OLD_FILES+=usr/share/zoneinfo/America/St_Lucia
OLD_FILES+=usr/share/zoneinfo/America/St_Thomas
OLD_FILES+=usr/share/zoneinfo/America/St_Vincent
OLD_FILES+=usr/share/zoneinfo/America/Swift_Current
OLD_FILES+=usr/share/zoneinfo/America/Tegucigalpa
OLD_FILES+=usr/share/zoneinfo/America/Thule
OLD_FILES+=usr/share/zoneinfo/America/Thunder_Bay
OLD_FILES+=usr/share/zoneinfo/America/Tijuana
OLD_FILES+=usr/share/zoneinfo/America/Toronto
OLD_FILES+=usr/share/zoneinfo/America/Tortola
OLD_FILES+=usr/share/zoneinfo/America/Vancouver
OLD_FILES+=usr/share/zoneinfo/America/Whitehorse
OLD_FILES+=usr/share/zoneinfo/America/Winnipeg
OLD_FILES+=usr/share/zoneinfo/America/Yakutat
OLD_FILES+=usr/share/zoneinfo/America/Yellowknife
OLD_FILES+=usr/share/zoneinfo/Antarctica/Casey
OLD_FILES+=usr/share/zoneinfo/Antarctica/Davis
OLD_FILES+=usr/share/zoneinfo/Antarctica/DumontDUrville
OLD_FILES+=usr/share/zoneinfo/Antarctica/Macquarie
OLD_FILES+=usr/share/zoneinfo/Antarctica/Mawson
OLD_FILES+=usr/share/zoneinfo/Antarctica/McMurdo
OLD_FILES+=usr/share/zoneinfo/Antarctica/Palmer
OLD_FILES+=usr/share/zoneinfo/Antarctica/Rothera
OLD_FILES+=usr/share/zoneinfo/Antarctica/Syowa
OLD_FILES+=usr/share/zoneinfo/Antarctica/Troll
OLD_FILES+=usr/share/zoneinfo/Antarctica/Vostok
OLD_FILES+=usr/share/zoneinfo/Arctic/Longyearbyen
OLD_FILES+=usr/share/zoneinfo/Asia/Aden
OLD_FILES+=usr/share/zoneinfo/Asia/Almaty
OLD_FILES+=usr/share/zoneinfo/Asia/Amman
OLD_FILES+=usr/share/zoneinfo/Asia/Anadyr
OLD_FILES+=usr/share/zoneinfo/Asia/Aqtau
OLD_FILES+=usr/share/zoneinfo/Asia/Aqtobe
OLD_FILES+=usr/share/zoneinfo/Asia/Ashgabat
OLD_FILES+=usr/share/zoneinfo/Asia/Baghdad
OLD_FILES+=usr/share/zoneinfo/Asia/Bahrain
OLD_FILES+=usr/share/zoneinfo/Asia/Baku
OLD_FILES+=usr/share/zoneinfo/Asia/Bangkok
OLD_FILES+=usr/share/zoneinfo/Asia/Beirut
OLD_FILES+=usr/share/zoneinfo/Asia/Bishkek
OLD_FILES+=usr/share/zoneinfo/Asia/Brunei
OLD_FILES+=usr/share/zoneinfo/Asia/Chita
OLD_FILES+=usr/share/zoneinfo/Asia/Choibalsan
OLD_FILES+=usr/share/zoneinfo/Asia/Colombo
OLD_FILES+=usr/share/zoneinfo/Asia/Damascus
OLD_FILES+=usr/share/zoneinfo/Asia/Dhaka
OLD_FILES+=usr/share/zoneinfo/Asia/Dili
OLD_FILES+=usr/share/zoneinfo/Asia/Dubai
OLD_FILES+=usr/share/zoneinfo/Asia/Dushanbe
OLD_FILES+=usr/share/zoneinfo/Asia/Gaza
OLD_FILES+=usr/share/zoneinfo/Asia/Hebron
OLD_FILES+=usr/share/zoneinfo/Asia/Ho_Chi_Minh
OLD_FILES+=usr/share/zoneinfo/Asia/Hong_Kong
OLD_FILES+=usr/share/zoneinfo/Asia/Hovd
OLD_FILES+=usr/share/zoneinfo/Asia/Irkutsk
OLD_FILES+=usr/share/zoneinfo/Asia/Istanbul
OLD_FILES+=usr/share/zoneinfo/Asia/Jakarta
OLD_FILES+=usr/share/zoneinfo/Asia/Jayapura
OLD_FILES+=usr/share/zoneinfo/Asia/Jerusalem
OLD_FILES+=usr/share/zoneinfo/Asia/Kabul
OLD_FILES+=usr/share/zoneinfo/Asia/Kamchatka
OLD_FILES+=usr/share/zoneinfo/Asia/Karachi
OLD_FILES+=usr/share/zoneinfo/Asia/Kathmandu
OLD_FILES+=usr/share/zoneinfo/Asia/Khandyga
OLD_FILES+=usr/share/zoneinfo/Asia/Kolkata
OLD_FILES+=usr/share/zoneinfo/Asia/Krasnoyarsk
OLD_FILES+=usr/share/zoneinfo/Asia/Kuala_Lumpur
OLD_FILES+=usr/share/zoneinfo/Asia/Kuching
OLD_FILES+=usr/share/zoneinfo/Asia/Kuwait
OLD_FILES+=usr/share/zoneinfo/Asia/Macau
OLD_FILES+=usr/share/zoneinfo/Asia/Magadan
OLD_FILES+=usr/share/zoneinfo/Asia/Makassar
OLD_FILES+=usr/share/zoneinfo/Asia/Manila
OLD_FILES+=usr/share/zoneinfo/Asia/Muscat
OLD_FILES+=usr/share/zoneinfo/Asia/Nicosia
OLD_FILES+=usr/share/zoneinfo/Asia/Novokuznetsk
OLD_FILES+=usr/share/zoneinfo/Asia/Novosibirsk
OLD_FILES+=usr/share/zoneinfo/Asia/Omsk
OLD_FILES+=usr/share/zoneinfo/Asia/Oral
OLD_FILES+=usr/share/zoneinfo/Asia/Phnom_Penh
OLD_FILES+=usr/share/zoneinfo/Asia/Pontianak
OLD_FILES+=usr/share/zoneinfo/Asia/Pyongyang
OLD_FILES+=usr/share/zoneinfo/Asia/Qatar
OLD_FILES+=usr/share/zoneinfo/Asia/Qyzylorda
OLD_FILES+=usr/share/zoneinfo/Asia/Rangoon
OLD_FILES+=usr/share/zoneinfo/Asia/Riyadh
OLD_FILES+=usr/share/zoneinfo/Asia/Sakhalin
OLD_FILES+=usr/share/zoneinfo/Asia/Samarkand
OLD_FILES+=usr/share/zoneinfo/Asia/Seoul
OLD_FILES+=usr/share/zoneinfo/Asia/Shanghai
OLD_FILES+=usr/share/zoneinfo/Asia/Singapore
OLD_FILES+=usr/share/zoneinfo/Asia/Srednekolymsk
OLD_FILES+=usr/share/zoneinfo/Asia/Taipei
OLD_FILES+=usr/share/zoneinfo/Asia/Tashkent
OLD_FILES+=usr/share/zoneinfo/Asia/Tbilisi
OLD_FILES+=usr/share/zoneinfo/Asia/Tehran
OLD_FILES+=usr/share/zoneinfo/Asia/Thimphu
OLD_FILES+=usr/share/zoneinfo/Asia/Tokyo
OLD_FILES+=usr/share/zoneinfo/Asia/Ulaanbaatar
OLD_FILES+=usr/share/zoneinfo/Asia/Urumqi
OLD_FILES+=usr/share/zoneinfo/Asia/Ust-Nera
OLD_FILES+=usr/share/zoneinfo/Asia/Vientiane
OLD_FILES+=usr/share/zoneinfo/Asia/Vladivostok
OLD_FILES+=usr/share/zoneinfo/Asia/Yakutsk
OLD_FILES+=usr/share/zoneinfo/Asia/Yekaterinburg
OLD_FILES+=usr/share/zoneinfo/Asia/Yerevan
OLD_FILES+=usr/share/zoneinfo/Atlantic/Azores
OLD_FILES+=usr/share/zoneinfo/Atlantic/Bermuda
OLD_FILES+=usr/share/zoneinfo/Atlantic/Canary
OLD_FILES+=usr/share/zoneinfo/Atlantic/Cape_Verde
OLD_FILES+=usr/share/zoneinfo/Atlantic/Faroe
OLD_FILES+=usr/share/zoneinfo/Atlantic/Madeira
OLD_FILES+=usr/share/zoneinfo/Atlantic/Reykjavik
OLD_FILES+=usr/share/zoneinfo/Atlantic/South_Georgia
OLD_FILES+=usr/share/zoneinfo/Atlantic/St_Helena
OLD_FILES+=usr/share/zoneinfo/Atlantic/Stanley
OLD_FILES+=usr/share/zoneinfo/Australia/Adelaide
OLD_FILES+=usr/share/zoneinfo/Australia/Brisbane
OLD_FILES+=usr/share/zoneinfo/Australia/Broken_Hill
OLD_FILES+=usr/share/zoneinfo/Australia/Currie
OLD_FILES+=usr/share/zoneinfo/Australia/Darwin
OLD_FILES+=usr/share/zoneinfo/Australia/Eucla
OLD_FILES+=usr/share/zoneinfo/Australia/Hobart
OLD_FILES+=usr/share/zoneinfo/Australia/Lindeman
OLD_FILES+=usr/share/zoneinfo/Australia/Lord_Howe
OLD_FILES+=usr/share/zoneinfo/Australia/Melbourne
OLD_FILES+=usr/share/zoneinfo/Australia/Perth
OLD_FILES+=usr/share/zoneinfo/Australia/Sydney
OLD_FILES+=usr/share/zoneinfo/CET
OLD_FILES+=usr/share/zoneinfo/CST6CDT
OLD_FILES+=usr/share/zoneinfo/EET
OLD_FILES+=usr/share/zoneinfo/EST
OLD_FILES+=usr/share/zoneinfo/EST5EDT
OLD_FILES+=usr/share/zoneinfo/Etc/GMT
OLD_FILES+=usr/share/zoneinfo/Etc/GMT+0
OLD_FILES+=usr/share/zoneinfo/Etc/GMT+1
OLD_FILES+=usr/share/zoneinfo/Etc/GMT+10
OLD_FILES+=usr/share/zoneinfo/Etc/GMT+11
OLD_FILES+=usr/share/zoneinfo/Etc/GMT+12
OLD_FILES+=usr/share/zoneinfo/Etc/GMT+2
OLD_FILES+=usr/share/zoneinfo/Etc/GMT+3
OLD_FILES+=usr/share/zoneinfo/Etc/GMT+4
OLD_FILES+=usr/share/zoneinfo/Etc/GMT+5
OLD_FILES+=usr/share/zoneinfo/Etc/GMT+6
OLD_FILES+=usr/share/zoneinfo/Etc/GMT+7
OLD_FILES+=usr/share/zoneinfo/Etc/GMT+8
OLD_FILES+=usr/share/zoneinfo/Etc/GMT+9
OLD_FILES+=usr/share/zoneinfo/Etc/GMT-0
OLD_FILES+=usr/share/zoneinfo/Etc/GMT-1
OLD_FILES+=usr/share/zoneinfo/Etc/GMT-10
OLD_FILES+=usr/share/zoneinfo/Etc/GMT-11
OLD_FILES+=usr/share/zoneinfo/Etc/GMT-12
OLD_FILES+=usr/share/zoneinfo/Etc/GMT-13
OLD_FILES+=usr/share/zoneinfo/Etc/GMT-14
OLD_FILES+=usr/share/zoneinfo/Etc/GMT-2
OLD_FILES+=usr/share/zoneinfo/Etc/GMT-3
OLD_FILES+=usr/share/zoneinfo/Etc/GMT-4
OLD_FILES+=usr/share/zoneinfo/Etc/GMT-5
OLD_FILES+=usr/share/zoneinfo/Etc/GMT-6
OLD_FILES+=usr/share/zoneinfo/Etc/GMT-7
OLD_FILES+=usr/share/zoneinfo/Etc/GMT-8
OLD_FILES+=usr/share/zoneinfo/Etc/GMT-9
OLD_FILES+=usr/share/zoneinfo/Etc/GMT0
OLD_FILES+=usr/share/zoneinfo/Etc/Greenwich
OLD_FILES+=usr/share/zoneinfo/Etc/UCT
OLD_FILES+=usr/share/zoneinfo/Etc/UTC
OLD_FILES+=usr/share/zoneinfo/Etc/Universal
OLD_FILES+=usr/share/zoneinfo/Etc/Zulu
OLD_FILES+=usr/share/zoneinfo/Europe/Amsterdam
OLD_FILES+=usr/share/zoneinfo/Europe/Andorra
OLD_FILES+=usr/share/zoneinfo/Europe/Athens
OLD_FILES+=usr/share/zoneinfo/Europe/Belgrade
OLD_FILES+=usr/share/zoneinfo/Europe/Berlin
OLD_FILES+=usr/share/zoneinfo/Europe/Bratislava
OLD_FILES+=usr/share/zoneinfo/Europe/Brussels
OLD_FILES+=usr/share/zoneinfo/Europe/Bucharest
OLD_FILES+=usr/share/zoneinfo/Europe/Budapest
OLD_FILES+=usr/share/zoneinfo/Europe/Busingen
OLD_FILES+=usr/share/zoneinfo/Europe/Chisinau
OLD_FILES+=usr/share/zoneinfo/Europe/Copenhagen
OLD_FILES+=usr/share/zoneinfo/Europe/Dublin
OLD_FILES+=usr/share/zoneinfo/Europe/Gibraltar
OLD_FILES+=usr/share/zoneinfo/Europe/Guernsey
OLD_FILES+=usr/share/zoneinfo/Europe/Helsinki
OLD_FILES+=usr/share/zoneinfo/Europe/Isle_of_Man
OLD_FILES+=usr/share/zoneinfo/Europe/Istanbul
OLD_FILES+=usr/share/zoneinfo/Europe/Jersey
OLD_FILES+=usr/share/zoneinfo/Europe/Kaliningrad
OLD_FILES+=usr/share/zoneinfo/Europe/Kiev
OLD_FILES+=usr/share/zoneinfo/Europe/Lisbon
OLD_FILES+=usr/share/zoneinfo/Europe/Ljubljana
OLD_FILES+=usr/share/zoneinfo/Europe/London
OLD_FILES+=usr/share/zoneinfo/Europe/Luxembourg
OLD_FILES+=usr/share/zoneinfo/Europe/Madrid
OLD_FILES+=usr/share/zoneinfo/Europe/Malta
OLD_FILES+=usr/share/zoneinfo/Europe/Mariehamn
OLD_FILES+=usr/share/zoneinfo/Europe/Minsk
OLD_FILES+=usr/share/zoneinfo/Europe/Monaco
OLD_FILES+=usr/share/zoneinfo/Europe/Moscow
OLD_FILES+=usr/share/zoneinfo/Europe/Nicosia
OLD_FILES+=usr/share/zoneinfo/Europe/Oslo
OLD_FILES+=usr/share/zoneinfo/Europe/Paris
OLD_FILES+=usr/share/zoneinfo/Europe/Podgorica
OLD_FILES+=usr/share/zoneinfo/Europe/Prague
OLD_FILES+=usr/share/zoneinfo/Europe/Riga
OLD_FILES+=usr/share/zoneinfo/Europe/Rome
OLD_FILES+=usr/share/zoneinfo/Europe/Samara
OLD_FILES+=usr/share/zoneinfo/Europe/San_Marino
OLD_FILES+=usr/share/zoneinfo/Europe/Sarajevo
OLD_FILES+=usr/share/zoneinfo/Europe/Simferopol
OLD_FILES+=usr/share/zoneinfo/Europe/Skopje
OLD_FILES+=usr/share/zoneinfo/Europe/Sofia
OLD_FILES+=usr/share/zoneinfo/Europe/Stockholm
OLD_FILES+=usr/share/zoneinfo/Europe/Tallinn
OLD_FILES+=usr/share/zoneinfo/Europe/Tirane
OLD_FILES+=usr/share/zoneinfo/Europe/Uzhgorod
OLD_FILES+=usr/share/zoneinfo/Europe/Vaduz
OLD_FILES+=usr/share/zoneinfo/Europe/Vatican
OLD_FILES+=usr/share/zoneinfo/Europe/Vienna
OLD_FILES+=usr/share/zoneinfo/Europe/Vilnius
OLD_FILES+=usr/share/zoneinfo/Europe/Volgograd
OLD_FILES+=usr/share/zoneinfo/Europe/Warsaw
OLD_FILES+=usr/share/zoneinfo/Europe/Zagreb
OLD_FILES+=usr/share/zoneinfo/Europe/Zaporozhye
OLD_FILES+=usr/share/zoneinfo/Europe/Zurich
OLD_FILES+=usr/share/zoneinfo/Factory
OLD_FILES+=usr/share/zoneinfo/HST
OLD_FILES+=usr/share/zoneinfo/Indian/Antananarivo
OLD_FILES+=usr/share/zoneinfo/Indian/Chagos
OLD_FILES+=usr/share/zoneinfo/Indian/Christmas
OLD_FILES+=usr/share/zoneinfo/Indian/Cocos
OLD_FILES+=usr/share/zoneinfo/Indian/Comoro
OLD_FILES+=usr/share/zoneinfo/Indian/Kerguelen
OLD_FILES+=usr/share/zoneinfo/Indian/Mahe
OLD_FILES+=usr/share/zoneinfo/Indian/Maldives
OLD_FILES+=usr/share/zoneinfo/Indian/Mauritius
OLD_FILES+=usr/share/zoneinfo/Indian/Mayotte
OLD_FILES+=usr/share/zoneinfo/Indian/Reunion
OLD_FILES+=usr/share/zoneinfo/MET
OLD_FILES+=usr/share/zoneinfo/MST
OLD_FILES+=usr/share/zoneinfo/MST7MDT
OLD_FILES+=usr/share/zoneinfo/PST8PDT
OLD_FILES+=usr/share/zoneinfo/Pacific/Apia
OLD_FILES+=usr/share/zoneinfo/Pacific/Auckland
OLD_FILES+=usr/share/zoneinfo/Pacific/Bougainville
OLD_FILES+=usr/share/zoneinfo/Pacific/Chatham
OLD_FILES+=usr/share/zoneinfo/Pacific/Chuuk
OLD_FILES+=usr/share/zoneinfo/Pacific/Easter
OLD_FILES+=usr/share/zoneinfo/Pacific/Efate
OLD_FILES+=usr/share/zoneinfo/Pacific/Enderbury
OLD_FILES+=usr/share/zoneinfo/Pacific/Fakaofo
OLD_FILES+=usr/share/zoneinfo/Pacific/Fiji
OLD_FILES+=usr/share/zoneinfo/Pacific/Funafuti
OLD_FILES+=usr/share/zoneinfo/Pacific/Galapagos
OLD_FILES+=usr/share/zoneinfo/Pacific/Gambier
OLD_FILES+=usr/share/zoneinfo/Pacific/Guadalcanal
OLD_FILES+=usr/share/zoneinfo/Pacific/Guam
OLD_FILES+=usr/share/zoneinfo/Pacific/Honolulu
OLD_FILES+=usr/share/zoneinfo/Pacific/Johnston
OLD_FILES+=usr/share/zoneinfo/Pacific/Kiritimati
OLD_FILES+=usr/share/zoneinfo/Pacific/Kosrae
OLD_FILES+=usr/share/zoneinfo/Pacific/Kwajalein
OLD_FILES+=usr/share/zoneinfo/Pacific/Majuro
OLD_FILES+=usr/share/zoneinfo/Pacific/Marquesas
OLD_FILES+=usr/share/zoneinfo/Pacific/Midway
OLD_FILES+=usr/share/zoneinfo/Pacific/Nauru
OLD_FILES+=usr/share/zoneinfo/Pacific/Niue
OLD_FILES+=usr/share/zoneinfo/Pacific/Norfolk
OLD_FILES+=usr/share/zoneinfo/Pacific/Noumea
OLD_FILES+=usr/share/zoneinfo/Pacific/Pago_Pago
OLD_FILES+=usr/share/zoneinfo/Pacific/Palau
OLD_FILES+=usr/share/zoneinfo/Pacific/Pitcairn
OLD_FILES+=usr/share/zoneinfo/Pacific/Pohnpei
OLD_FILES+=usr/share/zoneinfo/Pacific/Port_Moresby
OLD_FILES+=usr/share/zoneinfo/Pacific/Rarotonga
OLD_FILES+=usr/share/zoneinfo/Pacific/Saipan
OLD_FILES+=usr/share/zoneinfo/Pacific/Tahiti
OLD_FILES+=usr/share/zoneinfo/Pacific/Tarawa
OLD_FILES+=usr/share/zoneinfo/Pacific/Tongatapu
OLD_FILES+=usr/share/zoneinfo/Pacific/Wake
OLD_FILES+=usr/share/zoneinfo/Pacific/Wallis
OLD_FILES+=usr/share/zoneinfo/UTC
OLD_FILES+=usr/share/zoneinfo/WET
OLD_FILES+=usr/share/zoneinfo/posixrules
OLD_FILES+=usr/share/zoneinfo/zone.tab
.endif
Index: stable/12
===================================================================
--- stable/12 (revision 356464)
+++ stable/12 (revision 356465)
Property changes on: stable/12
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
Merged /head:r356004
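
The svn:mergeinfo change above records that r356004 from head has been merged
into stable/12; that merge is what produced the removals in this diff. As a
usage sketch (assuming a plain svn checkout of stable/12; these are standard
svn commands, not part of the change itself), the recorded mergeinfo can be
inspected with:

  svn propget svn:mergeinfo .
  svn mergeinfo --show-revs merged ^/head .

The first command prints the raw /head revision ranges, now including r356004;
the second lists the head revisions already merged into the checkout.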
